LCOV - code coverage report
Current view: top level - src/heap - spaces.h (source / functions)
Test: app.info                      Date: 2019-04-19
              Hit    Total    Coverage
Lines:        302      321      94.1 %
Functions:     68       92      73.9 %

          Line data    Source code
       1             : // Copyright 2011 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #ifndef V8_HEAP_SPACES_H_
       6             : #define V8_HEAP_SPACES_H_
       7             : 
       8             : #include <list>
       9             : #include <map>
      10             : #include <memory>
      11             : #include <unordered_map>
      12             : #include <unordered_set>
      13             : #include <vector>
      14             : 
      15             : #include "src/allocation.h"
      16             : #include "src/base/atomic-utils.h"
      17             : #include "src/base/bounded-page-allocator.h"
      18             : #include "src/base/export-template.h"
      19             : #include "src/base/iterator.h"
      20             : #include "src/base/list.h"
      21             : #include "src/base/platform/mutex.h"
      22             : #include "src/cancelable-task.h"
      23             : #include "src/flags.h"
      24             : #include "src/globals.h"
      25             : #include "src/heap/heap.h"
      26             : #include "src/heap/invalidated-slots.h"
      27             : #include "src/heap/marking.h"
      28             : #include "src/objects.h"
      29             : #include "src/objects/free-space.h"
      30             : #include "src/objects/heap-object.h"
      31             : #include "src/objects/map.h"
      32             : #include "src/utils.h"
      33             : 
      34             : namespace v8 {
      35             : namespace internal {
      36             : 
      37             : namespace heap {
      38             : class HeapTester;
      39             : class TestCodePageAllocatorScope;
      40             : }  // namespace heap
      41             : 
      42             : class AllocationObserver;
      43             : class CompactionSpace;
      44             : class CompactionSpaceCollection;
      45             : class FreeList;
      46             : class Isolate;
      47             : class LinearAllocationArea;
      48             : class LocalArrayBufferTracker;
      49             : class MemoryAllocator;
      50             : class MemoryChunk;
      51             : class MemoryChunkLayout;
      52             : class Page;
      53             : class PagedSpace;
      54             : class SemiSpace;
      55             : class SkipList;
      56             : class SlotsBuffer;
      57             : class SlotSet;
      58             : class TypedSlotSet;
      59             : class Space;
      60             : 
      61             : // -----------------------------------------------------------------------------
      62             : // Heap structures:
      63             : //
      64             : // A JS heap consists of a young generation, an old generation, and a large
      65             : // object space. The young generation is divided into two semispaces. A
      66             : // scavenger implements Cheney's copying algorithm. The old generation is
      67             : // separated into a map space and an old object space. The map space contains
      68             : // all (and only) map objects, the rest of old objects go into the old space.
      69             : // The old generation is collected by a mark-sweep-compact collector.
      70             : //
      71             : // The semispaces of the young generation are contiguous.  The old and map
      72             : // spaces each consist of a list of pages. A page has a page header and an
      73             : // object area.
      74             : //
      75             : // There is a separate large object space for objects larger than
      76             : // kMaxRegularHeapObjectSize, so that they do not have to move during
      77             : // collection. The large object space is paged. Pages in large object space
      78             : // may be larger than the page size.
      79             : //
      80             : // A store-buffer based write barrier is used to keep track of intergenerational
      81             : // references.  See heap/store-buffer.h.
      82             : //
      83             : // During scavenges and mark-sweep collections we sometimes (after a store
      84             : // buffer overflow) iterate intergenerational pointers without decoding heap
      85             : // object maps, so if the page belongs to old space or large object space
      86             : // it is essential to guarantee that the page does not contain any
      87             : // garbage pointers to new space: every pointer-aligned word which satisfies
      88             : // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
      89             : // new space. Thus objects in old space and large object spaces should have a
      90             : // special layout (e.g. no bare integer fields). This requirement does not
      91             : // apply to map space, which is iterated in a special fashion. However, we still
      92             : // require pointer fields of dead maps to be cleaned.
      93             : //
      94             : // To enable lazy cleaning of old space pages we can mark chunks of the page
      95             : // as being garbage.  Garbage sections are marked with a special map.  These
      96             : // sections are skipped when scanning the page, even if we are otherwise
      97             : // scanning without regard for object boundaries.  Garbage sections are chained
      98             : // together to form a free list after a GC.  Garbage sections created outside
      99             : // of GCs by object truncation etc. may not be in the free list chain.  Very
     100             : // small free spaces are ignored; they need only be cleaned of bogus pointers
     101             : // into new space.
     102             : //
     103             : // Each page may have up to one special garbage section.  The start of this
     104             : // section is denoted by the top field in the space.  The end of the section
     105             : // is denoted by the limit field in the space.  This special garbage section
     106             : // is not marked with a free space map in the data.  The point of this section
     107             : // is to enable linear allocation without having to constantly update the byte
     108             : // array every time the top field is updated and a new object is created.  The
     109             : // special garbage section is not in the chain of garbage sections.
     110             : //
     111             : // Since the top and limit fields are in the space, not the page, only one page
     112             : // has a special garbage section, and if the top and limit are equal then there
     113             : // is no special garbage section.
     114             : 
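                     : // A minimal sketch (not part of this header) of the linear allocation that
                     : // the top/limit scheme above enables; |allocation_info_| and its accessors
                     : // stand in for the space's actual top/limit bookkeeping:
                     : //
                     : //   Address AllocateLinearly(size_t size_in_bytes) {
                     : //     Address top = allocation_info_.top();
                     : //     if (top + size_in_bytes > allocation_info_.limit()) return kNullAddress;
                     : //     // Bump the top pointer; no free-space map has to be written.
                     : //     allocation_info_.set_top(top + size_in_bytes);
                     : //     return top;
                     : //   }
                     : 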
     115             : // Some assertion macros used in the debugging mode.
     116             : 
     117             : #define DCHECK_OBJECT_SIZE(size) \
     118             :   DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
     119             : 
     120             : #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
     121             :   DCHECK((0 < size) && (size <= code_space->AreaSize()))
     122             : 
     123             : enum FreeListCategoryType {
     124             :   kTiniest,
     125             :   kTiny,
     126             :   kSmall,
     127             :   kMedium,
     128             :   kLarge,
     129             :   kHuge,
     130             : 
     131             :   kFirstCategory = kTiniest,
     132             :   kLastCategory = kHuge,
     133             :   kNumberOfCategories = kLastCategory + 1,
     134             :   kInvalidCategory
     135             : };
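                     : 
                     : // Sketch of how a free block size maps to a category; the boundary
                     : // constants (kTiniestListMax etc.) belong to FreeList and are named here
                     : // only for illustration:
                     : //
                     : //   FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
                     : //     if (size_in_bytes <= kTiniestListMax) return kTiniest;
                     : //     if (size_in_bytes <= kTinyListMax) return kTiny;
                     : //     if (size_in_bytes <= kSmallListMax) return kSmall;
                     : //     if (size_in_bytes <= kMediumListMax) return kMedium;
                     : //     if (size_in_bytes <= kLargeListMax) return kLarge;
                     : //     return kHuge;
                     : //   }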
     136             : 
     137             : enum FreeMode { kLinkCategory, kDoNotLinkCategory };
     138             : 
     139             : enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
     140             : 
     141             : enum RememberedSetType {
     142             :   OLD_TO_NEW,
     143             :   OLD_TO_OLD,
     144             :   NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
     145             : };
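                     : 
                     : // Roughly: OLD_TO_NEW tracks slots in old-generation pages that point into
                     : // the young generation; OLD_TO_OLD tracks slots that point into pages
                     : // selected for compaction (evacuation candidates).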
     146             : 
     147             : // A free list category maintains a linked list of free memory blocks.
     148             : class FreeListCategory {
     149             :  public:
     150             :   FreeListCategory(FreeList* free_list, Page* page)
     151             :       : free_list_(free_list),
     152             :         page_(page),
     153             :         type_(kInvalidCategory),
     154             :         available_(0),
     155             :         prev_(nullptr),
     156     5614474 :         next_(nullptr) {}
     157             : 
     158             :   void Initialize(FreeListCategoryType type) {
     159     2807856 :     type_ = type;
     160     2807856 :     available_ = 0;
     161     2807856 :     prev_ = nullptr;
     162     2807856 :     next_ = nullptr;
     163             :   }
     164             : 
     165             :   void Reset();
     166             : 
     167           0 :   void ResetStats() { Reset(); }
     168             : 
     169             :   void RepairFreeList(Heap* heap);
     170             : 
     171             :   // Relinks the category into the currently owning free list. Requires that the
     172             :   // category is currently unlinked.
     173             :   void Relink();
     174             : 
     175             :   void Free(Address address, size_t size_in_bytes, FreeMode mode);
     176             : 
     177             :   // Performs a single try to pick a node of at least |minimum_size| from the
      178             :   // category. Stores the actual size in |node_size|. Returns an empty
      179             :   // FreeSpace if no node is found.
     180             :   FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
     181             : 
     182             :   // Picks a node of at least |minimum_size| from the category. Stores the
     183             :   // actual size in |node_size|. Returns nullptr if no node is found.
      184             :   // actual size in |node_size|. Returns an empty FreeSpace if no node is found.
     185             : 
     186             :   inline FreeList* owner();
     187             :   inline Page* page() const { return page_; }
     188             :   inline bool is_linked();
     189             :   bool is_empty() { return top().is_null(); }
     190             :   size_t available() const { return available_; }
     191             : 
     192     7449648 :   void set_free_list(FreeList* free_list) { free_list_ = free_list; }
     193             : 
     194             : #ifdef DEBUG
     195             :   size_t SumFreeList();
     196             :   int FreeListLength();
     197             : #endif
     198             : 
     199             :  private:
      200             :   // For debug builds we accurately compute free list lengths up until
     201             :   // {kVeryLongFreeList} by manually walking the list.
     202             :   static const int kVeryLongFreeList = 500;
     203             : 
     204             :   FreeSpace top() { return top_; }
     205    21418083 :   void set_top(FreeSpace top) { top_ = top; }
     206             :   FreeListCategory* prev() { return prev_; }
     207     4061483 :   void set_prev(FreeListCategory* prev) { prev_ = prev; }
     208             :   FreeListCategory* next() { return next_; }
     209     5414743 :   void set_next(FreeListCategory* next) { next_ = next; }
     210             : 
     211             :   // This FreeListCategory is owned by the given free_list_.
     212             :   FreeList* free_list_;
     213             : 
     214             :   // This FreeListCategory holds free list entries of the given page_.
     215             :   Page* const page_;
     216             : 
     217             :   // |type_|: The type of this free list category.
     218             :   FreeListCategoryType type_;
     219             : 
     220             :   // |available_|: Total available bytes in all blocks of this free list
     221             :   // category.
     222             :   size_t available_;
     223             : 
     224             :   // |top_|: Points to the top FreeSpace in the free list category.
     225             :   FreeSpace top_;
     226             : 
     227             :   FreeListCategory* prev_;
     228             :   FreeListCategory* next_;
     229             : 
     230             :   friend class FreeList;
     231             :   friend class PagedSpace;
     232             : 
     233             :   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
     234             : };
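                     : 
                     : // Example (sketch) of the two-step search a caller can perform against a
                     : // category, using only the API above:
                     : //
                     : //   size_t node_size = 0;
                     : //   FreeSpace node = category->PickNodeFromList(minimum_size, &node_size);
                     : //   if (node.is_null()) {
                     : //     // Fall back to walking the list for a large-enough block.
                     : //     node = category->SearchForNodeInList(minimum_size, &node_size);
                     : //   }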
     235             : 
     236             : class V8_EXPORT_PRIVATE MemoryChunkLayout {
     237             :  public:
     238             :   static size_t CodePageGuardStartOffset();
     239             :   static size_t CodePageGuardSize();
     240             :   static intptr_t ObjectStartOffsetInCodePage();
     241             :   static intptr_t ObjectEndOffsetInCodePage();
     242             :   static size_t AllocatableMemoryInCodePage();
     243             :   static intptr_t ObjectStartOffsetInDataPage();
     244             :   static size_t AllocatableMemoryInDataPage();
     245             :   static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
     246             :   static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
     247             : };
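                     : 
                     : // Example (sketch): spaces derive their usable page area from this layout
                     : // helper instead of hard-coding offsets, e.g.
                     : //
                     : //   size_t usable =
                     : //       MemoryChunkLayout::AllocatableMemoryInMemoryChunk(OLD_SPACE);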
     248             : 
     249             : // MemoryChunk represents a memory region owned by a specific space.
     250             : // It is divided into the header and the body. Chunk start is always
     251             : // 1MB aligned. Start of the body is aligned so it can accommodate
     252             : // any heap object.
     253             : class MemoryChunk {
     254             :  public:
     255             :   // Use with std data structures.
     256             :   struct Hasher {
     257             :     size_t operator()(MemoryChunk* const chunk) const {
     258   424852348 :       return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
     259             :     }
     260             :   };
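                     : 
                     :   // Example (sketch): chunk starts are kAlignment-aligned, so shifting by
                     :   // kPageSizeBits yields a well-distributed key for std containers:
                     :   //
                     :   //   std::unordered_set<MemoryChunk*, MemoryChunk::Hasher> visited;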
     261             : 
     262             :   enum Flag {
     263             :     NO_FLAGS = 0u,
     264             :     IS_EXECUTABLE = 1u << 0,
     265             :     POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
     266             :     POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
     267             :     // A page in the from-space or a young large page that was not scavenged
     268             :     // yet.
     269             :     FROM_PAGE = 1u << 3,
     270             :     // A page in the to-space or a young large page that was scavenged.
     271             :     TO_PAGE = 1u << 4,
     272             :     LARGE_PAGE = 1u << 5,
     273             :     EVACUATION_CANDIDATE = 1u << 6,
     274             :     NEVER_EVACUATE = 1u << 7,
     275             : 
      276             :     // Large objects can have a progress bar in their page header. These objects
     277             :     // are scanned in increments and will be kept black while being scanned.
      278             :     // Even if the mutator writes to them, they will be kept black and a white
     279             :     // to grey transition is performed in the value.
     280             :     HAS_PROGRESS_BAR = 1u << 8,
     281             : 
     282             :     // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
     283             :     // from new to old space during evacuation.
     284             :     PAGE_NEW_OLD_PROMOTION = 1u << 9,
     285             : 
     286             :     // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
     287             :     // within the new space during evacuation.
     288             :     PAGE_NEW_NEW_PROMOTION = 1u << 10,
     289             : 
     290             :     // This flag is intended to be used for testing. Works only when both
     291             :     // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
      292             :     // are set. It forces the page to become an evacuation candidate at the
      293             :     // next candidate selection cycle.
     294             :     FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
     295             : 
     296             :     // This flag is intended to be used for testing.
     297             :     NEVER_ALLOCATE_ON_PAGE = 1u << 12,
     298             : 
     299             :     // The memory chunk is already logically freed, however the actual freeing
     300             :     // still has to be performed.
      301             :     // The memory chunk is already logically freed; however, the actual freeing
     302             : 
     303             :     // |POOLED|: When actually freeing this chunk, only uncommit and do not
     304             :     // give up the reservation as we still reuse the chunk at some point.
     305             :     POOLED = 1u << 14,
     306             : 
     307             :     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
     308             :     //   has been aborted and needs special handling by the sweeper.
     309             :     COMPACTION_WAS_ABORTED = 1u << 15,
     310             : 
     311             :     // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
     312             :     // on pages is sometimes aborted. The flag is used to avoid repeatedly
     313             :     // triggering on the same page.
     314             :     COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
     315             : 
     316             :     // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
     317             :     // to iterate the page.
     318             :     SWEEP_TO_ITERATE = 1u << 17,
     319             : 
     320             :     // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
     321             :     // enabled.
     322             :     INCREMENTAL_MARKING = 1u << 18,
     323             :     NEW_SPACE_BELOW_AGE_MARK = 1u << 19
     324             :   };
     325             : 
     326             :   using Flags = uintptr_t;
     327             : 
     328             :   static const Flags kPointersToHereAreInterestingMask =
     329             :       POINTERS_TO_HERE_ARE_INTERESTING;
     330             : 
     331             :   static const Flags kPointersFromHereAreInterestingMask =
     332             :       POINTERS_FROM_HERE_ARE_INTERESTING;
     333             : 
     334             :   static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
     335             : 
     336             :   static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
     337             : 
     338             :   static const Flags kIsLargePageMask = LARGE_PAGE;
     339             : 
     340             :   static const Flags kSkipEvacuationSlotsRecordingMask =
     341             :       kEvacuationCandidateMask | kIsInYoungGenerationMask;
     342             : 
     343             :   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
     344             :   //   not be performed on that page. Sweeper threads that are done with their
     345             :   //   work will set this value and not touch the page anymore.
     346             :   // |kSweepingPending|: This page is ready for parallel sweeping.
     347             :   // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
     348             :   enum ConcurrentSweepingState {
     349             :     kSweepingDone,
     350             :     kSweepingPending,
     351             :     kSweepingInProgress,
     352             :   };
     353             : 
     354             :   static const intptr_t kAlignment =
     355             :       (static_cast<uintptr_t>(1) << kPageSizeBits);
     356             : 
     357             :   static const intptr_t kAlignmentMask = kAlignment - 1;
     358             : 
     359             :   static const intptr_t kSizeOffset = 0;
     360             :   static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
     361             :   static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
     362             :   static const intptr_t kReservationOffset =
     363             :       kMarkBitmapOffset + kSystemPointerSize;
     364             :   static const intptr_t kHeapOffset =
     365             :       kReservationOffset + 3 * kSystemPointerSize;
     366             :   static const intptr_t kHeaderSentinelOffset =
     367             :       kHeapOffset + kSystemPointerSize;
     368             :   static const intptr_t kOwnerOffset =
     369             :       kHeaderSentinelOffset + kSystemPointerSize;
     370             : 
     371             :   static const size_t kHeaderSize =
     372             :       kSizeOffset               // NOLINT
     373             :       + kSizetSize              // size_t size
     374             :       + kUIntptrSize            // uintptr_t flags_
     375             :       + kSystemPointerSize      // Bitmap* marking_bitmap_
     376             :       + 3 * kSystemPointerSize  // VirtualMemory reservation_
     377             :       + kSystemPointerSize      // Heap* heap_
     378             :       + kSystemPointerSize      // Address header_sentinel_
     379             :       + kSystemPointerSize      // Address area_start_
     380             :       + kSystemPointerSize      // Address area_end_
     381             :       + kSystemPointerSize      // Address owner_
     382             :       + kSizetSize              // size_t progress_bar_
     383             :       + kIntptrSize             // intptr_t live_byte_count_
     384             :       + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
     385             :       + kSystemPointerSize *
     386             :             NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
     387             :       + kSystemPointerSize  // InvalidatedSlots* invalidated_slots_
     388             :       + kSystemPointerSize  // SkipList* skip_list_
     389             :       + kSystemPointerSize  // std::atomic<intptr_t> high_water_mark_
     390             :       + kSystemPointerSize  // base::Mutex* mutex_
     391             :       + kSystemPointerSize  // std::atomic<ConcurrentSweepingState>
     392             :                             // concurrent_sweeping_
     393             :       + kSystemPointerSize  // base::Mutex* page_protection_change_mutex_
      394             :       + kSystemPointerSize  // uintptr_t write_unprotect_counter_
     395             :       + kSizetSize * ExternalBackingStoreType::kNumTypes
     396             :       // std::atomic<size_t> external_backing_store_bytes_
     397             :       + kSizetSize              // size_t allocated_bytes_
     398             :       + kSizetSize              // size_t wasted_memory_
     399             :       + kSystemPointerSize * 2  // base::ListNode
     400             :       + kSystemPointerSize * kNumberOfCategories
     401             :       // FreeListCategory categories_[kNumberOfCategories]
     402             :       + kSystemPointerSize  // LocalArrayBufferTracker* local_tracker_
     403             :       + kIntptrSize  // std::atomic<intptr_t> young_generation_live_byte_count_
     404             :       + kSystemPointerSize;  // Bitmap* young_generation_bitmap_
     405             : 
     406             :   // Page size in bytes.  This must be a multiple of the OS page size.
     407             :   static const int kPageSize = 1 << kPageSizeBits;
     408             : 
     409             :   // Maximum number of nested code memory modification scopes.
     410             :   static const int kMaxWriteUnprotectCounter = 3;
     411             : 
     412  7811479797 :   static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
     413             : 
     414             :   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
     415             :   static MemoryChunk* FromAddress(Address a) {
     416   387882032 :     return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
     417             :   }
     418             :   // Only works if the object is in the first kPageSize of the MemoryChunk.
     419     1998282 :   static MemoryChunk* FromHeapObject(const HeapObject o) {
     420  7139711460 :     return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
     421             :   }
     422             : 
     423             :   void SetOldGenerationPageFlags(bool is_marking);
     424             :   void SetYoungGenerationPageFlags(bool is_marking);
     425             : 
     426             :   static inline MemoryChunk* FromAnyPointerAddress(Address addr);
     427             : 
     428     3018169 :   static inline void UpdateHighWaterMark(Address mark) {
     429     4386806 :     if (mark == kNullAddress) return;
     430             :     // Need to subtract one from the mark because when a chunk is full the
     431             :     // top points to the next address after the chunk, which effectively belongs
     432             :     // to another chunk. See the comment to Page::FromAllocationAreaAddress.
     433     1649532 :     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
     434     1649532 :     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
     435     1649532 :     intptr_t old_mark = 0;
     436     1649532 :     do {
     437     1649532 :       old_mark = chunk->high_water_mark_;
     438             :     } while (
     439     2388140 :         (new_mark > old_mark) &&
     440             :         !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
     441             :   }
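                     : 
                     :   // The loop above is a monotonic-max update: the weak compare-exchange is
                     :   // retried only while |new_mark| still exceeds the concurrently refreshed
                     :   // |old_mark|, so the water mark never moves backwards.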
     442             : 
     443             :   static inline void MoveExternalBackingStoreBytes(
     444             :       ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
     445             :       size_t amount);
     446             : 
     447             :   void DiscardUnusedMemory(Address addr, size_t size);
     448             : 
     449             :   Address address() const {
     450   984737015 :     return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
     451             :   }
     452             : 
     453             :   base::Mutex* mutex() { return mutex_; }
     454             : 
     455             :   bool Contains(Address addr) {
     456      712603 :     return addr >= area_start() && addr < area_end();
     457             :   }
     458             : 
     459             :   // Checks whether |addr| can be a limit of addresses in this page. It's a
     460             :   // limit if it's in the page, or if it's just after the last byte of the page.
     461             :   bool ContainsLimit(Address addr) {
     462    71570383 :     return addr >= area_start() && addr <= area_end();
     463             :   }
     464             : 
     465             :   void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
     466             :     concurrent_sweeping_ = state;
     467             :   }
     468             : 
     469             :   ConcurrentSweepingState concurrent_sweeping_state() {
     470             :     return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
     471             :   }
     472             : 
     473      155397 :   bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
     474             : 
     475             :   size_t size() const { return size_; }
     476             :   void set_size(size_t size) { size_ = size; }
     477             : 
     478             :   inline Heap* heap() const { return heap_; }
     479             : 
     480             :   Heap* synchronized_heap();
     481             : 
     482             :   inline SkipList* skip_list() { return skip_list_; }
     483             : 
     484       91047 :   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
     485             : 
     486             :   template <RememberedSetType type>
     487        1504 :   bool ContainsSlots() {
     488             :     return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
     489        2476 :            invalidated_slots() != nullptr;
     490             :   }
     491             : 
     492             :   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
     493             :   SlotSet* slot_set() {
     494             :     if (access_mode == AccessMode::ATOMIC)
     495   204213094 :       return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
     496             :     return slot_set_[type];
     497             :   }
     498             : 
     499             :   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
     500             :   TypedSlotSet* typed_slot_set() {
     501             :     if (access_mode == AccessMode::ATOMIC)
     502     4346644 :       return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
     503             :     return typed_slot_set_[type];
     504             :   }
     505             : 
     506             :   template <RememberedSetType type>
     507             :   V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
     508             :   // Not safe to be called concurrently.
     509             :   template <RememberedSetType type>
     510             :   void ReleaseSlotSet();
     511             :   template <RememberedSetType type>
     512             :   TypedSlotSet* AllocateTypedSlotSet();
     513             :   // Not safe to be called concurrently.
     514             :   template <RememberedSetType type>
     515             :   void ReleaseTypedSlotSet();
     516             : 
     517             :   InvalidatedSlots* AllocateInvalidatedSlots();
     518             :   void ReleaseInvalidatedSlots();
     519             :   V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
     520             :                                                             int size);
     521             :   // Updates invalidated_slots after array left-trimming.
     522             :   void MoveObjectWithInvalidatedSlots(HeapObject old_start,
     523             :                                       HeapObject new_start);
     524             :   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
     525             :   InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
     526             : 
     527             :   void ReleaseLocalTracker();
     528             : 
     529             :   void AllocateYoungGenerationBitmap();
     530             :   void ReleaseYoungGenerationBitmap();
     531             : 
     532             :   void AllocateMarkingBitmap();
     533             :   void ReleaseMarkingBitmap();
     534             : 
     535             :   Address area_start() { return area_start_; }
     536             :   Address area_end() { return area_end_; }
     537     9236145 :   size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
     538             : 
     539             :   // Approximate amount of physical memory committed for this chunk.
     540             :   V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
     541             : 
     542      187032 :   Address HighWaterMark() { return address() + high_water_mark_; }
     543             : 
     544             :   size_t ProgressBar() {
     545             :     DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
     546             :     return progress_bar_.load(std::memory_order_acquire);
     547             :   }
     548             : 
     549             :   bool TrySetProgressBar(size_t old_value, size_t new_value) {
     550             :     DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
     551             :     return progress_bar_.compare_exchange_strong(old_value, new_value,
     552             :                                                  std::memory_order_acq_rel);
     553             :   }
     554             : 
     555             :   void ResetProgressBar() {
     556       49606 :     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
     557             :       progress_bar_.store(0, std::memory_order_release);
     558             :     }
     559             :   }
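                     : 
                     :   // Example (sketch) of the incremental-scan pattern these accessors
                     :   // support; |kStep| and |object_size| are hypothetical:
                     :   //
                     :   //   size_t current = chunk->ProgressBar();
                     :   //   size_t next = std::min(current + kStep, object_size);
                     :   //   if (chunk->TrySetProgressBar(current, next)) {
                     :   //     // Scan [current, next) of the large object; otherwise retry.
                     :   //   }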
     560             : 
     561             :   inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
     562             :                                                  size_t amount);
     563             : 
     564             :   inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
     565             :                                                  size_t amount);
     566             : 
     567             :   size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
     568     1632061 :     return external_backing_store_bytes_[type];
     569             :   }
     570             : 
     571             :   // Some callers rely on the fact that this can operate on both
     572             :   // tagged and aligned object addresses.
     573     1998282 :   inline uint32_t AddressToMarkbitIndex(Address addr) const {
     574  7146956277 :     return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
     575             :   }
     576             : 
     577             :   inline Address MarkbitIndexToAddress(uint32_t index) const {
     578             :     return this->address() + (index << kTaggedSizeLog2);
     579             :   }
     580             : 
     581             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     582             :   void SetFlag(Flag flag) {
     583             :     if (access_mode == AccessMode::NON_ATOMIC) {
     584     5614455 :       flags_ |= flag;
     585             :     } else {
     586       12805 :       base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
     587             :     }
     588             :   }
     589             : 
     590             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     591             :   bool IsFlagSet(Flag flag) {
     592  5069323485 :     return (GetFlags<access_mode>() & flag) != 0;
     593             :   }
     594             : 
     595     3818185 :   void ClearFlag(Flag flag) { flags_ &= ~flag; }
     596             :   // Set or clear multiple flags at a time. The flags in the mask are set to
     597             :   // the value in "flags", the rest retain the current value in |flags_|.
     598             :   void SetFlags(uintptr_t flags, uintptr_t mask) {
     599     1326289 :     flags_ = (flags_ & ~mask) | (flags & mask);
     600             :   }
     601             : 
     602             :   // Return all current flags.
     603             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     604             :   uintptr_t GetFlags() {
     605             :     if (access_mode == AccessMode::NON_ATOMIC) {
     606             :       return flags_;
     607             :     } else {
     608  4567671936 :       return base::AsAtomicWord::Relaxed_Load(&flags_);
     609             :     }
     610             :   }
     611             : 
     612             :   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
     613             : 
     614             :   void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
     615             : 
     616             :   bool CanAllocate() {
     617      290452 :     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
     618             :   }
     619             : 
     620             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     621    13136071 :   bool IsEvacuationCandidate() {
     622             :     DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
     623             :              IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
     624    13136071 :     return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
     625             :   }
     626             : 
     627             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     628      403094 :   bool ShouldSkipEvacuationSlotRecording() {
     629             :     uintptr_t flags = GetFlags<access_mode>();
     630             :     return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
     631    38019680 :            ((flags & COMPACTION_WAS_ABORTED) == 0);
     632             :   }
     633             : 
     634             :   Executability executable() {
     635     3558328 :     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
     636             :   }
     637             : 
     638  1416363612 :   bool IsFromPage() const { return (flags_ & FROM_PAGE) != 0; }
     639    42503137 :   bool IsToPage() const { return (flags_ & TO_PAGE) != 0; }
     640   120135631 :   bool IsLargePage() const { return (flags_ & LARGE_PAGE) != 0; }
     641             : 
     642             :   bool InYoungGeneration() const {
     643  1630300278 :     return (flags_ & kIsInYoungGenerationMask) != 0;
     644             :   }
     645        4088 :   bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
     646             :   bool InNewLargeObjectSpace() const {
     647   228502916 :     return InYoungGeneration() && IsLargePage();
     648             :   }
     649             :   bool InOldSpace() const;
     650             :   V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
     651             : 
     652             :   Space* owner() const { return owner_; }
     653             : 
     654             :   void set_owner(Space* space) { owner_ = space; }
     655             : 
     656             :   static inline bool HasHeaderSentinel(Address slot_addr);
     657             : 
     658             :   // Emits a memory barrier. For TSAN builds the other thread needs to perform
     659             :   // MemoryChunk::synchronized_heap() to simulate the barrier.
     660             :   void InitializationMemoryFence();
     661             : 
     662             :   V8_EXPORT_PRIVATE void SetReadable();
     663             :   V8_EXPORT_PRIVATE void SetReadAndExecutable();
     664             :   V8_EXPORT_PRIVATE void SetReadAndWritable();
     665             : 
     666     2649879 :   void SetDefaultCodePermissions() {
     667     2649879 :     if (FLAG_jitless) {
     668       72452 :       SetReadable();
     669             :     } else {
     670     2577427 :       SetReadAndExecutable();
     671             :     }
     672     2649879 :   }
     673             : 
     674             :   base::ListNode<MemoryChunk>& list_node() { return list_node_; }
     675             : 
     676             :  protected:
     677             :   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
     678             :                                  Address area_start, Address area_end,
     679             :                                  Executability executable, Space* owner,
     680             :                                  VirtualMemory reservation);
     681             : 
     682             :   // Should be called when memory chunk is about to be freed.
     683             :   void ReleaseAllocatedMemory();
     684             : 
     685             :   // Sets the requested page permissions only if the write unprotect counter
     686             :   // has reached 0.
     687             :   void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     688             :       PageAllocator::Permission permission);
     689             : 
     690     1131595 :   VirtualMemory* reserved_memory() { return &reservation_; }
     691             : 
     692             :   template <AccessMode mode>
     693             :   ConcurrentBitmap<mode>* marking_bitmap() const {
     694             :     return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
     695             :   }
     696             : 
     697             :   template <AccessMode mode>
     698             :   ConcurrentBitmap<mode>* young_generation_bitmap() const {
     699             :     return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
     700             :   }
     701             : 
     702             :   size_t size_;
     703             :   uintptr_t flags_;
     704             : 
     705             :   Bitmap* marking_bitmap_;
     706             : 
     707             :   // If the chunk needs to remember its memory reservation, it is stored here.
     708             :   VirtualMemory reservation_;
     709             : 
     710             :   Heap* heap_;
     711             : 
     712             :   // This is used to distinguish the memory chunk header from the interior of a
     713             :   // large page. The memory chunk header stores an impossible tagged pointer
     714             :   // here: the tagged pointer of the page start. A field in a large object is
     715             :   // guaranteed not to contain such a pointer.
     716             :   Address header_sentinel_;
     717             : 
     718             :   // The space owning this memory chunk.
     719             :   std::atomic<Space*> owner_;
     720             : 
     721             :   // Start and end of allocatable memory on this chunk.
     722             :   Address area_start_;
     723             :   Address area_end_;
     724             : 
     725             :   // Used by the incremental marker to keep track of the scanning progress in
     726             :   // large objects that have a progress bar and are scanned in increments.
     727             :   std::atomic<size_t> progress_bar_;
     728             : 
     729             :   // Count of bytes marked black on page.
     730             :   intptr_t live_byte_count_;
     731             : 
     732             :   // A single slot set for small pages (of size kPageSize) or an array of slot
     733             :   // sets for large pages. In the latter case the number of entries in the array
     734             :   // is ceil(size() / kPageSize).
     735             :   SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
     736             :   TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
     737             :   InvalidatedSlots* invalidated_slots_;
     738             : 
     739             :   SkipList* skip_list_;
     740             : 
     741             :   // Assuming the initial allocation on a page is sequential,
     742             :   // this counts the highest number of bytes ever allocated on the page.
     743             :   std::atomic<intptr_t> high_water_mark_;
     744             : 
     745             :   base::Mutex* mutex_;
     746             : 
     747             :   std::atomic<intptr_t> concurrent_sweeping_;
     748             : 
     749             :   base::Mutex* page_protection_change_mutex_;
     750             : 
     751             :   // This field is only relevant for code pages. It tracks the number of
     752             :   // times a component requested this page to be read+writeable. The
     753             :   // counter is decremented when a component resets to read+executable.
     754             :   // If Value() == 0 => The memory is read and executable.
     755             :   // If Value() >= 1 => The memory is read and writable (and maybe executable).
     756             :   // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
     757             :   // excessive nesting of scopes.
     758             :   // All executable MemoryChunks are allocated rw based on the assumption that
     759             :   // they will be used immediately for an allocation. They are initialized
     760             :   // with the number of open CodeSpaceMemoryModificationScopes. The caller
     761             :   // that triggers the page allocation is responsible for decrementing the
     762             :   // counter.
     763             :   uintptr_t write_unprotect_counter_;
     764             : 
     765             :   // Bytes allocated on the page, which includes all objects on the page
     766             :   // and the linear allocation area.
     767             :   size_t allocated_bytes_;
     768             : 
     769             :   // Tracks off-heap memory used by this memory chunk.
     770             :   std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
     771             : 
     772             :   // Freed memory that was not added to the free list.
     773             :   size_t wasted_memory_;
     774             : 
     775             :   base::ListNode<MemoryChunk> list_node_;
     776             : 
     777             :   FreeListCategory* categories_[kNumberOfCategories];
     778             : 
     779             :   LocalArrayBufferTracker* local_tracker_;
     780             : 
     781             :   std::atomic<intptr_t> young_generation_live_byte_count_;
     782             :   Bitmap* young_generation_bitmap_;
     783             : 
     784             :  private:
     785      965180 :   void InitializeReservedMemory() { reservation_.Reset(); }
     786             : 
     787             :   friend class ConcurrentMarkingState;
     788             :   friend class IncrementalMarkingState;
     789             :   friend class MajorAtomicMarkingState;
     790             :   friend class MajorMarkingState;
     791             :   friend class MajorNonAtomicMarkingState;
     792             :   friend class MemoryAllocator;
     793             :   friend class MemoryChunkValidator;
     794             :   friend class MinorMarkingState;
     795             :   friend class MinorNonAtomicMarkingState;
     796             :   friend class PagedSpace;
     797             : };
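                     : 
                     : // Example (sketch): a typical query built from the accessors above, for
                     : // some heap object |obj|:
                     : //
                     : //   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
                     : //   if (chunk->InYoungGeneration() && !chunk->IsLargePage()) {
                     : //     // |obj| lives in the semispace-based new space.
                     : //   }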
     798             : 
     799             : static_assert(sizeof(std::atomic<intptr_t>) == kSystemPointerSize,
     800             :               "sizeof(std::atomic<intptr_t>) == kSystemPointerSize");
     801             : 
     802             : // -----------------------------------------------------------------------------
     803             : // A page is a memory chunk of size 512K. Large object pages may be larger.
     804             : //
     805             : // The only way to get a page pointer is by calling factory methods:
     806             : //   Page* p = Page::FromAddress(addr); or
     807             : //   Page* p = Page::FromAllocationAreaAddress(address);
     808             : class Page : public MemoryChunk {
     809             :  public:
     810             :   static const intptr_t kCopyAllFlags = ~0;
     811             : 
     812             :   // Page flags copied from from-space to to-space when flipping semispaces.
     813             :   static const intptr_t kCopyOnFlipFlagsMask =
     814             :       static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
     815             :       static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
     816             :       static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
     817             : 
     818             :   // Returns the page containing a given address. The address ranges
     819             :   // from [page_addr .. page_addr + kPageSize). This only works if the object
     820             :   // is in fact in a page.
     821             :   static Page* FromAddress(Address addr) {
     822   291221076 :     return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
     823             :   }
     824           0 :   static Page* FromHeapObject(const HeapObject o) {
     825  4580176974 :     return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
     826             :   }
     827             : 
     828             :   // Returns the page containing the address provided. The address can
     829             :   // potentially point right after the page. To also be safe for tagged values
     830             :   // we subtract a hole word. The valid address ranges from
     831             :   // [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
     832             :   static Page* FromAllocationAreaAddress(Address address) {
     833     1154177 :     return Page::FromAddress(address - kTaggedSize);
     834             :   }
     835             : 
     836             :   // Checks if address1 and address2 are on the same new space page.
     837             :   static bool OnSamePage(Address address1, Address address2) {
     838             :     return Page::FromAddress(address1) == Page::FromAddress(address2);
     839             :   }
     840             : 
     841             :   // Checks whether an address is page aligned.
     842             :   static bool IsAlignedToPageSize(Address addr) {
     843     3051944 :     return (addr & kPageAlignmentMask) == 0;
     844             :   }
     845             : 
     846             :   static Page* ConvertNewToOld(Page* old_page);
     847             : 
     848             :   inline void MarkNeverAllocateForTesting();
     849             :   inline void MarkEvacuationCandidate();
     850             :   inline void ClearEvacuationCandidate();
     851             : 
     852             :   Page* next_page() { return static_cast<Page*>(list_node_.next()); }
     853             :   Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
     854             : 
     855             :   template <typename Callback>
     856     1241609 :   inline void ForAllFreeListCategories(Callback callback) {
     857    19009552 :     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     858     8773638 :       callback(categories_[i]);
     859             :     }
     860     1241609 :   }
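                     : 
                     :   // Example (sketch): summing the free bytes tracked by all categories of a
                     :   // page with the iterator above:
                     :   //
                     :   //   size_t free_bytes = 0;
                     :   //   page->ForAllFreeListCategories(
                     :   //       [&](FreeListCategory* category) { free_bytes += category->available(); });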
     861             : 
     862             :   // Returns the offset of a given address to this page.
     863         580 :   inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
     864             : 
     865             :   // Returns the address for a given offset in this page.
     866             :   Address OffsetToAddress(size_t offset) {
     867         314 :     Address address_in_page = address() + offset;
     868             :     DCHECK_GE(address_in_page, area_start_);
     869             :     DCHECK_LT(address_in_page, area_end_);
     870             :     return address_in_page;
     871             :   }
     872             : 
     873             :   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
     874             :   // progress. In particular, when we know that right before this call a
     875             :   // sweeper thread was sweeping this page.
     876             :   void WaitUntilSweepingCompleted() {
     877           0 :     mutex_->Lock();
     878           0 :     mutex_->Unlock();
     879             :     DCHECK(SweepingDone());
     880             :   }
     881             : 
     882             :   void AllocateLocalTracker();
     883             :   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
     884             :   bool contains_array_buffers();
     885             : 
     886             :   void ResetFreeListStatistics();
     887             : 
     888             :   size_t AvailableInFreeList();
     889             : 
     890             :   size_t AvailableInFreeListFromAllocatedBytes() {
     891             :     DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
     892             :     return area_size() - wasted_memory() - allocated_bytes();
     893             :   }
     894             : 
     895             :   FreeListCategory* free_list_category(FreeListCategoryType type) {
     896    17791029 :     return categories_[type];
     897             :   }
     898             : 
     899             :   size_t wasted_memory() { return wasted_memory_; }
     900      413574 :   void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
     901             :   size_t allocated_bytes() { return allocated_bytes_; }
     902             :   void IncreaseAllocatedBytes(size_t bytes) {
     903             :     DCHECK_LE(bytes, area_size());
     904     1305526 :     allocated_bytes_ += bytes;
     905             :   }
     906             :   void DecreaseAllocatedBytes(size_t bytes) {
     907             :     DCHECK_LE(bytes, area_size());
     908             :     DCHECK_GE(allocated_bytes(), bytes);
     909    18207310 :     allocated_bytes_ -= bytes;
     910             :   }
     911             : 
     912             :   void ResetAllocatedBytes();
     913             : 
     914             :   size_t ShrinkToHighWaterMark();
     915             : 
     916             :   V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
     917             :   void DestroyBlackArea(Address start, Address end);
     918             : 
     919             :   void InitializeFreeListCategories();
     920             :   void AllocateFreeListCategories();
     921             :   void ReleaseFreeListCategories();
     922             : 
     923             : #ifdef DEBUG
     924             :   void Print();
     925             : #endif  // DEBUG
     926             : 
     927             :  private:
     928             :   enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
     929             : 
     930             :   friend class MemoryAllocator;
     931             : };
     932             : 
     933             : class ReadOnlyPage : public Page {
     934             :  public:
     935             :   // Clears any pointers in the header that point out of the page that would
     936             :   // otherwise make the header non-relocatable.
     937             :   void MakeHeaderRelocatable();
     938             : 
     939             :  private:
     940             :   friend class ReadOnlySpace;
     941             : };
     942             : 
     943             : class LargePage : public MemoryChunk {
     944             :  public:
     945             :   // A limit to guarantee that we do not overflow typed slot offset in
     946             :   // the old to old remembered set.
     947             :   // Note that this limit is higher than what the assembler already imposes on
     948             :   // x64 and ia32 architectures.
     949             :   static const int kMaxCodePageSize = 512 * MB;
     950             : 
     951             :   static LargePage* FromHeapObject(const HeapObject o) {
     952             :     return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
     953             :   }
     954             : 
     955             :   inline HeapObject GetObject();
     956             : 
     957             :   inline LargePage* next_page() {
     958             :     return static_cast<LargePage*>(list_node_.next());
     959             :   }
     960             : 
     961             :   // Uncommit memory that is not in use anymore by the object. If the object
     962             :   // cannot be shrunk, 0 is returned.
     963             :   Address GetAddressToShrink(Address object_address, size_t object_size);
     964             : 
     965             :   void ClearOutOfLiveRangeSlots(Address free_start);
     966             : 
     967             :  private:
     968             :   static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
     969             :                                Executability executable);
     970             : 
     971             :   friend class MemoryAllocator;
     972             : };
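                      : // A minimal usage sketch (editor's addition, not part of the header): a
                      : // collector could shrink a LargePage around its single live object roughly
                      : // as follows; |object| and the call sequence are assumptions based on the
                      : // declarations above.
                      : //
                      : //   LargePage* page = LargePage::FromHeapObject(object);
                      : //   Address free_start =
                      : //       page->GetAddressToShrink(object.address(), object.Size());
                      : //   if (free_start != kNullAddress) {
                      : //     page->ClearOutOfLiveRangeSlots(free_start);
                      : //   }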
     973             : 
     974             : 
     975             : // ----------------------------------------------------------------------------
     976             : // Space is the abstract superclass for all allocation spaces.
     977             : class V8_EXPORT_PRIVATE Space : public Malloced {
     978             :  public:
     979      879371 :   Space(Heap* heap, AllocationSpace id)
     980             :       : allocation_observers_paused_(false),
     981             :         heap_(heap),
     982             :         id_(id),
     983             :         committed_(0),
     984     1758742 :         max_committed_(0) {
     985             :     external_backing_store_bytes_ =
     986      879371 :         new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
     987             :     external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
     988      879372 :     external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
     989             :         0;
     990             :     CheckOffsetsAreConsistent();
     991      879372 :   }
     992             : 
     993             :   void CheckOffsetsAreConsistent() const;
     994             : 
     995             :   static inline void MoveExternalBackingStoreBytes(
     996             :       ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
     997             : 
     998     1758444 :   virtual ~Space() {
     999      879222 :     delete[] external_backing_store_bytes_;
    1000      879222 :     external_backing_store_bytes_ = nullptr;
    1001      879222 :   }
    1002             : 
    1003             :   Heap* heap() const { return heap_; }
    1004             : 
    1005             :   // Identity used in error reporting.
    1006             :   AllocationSpace identity() { return id_; }
    1007             : 
    1008           0 :   const char* name() { return Heap::GetSpaceName(id_); }
    1009             : 
    1010             :   virtual void AddAllocationObserver(AllocationObserver* observer);
    1011             : 
    1012             :   virtual void RemoveAllocationObserver(AllocationObserver* observer);
    1013             : 
    1014             :   virtual void PauseAllocationObservers();
    1015             : 
    1016             :   virtual void ResumeAllocationObservers();
    1017             : 
    1018      167594 :   virtual void StartNextInlineAllocationStep() {}
    1019             : 
    1020             :   void AllocationStep(int bytes_since_last, Address soon_object, int size);
    1021             : 
     1022             :   // Returns the total amount of committed memory for this space, i.e.,
     1023             :   // allocatable memory and page headers.
    1024     4602282 :   virtual size_t CommittedMemory() { return committed_; }
    1025             : 
    1026           0 :   virtual size_t MaximumCommittedMemory() { return max_committed_; }
    1027             : 
    1028             :   // Returns allocated size.
    1029             :   virtual size_t Size() = 0;
    1030             : 
    1031             :   // Returns size of objects. Can differ from the allocated size
    1032             :   // (e.g. see LargeObjectSpace).
    1033           0 :   virtual size_t SizeOfObjects() { return Size(); }
    1034             : 
    1035             :   // Approximate amount of physical memory committed for this space.
    1036             :   virtual size_t CommittedPhysicalMemory() = 0;
    1037             : 
    1038             :   // Return the available bytes without growing.
    1039             :   virtual size_t Available() = 0;
    1040             : 
    1041    22527105 :   virtual int RoundSizeDownToObjectAlignment(int size) {
    1042    22527105 :     if (id_ == CODE_SPACE) {
    1043           0 :       return RoundDown(size, kCodeAlignment);
    1044             :     } else {
    1045    22527105 :       return RoundDown(size, kTaggedSize);
    1046             :     }
    1047             :   }
    1048             : 
    1049             :   virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
    1050             : 
    1051             :   void AccountCommitted(size_t bytes) {
    1052             :     DCHECK_GE(committed_ + bytes, committed_);
    1053      814946 :     committed_ += bytes;
    1054      814946 :     if (committed_ > max_committed_) {
    1055      711015 :       max_committed_ = committed_;
    1056             :     }
    1057             :   }
    1058             : 
    1059             :   void AccountUncommitted(size_t bytes) {
    1060             :     DCHECK_GE(committed_, committed_ - bytes);
    1061      489530 :     committed_ -= bytes;
    1062             :   }
    1063             : 
    1064             :   inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
    1065             :                                                  size_t amount);
    1066             : 
    1067             :   inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
    1068             :                                                  size_t amount);
    1069             : 
     1070             :   // Returns the amount of off-heap memory in use by objects in this Space.
    1071          65 :   virtual size_t ExternalBackingStoreBytes(
    1072             :       ExternalBackingStoreType type) const {
    1073         160 :     return external_backing_store_bytes_[type];
    1074             :   }
    1075             : 
    1076             :   void* GetRandomMmapAddr();
    1077             : 
    1078             :   MemoryChunk* first_page() { return memory_chunk_list_.front(); }
    1079             :   MemoryChunk* last_page() { return memory_chunk_list_.back(); }
    1080             : 
    1081             :   base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
    1082             : 
    1083             : #ifdef DEBUG
    1084             :   virtual void Print() = 0;
    1085             : #endif
    1086             : 
    1087             :  protected:
    1088             :   intptr_t GetNextInlineAllocationStepSize();
    1089             :   bool AllocationObserversActive() {
    1090   276169061 :     return !allocation_observers_paused_ && !allocation_observers_.empty();
    1091             :   }
    1092             : 
    1093             :   std::vector<AllocationObserver*> allocation_observers_;
    1094             : 
    1095             :   // The List manages the pages that belong to the given space.
    1096             :   base::List<MemoryChunk> memory_chunk_list_;
    1097             : 
    1098             :   // Tracks off-heap memory used by this space.
    1099             :   std::atomic<size_t>* external_backing_store_bytes_;
    1100             : 
    1101             :  private:
    1102             :   static const intptr_t kIdOffset = 9 * kSystemPointerSize;
    1103             : 
    1104             :   bool allocation_observers_paused_;
    1105             :   Heap* heap_;
    1106             :   AllocationSpace id_;
    1107             : 
    1108             :   // Keeps track of committed memory in a space.
    1109             :   size_t committed_;
    1110             :   size_t max_committed_;
    1111             : 
    1112             :   DISALLOW_COPY_AND_ASSIGN(Space);
    1113             : };
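                      : // A minimal sketch (editor's addition): a Space subclass is expected to
                      : // pair AccountCommitted()/AccountUncommitted() around page acquisition and
                      : // release so that committed_ and max_committed_ stay balanced. MySpace and
                      : // the list calls are hypothetical.
                      : //
                      : //   void MySpace::AddPage(Page* page) {
                      : //     memory_chunk_list_.PushBack(page);
                      : //     AccountCommitted(page->size());
                      : //   }
                      : //   void MySpace::RemovePage(Page* page) {
                      : //     memory_chunk_list_.Remove(page);
                      : //     AccountUncommitted(page->size());
                      : //   }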
    1114             : 
    1115             : class MemoryChunkValidator {
    1116             :   // Computed offsets should match the compiler generated ones.
    1117             :   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
    1118             : 
    1119             :   // Validate our estimates on the header size.
    1120             :   STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
    1121             :   STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
    1122             :   STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
    1123             : };
    1124             : 
    1125             : 
    1126             : // The process-wide singleton that keeps track of code range regions with the
    1127             : // intention to reuse free code range regions as a workaround for CFG memory
    1128             : // leaks (see crbug.com/870054).
    1129       59171 : class CodeRangeAddressHint {
    1130             :  public:
    1131             :   // Returns the most recently freed code range start address for the given
    1132             :   // size. If there is no such entry, then a random address is returned.
    1133             :   V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
    1134             : 
    1135             :   V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
    1136             :                                               size_t code_range_size);
    1137             : 
    1138             :  private:
    1139             :   base::Mutex mutex_;
    1140             :   // A map from code range size to an array of recently freed code range
    1141             :   // addresses. There should be O(1) different code range sizes.
    1142             :   // The length of each array is limited by the peak number of code ranges,
     1143             :   // which should also be O(1).
    1144             :   std::unordered_map<size_t, std::vector<Address>> recently_freed_;
    1145             : };
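                      : // A minimal usage sketch (editor's addition): the hint is consulted before
                      : // reserving a code range and notified when one is released; the singleton
                      : // accessor name is hypothetical.
                      : //
                      : //   CodeRangeAddressHint* hint = GetProcessWideCodeRangeAddressHint();
                      : //   Address start = hint->GetAddressHint(code_range_size);
                      : //   // ... reserve and use the range at |start|; on teardown:
                      : //   hint->NotifyFreedCodeRange(start, code_range_size);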
    1146             : 
    1147             : class SkipList {
    1148             :  public:
    1149             :   SkipList() { Clear(); }
    1150             : 
    1151             :   void Clear() {
    1152    11948820 :     for (int idx = 0; idx < kSize; idx++) {
    1153     5882496 :       starts_[idx] = static_cast<Address>(-1);
    1154             :     }
    1155             :   }
    1156             : 
    1157      532633 :   Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
    1158             : 
    1159             :   void AddObject(Address addr, int size) {
    1160             :     int start_region = RegionNumber(addr);
    1161    97026592 :     int end_region = RegionNumber(addr + size - kTaggedSize);
    1162   298506680 :     for (int idx = start_region; idx <= end_region; idx++) {
    1163   100061960 :       if (starts_[idx] > addr) {
    1164     2667837 :         starts_[idx] = addr;
    1165             :       } else {
    1166             :         // In the first region, there may already be an object closer to the
    1167             :         // start of the region. Do not change the start in that case. If this
    1168             :         // is not the first region, you probably added overlapping objects.
    1169             :         DCHECK_EQ(start_region, idx);
    1170             :       }
    1171             :     }
    1172             :   }
    1173             : 
    1174             :   static inline int RegionNumber(Address addr) {
    1175   405367023 :     return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
    1176             :   }
    1177             : 
    1178    97026592 :   static void Update(Address addr, int size) {
    1179             :     Page* page = Page::FromAddress(addr);
    1180             :     SkipList* list = page->skip_list();
    1181    97026592 :     if (list == nullptr) {
    1182       91047 :       list = new SkipList();
    1183             :       page->set_skip_list(list);
    1184             :     }
    1185             : 
    1186             :     list->AddObject(addr, size);
    1187    97026592 :   }
    1188             : 
    1189             :  private:
    1190             :   static const int kRegionSizeLog2 = 13;
    1191             :   static const int kRegionSize = 1 << kRegionSizeLog2;
    1192             :   static const int kSize = Page::kPageSize / kRegionSize;
    1193             : 
    1194             :   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
    1195             : 
    1196             :   Address starts_[kSize];
    1197             : };
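                      : // A worked example (editor's addition): with kRegionSizeLog2 == 13 each
                      : // page is split into 8 KB regions, and RegionNumber() masks an address
                      : // down to its page offset and shifts by 13. Assuming |page_start| is
                      : // page-aligned, an object spanning [page_start + 0x3ff0,
                      : // page_start + 0x4010) covers regions 1 and 2, so AddObject() updates
                      : // starts_[1] and starts_[2]:
                      : //
                      : //   int first = SkipList::RegionNumber(page_start + 0x3ff0);         // 1
                      : //   int last =
                      : //       SkipList::RegionNumber(page_start + 0x4010 - kTaggedSize);    // 2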
    1198             : 
    1199             : 
    1200             : // ----------------------------------------------------------------------------
    1201             : // A space acquires chunks of memory from the operating system. The memory
    1202             : // allocator allocates and deallocates pages for the paged heap spaces and large
    1203             : // pages for large object space.
    1204      190326 : class MemoryAllocator {
    1205             :  public:
    1206             :   // Unmapper takes care of concurrently unmapping and uncommitting memory
    1207             :   // chunks.
    1208      126884 :   class Unmapper {
    1209             :    public:
    1210             :     class UnmapFreeMemoryTask;
    1211             : 
    1212       63457 :     Unmapper(Heap* heap, MemoryAllocator* allocator)
    1213             :         : heap_(heap),
    1214             :           allocator_(allocator),
    1215             :           pending_unmapping_tasks_semaphore_(0),
    1216             :           pending_unmapping_tasks_(0),
    1217      253828 :           active_unmapping_tasks_(0) {
    1218       63457 :       chunks_[kRegular].reserve(kReservedQueueingSlots);
    1219       63457 :       chunks_[kPooled].reserve(kReservedQueueingSlots);
    1220       63457 :     }
    1221             : 
    1222      456385 :     void AddMemoryChunkSafe(MemoryChunk* chunk) {
    1223      900866 :       if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
    1224      442815 :         AddMemoryChunkSafe<kRegular>(chunk);
    1225             :       } else {
    1226       13570 :         AddMemoryChunkSafe<kNonRegular>(chunk);
    1227             :       }
    1228      456385 :     }
    1229             : 
    1230      424782 :     MemoryChunk* TryGetPooledMemoryChunkSafe() {
    1231             :       // Procedure:
    1232             :       // (1) Try to get a chunk that was declared as pooled and already has
    1233             :       // been uncommitted.
    1234             :       // (2) Try to steal any memory chunk of kPageSize that would've been
    1235             :       // unmapped.
    1236      424782 :       MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
    1237      424781 :       if (chunk == nullptr) {
    1238      398684 :         chunk = GetMemoryChunkSafe<kRegular>();
    1239      398685 :         if (chunk != nullptr) {
    1240             :           // For stolen chunks we need to manually free any allocated memory.
    1241       20544 :           chunk->ReleaseAllocatedMemory();
    1242             :         }
    1243             :       }
    1244      424782 :       return chunk;
    1245             :     }
    1246             : 
    1247             :     V8_EXPORT_PRIVATE void FreeQueuedChunks();
    1248             :     void CancelAndWaitForPendingTasks();
    1249             :     void PrepareForGC();
    1250             :     V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
    1251             :     V8_EXPORT_PRIVATE void TearDown();
    1252             :     size_t NumberOfCommittedChunks();
    1253             :     V8_EXPORT_PRIVATE int NumberOfChunks();
    1254             :     size_t CommittedBufferedMemory();
    1255             : 
    1256             :    private:
    1257             :     static const int kReservedQueueingSlots = 64;
    1258             :     static const int kMaxUnmapperTasks = 4;
    1259             : 
    1260             :     enum ChunkQueueType {
    1261             :       kRegular,     // Pages of kPageSize that do not live in a CodeRange and
    1262             :                     // can thus be used for stealing.
    1263             :       kNonRegular,  // Large chunks and executable chunks.
     1264             :       kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
    1265             :       kNumberOfChunkQueues,
    1266             :     };
    1267             : 
    1268             :     enum class FreeMode {
    1269             :       kUncommitPooled,
    1270             :       kReleasePooled,
    1271             :     };
    1272             : 
    1273             :     template <ChunkQueueType type>
    1274      859093 :     void AddMemoryChunkSafe(MemoryChunk* chunk) {
    1275      859093 :       base::MutexGuard guard(&mutex_);
    1276      859109 :       chunks_[type].push_back(chunk);
    1277      859109 :     }
    1278             : 
    1279             :     template <ChunkQueueType type>
    1280     2579849 :     MemoryChunk* GetMemoryChunkSafe() {
    1281     2579849 :       base::MutexGuard guard(&mutex_);
    1282     2580166 :       if (chunks_[type].empty()) return nullptr;
    1283      859109 :       MemoryChunk* chunk = chunks_[type].back();
    1284             :       chunks_[type].pop_back();
    1285      859109 :       return chunk;
    1286             :     }
    1287             : 
    1288             :     bool MakeRoomForNewTasks();
    1289             : 
    1290             :     template <FreeMode mode>
    1291             :     void PerformFreeMemoryOnQueuedChunks();
    1292             : 
    1293             :     void PerformFreeMemoryOnQueuedNonRegularChunks();
    1294             : 
    1295             :     Heap* const heap_;
    1296             :     MemoryAllocator* const allocator_;
    1297             :     base::Mutex mutex_;
    1298             :     std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
    1299             :     CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
    1300             :     base::Semaphore pending_unmapping_tasks_semaphore_;
    1301             :     intptr_t pending_unmapping_tasks_;
    1302             :     std::atomic<intptr_t> active_unmapping_tasks_;
    1303             : 
    1304             :     friend class MemoryAllocator;
    1305             :   };
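                      :   // A minimal usage sketch (editor's addition): after a GC, dead chunks are
                      :   // queued and freed in the background, and pooled chunks can be reused for
                      :   // new-space pages; |allocator| is hypothetical.
                      :   //
                      :   //   allocator->unmapper()->AddMemoryChunkSafe(dead_chunk);
                      :   //   allocator->unmapper()->FreeQueuedChunks();
                      :   //   // ... on isolate teardown:
                      :   //   allocator->unmapper()->EnsureUnmappingCompleted();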
    1306             : 
    1307             :   enum AllocationMode {
    1308             :     kRegular,
    1309             :     kPooled,
    1310             :   };
    1311             : 
    1312             :   enum FreeMode {
    1313             :     kFull,
    1314             :     kAlreadyPooled,
    1315             :     kPreFreeAndQueue,
    1316             :     kPooledAndQueue,
    1317             :   };
    1318             : 
    1319             :   V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
    1320             : 
    1321             :   // Computes the memory area of discardable memory within a given memory area
    1322             :   // [addr, addr+size) and returns the result as base::AddressRegion. If the
     1323             :   // memory is not discardable, the returned base::AddressRegion is empty.
    1324             :   V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
    1325             :       Address addr, size_t size);
    1326             : 
    1327             :   V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
    1328             :                                     size_t code_range_size);
    1329             : 
    1330             :   V8_EXPORT_PRIVATE void TearDown();
    1331             : 
    1332             :   // Allocates a Page from the allocator. AllocationMode is used to indicate
    1333             :   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
    1334             :   // should be tried first.
    1335             :   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
    1336             :             typename SpaceType>
    1337             :   EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    1338             :   Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
    1339             : 
    1340             :   LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
    1341             :                                Executability executable);
    1342             : 
    1343             :   template <MemoryAllocator::FreeMode mode = kFull>
    1344             :   EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    1345             :   void Free(MemoryChunk* chunk);
    1346             : 
     1347             :   // Returns the allocated space, in bytes.
    1348             :   size_t Size() { return size_; }
    1349             : 
     1350             :   // Returns the allocated executable space, in bytes.
    1351             :   size_t SizeExecutable() { return size_executable_; }
    1352             : 
     1353             :   // Returns the maximum number of bytes that can still be allocated.
    1354             :   size_t Available() {
    1355             :     const size_t size = Size();
    1356         325 :     return capacity_ < size ? 0 : capacity_ - size;
    1357             :   }
    1358             : 
     1359             :   // Returns true if the address lies outside any memory ever allocated
     1360             :   // by this MemoryAllocator.
    1361             :   V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
    1362     3869088 :     return address < lowest_ever_allocated_ ||
    1363             :            address >= highest_ever_allocated_;
    1364             :   }
    1365             : 
     1366             :   // Returns a MemoryChunk in which the memory region from commit_area_size to
     1367             :   // reserve_area_size of the chunk area is reserved but not committed; it
     1368             :   // can be committed later by calling MemoryChunk::CommitArea.
    1369             :   V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
    1370             :                                                size_t commit_area_size,
    1371             :                                                Executability executable,
    1372             :                                                Space* space);
    1373             : 
    1374             :   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
    1375             :                                 size_t alignment, Executability executable,
    1376             :                                 void* hint, VirtualMemory* controller);
    1377             : 
    1378             :   void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
    1379             : 
     1380             :   // Partially releases |bytes_to_free| bytes starting at |start_free|. Note
     1381             :   // that internally memory is freed from |start_free| to the end of the
     1382             :   // reservation. Additional memory beyond the page is not accounted for,
     1383             :   // though, so |bytes_to_free| is computed by the caller.
    1384             :   void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
    1385             :                          size_t bytes_to_free, Address new_area_end);
    1386             : 
    1387             :   // Checks if an allocated MemoryChunk was intended to be used for executable
    1388             :   // memory.
    1389             :   bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
    1390             :     return executable_memory_.find(chunk) != executable_memory_.end();
    1391             :   }
    1392             : 
     1393             :   // Commits the memory region owned by the given reservation object. Returns
     1394             :   // true if it succeeded and false otherwise.
    1395             :   bool CommitMemory(VirtualMemory* reservation);
    1396             : 
     1397             :   // Uncommits the memory region owned by the given reservation object.
     1398             :   // Returns true if it succeeded and false otherwise.
    1399             :   bool UncommitMemory(VirtualMemory* reservation);
    1400             : 
    1401             :   // Zaps a contiguous block of memory [start..(start+size)[ with
    1402             :   // a given zap value.
    1403             :   void ZapBlock(Address start, size_t size, uintptr_t zap_value);
    1404             : 
    1405             :   V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
    1406             :                                                     Address start,
    1407             :                                                     size_t commit_size,
    1408             :                                                     size_t reserved_size);
    1409             : 
    1410             :   // Page allocator instance for allocating non-executable pages.
    1411             :   // Guaranteed to be a valid pointer.
    1412             :   v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
    1413             : 
    1414             :   // Page allocator instance for allocating executable pages.
    1415             :   // Guaranteed to be a valid pointer.
    1416             :   v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
    1417             : 
    1418             :   // Returns page allocator suitable for allocating pages with requested
    1419             :   // executability.
    1420             :   v8::PageAllocator* page_allocator(Executability executable) {
    1421             :     return executable == EXECUTABLE ? code_page_allocator_
    1422     1135296 :                                     : data_page_allocator_;
    1423             :   }
    1424             : 
     1425             :   // A region of memory that may contain executable code, including a reserved
     1426             :   // OS page with read-write access at the beginning.
    1427             :   const base::AddressRegion& code_range() const {
    1428             :     // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
    1429             :     DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
    1430             :     DCHECK_IMPLIES(!code_range_.is_empty(),
    1431             :                    code_range_.contains(code_page_allocator_instance_->begin(),
    1432             :                                         code_page_allocator_instance_->size()));
    1433             :     return code_range_;
    1434             :   }
    1435             : 
    1436     1339673 :   Unmapper* unmapper() { return &unmapper_; }
    1437             : 
    1438             :   // PreFree logically frees the object, i.e., it takes care of the size
    1439             :   // bookkeeping and calls the allocation callback.
    1440             :   void PreFreeMemory(MemoryChunk* chunk);
    1441             : 
    1442             :  private:
    1443             :   void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
    1444             :                                    size_t requested);
    1445             : 
     1446             :   // PerformFreeMemory can be called concurrently once PreFreeMemory has run.
    1447             :   void PerformFreeMemory(MemoryChunk* chunk);
    1448             : 
    1449             :   // See AllocatePage for public interface. Note that currently we only support
    1450             :   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
    1451             :   template <typename SpaceType>
    1452             :   MemoryChunk* AllocatePagePooled(SpaceType* owner);
    1453             : 
    1454             :   // Initializes pages in a chunk. Returns the first page address.
    1455             :   // This function and GetChunkId() are provided for the mark-compact
    1456             :   // collector to rebuild page headers in the from space, which is
     1457             :   // used as a marking stack and whose page headers are destroyed.
    1458             :   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
    1459             :                                PagedSpace* owner);
    1460             : 
    1461      965180 :   void UpdateAllocatedSpaceLimits(Address low, Address high) {
    1462             :     // The use of atomic primitives does not guarantee correctness (wrt.
    1463             :     // desired semantics) by default. The loop here ensures that we update the
    1464             :     // values only if they did not change in between.
    1465      965180 :     Address ptr = kNullAddress;
    1466      965180 :     do {
    1467      965180 :       ptr = lowest_ever_allocated_;
    1468     1149637 :     } while ((low < ptr) &&
    1469             :              !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
    1470      965180 :     do {
    1471      965180 :       ptr = highest_ever_allocated_;
    1472     1215587 :     } while ((high > ptr) &&
    1473             :              !highest_ever_allocated_.compare_exchange_weak(ptr, high));
    1474      965180 :   }
    1475             : 
    1476             :   void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
    1477             :     DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
    1478             :     DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
    1479             :     executable_memory_.insert(chunk);
    1480             :   }
    1481             : 
    1482      131712 :   void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
    1483             :     DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
    1484             :     executable_memory_.erase(chunk);
    1485      131712 :     chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
    1486      131712 :   }
    1487             : 
    1488             :   Isolate* isolate_;
    1489             : 
    1490             :   // This object controls virtual space reserved for V8 heap instance.
    1491             :   // Depending on the configuration it may contain the following:
     1492             :   // - no reservation (on 32-bit architectures)
     1493             :   // - code range reservation used by the bounded code page allocator (on
     1494             :   //   64-bit architectures without pointer compression in the V8 heap)
     1495             :   // - data + code range reservation (on 64-bit architectures with pointer
     1496             :   //   compression in the V8 heap)
    1497             :   VirtualMemory heap_reservation_;
    1498             : 
    1499             :   // Page allocator used for allocating data pages. Depending on the
    1500             :   // configuration it may be a page allocator instance provided by v8::Platform
    1501             :   // or a BoundedPageAllocator (when pointer compression is enabled).
    1502             :   v8::PageAllocator* data_page_allocator_;
    1503             : 
    1504             :   // Page allocator used for allocating code pages. Depending on the
    1505             :   // configuration it may be a page allocator instance provided by v8::Platform
    1506             :   // or a BoundedPageAllocator (when pointer compression is enabled or
    1507             :   // on those 64-bit architectures where pc-relative 32-bit displacement
    1508             :   // can be used for call and jump instructions).
    1509             :   v8::PageAllocator* code_page_allocator_;
    1510             : 
     1511             :   // A part of the |heap_reservation_| that may contain executable code,
     1512             :   // including a reserved page with read-write access at the beginning.
     1513             :   // See details below.
    1514             :   base::AddressRegion code_range_;
    1515             : 
     1516             :   // This unique pointer owns the instance of the bounded code allocator
     1517             :   // that controls executable page allocation. It does not control the
     1518             :   // optional page at the beginning of the |code_range_|.
     1519             :   // Summarizing the above, the following conditions hold:
    1520             :   // 1) |heap_reservation_| >= |code_range_|
    1521             :   // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
    1522             :   // 3) |heap_reservation_| is AllocatePageSize()-aligned
    1523             :   // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
    1524             :   // 5) |code_range_| is CommitPageSize()-aligned
    1525             :   std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
    1526             : 
    1527             :   // Maximum space size in bytes.
    1528             :   size_t capacity_;
    1529             : 
    1530             :   // Allocated space size in bytes.
    1531             :   std::atomic<size_t> size_;
    1532             :   // Allocated executable space size in bytes.
    1533             :   std::atomic<size_t> size_executable_;
    1534             : 
    1535             :   // We keep the lowest and highest addresses allocated as a quick way
    1536             :   // of determining that pointers are outside the heap. The estimate is
    1537             :   // conservative, i.e. not all addresses in 'allocated' space are allocated
    1538             :   // to our heap. The range is [lowest, highest[, inclusive on the low end
    1539             :   // and exclusive on the high end.
    1540             :   std::atomic<Address> lowest_ever_allocated_;
    1541             :   std::atomic<Address> highest_ever_allocated_;
    1542             : 
    1543             :   VirtualMemory last_chunk_;
    1544             :   Unmapper unmapper_;
    1545             : 
    1546             :   // Data structure to remember allocated executable memory chunks.
    1547             :   std::unordered_set<MemoryChunk*> executable_memory_;
    1548             : 
    1549             :   friend class heap::TestCodePageAllocatorScope;
    1550             : 
    1551             :   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
    1552             : };
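                      : // A minimal usage sketch (editor's addition): a paged space might allocate
                      : // and release its pages as below; |allocator|, |space|, and AreaSize() are
                      : // assumptions, while the template arguments match the instantiations that
                      : // follow.
                      : //
                      : //   Page* page = allocator->AllocatePage<MemoryAllocator::kRegular>(
                      : //       space->AreaSize(), space, NOT_EXECUTABLE);
                      : //   if (page == nullptr) { /* capacity reached: trigger GC or fail */ }
                      : //   // ... when the page dies:
                      : //   allocator->Free<MemoryAllocator::kPreFreeAndQueue>(page);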
    1553             : 
    1554             : extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    1555             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    1556             :         size_t size, PagedSpace* owner, Executability executable);
    1557             : extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    1558             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    1559             :         size_t size, SemiSpace* owner, Executability executable);
    1560             : extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    1561             :     Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    1562             :         size_t size, SemiSpace* owner, Executability executable);
    1563             : 
    1564             : extern template EXPORT_TEMPLATE_DECLARE(
    1565             :     V8_EXPORT_PRIVATE) void MemoryAllocator::
    1566             :     Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
    1567             : extern template EXPORT_TEMPLATE_DECLARE(
    1568             :     V8_EXPORT_PRIVATE) void MemoryAllocator::
    1569             :     Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
    1570             : extern template EXPORT_TEMPLATE_DECLARE(
    1571             :     V8_EXPORT_PRIVATE) void MemoryAllocator::
    1572             :     Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
    1573             : extern template EXPORT_TEMPLATE_DECLARE(
    1574             :     V8_EXPORT_PRIVATE) void MemoryAllocator::
    1575             :     Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
    1576             : 
    1577             : // -----------------------------------------------------------------------------
     1578             : // Interface for heap object iterators, to be implemented by all object-space
     1579             : // object iterators.
    1580             : 
    1581       63454 : class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
    1582             :  public:
    1583       63448 :   virtual ~ObjectIterator() = default;
    1584             :   virtual HeapObject Next() = 0;
    1585             : };
    1586             : 
    1587             : template <class PAGE_TYPE>
    1588             : class PageIteratorImpl
    1589             :     : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
    1590             :  public:
    1591       84189 :   explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
    1592             :   PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
    1593             :   PAGE_TYPE* operator*() { return p_; }
    1594             :   bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    1595       89383 :     return rhs.p_ == p_;
    1596             :   }
    1597             :   bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    1598     1780499 :     return rhs.p_ != p_;
    1599             :   }
    1600             :   inline PageIteratorImpl<PAGE_TYPE>& operator++();
    1601             :   inline PageIteratorImpl<PAGE_TYPE> operator++(int);
    1602             : 
    1603             :  private:
    1604             :   PAGE_TYPE* p_;
    1605             : };
    1606             : 
    1607             : using PageIterator = PageIteratorImpl<Page>;
    1608             : using LargePageIterator = PageIteratorImpl<LargePage>;
    1609             : 
    1610             : class PageRange {
    1611             :  public:
    1612             :   using iterator = PageIterator;
    1613       31954 :   PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
    1614             :   explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
    1615             :   inline PageRange(Address start, Address limit);
    1616             : 
    1617             :   iterator begin() { return iterator(begin_); }
    1618             :   iterator end() { return iterator(end_); }
    1619             : 
    1620             :  private:
    1621             :   Page* begin_;
    1622             :   Page* end_;
    1623             : };
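                      : // A minimal usage sketch (editor's addition): PageRange adapts a
                      : // [begin, end) page sequence to range-based for loops. Assuming the page
                      : // list terminates in nullptr, all pages from |first| onwards can be
                      : // visited as:
                      : //
                      : //   for (Page* page : PageRange(first, nullptr)) {
                      : //     // ... visit |page| ...
                      : //   }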
    1624             : 
    1625             : // -----------------------------------------------------------------------------
    1626             : // Heap object iterator in new/old/map spaces.
    1627             : //
    1628             : // A HeapObjectIterator iterates objects from the bottom of the given space
    1629             : // to its top or from the bottom of the given page to its top.
    1630             : //
     1631             : // If objects are allocated in the page during iteration, the iterator may
     1632             : // or may not visit those objects. The caller must create a new iterator
     1633             : // in order to be sure to visit them.
    1634       95396 : class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
    1635             :  public:
    1636             :   // Creates a new object iterator in a given space.
    1637             :   explicit HeapObjectIterator(PagedSpace* space);
    1638             :   explicit HeapObjectIterator(Page* page);
    1639             : 
    1640             :   // Advance to the next object, skipping free spaces and other fillers and
    1641             :   // skipping the special garbage section of which there is one per space.
     1642             :   // Returns a null object when the iteration has ended.
    1643             :   inline HeapObject Next() override;
    1644             : 
    1645             :  private:
    1646             :   // Fast (inlined) path of next().
    1647             :   inline HeapObject FromCurrentPage();
    1648             : 
    1649             :   // Slow path of next(), goes into the next page.  Returns false if the
    1650             :   // iteration has ended.
    1651             :   bool AdvanceToNextPage();
    1652             : 
    1653             :   Address cur_addr_;  // Current iteration point.
    1654             :   Address cur_end_;   // End iteration point.
    1655             :   PagedSpace* space_;
    1656             :   PageRange page_range_;
    1657             :   PageRange::iterator current_page_;
    1658             : };
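                      : // A minimal usage sketch (editor's addition): iterate until Next() yields
                      : // a null object; |space| is hypothetical.
                      : //
                      : //   HeapObjectIterator it(space);
                      : //   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
                      : //     // ... visit |obj| ...
                      : //   }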
    1659             : 
    1660             : 
    1661             : // -----------------------------------------------------------------------------
     1662             : // A space has a circular list of pages. The next page can be accessed via
     1663             : // a Page::next_page() call.
    1664             : 
    1665             : // An abstraction of allocation and relocation pointers in a page-structured
    1666             : // space.
    1667             : class LinearAllocationArea {
    1668             :  public:
    1669      766088 :   LinearAllocationArea() : top_(kNullAddress), limit_(kNullAddress) {}
    1670      406440 :   LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}
    1671             : 
    1672             :   void Reset(Address top, Address limit) {
    1673             :     set_top(top);
    1674             :     set_limit(limit);
    1675             :   }
    1676             : 
    1677             :   V8_INLINE void set_top(Address top) {
    1678             :     SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
    1679   528633127 :     top_ = top;
    1680             :   }
    1681             : 
    1682             :   V8_INLINE Address top() const {
    1683             :     SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
    1684  1099543057 :     return top_;
    1685             :   }
    1686             : 
    1687      369114 :   Address* top_address() { return &top_; }
    1688             : 
    1689     4136407 :   V8_INLINE void set_limit(Address limit) { limit_ = limit; }
    1690             : 
    1691   598092592 :   V8_INLINE Address limit() const { return limit_; }
    1692             : 
    1693      358666 :   Address* limit_address() { return &limit_; }
    1694             : 
    1695             : #ifdef DEBUG
    1696             :   bool VerifyPagedAllocation() {
    1697             :     return (Page::FromAllocationAreaAddress(top_) ==
    1698             :             Page::FromAllocationAreaAddress(limit_)) &&
    1699             :            (top_ <= limit_);
    1700             :   }
    1701             : #endif
    1702             : 
    1703             :  private:
    1704             :   // Current allocation top.
    1705             :   Address top_;
    1706             :   // Current allocation limit.
    1707             :   Address limit_;
    1708             : };
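                      : // A minimal sketch (editor's addition) of bump-pointer allocation over a
                      : // LinearAllocationArea; alignment, filler objects, and allocation observer
                      : // hooks are omitted:
                      : //
                      : //   Address AllocateRaw(LinearAllocationArea* lab, int size_in_bytes) {
                      : //     Address top = lab->top();
                      : //     if (lab->limit() - top < static_cast<size_t>(size_in_bytes)) {
                      : //       return kNullAddress;  // Area exhausted; refill from the free list.
                      : //     }
                      : //     lab->set_top(top + size_in_bytes);
                      : //     return top;
                      : //   }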
    1709             : 
    1710             : 
    1711             : // An abstraction of the accounting statistics of a page-structured space.
    1712             : //
    1713             : // The stats are only set by functions that ensure they stay balanced. These
    1714             : // functions increase or decrease one of the non-capacity stats in conjunction
    1715             : // with capacity, or else they always balance increases and decreases to the
    1716             : // non-capacity stats.
    1717             : class AllocationStats {
    1718             :  public:
    1719             :   AllocationStats() { Clear(); }
    1720             : 
    1721             :   // Zero out all the allocation statistics (i.e., no capacity).
    1722             :   void Clear() {
    1723             :     capacity_ = 0;
    1724     1514054 :     max_capacity_ = 0;
    1725             :     ClearSize();
    1726             :   }
    1727             : 
    1728             :   void ClearSize() {
    1729     1720592 :     size_ = 0;
    1730             : #ifdef DEBUG
    1731             :     allocated_on_page_.clear();
    1732             : #endif
    1733             :   }
    1734             : 
    1735             :   // Accessors for the allocation statistics.
    1736             :   size_t Capacity() { return capacity_; }
    1737             :   size_t MaxCapacity() { return max_capacity_; }
    1738             :   size_t Size() { return size_; }
    1739             : #ifdef DEBUG
    1740             :   size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
    1741             : #endif
    1742             : 
    1743             :   void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    1744             :     DCHECK_GE(size_ + bytes, size_);
    1745     2416356 :     size_ += bytes;
    1746             : #ifdef DEBUG
    1747             :     allocated_on_page_[page] += bytes;
    1748             : #endif
    1749             :   }
    1750             : 
    1751             :   void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    1752             :     DCHECK_GE(size_, bytes);
    1753     1722737 :     size_ -= bytes;
    1754             : #ifdef DEBUG
    1755             :     DCHECK_GE(allocated_on_page_[page], bytes);
    1756             :     allocated_on_page_[page] -= bytes;
    1757             : #endif
    1758             :   }
    1759             : 
    1760             :   void DecreaseCapacity(size_t bytes) {
    1761             :     DCHECK_GE(capacity_, bytes);
    1762             :     DCHECK_GE(capacity_ - bytes, size_);
    1763             :     capacity_ -= bytes;
    1764             :   }
    1765             : 
    1766      638494 :   void IncreaseCapacity(size_t bytes) {
    1767             :     DCHECK_GE(capacity_ + bytes, capacity_);
    1768             :     capacity_ += bytes;
    1769      638494 :     if (capacity_ > max_capacity_) {
    1770      561173 :       max_capacity_ = capacity_;
    1771             :     }
    1772      638494 :   }
    1773             : 
    1774             :  private:
    1775             :   // |capacity_|: The number of object-area bytes (i.e., not including page
    1776             :   // bookkeeping structures) currently in the space.
    1777             :   // During evacuation capacity of the main spaces is accessed from multiple
    1778             :   // threads to check the old generation hard limit.
    1779             :   std::atomic<size_t> capacity_;
    1780             : 
    1781             :   // |max_capacity_|: The maximum capacity ever observed.
    1782             :   size_t max_capacity_;
    1783             : 
    1784             :   // |size_|: The number of allocated bytes.
    1785             :   size_t size_;
    1786             : 
    1787             : #ifdef DEBUG
    1788             :   std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
    1789             : #endif
    1790             : };
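                      : // A minimal sketch (editor's addition) of the balancing contract described
                      : // above: capacity moves with page area, size moves with object allocation,
                      : // and size never exceeds capacity; |stats| and |page| are hypothetical.
                      : //
                      : //   stats.IncreaseCapacity(page->area_size());   // page added to the space
                      : //   stats.IncreaseAllocatedBytes(bytes, page);   // objects allocated
                      : //   stats.DecreaseAllocatedBytes(bytes, page);   // objects swept
                      : //   stats.DecreaseCapacity(page->area_size());   // page removed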
    1791             : 
    1792             : // A free list maintaining free blocks of memory. The free list is organized in
    1793             : // a way to encourage objects allocated around the same time to be near each
    1794             : // other. The normal way to allocate is intended to be by bumping a 'top'
    1795             : // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
    1796             : // find a new space to allocate from. This is done with the free list, which is
    1797             : // divided up into rough categories to cut down on waste. Having finer
    1798             : // categories would scatter allocation more.
    1799             : 
    1800             : // The free list is organized in categories as follows:
    1801             : // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
    1802             : //   allocation, when categories >= small do not have entries anymore.
    1803             : // 11-31 words (tiny): The tiny blocks are only used for allocation, when
    1804             : //   categories >= small do not have entries anymore.
    1805             : // 32-255 words (small): Used for allocating free space between 1-31 words in
    1806             : //   size.
    1807             : // 256-2047 words (medium): Used for allocating free space between 32-255 words
    1808             : //   in size.
     1809             : // 2048-8191 words (large): Used for allocating free space between 256-2047
     1810             : //   words in size.
     1811             : // At least 8192 words (huge): This list is for objects of 2048 words or
     1812             : //   larger. Empty pages are also added to this list.
    1813             : class FreeList {
    1814             :  public:
    1815             :   // This method returns how much memory can be allocated after freeing
    1816             :   // maximum_freed memory.
    1817             :   static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
    1818      472201 :     if (maximum_freed <= kTiniestListMax) {
    1819             :       // Since we are not iterating over all list entries, we cannot guarantee
    1820             :       // that we can find the maximum freed block in that free list.
    1821             :       return 0;
    1822      440696 :     } else if (maximum_freed <= kTinyListMax) {
    1823             :       return kTinyAllocationMax;
    1824      427664 :     } else if (maximum_freed <= kSmallListMax) {
    1825             :       return kSmallAllocationMax;
    1826      366854 :     } else if (maximum_freed <= kMediumListMax) {
    1827             :       return kMediumAllocationMax;
    1828      231461 :     } else if (maximum_freed <= kLargeListMax) {
    1829             :       return kLargeAllocationMax;
    1830             :     }
    1831             :     return maximum_freed;
    1832             :   }
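                      :   // A worked example (editor's addition): assuming kTaggedSize == 8, the
                      :   // list bounds below come to 80 (tiniest), 248 (tiny), 2040 (small),
                      :   // 16376 (medium), and 65528 (large) bytes. GuaranteedAllocatable(300)
                      :   // returns kSmallAllocationMax == kTinyListMax == 248: a 300-byte block
                      :   // lands on the small list, and since every small-list node holds at
                      :   // least 32 words, any request up to 248 bytes is sure to fit.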
    1833             : 
    1834             :   static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
    1835    18296075 :     if (size_in_bytes <= kTiniestListMax) {
    1836             :       return kTiniest;
    1837     9338386 :     } else if (size_in_bytes <= kTinyListMax) {
    1838             :       return kTiny;
    1839     4449734 :     } else if (size_in_bytes <= kSmallListMax) {
    1840             :       return kSmall;
    1841     1842962 :     } else if (size_in_bytes <= kMediumListMax) {
    1842             :       return kMedium;
    1843     1342058 :     } else if (size_in_bytes <= kLargeListMax) {
    1844             :       return kLarge;
    1845             :     }
    1846             :     return kHuge;
    1847             :   }
    1848             : 
    1849             :   FreeList();
    1850             : 
     1851             :   // Adds a node to the free list. The block of size {size_in_bytes} starting
     1852             :   // at {start} is placed on the free list. The return value is the number of
     1853             :   // bytes that were not added to the free list because the freed memory block
     1854             :   // was too small. Bookkeeping information will be written to the block, i.e.,
     1855             :   // its contents will be destroyed. The start address should be word aligned,
     1856             :   // and the size should be a non-zero multiple of the word size.
    1857             :   size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
    1858             : 
     1859             :   // Allocates a free space node from the free list of at least size_in_bytes
     1860             :   // bytes. Returns the actual node size in node_size, which can be larger
     1861             :   // than size_in_bytes. This method returns null if the allocation request
     1862             :   // cannot be handled by the free list.
    1863             :   V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
    1864             :                                            size_t* node_size);
    1865             : 
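                      :   // A minimal usage sketch (editor's addition): the sweeper returns a swept
                      :   // range to the list and the allocator later takes a node back out; |mode|
                      :   // stands in for a FreeMode value declared elsewhere.
                      :   //
                      :   //   size_t wasted = free_list->Free(start, size_in_bytes, mode);
                      :   //   size_t node_size = 0;
                      :   //   FreeSpace node = free_list->Allocate(needed_bytes, &node_size);
                      :   //   if (!node.is_null()) { /* carve the allocation out of |node| */ }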
    1866             :   // Clear the free list.
    1867             :   void Reset();
    1868             : 
    1869      711242 :   void ResetStats() {
    1870             :     wasted_bytes_ = 0;
    1871             :     ForAllFreeListCategories(
    1872      206538 :         [](FreeListCategory* category) { category->ResetStats(); });
    1873      711242 :   }
    1874             : 
    1875             :   // Return the number of bytes available on the free list.
    1876             :   size_t Available() {
    1877             :     size_t available = 0;
    1878             :     ForAllFreeListCategories([&available](FreeListCategory* category) {
    1879     2622967 :       available += category->available();
    1880             :     });
    1881             :     return available;
    1882             :   }
    1883             : 
    1884             :   bool IsEmpty() {
    1885             :     bool empty = true;
    1886             :     ForAllFreeListCategories([&empty](FreeListCategory* category) {
    1887             :       if (!category->is_empty()) empty = false;
    1888             :     });
    1889             :     return empty;
    1890             :   }
    1891             : 
    1892             :   // Used after booting the VM.
    1893             :   void RepairLists(Heap* heap);
    1894             : 
    1895             :   V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
    1896             :   bool ContainsPageFreeListItems(Page* page);
    1897             : 
    1898             :   size_t wasted_bytes() { return wasted_bytes_; }
    1899             : 
    1900             :   template <typename Callback>
    1901             :   void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
    1902    15282414 :     FreeListCategory* current = categories_[type];
    1903    18966234 :     while (current != nullptr) {
    1904             :       FreeListCategory* next = current->next();
    1905             :       callback(current);
    1906             :       current = next;
    1907             :     }
    1908             :   }
    1909             : 
    1910             :   template <typename Callback>
    1911      268924 :   void ForAllFreeListCategories(Callback callback) {
    1912    33111898 :     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    1913    15282414 :       ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
    1914             :     }
    1915      268924 :   }
    1916             : 
    1917             :   bool AddCategory(FreeListCategory* category);
    1918             :   V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
    1919             :   void PrintCategories(FreeListCategoryType type);
    1920             : 
    1921             :   // Returns a page containing an entry for a given type, or nullptr otherwise.
    1922             :   inline Page* GetPageForCategoryType(FreeListCategoryType type);
    1923             : 
    1924             : #ifdef DEBUG
    1925             :   size_t SumFreeLists();
    1926             :   bool IsVeryLong();
    1927             : #endif
    1928             : 
    1929             :  private:
    1930             :   class FreeListCategoryIterator {
    1931             :    public:
    1932             :     FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
    1933     4532991 :         : current_(free_list->categories_[type]) {}
    1934             : 
    1935             :     bool HasNext() { return current_ != nullptr; }
    1936             : 
    1937             :     FreeListCategory* Next() {
    1938             :       DCHECK(HasNext());
    1939             :       FreeListCategory* tmp = current_;
    1940             :       current_ = current_->next();
    1941             :       return tmp;
    1942             :     }
    1943             : 
    1944             :    private:
    1945             :     FreeListCategory* current_;
    1946             :   };
    1947             : 
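                     :   // Illustrative only: a minimal sketch of walking one category list with
                     :   // the helper above (assumes a FreeList member function as context):
                     :   //
                     :   //   FreeListCategoryIterator it(this, kSmall);
                     :   //   while (it.HasNext()) {
                     :   //     FreeListCategory* current = it.Next();
                     :   //     // ... inspect or evict |current| ...
                     :   //   }
                     : 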
    1948             :   // The size range of blocks, in bytes.
    1949             :   static const size_t kMinBlockSize = 3 * kTaggedSize;
    1950             : 
    1951             :   // This is a conservative upper bound. The actual maximum block size takes
    1952             :   // padding and alignment of data and code pages into account.
    1953             :   static const size_t kMaxBlockSize = Page::kPageSize;
    1954             : 
    1955             :   static const size_t kTiniestListMax = 0xa * kTaggedSize;
    1956             :   static const size_t kTinyListMax = 0x1f * kTaggedSize;
    1957             :   static const size_t kSmallListMax = 0xff * kTaggedSize;
    1958             :   static const size_t kMediumListMax = 0x7ff * kTaggedSize;
    1959             :   static const size_t kLargeListMax = 0x1fff * kTaggedSize;
    1960             :   static const size_t kTinyAllocationMax = kTiniestListMax;
    1961             :   static const size_t kSmallAllocationMax = kTinyListMax;
    1962             :   static const size_t kMediumAllocationMax = kSmallListMax;
    1963             :   static const size_t kLargeAllocationMax = kMediumListMax;
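                     : 
                     :   // For illustration only: on a 64-bit build (assuming kTaggedSize == 8),
                     :   // these bounds work out to kMinBlockSize = 24 bytes, kTiniestListMax = 80,
                     :   // kTinyListMax = 248, kSmallListMax = 2040, kMediumListMax = 16376, and
                     :   // kLargeListMax = 65528 bytes.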
    1964             : 
    1965             :   // Walks all available categories for a given |type| and tries to retrieve
    1966             :   // a node. Returns nullptr if the category is empty.
    1967             :   FreeSpace FindNodeIn(FreeListCategoryType type, size_t minimum_size,
    1968             :                        size_t* node_size);
    1969             : 
    1970             :   // Tries to retrieve a node from the first category in a given |type|.
    1971             :   // Returns nullptr if the category is empty or the top entry is smaller
    1972             :   // than minimum_size.
    1973             :   FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
    1974             :                           size_t* node_size);
    1975             : 
    1976             :   // Searches a given |type| for a node of at least |minimum_size|.
    1977             :   FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
    1978             :                                 size_t minimum_size);
    1979             : 
    1980             :   // The tiny categories are not used for fast allocation.
    1981             :   FreeListCategoryType SelectFastAllocationFreeListCategoryType(
    1982             :       size_t size_in_bytes) {
    1983     1934274 :     if (size_in_bytes <= kSmallAllocationMax) {
    1984             :       return kSmall;
    1985      650287 :     } else if (size_in_bytes <= kMediumAllocationMax) {
    1986             :       return kMedium;
    1987      527369 :     } else if (size_in_bytes <= kLargeAllocationMax) {
    1988             :       return kLarge;
    1989             :     }
    1990             :     return kHuge;
    1991             :   }
    1992             : 
    1993             :   FreeListCategory* top(FreeListCategoryType type) const {
    1994      211136 :     return categories_[type];
    1995             :   }
    1996             : 
    1997             :   std::atomic<size_t> wasted_bytes_;
    1998             :   FreeListCategory* categories_[kNumberOfCategories];
    1999             : 
    2000             :   friend class FreeListCategory;
    2001             : };
    2002             : 
    2003             : // LocalAllocationBuffer represents a linear allocation area that is created
    2004             : // from a given {AllocationResult} and can be used to allocate memory without
    2005             : // synchronization.
    2006             : //
    2007             : // The buffer is properly closed upon destruction and reassignment.
    2008             : // Example:
    2009             : //   {
    2010             : //     AllocationResult result = ...;
    2011             : //     LocalAllocationBuffer a(heap, result, size);
    2012             : //     LocalAllocationBuffer b = a;
    2013             : //     CHECK(!a.IsValid());
    2014             : //     CHECK(b.IsValid());
    2015             : //     // {a} is invalid now and cannot be used for further allocations.
    2016             : //   }
    2017             : //   // Since {b} went out of scope, the LAB is closed, resulting in creating a
    2018             : //   // filler object for the remaining area.
    2019             : class LocalAllocationBuffer {
    2020             :  public:
    2021             :   // Indicates that a buffer cannot be used for allocations anymore. Can result
    2022             :   // from either reassigning a buffer, or trying to construct it from an
    2023             :   // invalid {AllocationResult}.
    2024             :   static LocalAllocationBuffer InvalidBuffer() {
    2025             :     return LocalAllocationBuffer(
    2026      207716 :         nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
    2027             :   }
    2028             : 
    2029             :   // Creates a new LAB from a given {AllocationResult}. Results in
    2030             :   // InvalidBuffer if the result indicates a retry.
    2031             :   static inline LocalAllocationBuffer FromResult(Heap* heap,
    2032             :                                                  AllocationResult result,
    2033             :                                                  intptr_t size);
    2034             : 
    2035      605362 :   ~LocalAllocationBuffer() { Close(); }
    2036             : 
    2037             :   // Convert to C++11 move-semantics once allowed by the style guide.
    2038             :   LocalAllocationBuffer(const LocalAllocationBuffer& other) V8_NOEXCEPT;
    2039             :   LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other)
    2040             :       V8_NOEXCEPT;
    2041             : 
    2042             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
    2043             :       int size_in_bytes, AllocationAlignment alignment);
    2044             : 
    2045          10 :   inline bool IsValid() { return allocation_info_.top() != kNullAddress; }
    2046             : 
    2047             :   // Try to merge LABs, which is only possible when they are adjacent in memory.
    2048             :   // Returns true if the merge was successful, false otherwise.
    2049             :   inline bool TryMerge(LocalAllocationBuffer* other);
    2050             : 
    2051             :   inline bool TryFreeLast(HeapObject object, int object_size);
    2052             : 
    2053             :   // Close a LAB, effectively invalidating it. Returns the unused area.
    2054             :   V8_EXPORT_PRIVATE LinearAllocationArea Close();
    2055             : 
    2056             :  private:
    2057             :   V8_EXPORT_PRIVATE LocalAllocationBuffer(
    2058             :       Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT;
    2059             : 
    2060             :   Heap* heap_;
    2061             :   LinearAllocationArea allocation_info_;
    2062             : };
    2063             : 
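                     : // A hedged usage sketch for LocalAllocationBuffer; |heap| and |result| come
                     : // from the caller, while |kLabSize| and |object_size| are illustrative:
                     : //
                     : //   LocalAllocationBuffer lab =
                     : //       LocalAllocationBuffer::FromResult(heap, result, kLabSize);
                     : //   if (lab.IsValid()) {
                     : //     AllocationResult r =
                     : //         lab.AllocateRawAligned(object_size, kWordAligned);
                     : //     ...
                     : //   }
                     : 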
    2064      567077 : class SpaceWithLinearArea : public Space {
    2065             :  public:
    2066             :   SpaceWithLinearArea(Heap* heap, AllocationSpace id)
    2067     1134303 :       : Space(heap, id), top_on_previous_step_(0) {
    2068             :     allocation_info_.Reset(kNullAddress, kNullAddress);
    2069             :   }
    2070             : 
    2071             :   virtual bool SupportsInlineAllocation() = 0;
    2072             : 
    2073             :   // Returns the allocation pointer in this space.
    2074             :   Address top() { return allocation_info_.top(); }
    2075             :   Address limit() { return allocation_info_.limit(); }
    2076             : 
    2077             :   // The allocation top address.
    2078             :   Address* allocation_top_address() { return allocation_info_.top_address(); }
    2079             : 
    2080             :   // The allocation limit address.
    2081             :   Address* allocation_limit_address() {
    2082             :     return allocation_info_.limit_address();
    2083             :   }
    2084             : 
    2085             :   V8_EXPORT_PRIVATE void AddAllocationObserver(
    2086             :       AllocationObserver* observer) override;
    2087             :   V8_EXPORT_PRIVATE void RemoveAllocationObserver(
    2088             :       AllocationObserver* observer) override;
    2089             :   V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
    2090             :   V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
    2091             : 
    2092             :   // When allocation observers are active we may use a lower limit to allow the
    2093             :   // observers to 'interrupt' earlier than the natural limit. Given a linear
    2094             :   // area bounded by [start, end), this function computes the limit to use to
    2095             :   // allow proper observation based on existing observers. min_size specifies
    2096             :   // the minimum size that the limited area should have.
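                     :   // As an illustrative sketch (not necessarily the exact implementation),
                     :   // with active observers the returned limit is roughly
                     :   //   min(end, start + min_size + next_observer_step)
                     :   // so the bump pointer reaches the lowered limit, and thus triggers a
                     :   // step, before reaching the natural end of the linear area.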
    2097             :   Address ComputeLimit(Address start, Address end, size_t min_size);
    2098             :   V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
    2099             :       size_t min_size) = 0;
    2100             : 
    2101             :  protected:
    2102             :   // If we are doing inline allocation in steps, this method performs the 'step'
    2103             :   // operation. top is the memory address of the bump pointer at the last
    2104             :   // inline allocation (i.e. it determines the numbers of bytes actually
    2105             :   // inline allocation (i.e. it determines the number of bytes actually
    2106             :   // bump pointer where the next byte is going to be allocated from. top and
    2107             :   // top_for_next_step may be different when we cross a page boundary or reset
    2108             :   // the space.
    2109             :   // TODO(ofrobots): clarify the precise difference between this and
    2110             :   // Space::AllocationStep.
    2111             :   void InlineAllocationStep(Address top, Address top_for_next_step,
    2112             :                             Address soon_object, size_t size);
    2113             :   V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
    2114             : 
    2115             :   // TODO(ofrobots): make these private after refactoring is complete.
    2116             :   LinearAllocationArea allocation_info_;
    2117             :   Address top_on_previous_step_;
    2118             : };
    2119             : 
    2120             : class V8_EXPORT_PRIVATE PagedSpace
    2121             :     : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
    2122             :  public:
    2123             :   using iterator = PageIterator;
    2124             : 
    2125             :   static const size_t kCompactionMemoryWanted = 500 * KB;
    2126             : 
    2127             :   // Creates a space with an id.
    2128             :   PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
    2129             : 
    2130     1009290 :   ~PagedSpace() override { TearDown(); }
    2131             : 
    2132             :   // Checks whether an object/address is in this space.
    2133             :   inline bool Contains(Address a);
    2134             :   inline bool Contains(Object o);
    2135             :   bool ContainsSlow(Address addr);
    2136             : 
    2137             :   // Does the space need executable memory?
    2138             :   Executability executable() { return executable_; }
    2139             : 
    2140             :   // Prepares for a mark-compact GC.
    2141             :   void PrepareForMarkCompact();
    2142             : 
    2143             :   // Current capacity without growing (Size() + Available()).
    2144             :   size_t Capacity() { return accounting_stats_.Capacity(); }
    2145             : 
    2146             :   // Approximate amount of physical memory committed for this space.
    2147             :   size_t CommittedPhysicalMemory() override;
    2148             : 
    2149             :   void ResetFreeListStatistics();
    2150             : 
    2151             :   // Sets the capacity, the available space and the wasted space to zero.
    2152             :   // The stats are rebuilt during sweeping by adding each page to the
    2153             :   // capacity and the size when it is encountered.  As free spaces are
    2154             :   // discovered during the sweeping they are subtracted from the size and added
    2155             :   // to the available and wasted totals.
    2156      206538 :   void ClearStats() {
    2157             :     accounting_stats_.ClearSize();
    2158      206538 :     free_list_.ResetStats();
    2159      206538 :     ResetFreeListStatistics();
    2160      206538 :   }
    2161             : 
    2162             :   // Available bytes without growing.  These are the bytes on the free list.
    2163             :   // The bytes in the linear allocation area are not included in this total
    2164             :   // because updating the stats would slow down allocation.  New pages are
    2165             :   // immediately added to the free list so they show up here.
    2166     1711322 :   size_t Available() override { return free_list_.Available(); }
    2167             : 
    2168             :   // Allocated bytes in this space.  Garbage bytes that were not found due to
    2169             :   // concurrent sweeping are counted as being allocated!  The bytes in the
    2170             :   // current linear allocation area (between top and limit) are also counted
    2171             :   // here.
    2172    10641970 :   size_t Size() override { return accounting_stats_.Size(); }
    2173             : 
    2174             :   // Like Size(), but the bytes in lazily swept pages are estimated and the
    2175             :   // bytes in the current linear allocation area are not included.
    2176             :   size_t SizeOfObjects() override;
    2177             : 
    2178             :   // Wasted bytes in this space.  These are just the bytes that were thrown away
    2179             :   // due to being too small to use for allocation.
    2180     1139088 :   virtual size_t Waste() { return free_list_.wasted_bytes(); }
    2181             : 
    2182             :   enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
    2183             : 
    2184             :   // Allocate the requested number of bytes in the space if possible, return a
    2185             :   // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
    2186             :   // to be manually updated later.
    2187             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
    2188             :       int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
    2189             : 
    2190             :   // Allocate the requested number of bytes in the space double aligned if
    2191             :   // possible, return a failure object if not.
    2192             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
    2193             :       int size_in_bytes, AllocationAlignment alignment);
    2194             : 
    2195             :   // Allocate the requested number of bytes in the space and consider allocation
    2196             :   // alignment if needed.
    2197             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
    2198             :       int size_in_bytes, AllocationAlignment alignment);
    2199             : 
    2200    18562520 :   size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
    2201    18562520 :     if (size_in_bytes == 0) return 0;
    2202             :     heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
    2203    18199456 :                                  ClearRecordedSlots::kNo);
    2204    18198464 :     if (mode == SpaceAccountingMode::kSpaceAccounted) {
    2205     1538688 :       return AccountedFree(start, size_in_bytes);
    2206             :     } else {
    2207    16652223 :       return UnaccountedFree(start, size_in_bytes);
    2208             :     }
    2209             :   }
    2210             : 
    2211             :   // Give a block of memory to the space's free list.  It might be added to
    2212             :   // the free list or accounted as waste.
    2213             :   // AccountedFree additionally updates the accounting stats (kLinkCategory),
    2214             :   // while UnaccountedFree (kDoNotLinkCategory) leaves them untouched.
    2215             :   size_t AccountedFree(Address start, size_t size_in_bytes) {
    2216     1538701 :     size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
    2217             :     Page* page = Page::FromAddress(start);
    2218             :     accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
    2219             :     DCHECK_GE(size_in_bytes, wasted);
    2220     1538688 :     return size_in_bytes - wasted;
    2221             :   }
    2222             : 
    2223             :   size_t UnaccountedFree(Address start, size_t size_in_bytes) {
    2224    16659763 :     size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
    2225             :     DCHECK_GE(size_in_bytes, wasted);
    2226    16652223 :     return size_in_bytes - wasted;
    2227             :   }
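                     : 
                     :   // Hedged usage sketch: returning a swept range to this space's free list
                     :   // while keeping the accounting stats in sync (|start| and |size| are
                     :   // illustrative):
                     :   //
                     :   //   space->Free(start, size, SpaceAccountingMode::kSpaceAccounted);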
    2228             : 
    2229             :   inline bool TryFreeLast(HeapObject object, int object_size);
    2230             : 
    2231             :   void ResetFreeList();
    2232             : 
    2233             :   // Empty space linear allocation area, returning unused area to free list.
    2234             :   void FreeLinearAllocationArea();
    2235             : 
    2236             :   void MarkLinearAllocationAreaBlack();
    2237             :   void UnmarkLinearAllocationArea();
    2238             : 
    2239             :   void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    2240             :     accounting_stats_.DecreaseAllocatedBytes(bytes, page);
    2241             :   }
    2242             :   void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    2243             :     accounting_stats_.IncreaseAllocatedBytes(bytes, page);
    2244             :   }
    2245             :   void DecreaseCapacity(size_t bytes) {
    2246             :     accounting_stats_.DecreaseCapacity(bytes);
    2247             :   }
    2248             :   void IncreaseCapacity(size_t bytes) {
    2249      638495 :     accounting_stats_.IncreaseCapacity(bytes);
    2250             :   }
    2251             : 
    2252             :   void RefineAllocatedBytesAfterSweeping(Page* page);
    2253             : 
    2254             :   Page* InitializePage(MemoryChunk* chunk, Executability executable);
    2255             : 
    2256             :   void ReleasePage(Page* page);
    2257             : 
    2258             :   // Adds the page to this space and returns the number of bytes added to the
    2259             :   // free list of the space.
    2260             :   size_t AddPage(Page* page);
    2261             :   void RemovePage(Page* page);
    2262             :   // Remove a page if it has at least |size_in_bytes| bytes available that can
    2263             :   // be used for allocation.
    2264             :   Page* RemovePageSafe(int size_in_bytes);
    2265             : 
    2266             :   void SetReadable();
    2267             :   void SetReadAndExecutable();
    2268             :   void SetReadAndWritable();
    2269             : 
    2270      281836 :   void SetDefaultCodePermissions() {
    2271      281836 :     if (FLAG_jitless) {
    2272       15146 :       SetReadable();
    2273             :     } else {
    2274      266690 :       SetReadAndExecutable();
    2275             :     }
    2276      281837 :   }
    2277             : 
    2278             : #ifdef VERIFY_HEAP
    2279             :   // Verify integrity of this space.
    2280             :   virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
    2281             : 
    2282             :   void VerifyLiveBytes();
    2283             : 
    2284             :   // Overridden by subclasses to verify space-specific object
    2285             :   // properties (e.g., only maps or free-list nodes are in map space).
    2286             :   virtual void VerifyObject(HeapObject obj) {}
    2287             : #endif
    2288             : 
    2289             : #ifdef DEBUG
    2290             :   void VerifyCountersAfterSweeping();
    2291             :   void VerifyCountersBeforeConcurrentSweeping();
    2292             :   // Print meta info and objects in this space.
    2293             :   void Print() override;
    2294             : 
    2295             :   // Report code object related statistics
    2296             :   static void ReportCodeStatistics(Isolate* isolate);
    2297             :   static void ResetCodeStatistics(Isolate* isolate);
    2298             : #endif
    2299             : 
    2300             :   bool CanExpand(size_t size);
    2301             : 
    2302             :   // Returns the total number of pages in this space.
    2303             :   int CountTotalPages();
    2304             : 
    2305             :   // Return size of allocatable area on a page in this space.
    2306     2539399 :   inline int AreaSize() { return static_cast<int>(area_size_); }
    2307             : 
    2308   151050569 :   virtual bool is_local() { return false; }
    2309             : 
    2310             :   // Merges {other} into the current space. Note that this modifies {other},
    2311             :   // e.g., removes its bump pointer area and resets statistics.
    2312             :   void MergeCompactionSpace(CompactionSpace* other);
    2313             : 
    2314             :   // Refills the free list from the corresponding free list filled by the
    2315             :   // sweeper.
    2316             :   virtual void RefillFreeList();
    2317             : 
    2318     2817240 :   FreeList* free_list() { return &free_list_; }
    2319             : 
    2320     1269046 :   base::Mutex* mutex() { return &space_mutex_; }
    2321             : 
    2322             :   inline void UnlinkFreeListCategories(Page* page);
    2323             :   inline size_t RelinkFreeListCategories(Page* page);
    2324             : 
    2325             :   Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
    2326             : 
    2327             :   iterator begin() { return iterator(first_page()); }
    2328             :   iterator end() { return iterator(nullptr); }
    2329             : 
    2330             :   // Shrink immortal immovable pages of the space to be exactly the size needed
    2331             :   // using the high water mark.
    2332             :   void ShrinkImmortalImmovablePages();
    2333             : 
    2334             :   size_t ShrinkPageToHighWaterMark(Page* page);
    2335             : 
    2336             :   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
    2337             : 
    2338             :   void SetLinearAllocationArea(Address top, Address limit);
    2339             : 
    2340             :  private:
    2341             :   // Set space linear allocation area.
    2342             :   void SetTopAndLimit(Address top, Address limit) {
    2343             :     DCHECK(top == limit ||
    2344             :            Page::FromAddress(top) == Page::FromAddress(limit - 1));
    2345     2518859 :     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2346             :     allocation_info_.Reset(top, limit);
    2347             :   }
    2348             :   void DecreaseLimit(Address new_limit);
    2349             :   void UpdateInlineAllocationLimit(size_t min_size) override;
    2350    23944495 :   bool SupportsInlineAllocation() override {
    2351    23944495 :     return identity() == OLD_SPACE && !is_local();
    2352             :   }
    2353             : 
    2354             :  protected:
    2355             :   // PagedSpaces that should be included in snapshots have different, i.e.,
    2356             :   // smaller, initial pages.
    2357           0 :   virtual bool snapshotable() { return true; }
    2358             : 
    2359             :   bool HasPages() { return first_page() != nullptr; }
    2360             : 
    2361             :   // Cleans up the space, frees all pages in this space except those belonging
    2362             :   // to the initial chunk, uncommits addresses in the initial chunk.
    2363             :   void TearDown();
    2364             : 
    2365             :   // Expands the space by allocating a fixed number of pages. Returns false if
    2366             :   // it cannot allocate the requested number of pages from the OS, or if the
    2367             :   // hard heap size limit has been hit.
    2368             :   bool Expand();
    2369             : 
    2370             :   // Sets up a linear allocation area that fits the given number of bytes.
    2371             :   // Returns false if there is not enough space and the caller has to retry
    2372             :   // after collecting garbage.
    2373             :   inline bool EnsureLinearAllocationArea(int size_in_bytes);
    2374             :   // Allocates an object from the linear allocation area. Assumes that the
    2375             :   // linear allocation area is large enough to fit the object.
    2376             :   inline HeapObject AllocateLinearly(int size_in_bytes);
    2377             :   // Tries to allocate an aligned object from the linear allocation area.
    2378             :   // Returns nullptr if the linear allocation area does not fit the object.
    2379             :   // Otherwise, returns the object pointer and writes the allocation size
    2380             :   // (object size + alignment filler size) to the size_in_bytes.
    2381             :   inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
    2382             :                                                AllocationAlignment alignment);
    2383             : 
    2384             :   V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
    2385             :       size_t size_in_bytes);
    2386             : 
    2387             :   // If sweeping is still in progress try to sweep unswept pages. If that is
    2388             :   // not successful, wait for the sweeper threads and retry free-list
    2389             :   // allocation. Returns false if there is not enough space and the caller
    2390             :   // has to retry after collecting garbage.
    2391             :   V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
    2392             : 
    2393             :   // Slow path of AllocateRaw. This function is space-dependent. Returns false
    2394             :   // if there is not enough space and the caller has to retry after
    2395             :   // collecting garbage.
    2396             :   V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
    2397             :       int size_in_bytes);
    2398             : 
    2399             :   // Implementation of SlowRefillLinearAllocationArea. Returns false if there
    2400             :   // is not enough space and the caller has to retry after collecting garbage.
    2401             :   V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
    2402             :       int size_in_bytes);
    2403             : 
    2404             :   Executability executable_;
    2405             : 
    2406             :   size_t area_size_;
    2407             : 
    2408             :   // Accounting information for this space.
    2409             :   AllocationStats accounting_stats_;
    2410             : 
    2411             :   // The space's free list.
    2412             :   FreeList free_list_;
    2413             : 
    2414             :   // Mutex guarding any concurrent access to the space.
    2415             :   base::Mutex space_mutex_;
    2416             : 
    2417             :   friend class IncrementalMarking;
    2418             :   friend class MarkCompactCollector;
    2419             : 
    2420             :   // Used in cctest.
    2421             :   friend class heap::HeapTester;
    2422             : };
    2423             : 
    2424             : enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
    2425             : 
    2426             : // -----------------------------------------------------------------------------
    2427             : // SemiSpace in young generation
    2428             : //
    2429             : // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
    2430             : // The mark-compact collector uses the memory of the first page in the from
    2431             : // space as a marking stack when tracing live objects.
    2432       62432 : class SemiSpace : public Space {
    2433             :  public:
    2434             :   using iterator = PageIterator;
    2435             : 
    2436             :   static void Swap(SemiSpace* from, SemiSpace* to);
    2437             : 
    2438             :   SemiSpace(Heap* heap, SemiSpaceId semispace)
    2439             :       : Space(heap, NEW_SPACE),
    2440             :         current_capacity_(0),
    2441             :         maximum_capacity_(0),
    2442             :         minimum_capacity_(0),
    2443             :         age_mark_(kNullAddress),
    2444             :         committed_(false),
    2445             :         id_(semispace),
    2446             :         current_page_(nullptr),
    2447       62447 :         pages_used_(0) {}
    2448             : 
    2449             :   inline bool Contains(HeapObject o);
    2450             :   inline bool Contains(Object o);
    2451             :   inline bool ContainsSlow(Address a);
    2452             : 
    2453             :   void SetUp(size_t initial_capacity, size_t maximum_capacity);
    2454             :   void TearDown();
    2455             : 
    2456             :   bool Commit();
    2457             :   bool Uncommit();
    2458             :   bool is_committed() { return committed_; }
    2459             : 
    2460             :   // Grow the semispace to the new capacity.  The new capacity requested must
    2461             :   // be larger than the current capacity and less than the maximum capacity.
    2462             :   bool GrowTo(size_t new_capacity);
    2463             : 
    2464             :   // Shrinks the semispace to the new capacity.  The new capacity requested
    2465             :   // must be more than the amount of used memory in the semispace and less
    2466             :   // than the current capacity.
    2467             :   bool ShrinkTo(size_t new_capacity);
    2468             : 
    2469             :   bool EnsureCurrentCapacity();
    2470             : 
    2471             :   Address space_end() { return memory_chunk_list_.back()->area_end(); }
    2472             : 
    2473             :   // Returns the start address of the first page of the space.
    2474             :   Address space_start() {
    2475             :     DCHECK_NE(memory_chunk_list_.front(), nullptr);
    2476             :     return memory_chunk_list_.front()->area_start();
    2477             :   }
    2478             : 
    2479             :   Page* current_page() { return current_page_; }
    2480             :   int pages_used() { return pages_used_; }
    2481             : 
    2482             :   // Returns the start address of the current page of the space.
    2483             :   Address page_low() { return current_page_->area_start(); }
    2484             : 
    2485             :   // Returns one past the end address of the current page of the space.
    2486             :   Address page_high() { return current_page_->area_end(); }
    2487             : 
    2488             :   bool AdvancePage() {
    2489      173183 :     Page* next_page = current_page_->next_page();
    2490             :     // We cannot expand if we have already reached the maximum number of
    2491             :     // pages. Note that this check must already account for the next page, as
    2492             :     // we could potentially fill the whole page after advancing.
    2493      346366 :     const bool reached_max_pages = (pages_used_ + 1) == max_pages();
    2494      173183 :     if (next_page == nullptr || reached_max_pages) {
    2495             :       return false;
    2496             :     }
    2497      153660 :     current_page_ = next_page;
    2498      153660 :     pages_used_++;
    2499             :     return true;
    2500             :   }
    2501             : 
    2502             :   // Resets the space to using the first page.
    2503             :   // Resets the space to use the first page.
    2504             : 
    2505             :   void RemovePage(Page* page);
    2506             :   void PrependPage(Page* page);
    2507             : 
    2508             :   Page* InitializePage(MemoryChunk* chunk, Executability executable);
    2509             : 
    2510             :   // Age mark accessors.
    2511             :   Address age_mark() { return age_mark_; }
    2512             :   void set_age_mark(Address mark);
    2513             : 
    2514             :   // Returns the current capacity of the semispace.
    2515             :   size_t current_capacity() { return current_capacity_; }
    2516             : 
    2517             :   // Returns the maximum capacity of the semispace.
    2518             :   size_t maximum_capacity() { return maximum_capacity_; }
    2519             : 
    2520             :   // Returns the initial capacity of the semispace.
    2521             :   size_t minimum_capacity() { return minimum_capacity_; }
    2522             : 
    2523             :   SemiSpaceId id() { return id_; }
    2524             : 
    2525             :   // Approximate amount of physical memory committed for this space.
    2526             :   size_t CommittedPhysicalMemory() override;
    2527             : 
    2528             :   // If we don't have these here then SemiSpace will be abstract.  However,
    2529             :   // they should never be called:
    2530             : 
    2531           0 :   size_t Size() override {
    2532           0 :     UNREACHABLE();
    2533             :   }
    2534             : 
    2535           0 :   size_t SizeOfObjects() override { return Size(); }
    2536             : 
    2537           0 :   size_t Available() override {
    2538           0 :     UNREACHABLE();
    2539             :   }
    2540             : 
    2541             :   Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
    2542             :   Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
    2543             : 
    2544             :   iterator begin() { return iterator(first_page()); }
    2545             :   iterator end() { return iterator(nullptr); }
    2546             : 
    2547             :   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
    2548             : 
    2549             : #ifdef DEBUG
    2550             :   V8_EXPORT_PRIVATE void Print() override;
    2551             :   // Validate a range of addresses in a SemiSpace.
    2552             :   // The "from" address must be on a page prior to the "to" address,
    2553             :   // in the linked page order, or it must be earlier on the same page.
    2554             :   static void AssertValidRange(Address from, Address to);
    2555             : #else
    2556             :   // Do nothing.
    2557             :   inline static void AssertValidRange(Address from, Address to) {}
    2558             : #endif
    2559             : 
    2560             : #ifdef VERIFY_HEAP
    2561             :   virtual void Verify();
    2562             : #endif
    2563             : 
    2564             :  private:
    2565             :   void RewindPages(int num_pages);
    2566             : 
    2567             :   inline int max_pages() {
    2568      173183 :     return static_cast<int>(current_capacity_ / Page::kPageSize);
    2569             :   }
    2570             : 
    2571             :   // Copies the flags into the masked positions on all pages in the space.
    2572             :   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
    2573             : 
    2574             :   // The currently committed space capacity.
    2575             :   size_t current_capacity_;
    2576             : 
    2577             :   // The maximum capacity that can be used by this space. A space cannot grow
    2578             :   // beyond that size.
    2579             :   size_t maximum_capacity_;
    2580             : 
    2581             :   // The minimum capacity for the space. A space cannot shrink below this size.
    2582             :   size_t minimum_capacity_;
    2583             : 
    2584             :   // Used to govern object promotion during mark-compact collection.
    2585             :   Address age_mark_;
    2586             : 
    2587             :   bool committed_;
    2588             :   SemiSpaceId id_;
    2589             : 
    2590             :   Page* current_page_;
    2591             : 
    2592             :   int pages_used_;
    2593             : 
    2594             :   friend class NewSpace;
    2595             :   friend class SemiSpaceIterator;
    2596             : };
    2597             : 
    2598             : 
    2599             : // A SemiSpaceIterator is an ObjectIterator that iterates over the active
    2600             : // semispace of the heap's new space.  It iterates over the objects in the
    2601             : // semispace from a given start address (defaulting to the bottom of the
    2602             : // semispace) to the top of the semispace.  New objects allocated after the
    2603             : // iterator is created are not iterated.
    2604       23625 : class SemiSpaceIterator : public ObjectIterator {
    2605             :  public:
    2606             :   // Create an iterator over the allocated objects in the given to-space.
    2607             :   explicit SemiSpaceIterator(NewSpace* space);
    2608             : 
    2609             :   inline HeapObject Next() override;
    2610             : 
    2611             :  private:
    2612             :   void Initialize(Address start, Address end);
    2613             : 
    2614             :   // The current iteration point.
    2615             :   Address current_;
    2616             :   // The end of iteration.
    2617             :   Address limit_;
    2618             : };
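                     : 
                     : // A hedged usage sketch, assuming Next() yields a null HeapObject once the
                     : // end of the semispace is reached:
                     : //
                     : //   SemiSpaceIterator it(heap->new_space());
                     : //   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
                     : //     // ... visit |obj| ...
                     : //   }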
    2619             : 
    2620             : // -----------------------------------------------------------------------------
    2621             : // The young generation space.
    2622             : //
    2623             : // The new space consists of a contiguous pair of semispaces.  It simply
    2624             : // forwards most functions to the appropriate semispace.
    2625             : 
    2626             : class V8_EXPORT_PRIVATE NewSpace
    2627             :     : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
    2628             :  public:
    2629             :   using iterator = PageIterator;
    2630             : 
    2631             :   NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
    2632             :            size_t initial_semispace_capacity, size_t max_semispace_capacity);
    2633             : 
    2634      312150 :   ~NewSpace() override { TearDown(); }
    2635             : 
    2636             :   inline bool ContainsSlow(Address a);
    2637             :   inline bool Contains(Object o);
    2638             :   inline bool Contains(HeapObject o);
    2639             : 
    2640             :   // Tears down the space.  Heap memory was not allocated by the space, so it
    2641             :   // is not deallocated here.
    2642             :   void TearDown();
    2643             : 
    2644             :   // Flip the pair of spaces.
    2645             :   void Flip();
    2646             : 
    2647             :   // Grow the capacity of the semispaces.  Assumes that they are not at
    2648             :   // their maximum capacity.
    2649             :   void Grow();
    2650             : 
    2651             :   // Shrink the capacity of the semispaces.
    2652             :   void Shrink();
    2653             : 
    2654             :   // Return the allocated bytes in the active semispace.
    2655     1043499 :   size_t Size() override {
    2656             :     DCHECK_GE(top(), to_space_.page_low());
    2657     2069765 :     return to_space_.pages_used() *
    2658     1026266 :                MemoryChunkLayout::AllocatableMemoryInDataPage() +
    2659     1043499 :            static_cast<size_t>(top() - to_space_.page_low());
    2660             :   }
    2661             : 
    2662      737362 :   size_t SizeOfObjects() override { return Size(); }
    2663             : 
    2664             :   // Return the allocatable capacity of a semispace.
    2665             :   size_t Capacity() {
    2666             :     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    2667      627125 :     return (to_space_.current_capacity() / Page::kPageSize) *
    2668      627125 :            MemoryChunkLayout::AllocatableMemoryInDataPage();
    2669             :   }
    2670             : 
    2671             :   // Return the current size of a semispace, allocatable and non-allocatable
    2672             :   // memory.
    2673             :   size_t TotalCapacity() {
    2674             :     DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    2675             :     return to_space_.current_capacity();
    2676             :   }
    2677             : 
    2678             :   // Committed memory for NewSpace is the committed memory of both semispaces
    2679             :   // combined.
    2680      635322 :   size_t CommittedMemory() override {
    2681      635322 :     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
    2682             :   }
    2683             : 
    2684           0 :   size_t MaximumCommittedMemory() override {
    2685             :     return from_space_.MaximumCommittedMemory() +
    2686           0 :            to_space_.MaximumCommittedMemory();
    2687             :   }
    2688             : 
    2689             :   // Approximate amount of physical memory committed for this space.
    2690             :   size_t CommittedPhysicalMemory() override;
    2691             : 
    2692             :   // Return the available bytes without growing.
    2693       95279 :   size_t Available() override {
    2694             :     DCHECK_GE(Capacity(), Size());
    2695       95279 :     return Capacity() - Size();
    2696             :   }
    2697             : 
    2698          30 :   size_t ExternalBackingStoreBytes(
    2699             :       ExternalBackingStoreType type) const override {
    2700             :     DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
    2701          30 :     return to_space_.ExternalBackingStoreBytes(type);
    2702             :   }
    2703             : 
    2704      190488 :   size_t AllocatedSinceLastGC() {
    2705             :     const Address age_mark = to_space_.age_mark();
    2706             :     DCHECK_NE(age_mark, kNullAddress);
    2707             :     DCHECK_NE(top(), kNullAddress);
    2708             :     Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
    2709             :     Page* const last_page = Page::FromAllocationAreaAddress(top());
    2710             :     Page* current_page = age_mark_page;
    2711             :     size_t allocated = 0;
    2712      190488 :     if (current_page != last_page) {
    2713             :       DCHECK_EQ(current_page, age_mark_page);
    2714             :       DCHECK_GE(age_mark_page->area_end(), age_mark);
    2715       54934 :       allocated += age_mark_page->area_end() - age_mark;
    2716             :       current_page = current_page->next_page();
    2717             :     } else {
    2718             :       DCHECK_GE(top(), age_mark);
    2719      135554 :       return top() - age_mark;
    2720             :     }
    2721      405872 :     while (current_page != last_page) {
    2722             :       DCHECK_NE(current_page, age_mark_page);
    2723      175469 :       allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
    2724             :       current_page = current_page->next_page();
    2725             :     }
    2726             :     DCHECK_GE(top(), current_page->area_start());
    2727       54934 :     allocated += top() - current_page->area_start();
    2728             :     DCHECK_LE(allocated, Size());
    2729       54934 :     return allocated;
    2730             :   }
    2731             : 
    2732             :   void MovePageFromSpaceToSpace(Page* page) {
    2733             :     DCHECK(page->IsFromPage());
    2734        2707 :     from_space_.RemovePage(page);
    2735        2707 :     to_space_.PrependPage(page);
    2736             :   }
    2737             : 
    2738             :   bool Rebalance();
    2739             : 
    2740             :   // Return the maximum capacity of a semispace.
    2741             :   size_t MaximumCapacity() {
    2742             :     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
    2743             :     return to_space_.maximum_capacity();
    2744             :   }
    2745             : 
    2746             :   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
    2747             : 
    2748             :   // Returns the initial capacity of a semispace.
    2749             :   size_t InitialTotalCapacity() {
    2750             :     DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
    2751             :     return to_space_.minimum_capacity();
    2752             :   }
    2753             : 
    2754             :   void ResetOriginalTop() {
    2755             :     DCHECK_GE(top(), original_top_);
    2756             :     DCHECK_LE(top(), original_limit_);
    2757             :     original_top_.store(top(), std::memory_order_release);
    2758             :   }
    2759             : 
    2760             :   Address original_top_acquire() {
    2761             :     return original_top_.load(std::memory_order_acquire);
    2762             :   }
    2763             :   Address original_limit_relaxed() {
    2764             :     return original_limit_.load(std::memory_order_relaxed);
    2765             :   }
    2766             : 
    2767             :   // Return the first allocatable address in the active semispace. This may
    2768             :   // be the address where the first object resides.
    2769             :   Address first_allocatable_address() { return to_space_.space_start(); }
    2770             : 
    2771             :   // Get the age mark of the inactive semispace.
    2772             :   Address age_mark() { return from_space_.age_mark(); }
    2773             :   // Set the age mark in the active semispace.
    2774       94944 :   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
    2775             : 
    2776             :   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
    2777             :   AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
    2778             : 
    2779             :   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
    2780             :   AllocateRawUnaligned(int size_in_bytes);
    2781             : 
    2782             :   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
    2783             :   AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
    2784             : 
    2785             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
    2786             :       int size_in_bytes, AllocationAlignment alignment);
    2787             : 
    2788             :   // Reset the allocation pointer to the beginning of the active semispace.
    2789             :   void ResetLinearAllocationArea();
    2790             : 
    2791             :   // When inline allocation stepping is active, either because of incremental
    2792             :   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
    2793             :   // inline allocation every once in a while. This is done by setting
    2794             :   // allocation_info_.limit to be lower than the actual limit and increasing
    2795             :   // it in steps to guarantee that the observers are notified periodically.
    2796             :   void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
    2797             : 
    2798             :   inline bool ToSpaceContainsSlow(Address a);
    2799             :   inline bool ToSpaceContains(Object o);
    2800             :   inline bool FromSpaceContains(Object o);
    2801             : 
    2802             :   // Try to switch the active semispace to a new, empty page.
    2803             :   // Returns false if this isn't possible or reasonable (i.e., there
    2804             :   // are no pages, or the current page is already empty), or true
    2805             :   // if successful.
    2806             :   bool AddFreshPage();
    2807             :   bool AddFreshPageSynchronized();
    2808             : 
    2809             : #ifdef VERIFY_HEAP
    2810             :   // Verify the active semispace.
    2811             :   virtual void Verify(Isolate* isolate);
    2812             : #endif
    2813             : 
    2814             : #ifdef DEBUG
    2815             :   // Print the active semispace.
    2816             :   void Print() override { to_space_.Print(); }
    2817             : #endif
    2818             : 
    2819             :   // Return whether the operation succeeded.
    2820             :   bool CommitFromSpaceIfNeeded() {
    2821       94944 :     if (from_space_.is_committed()) return true;
    2822       30370 :     return from_space_.Commit();
    2823             :   }
    2824             : 
    2825             :   bool UncommitFromSpace() {
    2826       17203 :     if (!from_space_.is_committed()) return true;
    2827       15940 :     return from_space_.Uncommit();
    2828             :   }
    2829             : 
    2830             :   bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
    2831             : 
    2832             :   SemiSpace* active_space() { return &to_space_; }
    2833             : 
    2834             :   Page* first_page() { return to_space_.first_page(); }
    2835             :   Page* last_page() { return to_space_.last_page(); }
    2836             : 
    2837             :   iterator begin() { return to_space_.begin(); }
    2838             :   iterator end() { return to_space_.end(); }
    2839             : 
    2840             :   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
    2841             : 
    2842        1486 :   SemiSpace& from_space() { return from_space_; }
    2843             :   SemiSpace& to_space() { return to_space_; }
    2844             : 
    2845             :  private:
    2846             :   // Update linear allocation area to match the current to-space page.
    2847             :   void UpdateLinearAllocationArea();
    2848             : 
    2849             :   base::Mutex mutex_;
    2850             : 
    2851             :   // The top and the limit at the time of setting the linear allocation area.
    2852             :   // These values can be accessed by background tasks.
    2853             :   std::atomic<Address> original_top_;
    2854             :   std::atomic<Address> original_limit_;
    2855             : 
    2856             :   // The semispaces.
    2857             :   SemiSpace to_space_;
    2858             :   SemiSpace from_space_;
    2859             :   VirtualMemory reservation_;
    2860             : 
    2861             :   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
    2862      620336 :   bool SupportsInlineAllocation() override { return true; }
    2863             : 
    2864             :   friend class SemiSpaceIterator;
    2865             : };
    2866             : 
    2867             : class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
    2868             :  public:
    2869             :   explicit PauseAllocationObserversScope(Heap* heap);
    2870             :   ~PauseAllocationObserversScope();
    2871             : 
    2872             :  private:
    2873             :   Heap* heap_;
    2874             :   DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
    2875             : };
    2876             : 
    2877             : // -----------------------------------------------------------------------------
    2878             : // Compaction space that is used temporarily during compaction.
    2879             : 
    2880      127465 : class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
    2881             :  public:
    2882             :   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
    2883      127464 :       : PagedSpace(heap, id, executable) {}
    2884             : 
    2885    85635391 :   bool is_local() override { return true; }
    2886             : 
    2887             :  protected:
    2888             :   // The space is temporary and not included in any snapshots.
    2889           0 :   bool snapshotable() override { return false; }
    2890             : 
    2891             :   V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
    2892             :       int size_in_bytes) override;
    2893             : 
    2894             :   V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
    2895             :       int size_in_bytes) override;
    2896             : };
    2897             : 
    2898             : // A collection of |CompactionSpace|s used by a single compaction task.
    2899      127463 : class CompactionSpaceCollection : public Malloced {
    2900             :  public:
    2901      127463 :   explicit CompactionSpaceCollection(Heap* heap)
    2902             :       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
    2903      127463 :         code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
    2904             : 
    2905             :   CompactionSpace* Get(AllocationSpace space) {
    2906             :     switch (space) {
    2907             :       case OLD_SPACE:
    2908      127463 :         return &old_space_;
    2909             :       case CODE_SPACE:
    2910      127463 :         return &code_space_;
    2911             :       default:
    2912             :         UNREACHABLE();
    2913             :     }
    2914             :     UNREACHABLE();
    2915             :   }
    2916             : 
    2917             :  private:
    2918             :   CompactionSpace old_space_;
    2919             :   CompactionSpace code_space_;
    2920             : };
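                     : 
                     : // A hedged usage sketch for a single compaction task, merging results back
                     : // via PagedSpace::MergeCompactionSpace (declared above):
                     : //
                     : //   CompactionSpaceCollection compaction_spaces(heap);
                     : //   CompactionSpace* old_space = compaction_spaces.Get(OLD_SPACE);
                     : //   // ... evacuate live objects into |old_space| without locking ...
                     : //   heap->old_space()->MergeCompactionSpace(old_space);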
    2921             : 
    2922             : // -----------------------------------------------------------------------------
    2923             : // Old generation regular object space.
    2924             : 
    2925      124869 : class OldSpace : public PagedSpace {
    2926             :  public:
    2927             :   // Creates an old space object. The constructor does not allocate pages
    2928             :   // from the OS.
    2929       62452 :   explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
    2930             : 
    2931             :   static bool IsAtPageStart(Address addr) {
    2932             :     return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
    2933             :            MemoryChunkLayout::ObjectStartOffsetInDataPage();
    2934             :   }
    2935             : };
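
IsAtPageStart() masks off the page-aligned bits of the address and compares the remaining in-page offset with the offset of the first object slot. A standalone sketch of the same arithmetic, using illustrative constants (the real values come from Page and MemoryChunkLayout):

    #include <cassert>
    #include <cstdint>

    using Address = std::uintptr_t;  // mirrors v8::internal::Address

    // Assumed values for illustration only.
    constexpr Address kPageSize = 512 * 1024;
    constexpr Address kPageAlignmentMask = kPageSize - 1;
    constexpr std::intptr_t kObjectStartOffsetInDataPage = 0x880;

    bool IsAtPageStart(Address addr) {
      // The masked value is the offset of addr within its page.
      return static_cast<std::intptr_t>(addr & kPageAlignmentMask) ==
             kObjectStartOffsetInDataPage;
    }

    int main() {
      Address page_base = 0x40000000;  // page-aligned for these constants
      assert(IsAtPageStart(page_base + 0x880));  // first object slot
      assert(!IsAtPageStart(page_base));         // page header, not an object
    }
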
    2936             : 
    2937             : // -----------------------------------------------------------------------------
    2938             : // Old generation code object space.
    2939             : 
    2940      124854 : class CodeSpace : public PagedSpace {
    2941             :  public:
    2942             :   // Creates a code space object. The constructor does not allocate pages
    2943             :   // from the OS.
    2944       62442 :   explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
    2945             : };
    2946             : 
    2947             : // For contiguous spaces, top should be in the space (or at the end) and limit
    2948             : // should be the end of the space.
    2949             : #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
    2950             :   SLOW_DCHECK((space).page_low() <= (info).top() &&   \
    2951             :               (info).top() <= (space).page_high() &&  \
    2952             :               (info).limit() <= (space).page_high())
    2953             : 
    2954             : 
    2955             : // -----------------------------------------------------------------------------
    2956             : // Old space for all map objects.
    2957             : 
    2958      124854 : class MapSpace : public PagedSpace {
    2959             :  public:
    2960             :   // Creates a map space object.
    2961       62442 :   explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}
    2962             : 
    2963           0 :   int RoundSizeDownToObjectAlignment(int size) override {
    2964             :     if (base::bits::IsPowerOfTwo(Map::kSize)) {
    2965             :       return RoundDown(size, Map::kSize);
    2966             :     } else {
    2967           0 :       return (size / Map::kSize) * Map::kSize;
    2968             :     }
    2969             :   }
    2970             : 
    2971             : #ifdef VERIFY_HEAP
    2972             :   void VerifyObject(HeapObject obj) override;
    2973             : #endif
    2974             : };
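
RoundSizeDownToObjectAlignment() rounds a byte count down to a multiple of Map::kSize; the power-of-two branch merely lets RoundDown use a cheap mask. A standalone sketch with an assumed (non-power-of-two) map size:

    #include <cassert>

    constexpr int kMapSize = 80;  // illustrative stand-in for Map::kSize

    int RoundSizeDownToMapAlignment(int size) {
      // Truncating integer division rounds down to a multiple of kMapSize.
      return (size / kMapSize) * kMapSize;
    }

    int main() {
      assert(RoundSizeDownToMapAlignment(159) == 80);
      assert(RoundSizeDownToMapAlignment(160) == 160);
    }
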
    2975             : 
    2976             : // -----------------------------------------------------------------------------
    2977             : // Read-only space for all immortal, immovable, and immutable objects.
    2978             : 
    2979             : class ReadOnlySpace : public PagedSpace {
    2980             :  public:
    2981             :   class WritableScope {
    2982             :    public:
    2983         448 :     explicit WritableScope(ReadOnlySpace* space) : space_(space) {
    2984             :       space_->MarkAsReadWrite();
    2985             :     }
    2986             : 
    2987         896 :     ~WritableScope() { space_->MarkAsReadOnly(); }
    2988             : 
    2989             :    private:
    2990             :     ReadOnlySpace* space_;
    2991             :   };
    2992             : 
    2993             :   explicit ReadOnlySpace(Heap* heap);
    2994             : 
    2995             :   // TODO(v8:7464): Remove this once PagedSpace::TearDown no longer writes to
    2996             :   // memory_chunk_list_.
    2997      124854 :   ~ReadOnlySpace() override { MarkAsReadWrite(); }
    2998             : 
    2999             :   bool writable() const { return !is_marked_read_only_; }
    3000             : 
    3001             :   V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
    3002             :   void MarkAsReadOnly();
    3003             :   // Makes the heap forget the space for memory bookkeeping purposes
    3004             :   // (e.g., prevents the space's memory from being reported as leaked).
    3005             :   void Forget();
    3006             : 
    3007             :   // During boot the free_space_map is created, and afterwards we may need
    3008             :   // to write it into the free list nodes that were already created.
    3009             :   void RepairFreeListsAfterDeserialization();
    3010             : 
    3011             :  private:
    3012             :   void MarkAsReadWrite();
    3013             :   void SetPermissionsForPages(PageAllocator::Permission access);
    3014             : 
    3015             :   bool is_marked_read_only_ = false;
    3016             : 
    3017             :   // String padding must be cleared just before serialization; therefore,
    3018             :   // if the space was deserialized, the string padding in the space has
    3019             :   // already been cleared.
    3020             :   bool is_string_padding_cleared_;
    3021             : };
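
WritableScope is the RAII counterpart to MarkAsReadOnly(): construction makes the space's pages writable and destruction seals them again. A hedged sketch of a call site (the surrounding function is hypothetical):

    // Hypothetical: temporarily unprotect the read-only space, mutate it,
    // and re-protect on scope exit.
    void PatchReadOnlyHeap(ReadOnlySpace* space) {
      ReadOnlySpace::WritableScope writable(space);  // pages now read-write
      // ... write into objects in the read-only space ...
    }  // ~WritableScope() calls MarkAsReadOnly() again
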
    3022             : 
    3023             : // -----------------------------------------------------------------------------
    3024             : // Large objects (> kMaxRegularHeapObjectSize) are allocated and managed
    3025             : // by the large object space. Large objects do not move during garbage
    3026             : // collections.
    3027             : 
    3028             : class LargeObjectSpace : public Space {
    3029             :  public:
    3030             :   using iterator = LargePageIterator;
    3031             : 
    3032             :   explicit LargeObjectSpace(Heap* heap);
    3033             :   LargeObjectSpace(Heap* heap, AllocationSpace id);
    3034             : 
    3035      249708 :   ~LargeObjectSpace() override { TearDown(); }
    3036             : 
    3037             :   // Releases internal resources and frees the objects in this space.
    3038             :   void TearDown();
    3039             : 
    3040             :   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
    3041             :   AllocateRaw(int object_size);
    3042             : 
    3043             :   // Available bytes for objects in this space.
    3044             :   size_t Available() override;
    3045             : 
    3046     2017657 :   size_t Size() override { return size_; }
    3047     9596262 :   size_t SizeOfObjects() override { return objects_size_; }
    3048             : 
    3049             :   // Approximate amount of physical memory committed for this space.
    3050             :   size_t CommittedPhysicalMemory() override;
    3051             : 
    3052             :   int PageCount() { return page_count_; }
    3053             : 
    3054             :   // Clears the marking state of live objects.
    3055             :   void ClearMarkingStateOfLiveObjects();
    3056             : 
    3057             :   // Frees unmarked objects.
    3058             :   void FreeUnmarkedObjects();
    3059             : 
    3060             :   void PromoteNewLargeObject(LargePage* page);
    3061             : 
    3062             :   // Checks whether a heap object is in this space; O(1).
    3063             :   V8_EXPORT_PRIVATE bool Contains(HeapObject obj);
    3064             :   // Checks whether an address is in the object area of this space by
    3065             :   // iterating over all objects in the space; may be slow.
    3066             :   bool ContainsSlow(Address addr);
    3067             : 
    3068             :   // Checks whether the space is empty.
    3069           5 :   bool IsEmpty() { return first_page() == nullptr; }
    3070             : 
    3071             :   virtual void AddPage(LargePage* page, size_t object_size);
    3072             :   virtual void RemovePage(LargePage* page, size_t object_size);
    3073             : 
    3074             :   LargePage* first_page() {
    3075             :     return reinterpret_cast<LargePage*>(Space::first_page());
    3076             :   }
    3077             : 
    3078             :   // Collect code statistics.
    3079             :   void CollectCodeStatistics();
    3080             : 
    3081             :   iterator begin() { return iterator(first_page()); }
    3082             :   iterator end() { return iterator(nullptr); }
    3083             : 
    3084             :   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
    3085             : 
    3086             : #ifdef VERIFY_HEAP
    3087             :   virtual void Verify(Isolate* isolate);
    3088             : #endif
    3089             : 
    3090             : #ifdef DEBUG
    3091             :   void Print() override;
    3092             : #endif
    3093             : 
    3094             :  protected:
    3095             :   LargePage* AllocateLargePage(int object_size, Executability executable);
    3096             :   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
    3097             :                                                      Executability executable);
    3098             : 
    3099             :   size_t size_;          // allocated bytes
    3100             :   int page_count_;       // number of chunks
    3101             :   size_t objects_size_;  // size of objects
    3102             : 
    3103             :  private:
    3104             :   friend class LargeObjectIterator;
    3105             : };
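
The begin()/end() pair makes the space usable in a range-based for loop over its LargePages. A sketch, assuming (as with the other page iterators in this header) that dereferencing a LargePageIterator yields a LargePage*:

    // Count the pages in a large object space; each page holds exactly
    // one large object.
    size_t CountLargePages(LargeObjectSpace* space) {
      size_t pages = 0;
      for (LargePage* page : *space) {
        (void)page;
        ++pages;
      }
      return pages;
    }
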
    3106             : 
    3107      187281 : class NewLargeObjectSpace : public LargeObjectSpace {
    3108             :  public:
    3109             :   NewLargeObjectSpace(Heap* heap, size_t capacity);
    3110             : 
    3111             :   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
    3112             :   AllocateRaw(int object_size);
    3113             : 
    3114             :   // Available bytes for objects in this space.
    3115             :   size_t Available() override;
    3116             : 
    3117             :   void Flip();
    3118             : 
    3119             :   void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
    3120             : 
    3121             :   void SetCapacity(size_t capacity);
    3122             : 
    3123             :   // The most recently allocated object, which is not guaranteed to be
    3124             :   // fully initialized when the concurrent marker visits it.
    3125             :   Address pending_object() {
    3126             :     return pending_object_.load(std::memory_order_relaxed);
    3127             :   }
    3128             : 
    3129             :   void ResetPendingObject() { pending_object_.store(0); }
    3130             : 
    3131             :  private:
    3132             :   std::atomic<Address> pending_object_;
    3133             :   size_t capacity_;
    3134             : };
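
FreeDeadObjects() takes a liveness predicate, so a sweep over the young large object space can be phrased as a lambda. A hedged sketch; IsMarked() is a hypothetical stand-in for whatever liveness query the collector actually uses:

    void SweepNewLargeObjects(NewLargeObjectSpace* space) {
      space->FreeDeadObjects([](HeapObject object) {
        return !IsMarked(object);  // hypothetical: true means "dead"
      });
    }
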
    3135             : 
    3136      312135 : class CodeLargeObjectSpace : public LargeObjectSpace {
    3137             :  public:
    3138             :   explicit CodeLargeObjectSpace(Heap* heap);
    3139             : 
    3140             :   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
    3141             :   AllocateRaw(int object_size);
    3142             : 
    3143             :   // Finds the large object page containing the given address; returns
    3144             :   // nullptr if no such page exists.
    3145             :   LargePage* FindPage(Address a);
    3146             : 
    3147             :  protected:
    3148             :   void AddPage(LargePage* page, size_t object_size) override;
    3149             :   void RemovePage(LargePage* page, size_t object_size) override;
    3150             : 
    3151             :  private:
    3152             :   static const size_t kInitialChunkMapCapacity = 1024;
    3153             :   void InsertChunkMapEntries(LargePage* page);
    3154             :   void RemoveChunkMapEntries(LargePage* page);
    3155             : 
    3156             :   // Page-aligned addresses to their corresponding LargePage.
    3157             :   std::unordered_map<Address, LargePage*> chunk_map_;
    3158             : };
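
FindPage() can be O(1) because InsertChunkMapEntries() registers every page-aligned address covered by a large page as a key; a lookup then masks the query address down to its aligned base. A standalone model of the idea with an assumed alignment (the real granularity comes from the page allocator):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    struct FakeLargePage {};  // stand-in for LargePage

    constexpr std::uintptr_t kAlignment = 256 * 1024;  // assumed

    std::unordered_map<std::uintptr_t, FakeLargePage*> chunk_map;

    // Register every aligned address covered by [start, start + size).
    void InsertChunkMapEntries(std::uintptr_t start, std::size_t size,
                               FakeLargePage* page) {
      for (std::uintptr_t a = start; a < start + size; a += kAlignment) {
        chunk_map[a] = page;
      }
    }

    // O(1): mask down to the aligned base, then hash-lookup.
    FakeLargePage* FindPage(std::uintptr_t addr) {
      auto it = chunk_map.find(addr & ~(kAlignment - 1));
      return it == chunk_map.end() ? nullptr : it->second;
    }

    int main() {
      FakeLargePage page;
      InsertChunkMapEntries(0x40000000, 3 * kAlignment, &page);
      assert(FindPage(0x40000000 + kAlignment + 123) == &page);
      assert(FindPage(0x40000000 + 4 * kAlignment) == nullptr);
    }
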
    3159             : 
    3160       70875 : class LargeObjectIterator : public ObjectIterator {
    3161             :  public:
    3162             :   explicit LargeObjectIterator(LargeObjectSpace* space);
    3163             : 
    3164             :   HeapObject Next() override;
    3165             : 
    3166             :  private:
    3167             :   LargePage* current_;
    3168             : };
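
The conventional driver loop for an ObjectIterator: Next() yields each large object once and, per V8's iterator convention, an is_null() object when the space is exhausted. A sketch:

    void VisitLargeObjects(LargeObjectSpace* space) {
      LargeObjectIterator it(space);
      for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
        // ... visit obj ...
      }
    }
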
    3169             : 
    3170             : // Iterates over the chunks (pages and large object pages) that can contain
    3171             : // pointers to new space or to evacuation candidates.
    3172             : class OldGenerationMemoryChunkIterator {
    3173             :  public:
    3174             :   inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
    3175             : 
    3176             :   // Returns nullptr when the iterator is done.
    3177             :   inline MemoryChunk* next();
    3178             : 
    3179             :  private:
    3180             :   enum State {
    3181             :     kOldSpaceState,
    3182             :     kMapState,
    3183             :     kCodeState,
    3184             :     kLargeObjectState,
    3185             :     kCodeLargeObjectState,
    3186             :     kFinishedState
    3187             :   };
    3188             :   Heap* heap_;
    3189             :   State state_;
    3190             :   PageIterator old_iterator_;
    3191             :   PageIterator code_iterator_;
    3192             :   PageIterator map_iterator_;
    3193             :   LargePageIterator lo_iterator_;
    3194             :   LargePageIterator code_lo_iterator_;
    3195             : };
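
A hedged sketch of driving the iterator above: next() walks the states in order (old, map, code, large object, code large object pages) and returns nullptr once kFinishedState is reached.

    void ForEachOldGenerationChunk(Heap* heap) {
      OldGenerationMemoryChunkIterator it(heap);
      while (MemoryChunk* chunk = it.next()) {
        // ... process chunk (e.g., visit its remembered sets) ...
        (void)chunk;
      }
    }
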
    3196             : 
    3197             : }  // namespace internal
    3198             : }  // namespace v8
    3199             : 
    3200             : #endif  // V8_HEAP_SPACES_H_

Generated by: LCOV version 1.10