LCOV - code coverage report
Current view: top level - src/heap - spaces.h (source / functions)
Test: app.info        Lines:     318 / 344  (92.4 %)
Date: 2019-01-20      Functions:  74 / 103  (71.8 %)

          Line data    Source code
       1             : // Copyright 2011 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #ifndef V8_HEAP_SPACES_H_
       6             : #define V8_HEAP_SPACES_H_
       7             : 
       8             : #include <list>
       9             : #include <map>
      10             : #include <memory>
      11             : #include <unordered_map>
      12             : #include <unordered_set>
      13             : #include <vector>
      14             : 
      15             : #include "src/allocation.h"
      16             : #include "src/base/atomic-utils.h"
      17             : #include "src/base/bounded-page-allocator.h"
      18             : #include "src/base/export-template.h"
      19             : #include "src/base/iterator.h"
      20             : #include "src/base/list.h"
      21             : #include "src/base/platform/mutex.h"
      22             : #include "src/cancelable-task.h"
      23             : #include "src/flags.h"
      24             : #include "src/globals.h"
      25             : #include "src/heap/heap.h"
      26             : #include "src/heap/invalidated-slots.h"
      27             : #include "src/heap/marking.h"
      28             : #include "src/objects.h"
      29             : #include "src/objects/free-space.h"
      30             : #include "src/objects/heap-object.h"
      31             : #include "src/objects/map.h"
      32             : #include "src/utils.h"
      33             : 
      34             : namespace v8 {
      35             : namespace internal {
      36             : 
      37             : namespace heap {
      38             : class HeapTester;
      39             : class TestCodePageAllocatorScope;
      40             : }  // namespace heap
      41             : 
      42             : class AllocationObserver;
      43             : class CompactionSpace;
      44             : class CompactionSpaceCollection;
      45             : class FreeList;
      46             : class Isolate;
      47             : class LinearAllocationArea;
      48             : class LocalArrayBufferTracker;
      49             : class MemoryAllocator;
      50             : class MemoryChunk;
      51             : class MemoryChunkLayout;
      52             : class Page;
      53             : class PagedSpace;
      54             : class SemiSpace;
      55             : class SkipList;
      56             : class SlotsBuffer;
      57             : class SlotSet;
      58             : class TypedSlotSet;
      59             : class Space;
      60             : 
      61             : // -----------------------------------------------------------------------------
      62             : // Heap structures:
      63             : //
      64             : // A JS heap consists of a young generation, an old generation, and a large
      65             : // object space. The young generation is divided into two semispaces. A
      66             : // scavenger implements Cheney's copying algorithm. The old generation is
      67             : // separated into a map space and an old object space. The map space contains
      68             : // all (and only) map objects; the rest of the old objects go into the old space.
      69             : // The old generation is collected by a mark-sweep-compact collector.
      70             : //
      71             : // The semispaces of the young generation are contiguous.  The old and map
      72             : // spaces consist of a list of pages. A page has a page header and an object
      73             : // area.
      74             : //
      75             : // There is a separate large object space for objects larger than
      76             : // kMaxRegularHeapObjectSize, so that they do not have to move during
      77             : // collection. The large object space is paged. Pages in large object space
      78             : // may be larger than the page size.
      79             : //
      80             : // A store-buffer based write barrier is used to keep track of intergenerational
      81             : // references.  See heap/store-buffer.h.
      82             : //
      83             : // During scavenges and mark-sweep collections we sometimes (after a store
      84             : // buffer overflow) iterate intergenerational pointers without decoding heap
      85             : // object maps, so if the page belongs to old space or large object space
      86             : // it is essential to guarantee that the page does not contain any
      87             : // garbage pointers to new space: every pointer-aligned word which satisfies
      88             : // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
      89             : // new space. Thus objects in old space and large object spaces should have a
      90             : // special layout (e.g. no bare integer fields). This requirement does not
      91             : // apply to the map space, which is iterated in a special fashion. However, we still
      92             : // require pointer fields of dead maps to be cleaned.
      93             : //
      94             : // To enable lazy cleaning of old space pages we can mark chunks of the page
      95             : // as being garbage.  Garbage sections are marked with a special map.  These
      96             : // sections are skipped when scanning the page, even if we are otherwise
      97             : // scanning without regard for object boundaries.  Garbage sections are chained
      98             : // together to form a free list after a GC.  Garbage sections created outside
      99             : // of GCs by object truncation etc. may not be in the free list chain.  Very
     100             : // small free spaces are ignored; they need only be cleaned of bogus pointers
     101             : // into new space.
     102             : //
     103             : // Each page may have up to one special garbage section.  The start of this
     104             : // section is denoted by the top field in the space.  The end of the section
     105             : // is denoted by the limit field in the space.  This special garbage section
     106             : // is not marked with a free space map in the data.  The point of this section
     107             : // is to enable linear allocation without having to constantly update the byte
     108             : // array every time the top field is updated and a new object is created.  The
     109             : // special garbage section is not in the chain of garbage sections.
     110             : //
     111             : // Since the top and limit fields are in the space, not the page, only one page
     112             : // has a special garbage section, and if the top and limit are equal then there
     113             : // is no special garbage section.
     114             : 
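                      : // To make the top/limit mechanism above concrete, here is a minimal sketch
                      : // (not part of this header) of bump-pointer allocation against those two
                      : // fields; the accessors top(), limit() and set_top() are assumed:
                      : //
                      : //   Address AllocateRawLinear(size_t size_in_bytes) {
                      : //     Address current_top = top();
                      : //     if (current_top + size_in_bytes > limit()) {
                      : //       return kNullAddress;  // No room left; a slow path must refill.
                      : //     }
                      : //     set_top(current_top + size_in_bytes);
                      : //     return current_top;  // New object occupies [current_top, top()).
                      : //   }
                      : 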
     115             : // Some assertion macros used in debug mode.
     116             : 
     117             : #define DCHECK_PAGE_ALIGNED(address) DCHECK_EQ(0, (address)&kPageAlignmentMask)
     118             : 
     119             : #define DCHECK_OBJECT_ALIGNED(address) \
     120             :   DCHECK_EQ(0, (address)&kObjectAlignmentMask)
     121             : 
     122             : #define DCHECK_OBJECT_SIZE(size) \
     123             :   DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
     124             : 
     125             : #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
     126             :   DCHECK((0 < size) && (size <= code_space->AreaSize()))
     127             : 
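                      : // Illustrative usage (not part of this header): the macros guard raw address
                      : // arithmetic in debug builds, e.g.
                      : //
                      : //   DCHECK_PAGE_ALIGNED(chunk->address());
                      : //   DCHECK_OBJECT_SIZE(size_in_bytes);
                      : 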
     128             : enum FreeListCategoryType {
     129             :   kTiniest,
     130             :   kTiny,
     131             :   kSmall,
     132             :   kMedium,
     133             :   kLarge,
     134             :   kHuge,
     135             : 
     136             :   kFirstCategory = kTiniest,
     137             :   kLastCategory = kHuge,
     138             :   kNumberOfCategories = kLastCategory + 1,
     139             :   kInvalidCategory
     140             : };
     141             : 
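                      : // A rough sketch (not part of this header) of how a free block's size maps to
                      : // one of the categories above; the threshold names (kTiniestListMax etc.) are
                      : // assumed to live in FreeList and are not quoted from it:
                      : //
                      : //   FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
                      : //     if (size_in_bytes <= kTiniestListMax) return kTiniest;
                      : //     if (size_in_bytes <= kTinyListMax) return kTiny;
                      : //     if (size_in_bytes <= kSmallListMax) return kSmall;
                      : //     if (size_in_bytes <= kMediumListMax) return kMedium;
                      : //     if (size_in_bytes <= kLargeListMax) return kLarge;
                      : //     return kHuge;
                      : //   }
                      : 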
     142             : enum FreeMode { kLinkCategory, kDoNotLinkCategory };
     143             : 
     144             : enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
     145             : 
     146             : enum RememberedSetType {
     147             :   OLD_TO_NEW,
     148             :   OLD_TO_OLD,
     149             :   NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
     150             : };
     151             : 
     152             : // A free list category maintains a linked list of free memory blocks.
     153             : class FreeListCategory {
     154             :  public:
     155             :   FreeListCategory(FreeList* free_list, Page* page)
     156             :       : free_list_(free_list),
     157             :         page_(page),
     158             :         type_(kInvalidCategory),
     159             :         available_(0),
     160             :         prev_(nullptr),
     161     2663864 :         next_(nullptr) {}
     162             : 
     163             :   void Initialize(FreeListCategoryType type) {
     164     2664150 :     type_ = type;
     165     2664150 :     available_ = 0;
     166     2664150 :     prev_ = nullptr;
     167     2664150 :     next_ = nullptr;
     168             :   }
     169             : 
     170             :   void Reset();
     171             : 
     172           0 :   void ResetStats() { Reset(); }
     173             : 
     174             :   void RepairFreeList(Heap* heap);
     175             : 
     176             :   // Relinks the category into the currently owning free list. Requires that the
     177             :   // category is currently unlinked.
     178             :   void Relink();
     179             : 
     180             :   void Free(Address address, size_t size_in_bytes, FreeMode mode);
     181             : 
     182             :   // Makes a single attempt to pick a node of at least |minimum_size| from the
     183             :   // category. Stores the actual size in |node_size|. Returns a null FreeSpace
     184             :   // if no node is found.
     185             :   FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
     186             : 
     187             :   // Picks a node of at least |minimum_size| from the category. Stores the
     188             :   // actual size in |node_size|. Returns a null FreeSpace if no node is found.
     189             :   FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
     190             : 
     191             :   inline FreeList* owner();
     192             :   inline Page* page() const { return page_; }
     193             :   inline bool is_linked();
     194             :   bool is_empty() { return top().is_null(); }
     195             :   size_t available() const { return available_; }
     196             : 
     197     7457417 :   void set_free_list(FreeList* free_list) { free_list_ = free_list; }
     198             : 
     199             : #ifdef DEBUG
     200             :   size_t SumFreeList();
     201             :   int FreeListLength();
     202             : #endif
     203             : 
     204             :  private:
     205             :   // For debug builds we accurately compute free list lengths up to
     206             :   // {kVeryLongFreeList} by manually walking the list.
     207             :   static const int kVeryLongFreeList = 500;
     208             : 
     209             :   FreeSpace top() { return top_; }
     210    26497466 :   void set_top(FreeSpace top) { top_ = top; }
     211             :   FreeListCategory* prev() { return prev_; }
     212     4004054 :   void set_prev(FreeListCategory* prev) { prev_ = prev; }
     213             :   FreeListCategory* next() { return next_; }
     214     5514884 :   void set_next(FreeListCategory* next) { next_ = next; }
     215             : 
     216             :   // This FreeListCategory is owned by the given free_list_.
     217             :   FreeList* free_list_;
     218             : 
     219             :   // This FreeListCategory holds free list entries of the given page_.
     220             :   Page* const page_;
     221             : 
     222             :   // |type_|: The type of this free list category.
     223             :   FreeListCategoryType type_;
     224             : 
     225             :   // |available_|: Total available bytes in all blocks of this free list
     226             :   // category.
     227             :   size_t available_;
     228             : 
     229             :   // |top_|: Points to the top FreeSpace in the free list category.
     230             :   FreeSpace top_;
     231             : 
     232             :   FreeListCategory* prev_;
     233             :   FreeListCategory* next_;
     234             : 
     235             :   friend class FreeList;
     236             :   friend class PagedSpace;
     237             : 
     238             :   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
     239             : };
     240             : 
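                      : // A minimal usage sketch (not part of this header): a caller that needs at
                      : // least |size| bytes typically tries the cheap head-of-list pick first and
                      : // only then falls back to a linear search of the category:
                      : //
                      : //   size_t node_size = 0;
                      : //   FreeSpace node = category->PickNodeFromList(size, &node_size);
                      : //   if (node.is_null()) {
                      : //     node = category->SearchForNodeInList(size, &node_size);
                      : //   }
                      : //   // On success, |node_size| holds the actual size of the returned block.
                      : 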
     241             : class MemoryChunkLayout {
     242             :  public:
     243             :   static size_t CodePageGuardStartOffset();
     244             :   static size_t CodePageGuardSize();
     245             :   static intptr_t ObjectStartOffsetInCodePage();
     246             :   static intptr_t ObjectEndOffsetInCodePage();
     247             :   static size_t AllocatableMemoryInCodePage();
     248             :   static intptr_t ObjectStartOffsetInDataPage();
     249             :   V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
     250             :   static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
     251             :   static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
     252             : };
     253             : 
     254             : // MemoryChunk represents a memory region owned by a specific space.
     255             : // It is divided into the header and the body. Chunk start is always
     256             : // 1MB aligned. Start of the body is aligned so it can accommodate
     257             : // any heap object.
     258             : class MemoryChunk {
     259             :  public:
     260             :   // Use with std data structures.
     261             :   struct Hasher {
     262             :     size_t operator()(MemoryChunk* const chunk) const {
     263   518412058 :       return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
     264             :     }
     265             :   };
     266             : 
     267             :   enum Flag {
     268             :     NO_FLAGS = 0u,
     269             :     IS_EXECUTABLE = 1u << 0,
     270             :     POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
     271             :     POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
     272             :     // A page in new space has one of the next two flags set.
     273             :     IN_FROM_SPACE = 1u << 3,
     274             :     IN_TO_SPACE = 1u << 4,
     275             :     NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
     276             :     EVACUATION_CANDIDATE = 1u << 6,
     277             :     NEVER_EVACUATE = 1u << 7,
     278             : 
     279             :     // Large objects can have a progress bar in their page header. These objects
     280             :     // are scanned in increments and will be kept black while being scanned.
     281             :     // Even if the mutator writes to them they will be kept black, and a
     282             :     // white-to-grey transition is performed on the value.
     283             :     HAS_PROGRESS_BAR = 1u << 8,
     284             : 
     285             :     // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
     286             :     // from new to old space during evacuation.
     287             :     PAGE_NEW_OLD_PROMOTION = 1u << 9,
     288             : 
     289             :     // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
     290             :     // within the new space during evacuation.
     291             :     PAGE_NEW_NEW_PROMOTION = 1u << 10,
     292             : 
     293             :     // This flag is intended to be used for testing. It works only when both
     294             :     // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
     295             :     // are set. It forces the page to become an evacuation candidate at the
     296             :     // next candidate selection cycle.
     297             :     FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
     298             : 
     299             :     // This flag is intended to be used for testing.
     300             :     NEVER_ALLOCATE_ON_PAGE = 1u << 12,
     301             : 
     302             :     // The memory chunk is already logically freed, however the actual freeing
     303             :     // still has to be performed.
     304             :     PRE_FREED = 1u << 13,
     305             : 
     306             :     // |POOLED|: When actually freeing this chunk, only uncommit and do not
     307             :     // give up the reservation as we still reuse the chunk at some point.
     308             :     POOLED = 1u << 14,
     309             : 
     310             :     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
     311             :     //   has been aborted and needs special handling by the sweeper.
     312             :     COMPACTION_WAS_ABORTED = 1u << 15,
     313             : 
     314             :     // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
     315             :     // on pages is sometimes aborted. The flag is used to avoid repeatedly
     316             :     // triggering on the same page.
     317             :     COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
     318             : 
     319             :     // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
     320             :     // to iterate the page.
     321             :     SWEEP_TO_ITERATE = 1u << 17,
     322             : 
     323             :     // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
     324             :     // enabled.
     325             :     INCREMENTAL_MARKING = 1u << 18
     326             :   };
     327             : 
     328             :   using Flags = uintptr_t;
     329             : 
     330             :   static const Flags kPointersToHereAreInterestingMask =
     331             :       POINTERS_TO_HERE_ARE_INTERESTING;
     332             : 
     333             :   static const Flags kPointersFromHereAreInterestingMask =
     334             :       POINTERS_FROM_HERE_ARE_INTERESTING;
     335             : 
     336             :   static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
     337             : 
     338             :   static const Flags kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
     339             : 
     340             :   static const Flags kSkipEvacuationSlotsRecordingMask =
     341             :       kEvacuationCandidateMask | kIsInNewSpaceMask;
     342             : 
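                      :   // An illustrative sketch (not part of this header) of how the flag bits and
                      :   // masks above combine at call sites:
                      :   //
                      :   //   chunk->SetFlag(MemoryChunk::NEVER_EVACUATE);
                      :   //   if (chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE)) { /* in to-space */ }
                      :   //   bool in_new_space = (chunk->GetFlags() & kIsInNewSpaceMask) != 0;
                      : 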
     343             :   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
     344             :   //   not be performed on that page. Sweeper threads that are done with their
     345             :   //   work will set this value and not touch the page anymore.
     346             :   // |kSweepingPending|: This page is ready for parallel sweeping.
     347             :   // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
     348             :   enum ConcurrentSweepingState {
     349             :     kSweepingDone,
     350             :     kSweepingPending,
     351             :     kSweepingInProgress,
     352             :   };
     353             : 
     354             :   static const intptr_t kAlignment =
     355             :       (static_cast<uintptr_t>(1) << kPageSizeBits);
     356             : 
     357             :   static const intptr_t kAlignmentMask = kAlignment - 1;
     358             : 
     359             :   static const intptr_t kSizeOffset = 0;
     360             :   static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
     361             :   static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
     362             :   static const intptr_t kReservationOffset =
     363             :       kMarkBitmapOffset + kSystemPointerSize;
     364             :   static const intptr_t kHeapOffset =
     365             :       kReservationOffset + 3 * kSystemPointerSize;
     366             :   static const intptr_t kHeaderSentinelOffset =
     367             :       kHeapOffset + kSystemPointerSize;
     368             : 
     369             :   static const size_t kHeaderSize =
     370             :       kSizeOffset               // NOLINT
     371             :       + kSizetSize              // size_t size
     372             :       + kUIntptrSize            // uintptr_t flags_
     373             :       + kSystemPointerSize      // Bitmap* marking_bitmap_
     374             :       + 3 * kSystemPointerSize  // VirtualMemory reservation_
     375             :       + kSystemPointerSize      // Heap* heap_
     376             :       + kSystemPointerSize      // Address header_sentinel_
     377             :       + kSystemPointerSize      // Address area_start_
     378             :       + kSystemPointerSize      // Address area_end_
     379             :       + kSystemPointerSize      // Address owner_
     380             :       + kIntptrSize             // intptr_t progress_bar_
     381             :       + kIntptrSize             // std::atomic<intptr_t> live_byte_count_
     382             :       + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
     383             :       + kSystemPointerSize *
     384             :             NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
     385             :       + kSystemPointerSize  // InvalidatedSlots* invalidated_slots_
     386             :       + kSystemPointerSize  // SkipList* skip_list_
     387             :       + kSystemPointerSize  // std::atomic<intptr_t> high_water_mark_
     388             :       + kSystemPointerSize  // base::Mutex* mutex_
     389             :       + kSystemPointerSize  // std::atomic<ConcurrentSweepingState>
     390             :                             // concurrent_sweeping_
     391             :       + kSystemPointerSize  // base::Mutex* page_protection_change_mutex_
     392             :       + kSystemPointerSize  // uintptr_t write_unprotect_counter_
     393             :       + kSizetSize * ExternalBackingStoreType::kNumTypes
     394             :       // std::atomic<size_t> external_backing_store_bytes_
     395             :       + kSizetSize              // size_t allocated_bytes_
     396             :       + kSizetSize              // size_t wasted_memory_
     397             :       + kSystemPointerSize * 2  // base::ListNode
     398             :       + kSystemPointerSize * kNumberOfCategories
     399             :       // FreeListCategory categories_[kNumberOfCategories]
     400             :       + kSystemPointerSize  // LocalArrayBufferTracker* local_tracker_
     401             :       + kIntptrSize  // std::atomic<intptr_t> young_generation_live_byte_count_
     402             :       + kSystemPointerSize;  // Bitmap* young_generation_bitmap_
     403             : 
     404             :   // Page size in bytes.  This must be a multiple of the OS page size.
     405             :   static const int kPageSize = 1 << kPageSizeBits;
     406             : 
     407             :   // Maximum number of nested code memory modification scopes.
     408             :   // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
     409             :   static const int kMaxWriteUnprotectCounter = 4;
     410             : 
     411 11570480305 :   static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
     412             : 
     413             :   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
     414             :   static MemoryChunk* FromAddress(Address a) {
     415    25494635 :     return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
     416             :   }
     417             :   // Only works if the object is in the first kPageSize of the MemoryChunk.
     418  8304544686 :   static MemoryChunk* FromHeapObject(const HeapObject o) {
     419 11282416528 :     return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
     420             :   }
     421             : 
     422             :   void SetOldGenerationPageFlags(bool is_marking);
     423             :   void SetYoungGenerationPageFlags(bool is_marking);
     424             : 
     425             :   static inline MemoryChunk* FromAnyPointerAddress(Address addr);
     426             : 
     427     3008184 :   static inline void UpdateHighWaterMark(Address mark) {
     428     4403812 :     if (mark == kNullAddress) return;
     429             :     // Need to subtract one from the mark because when a chunk is full the
     430             :     // top points to the next address after the chunk, which effectively belongs
     431             :     // to another chunk. See the comment to Page::FromTopOrLimit.
     432     1612556 :     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
     433     1612556 :     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
     434     1612556 :     intptr_t old_mark = 0;
     435     1612556 :     do {
     436     1612556 :       old_mark = chunk->high_water_mark_;
     437             :     } while (
     438     2320645 :         (new_mark > old_mark) &&
     439      708089 :         !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
     440             :   }
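                      :   // Note on the loop above (not part of this header): it implements a
                      :   // lock-free maximum. The CAS is retried only while |new_mark| is still
                      :   // greater than the published mark, so a competing thread that has already
                      :   // raised the mark further simply wins and this call becomes a no-op.
                      : 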
     441             : 
     442             :   static inline void MoveExternalBackingStoreBytes(
     443             :       ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
     444             :       size_t amount);
     445             : 
     446             :   void DiscardUnusedMemory(Address addr, size_t size);
     447             : 
     448             :   Address address() const {
     449  9451438264 :     return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
     450             :   }
     451             : 
     452             :   base::Mutex* mutex() { return mutex_; }
     453             : 
     454     1930229 :   bool Contains(Address addr) {
     455     2015431 :     return addr >= area_start() && addr < area_end();
     456             :   }
     457             : 
     458             :   // Checks whether |addr| can be a limit of addresses in this page. It's a
     459             :   // limit if it's in the page, or if it's just after the last byte of the page.
     460   149131939 :   bool ContainsLimit(Address addr) {
     461   149131939 :     return addr >= area_start() && addr <= area_end();
     462             :   }
     463             : 
     464             :   void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
     465             :     concurrent_sweeping_ = state;
     466             :   }
     467             : 
     468             :   ConcurrentSweepingState concurrent_sweeping_state() {
     469             :     return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
     470             :   }
     471             : 
     472      488574 :   bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
     473             : 
     474             :   size_t size() const { return size_; }
     475             :   void set_size(size_t size) { size_ = size; }
     476             : 
     477             :   inline Heap* heap() const { return heap_; }
     478             : 
     479             :   Heap* synchronized_heap();
     480             : 
     481             :   inline SkipList* skip_list() { return skip_list_; }
     482             : 
     483       93693 :   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
     484             : 
     485             :   template <RememberedSetType type>
     486        1619 :   bool ContainsSlots() {
     487             :     return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
     488        2259 :            invalidated_slots() != nullptr;
     489             :   }
     490             : 
     491             :   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
     492             :   SlotSet* slot_set() {
     493             :     if (access_mode == AccessMode::ATOMIC)
     494   226055179 :       return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
     495             :     return slot_set_[type];
     496             :   }
     497             : 
     498             :   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
     499             :   TypedSlotSet* typed_slot_set() {
     500             :     if (access_mode == AccessMode::ATOMIC)
     501     4049995 :       return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
     502             :     return typed_slot_set_[type];
     503             :   }
     504             : 
     505             :   template <RememberedSetType type>
     506             :   SlotSet* AllocateSlotSet();
     507             :   // Not safe to be called concurrently.
     508             :   template <RememberedSetType type>
     509             :   void ReleaseSlotSet();
     510             :   template <RememberedSetType type>
     511             :   TypedSlotSet* AllocateTypedSlotSet();
     512             :   // Not safe to be called concurrently.
     513             :   template <RememberedSetType type>
     514             :   void ReleaseTypedSlotSet();
     515             : 
     516             :   InvalidatedSlots* AllocateInvalidatedSlots();
     517             :   void ReleaseInvalidatedSlots();
     518             :   void RegisterObjectWithInvalidatedSlots(HeapObject object, int size);
     519             :   // Updates invalidated_slots after array left-trimming.
     520             :   void MoveObjectWithInvalidatedSlots(HeapObject old_start,
     521             :                                       HeapObject new_start);
     522             :   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
     523             :   InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
     524             : 
     525             :   void ReleaseLocalTracker();
     526             : 
     527             :   void AllocateYoungGenerationBitmap();
     528             :   void ReleaseYoungGenerationBitmap();
     529             : 
     530             :   void AllocateMarkingBitmap();
     531             :   void ReleaseMarkingBitmap();
     532             : 
     533             :   Address area_start() { return area_start_; }
     534             :   Address area_end() { return area_end_; }
     535    10876871 :   size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
     536             : 
     537             :   // Approximate amount of physical memory committed for this chunk.
     538             :   size_t CommittedPhysicalMemory();
     539             : 
     540      188346 :   Address HighWaterMark() { return address() + high_water_mark_; }
     541             : 
     542       49489 :   int progress_bar() {
     543             :     DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
     544       65857 :     return static_cast<int>(progress_bar_.load(std::memory_order_relaxed));
     545             :   }
     546             : 
     547       49484 :   void set_progress_bar(int progress_bar) {
     548             :     DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
     549             :     progress_bar_.store(progress_bar, std::memory_order_relaxed);
     550       49484 :   }
     551             : 
     552       56216 :   void ResetProgressBar() {
     553       56216 :     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
     554             :       set_progress_bar(0);
     555             :     }
     556       56216 :   }
     557             : 
     558             :   inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
     559             :                                                  size_t amount);
     560             : 
     561             :   inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
     562             :                                                  size_t amount);
     563             : 
     564             :   size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
     565     1538223 :     return external_backing_store_bytes_[type];
     566             :   }
     567             : 
     568             :   // Some callers rely on the fact that this can operate on both
     569             :   // tagged and aligned object addresses.
     570  8309542502 :   inline uint32_t AddressToMarkbitIndex(Address addr) const {
     571 18090249233 :     return static_cast<uint32_t>(addr - this->address()) >>
     572  9045287764 :            kSystemPointerSizeLog2;
     573             :   }
     574             : 
     575             :   inline Address MarkbitIndexToAddress(uint32_t index) const {
     576             :     return this->address() + (index << kSystemPointerSizeLog2);
     577             :   }
     578             : 
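                      :   // Worked example (not part of this header): on a 64-bit build
                      :   // (kSystemPointerSizeLog2 == 3), an object 0x170 bytes past the chunk start
                      :   // gets markbit index 0x170 >> 3 == 46, and MarkbitIndexToAddress(46) maps
                      :   // back to chunk start + 0x170; one markbit covers one pointer-sized word.
                      : 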
     579             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     580             :   void SetFlag(Flag flag) {
     581             :     if (access_mode == AccessMode::NON_ATOMIC) {
     582     3673302 :       flags_ |= flag;
     583             :     } else {
     584        7422 :       base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
     585             :     }
     586             :   }
     587             : 
     588             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     589   626076432 :   bool IsFlagSet(Flag flag) {
     590  7209081682 :     return (GetFlags<access_mode>() & flag) != 0;
     591             :   }
     592             : 
     593     2508736 :   void ClearFlag(Flag flag) { flags_ &= ~flag; }
     594             :   // Set or clear multiple flags at a time. The flags in the mask are set to
     595             :   // the value in |flags|, the rest retain the current value in |flags_|.
     596             :   void SetFlags(uintptr_t flags, uintptr_t mask) {
     597      624215 :     flags_ = (flags_ & ~mask) | (flags & mask);
     598             :   }
     599             : 
     600             :   // Return all current flags.
     601             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     602             :   uintptr_t GetFlags() {
     603             :     if (access_mode == AccessMode::NON_ATOMIC) {
     604             :       return flags_;
     605             :     } else {
     606  6508361866 :       return base::AsAtomicWord::Relaxed_Load(&flags_);
     607             :     }
     608             :   }
     609             : 
     610             :   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
     611             : 
     612             :   void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
     613             : 
     614             :   bool CanAllocate() {
     615      255648 :     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
     616             :   }
     617             : 
     618             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     619  6448838394 :   bool IsEvacuationCandidate() {
     620             :     DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
     621             :              IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
     622  6448838394 :     return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
     623             :   }
     624             : 
     625             :   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
     626    48271635 :   bool ShouldSkipEvacuationSlotRecording() {
     627             :     uintptr_t flags = GetFlags<access_mode>();
     628             :     return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
     629    48271635 :            ((flags & COMPACTION_WAS_ABORTED) == 0);
     630             :   }
     631             : 
     632             :   Executability executable() {
     633     2646410 :     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
     634             :   }
     635             : 
     636  1282468171 :   bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
     637             : 
     638             :   bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
     639             : 
     640             :   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
     641             : 
     642             :   bool InOldSpace() const;
     643             : 
     644             :   bool InLargeObjectSpace() const;
     645             : 
     646             :   inline bool IsInNewLargeObjectSpace() const;
     647             : 
     648             :   Space* owner() const { return owner_; }
     649             : 
     650             :   void set_owner(Space* space) { owner_ = space; }
     651             : 
     652             :   static inline bool HasHeaderSentinel(Address slot_addr);
     653             : 
     654             :   // Emits a memory barrier. For TSAN builds the other thread needs to perform
     655             :   // MemoryChunk::synchronized_heap() to simulate the barrier.
     656             :   void InitializationMemoryFence();
     657             : 
     658             :   void SetReadable();
     659             :   void SetReadAndExecutable();
     660             :   void SetReadAndWritable();
     661             : 
     662     3293276 :   void SetDefaultCodePermissions() {
     663     3293276 :     if (FLAG_jitless) {
     664           0 :       SetReadable();
     665             :     } else {
     666     3293276 :       SetReadAndExecutable();
     667             :     }
     668     3293277 :   }
     669             : 
     670             :   base::ListNode<MemoryChunk>& list_node() { return list_node_; }
     671             : 
     672             :  protected:
     673             :   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
     674             :                                  Address area_start, Address area_end,
     675             :                                  Executability executable, Space* owner,
     676             :                                  VirtualMemory reservation);
     677             : 
     678             :   // Should be called when the memory chunk is about to be freed.
     679             :   void ReleaseAllocatedMemory();
     680             : 
     681             :   // Sets the requested page permissions only if the write unprotect counter
     682             :   // has reached 0.
     683             :   void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     684             :       PageAllocator::Permission permission);
     685             : 
     686             :   VirtualMemory* reserved_memory() { return &reservation_; }
     687             : 
     688             :   size_t size_;
     689             :   uintptr_t flags_;
     690             : 
     691             :   Bitmap* marking_bitmap_;
     692             : 
     693             :   // If the chunk needs to remember its memory reservation, it is stored here.
     694             :   VirtualMemory reservation_;
     695             : 
     696             :   Heap* heap_;
     697             : 
     698             :   // This is used to distinguish the memory chunk header from the interior of a
     699             :   // large page. The memory chunk header stores an impossible tagged pointer
     700             :   // here: the tagged pointer of the page start. A field in a large object is
     701             :   // guaranteed to not contain such a pointer.
     702             :   Address header_sentinel_;
     703             : 
     704             :   // Start and end of allocatable memory on this chunk.
     705             :   Address area_start_;
     706             :   Address area_end_;
     707             : 
     708             :   // The space owning this memory chunk.
     709             :   std::atomic<Space*> owner_;
     710             : 
     711             :   // Used by the incremental marker to keep track of the scanning progress in
     712             :   // large objects that have a progress bar and are scanned in increments.
     713             :   std::atomic<intptr_t> progress_bar_;
     714             : 
     715             :   // Count of bytes marked black on page.
     716             :   std::atomic<intptr_t> live_byte_count_;
     717             : 
     718             :   // A single slot set for small pages (of size kPageSize) or an array of slot
     719             :   // sets for large pages. In the latter case the number of entries in the array
     720             :   // is ceil(size() / kPageSize).
     721             :   SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
     722             :   TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
     723             :   InvalidatedSlots* invalidated_slots_;
     724             : 
     725             :   SkipList* skip_list_;
     726             : 
     727             :   // Assuming the initial allocation on a page is sequential,
     728             :   // this counts the highest number of bytes ever allocated on the page.
     729             :   std::atomic<intptr_t> high_water_mark_;
     730             : 
     731             :   base::Mutex* mutex_;
     732             : 
     733             :   std::atomic<intptr_t> concurrent_sweeping_;
     734             : 
     735             :   base::Mutex* page_protection_change_mutex_;
     736             : 
     737             :   // This field is only relevant for code pages. It tracks the number of
     738             :   // times a component requested this page to be read+writeable. The
     739             :   // counter is decremented when a component resets to read+executable.
     740             :   // If Value() == 0 => The memory is read and executable.
     741             :   // If Value() >= 1 => The memory is read and writable (and maybe executable).
     742             :   // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
     743             :   // excessive nesting of scopes.
     744             :   // All executable MemoryChunks are allocated rw based on the assumption that
     745             :   // they will be used immediately for an allocation. They are initialized
     746             :   // with the number of open CodeSpaceMemoryModificationScopes. The caller
     747             :   // that triggers the page allocation is responsible for decrementing the
     748             :   // counter.
     749             :   uintptr_t write_unprotect_counter_;
     750             : 
     751             :   // Bytes allocated on the page, including all objects on the page
     752             :   // and the linear allocation area.
     753             :   size_t allocated_bytes_;
     754             : 
     755             :   // Tracks off-heap memory used by this memory chunk.
     756             :   std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
     757             : 
     758             :   // Freed memory that was not added to the free list.
     759             :   size_t wasted_memory_;
     760             : 
     761             :   base::ListNode<MemoryChunk> list_node_;
     762             : 
     763             :   FreeListCategory* categories_[kNumberOfCategories];
     764             : 
     765             :   LocalArrayBufferTracker* local_tracker_;
     766             : 
     767             :   std::atomic<intptr_t> young_generation_live_byte_count_;
     768             :   Bitmap* young_generation_bitmap_;
     769             : 
     770             :  private:
     771      728988 :   void InitializeReservedMemory() { reservation_.Reset(); }
     772             : 
     773             :   friend class ConcurrentMarkingState;
     774             :   friend class IncrementalMarkingState;
     775             :   friend class MajorAtomicMarkingState;
     776             :   friend class MajorMarkingState;
     777             :   friend class MajorNonAtomicMarkingState;
     778             :   friend class MemoryAllocator;
     779             :   friend class MemoryChunkValidator;
     780             :   friend class MinorMarkingState;
     781             :   friend class MinorNonAtomicMarkingState;
     782             :   friend class PagedSpace;
     783             : };
     784             : 
     785             : static_assert(sizeof(std::atomic<intptr_t>) == kSystemPointerSize,
     786             :               "sizeof(std::atomic<intptr_t>) == kSystemPointerSize");
     787             : 
     788             : // -----------------------------------------------------------------------------
     789             : // A page is a memory chunk of size 512K. Large object pages may be larger.
     790             : //
     791             : // The only way to get a page pointer is by calling factory methods:
     792             : //   Page* p = Page::FromAddress(addr); or
     793             : //   Page* p = Page::FromTopOrLimit(top);
     794             : class Page : public MemoryChunk {
     795             :  public:
     796             :   static const intptr_t kCopyAllFlags = ~0;
     797             : 
     798             :   // Page flags copied from from-space to to-space when flipping semispaces.
     799             :   static const intptr_t kCopyOnFlipFlagsMask =
     800             :       static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
     801             :       static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
     802             :       static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
     803             : 
     804             :   // Returns the page containing a given address. The address ranges
     805             :   // from [page_addr .. page_addr + kPageSize[. This only works if the object
     806             :   // is in fact in a page.
     807             :   static Page* FromAddress(Address addr) {
     808   421266328 :     return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
     809             :   }
     810 12771379103 :   static Page* FromHeapObject(const HeapObject o) {
     811 12834434462 :     return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
     812             :   }
     813             : 
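                      :   // Worked example (not part of this header): with 512K pages,
                      :   // kPageAlignmentMask == 0x7FFFF, so Page::FromAddress(0x2A801234) masks the
                      :   // low bits away and yields the Page* at 0x2A800000.
                      : 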
     814             :   // Returns the page containing the address provided. The address can
     815             :   // potentially point right after the page. To also be safe for tagged values,
     816             :   // we subtract a hole word. The valid address ranges from
     817             :   // [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
     818             :   static Page* FromAllocationAreaAddress(Address address) {
     819     1223607 :     return Page::FromAddress(address - kTaggedSize);
     820             :   }
     821             : 
     822             :   // Checks if address1 and address2 are on the same new space page.
     823             :   static bool OnSamePage(Address address1, Address address2) {
     824             :     return Page::FromAddress(address1) == Page::FromAddress(address2);
     825             :   }
     826             : 
     827             :   // Checks whether an address is page aligned.
     828             :   static bool IsAlignedToPageSize(Address addr) {
     829     1795998 :     return (addr & kPageAlignmentMask) == 0;
     830             :   }
     831             : 
     832             :   static Page* ConvertNewToOld(Page* old_page);
     833             : 
     834             :   inline void MarkNeverAllocateForTesting();
     835             :   inline void MarkEvacuationCandidate();
     836             :   inline void ClearEvacuationCandidate();
     837             : 
     838     8045386 :   Page* next_page() { return static_cast<Page*>(list_node_.next()); }
     839         287 :   Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
     840             : 
     841             :   template <typename Callback>
     842           0 :   inline void ForAllFreeListCategories(Callback callback) {
     843     8760153 :     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     844     8760153 :       callback(categories_[i]);
     845             :     }
     846           0 :   }
     847             : 
     848             :   // Returns the offset of a given address within this page.
     849         382 :   inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
     850             : 
     851             :   // Returns the address for a given offset in this page.
     852             :   Address OffsetToAddress(size_t offset) {
     853         170 :     Address address_in_page = address() + offset;
     854             :     DCHECK_GE(address_in_page, area_start_);
     855             :     DCHECK_LT(address_in_page, area_end_);
     856             :     return address_in_page;
     857             :   }
     858             : 
     859             :   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
     860             :   // progress, in particular when we know that right before this call a
     861             :   // sweeper thread was sweeping this page.
     862             :   void WaitUntilSweepingCompleted() {
     863           0 :     mutex_->Lock();
     864           0 :     mutex_->Unlock();
     865             :     DCHECK(SweepingDone());
     866             :   }
     867             : 
     868             :   void AllocateLocalTracker();
     869             :   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
     870             :   bool contains_array_buffers();
     871             : 
     872             :   void ResetFreeListStatistics();
     873             : 
     874             :   size_t AvailableInFreeList();
     875             : 
     876             :   size_t AvailableInFreeListFromAllocatedBytes() {
     877             :     DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
     878             :     return area_size() - wasted_memory() - allocated_bytes();
     879             :   }
     880             : 
     881             :   FreeListCategory* free_list_category(FreeListCategoryType type) {
     882    22740637 :     return categories_[type];
     883             :   }
     884             : 
     885             :   size_t wasted_memory() { return wasted_memory_; }
     886      516731 :   void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
     887             :   size_t allocated_bytes() { return allocated_bytes_; }
     888             :   void IncreaseAllocatedBytes(size_t bytes) {
     889             :     DCHECK_LE(bytes, area_size());
     890     1332024 :     allocated_bytes_ += bytes;
     891             :   }
     892             :   void DecreaseAllocatedBytes(size_t bytes) {
     893             :     DCHECK_LE(bytes, area_size());
     894             :     DCHECK_GE(allocated_bytes(), bytes);
     895    23259018 :     allocated_bytes_ -= bytes;
     896             :   }
     897             : 
     898             :   void ResetAllocatedBytes();
     899             : 
     900             :   size_t ShrinkToHighWaterMark();
     901             : 
     902             :   V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
     903             :   void DestroyBlackArea(Address start, Address end);
     904             : 
     905             :   void InitializeFreeListCategories();
     906             :   void AllocateFreeListCategories();
     907             :   void ReleaseFreeListCategories();
     908             : 
     909             : #ifdef DEBUG
     910             :   void Print();
     911             : #endif  // DEBUG
     912             : 
     913             :  private:
     914             :   enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
     915             : 
     916             :   friend class MemoryAllocator;
     917             : };
     918             : 
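                      : // A typical page walk (a sketch, not part of this header): spaces chain their
                      : // pages through the intrusive base::ListNode, so iteration, assuming a
                      : // first_page() accessor on the owning space, looks like:
                      : //
                      : //   for (Page* p = space->first_page(); p != nullptr; p = p->next_page()) {
                      : //     // visit p
                      : //   }
                      : 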
     919             : class ReadOnlyPage : public Page {
     920             :  public:
     921             :   // Clears any pointers in the header that point out of the page and would
     922             :   // otherwise make the header non-relocatable.
     923             :   void MakeHeaderRelocatable();
     924             : 
     925             :  private:
     926             :   friend class ReadOnlySpace;
     927             : };
     928             : 
     929             : class LargePage : public MemoryChunk {
     930             :  public:
     931             :   // A limit to guarantee that we do not overflow the typed slot offset in
     932             :   // the old-to-old remembered set.
     933             :   // Note that this limit is higher than what the assembler already imposes on
     934             :   // x64 and ia32 architectures.
     935             :   static const int kMaxCodePageSize = 512 * MB;
     936             : 
     937             :   static LargePage* FromHeapObject(const HeapObject o) {
     938             :     return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
     939             :   }
     940             : 
     941             :   inline HeapObject GetObject();
     942             : 
     943             :   inline LargePage* next_page() {
     944     1657687 :     return static_cast<LargePage*>(list_node_.next());
     945             :   }
     946             : 
     947             :   // Uncommit memory that is no longer in use by the object. If the object
     948             :   // cannot be shrunk, 0 is returned.
     949             :   Address GetAddressToShrink(Address object_address, size_t object_size);
     950             : 
     951             :   void ClearOutOfLiveRangeSlots(Address free_start);
     952             : 
     953             :  private:
     954             :   static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
     955             :                                Executability executable);
     956             : 
     957             :   friend class MemoryAllocator;
     958             : };
     959             : 
     960             : 
     961             : // ----------------------------------------------------------------------------
     962             : // Space is the abstract superclass for all allocation spaces.
     963             : class Space : public Malloced {
     964             :  public:
     965      875213 :   Space(Heap* heap, AllocationSpace id)
     966             :       : allocation_observers_paused_(false),
     967             :         heap_(heap),
     968             :         id_(id),
     969             :         committed_(0),
     970     1750426 :         max_committed_(0) {
     971             :     external_backing_store_bytes_ =
     972      875213 :         new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
     973             :     external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
     974      875212 :     external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
     975             :         0;
     976      875212 :   }
     977             : 
     978             :   static inline void MoveExternalBackingStoreBytes(
     979             :       ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
     980             : 
     981      875063 :   virtual ~Space() {
     982      875063 :     delete[] external_backing_store_bytes_;
     983      875064 :     external_backing_store_bytes_ = nullptr;
     984      875064 :   }
     985             : 
     986             :   Heap* heap() const { return heap_; }
     987             : 
     988             :   // Identity used in error reporting.
     989             :   AllocationSpace identity() { return id_; }
     990             : 
     991           0 :   const char* name() { return AllocationSpaceName(id_); }
     992             : 
     993             :   V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
     994             :       AllocationObserver* observer);
     995             : 
     996             :   V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
     997             :       AllocationObserver* observer);
     998             : 
     999             :   V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
    1000             : 
    1001             :   V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
    1002             : 
    1003      189305 :   V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
    1004             : 
    1005             :   void AllocationStep(int bytes_since_last, Address soon_object, int size);
    1006             : 
    1007             :   // Returns the total amount of committed memory for this space, i.e.,
    1008             :   // allocatable memory and page headers.
    1009     5227620 :   virtual size_t CommittedMemory() { return committed_; }
    1010             : 
    1011           0 :   virtual size_t MaximumCommittedMemory() { return max_committed_; }
    1012             : 
    1013             :   // Returns allocated size.
    1014             :   virtual size_t Size() = 0;
    1015             : 
    1016             :   // Returns size of objects. Can differ from the allocated size
    1017             :   // (e.g. see LargeObjectSpace).
    1018           0 :   virtual size_t SizeOfObjects() { return Size(); }
    1019             : 
    1020             :   // Approximate amount of physical memory committed for this space.
    1021             :   virtual size_t CommittedPhysicalMemory() = 0;
    1022             : 
    1023             :   // Return the available bytes without growing.
    1024             :   virtual size_t Available() = 0;
    1025             : 
    1026    21843593 :   virtual int RoundSizeDownToObjectAlignment(int size) {
    1027    21843593 :     if (id_ == CODE_SPACE) {
    1028           0 :       return RoundDown(size, kCodeAlignment);
    1029             :     } else {
    1030    21843593 :       return RoundDown(size, kTaggedSize);
    1031             :     }
    1032             :   }
    1033             : 
    1034             :   virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
    1035             : 
    1036             :   void AccountCommitted(size_t bytes) {
    1037             :     DCHECK_GE(committed_ + bytes, committed_);
    1038      769798 :     committed_ += bytes;
    1039      769798 :     if (committed_ > max_committed_) {
    1040      667952 :       max_committed_ = committed_;
    1041             :     }
    1042             :   }
    1043             : 
    1044             :   void AccountUncommitted(size_t bytes) {
    1045             :     DCHECK_GE(committed_, committed_ - bytes);
    1046      473971 :     committed_ -= bytes;
    1047             :   }
    1048             : 
    1049             :   inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
    1050             :                                                  size_t amount);
    1051             : 
    1052             :   inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
    1053             :                                                  size_t amount);
    1054             : 
    1055             :   // Returns the amount of off-heap memory in use by objects in this Space.
    1056          65 :   virtual size_t ExternalBackingStoreBytes(
    1057             :       ExternalBackingStoreType type) const {
    1058         160 :     return external_backing_store_bytes_[type];
    1059             :   }
    1060             : 
    1061             :   V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
    1062             : 
    1063     6232458 :   MemoryChunk* first_page() { return memory_chunk_list_.front(); }
    1064       17637 :   MemoryChunk* last_page() { return memory_chunk_list_.back(); }
    1065             : 
    1066             :   base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
    1067             : 
    1068             : #ifdef DEBUG
    1069             :   virtual void Print() = 0;
    1070             : #endif
    1071             : 
    1072             :  protected:
    1073             :   intptr_t GetNextInlineAllocationStepSize();
    1074             :   bool AllocationObserversActive() {
    1075   550150435 :     return !allocation_observers_paused_ && !allocation_observers_.empty();
    1076             :   }
    1077             : 
    1078             :   std::vector<AllocationObserver*> allocation_observers_;
    1079             : 
    1080             :   // The List manages the pages that belong to the given space.
    1081             :   base::List<MemoryChunk> memory_chunk_list_;
    1082             : 
    1083             :   // Tracks off-heap memory used by this space.
    1084             :   std::atomic<size_t>* external_backing_store_bytes_;
    1085             : 
    1086             :  private:
    1087             :   bool allocation_observers_paused_;
    1088             :   Heap* heap_;
    1089             :   AllocationSpace id_;
    1090             : 
    1091             :   // Keeps track of committed memory in a space.
    1092             :   size_t committed_;
    1093             :   size_t max_committed_;
    1094             : 
    1095             :   DISALLOW_COPY_AND_ASSIGN(Space);
    1096             : };
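                     : 
                     : // Worked example (illustrative, not part of the header): committed-memory
                     : // accounting in Space is monotonic in max_committed_. Assuming a Space*
                     : // |space| obtained elsewhere:
                     : //   space->AccountCommitted(2 * MB);    // committed = 2 MB, max = 2 MB
                     : //   space->AccountUncommitted(1 * MB);  // committed = 1 MB, max stays 2 MB
                     : //   space->AccountCommitted(512 * KB);  // committed = 1.5 MB, max stays 2 MB
                     : //   DCHECK_EQ(2 * MB, space->MaximumCommittedMemory());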
    1097             : 
    1098             : 
    1099             : class MemoryChunkValidator {
    1100             :   // Computed offsets should match the compiler generated ones.
    1101             :   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
    1102             : 
    1103             :   // Validate our estimates on the header size.
    1104             :   STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
    1105             :   STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
    1106             :   STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
    1107             : };
    1108             : 
    1109             : 
    1110             : // The process-wide singleton that keeps track of code range regions with the
    1111             : // intention of reusing free code range regions as a workaround for CFG memory
    1112             : // leaks (see crbug.com/870054).
    1113      118984 : class CodeRangeAddressHint {
    1114             :  public:
    1115             :   // Returns the most recently freed code range start address for the given
    1116             :   // size. If there is no such entry, then a random address is returned.
    1117             :   V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
    1118             : 
    1119             :   V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
    1120             :                                               size_t code_range_size);
    1121             : 
    1122             :  private:
    1123             :   base::Mutex mutex_;
    1124             :   // A map from code range size to an array of recently freed code range
    1125             :   // addresses. There should be O(1) different code range sizes.
    1126             :   // The length of each array is limited by the peak number of code ranges,
    1127             : // which should also be O(1).
    1128             :   std::unordered_map<size_t, std::vector<Address>> recently_freed_;
    1129             : };
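                     : 
                     : // Usage sketch (illustrative): the hint recycles freed code range start
                     : // addresses, so a reservation of the same size can land on a region that
                     : // was just released (|hint| is a hypothetical instance):
                     : //   Address start = hint.GetAddressHint(128 * MB);  // random the first time
                     : //   hint.NotifyFreedCodeRange(start, 128 * MB);
                     : //   CHECK_EQ(start, hint.GetAddressHint(128 * MB));  // reuses the address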
    1130             : 
    1131             : class SkipList {
    1132             :  public:
    1133             :   SkipList() { Clear(); }
    1134             : 
    1135             :   void Clear() {
    1136    13371136 :     for (int idx = 0; idx < kSize; idx++) {
    1137    13371136 :       starts_[idx] = static_cast<Address>(-1);
    1138             :     }
    1139             :   }
    1140             : 
    1141      521465 :   Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
    1142             : 
    1143             :   void AddObject(Address addr, int size) {
    1144             :     int start_region = RegionNumber(addr);
    1145    97121060 :     int end_region = RegionNumber(addr + size - kTaggedSize);
    1146   100573066 :     for (int idx = start_region; idx <= end_region; idx++) {
    1147   100573066 :       if (starts_[idx] > addr) {
    1148     2850646 :         starts_[idx] = addr;
    1149             :       } else {
    1150             :         // In the first region, there may already be an object closer to the
    1151             :         // start of the region. Do not change the start in that case. If this
    1152             :         // is not the first region, you probably added overlapping objects.
    1153             :         DCHECK_EQ(start_region, idx);
    1154             :       }
    1155             :     }
    1156             :   }
    1157             : 
    1158             :   static inline int RegionNumber(Address addr) {
    1159   447981583 :     return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
    1160             :   }
    1161             : 
    1162    97121060 :   static void Update(Address addr, int size) {
    1163             :     Page* page = Page::FromAddress(addr);
    1164    97121060 :     SkipList* list = page->skip_list();
    1165    97121060 :     if (list == nullptr) {
    1166       93693 :       list = new SkipList();
    1167             :       page->set_skip_list(list);
    1168             :     }
    1169             : 
    1170             :     list->AddObject(addr, size);
    1171    97121060 :   }
    1172             : 
    1173             :  private:
    1174             :   static const int kRegionSizeLog2 = 13;
    1175             :   static const int kRegionSize = 1 << kRegionSizeLog2;
    1176             :   static const int kSize = Page::kPageSize / kRegionSize;
    1177             : 
    1178             :   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
    1179             : 
    1180             :   Address starts_[kSize];
    1181             : };
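                     : 
                     : // Worked example (illustrative): with kRegionSizeLog2 == 13 a page is split
                     : // into 8 KB regions. An object of 0x20 bytes at page offset 0x3ff0 starts in
                     : // region 1 (0x3ff0 >> 13) and ends in region 2, so AddObject updates both
                     : // starts_ entries (|page_start| is assumed to be page-aligned):
                     : //   SkipList::Update(page_start + 0x3ff0, 0x20);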
    1182             : 
    1183             : 
    1184             : // ----------------------------------------------------------------------------
    1185             : // A space acquires chunks of memory from the operating system. The memory
    1186             : // allocator allocates and deallocates pages for the paged heap spaces and
    1187             : // large pages for the large object space.
    1188      191649 : class V8_EXPORT_PRIVATE MemoryAllocator {
    1189             :  public:
    1190             :   // Unmapper takes care of concurrently unmapping and uncommitting memory
    1191             :   // chunks.
    1192      127766 :   class Unmapper {
    1193             :    public:
    1194             :     class UnmapFreeMemoryTask;
    1195             : 
    1196       63898 :     Unmapper(Heap* heap, MemoryAllocator* allocator)
    1197             :         : heap_(heap),
    1198             :           allocator_(allocator),
    1199             :           pending_unmapping_tasks_semaphore_(0),
    1200             :           pending_unmapping_tasks_(0),
    1201      255592 :           active_unmapping_tasks_(0) {
    1202       63898 :       chunks_[kRegular].reserve(kReservedQueueingSlots);
    1203       63898 :       chunks_[kPooled].reserve(kReservedQueueingSlots);
    1204       63898 :     }
    1205             : 
    1206      247423 :     void AddMemoryChunkSafe(MemoryChunk* chunk) {
    1207      489952 :       if (!heap_->IsLargeMemoryChunk(chunk) &&
    1208             :           chunk->executable() != EXECUTABLE) {
    1209      240340 :         AddMemoryChunkSafe<kRegular>(chunk);
    1210             :       } else {
    1211        7083 :         AddMemoryChunkSafe<kNonRegular>(chunk);
    1212             :       }
    1213      247423 :     }
    1214             : 
    1215      226986 :     MemoryChunk* TryGetPooledMemoryChunkSafe() {
    1216             :       // Procedure:
    1217             :       // (1) Try to get a chunk that was declared as pooled and already has
    1218             :       // been uncommitted.
    1219             :       // (2) Try to steal any memory chunk of kPageSize that would've been
    1220             :       // unmapped.
    1221      226986 :       MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
    1222      226987 :       if (chunk == nullptr) {
    1223      206749 :         chunk = GetMemoryChunkSafe<kRegular>();
    1224      206749 :         if (chunk != nullptr) {
    1225             :           // For stolen chunks we need to manually free any allocated memory.
    1226        8500 :           chunk->ReleaseAllocatedMemory();
    1227             :         }
    1228             :       }
    1229      226987 :       return chunk;
    1230             :     }
    1231             : 
    1232             :     V8_EXPORT_PRIVATE void FreeQueuedChunks();
    1233             :     void CancelAndWaitForPendingTasks();
    1234             :     void PrepareForMarkCompact();
    1235             :     void EnsureUnmappingCompleted();
    1236             :     V8_EXPORT_PRIVATE void TearDown();
    1237             :     size_t NumberOfCommittedChunks();
    1238             :     int NumberOfChunks();
    1239             :     size_t CommittedBufferedMemory();
    1240             : 
    1241             :    private:
    1242             :     static const int kReservedQueueingSlots = 64;
    1243             :     static const int kMaxUnmapperTasks = 4;
    1244             : 
    1245             :     enum ChunkQueueType {
    1246             :       kRegular,     // Pages of kPageSize that do not live in a CodeRange and
    1247             :                     // can thus be used for stealing.
    1248             :       kNonRegular,  // Large chunks and executable chunks.
    1249             :       kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
    1250             :       kNumberOfChunkQueues,
    1251             :     };
    1252             : 
    1253             :     enum class FreeMode {
    1254             :       kUncommitPooled,
    1255             :       kReleasePooled,
    1256             :     };
    1257             : 
    1258             :     template <ChunkQueueType type>
    1259      465355 :     void AddMemoryChunkSafe(MemoryChunk* chunk) {
    1260      465355 :       base::MutexGuard guard(&mutex_);
    1261      465357 :       chunks_[type].push_back(chunk);
    1262      465357 :     }
    1263             : 
    1264             :     template <ChunkQueueType type>
    1265     1882087 :     MemoryChunk* GetMemoryChunkSafe() {
    1266     1882087 :       base::MutexGuard guard(&mutex_);
    1267     1882436 :       if (chunks_[type].empty()) return nullptr;
    1268      465357 :       MemoryChunk* chunk = chunks_[type].back();
    1269             :       chunks_[type].pop_back();
    1270      465357 :       return chunk;
    1271             :     }
    1272             : 
    1273             :     bool MakeRoomForNewTasks();
    1274             : 
    1275             :     template <FreeMode mode>
    1276             :     void PerformFreeMemoryOnQueuedChunks();
    1277             : 
    1278             :     void PerformFreeMemoryOnQueuedNonRegularChunks();
    1279             : 
    1280             :     Heap* const heap_;
    1281             :     MemoryAllocator* const allocator_;
    1282             :     base::Mutex mutex_;
    1283             :     std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
    1284             :     CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
    1285             :     base::Semaphore pending_unmapping_tasks_semaphore_;
    1286             :     intptr_t pending_unmapping_tasks_;
    1287             :     std::atomic<intptr_t> active_unmapping_tasks_;
    1288             : 
    1289             :     friend class MemoryAllocator;
    1290             :   };
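                     : 
                     :   // Typical Unmapper flow (illustrative): chunks are queued under a lock,
                     :   // freed by background tasks, and regular-sized ones may be reused later:
                     :   //   unmapper()->AddMemoryChunkSafe(chunk);  // kRegular or kNonRegular
                     :   //   unmapper()->FreeQueuedChunks();         // schedules unmapping tasks
                     :   //   MemoryChunk* reused = unmapper()->TryGetPooledMemoryChunkSafe();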
    1291             : 
    1292             :   enum AllocationMode {
    1293             :     kRegular,
    1294             :     kPooled,
    1295             :   };
    1296             : 
    1297             :   enum FreeMode {
    1298             :     kFull,
    1299             :     kAlreadyPooled,
    1300             :     kPreFreeAndQueue,
    1301             :     kPooledAndQueue,
    1302             :   };
    1303             : 
    1304             :   static intptr_t GetCommitPageSize();
    1305             : 
    1306             :   // Computes the area of discardable memory within a given memory region
    1307             :   // [addr, addr+size) and returns the result as a base::AddressRegion. If no
    1308             :   // memory is discardable, an empty region is returned.
    1309             :   static base::AddressRegion ComputeDiscardMemoryArea(Address addr,
    1310             :                                                       size_t size);
    1311             : 
    1312             :   MemoryAllocator(Isolate* isolate, size_t max_capacity,
    1313             :                   size_t code_range_size);
    1314             : 
    1315             :   void TearDown();
    1316             : 
    1317             :   // Allocates a Page from the allocator. AllocationMode is used to indicate
    1318             :   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
    1319             :   // should be tried first.
    1320             :   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
    1321             :             typename SpaceType>
    1322             :   EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    1323             :   Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
    1324             : 
    1325             :   LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
    1326             :                                Executability executable);
    1327             : 
    1328             :   template <MemoryAllocator::FreeMode mode = kFull>
    1329             :   EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    1330      197696 :   void Free(MemoryChunk* chunk);
    1331             : 
    1332             :   // Returns the total allocated space in bytes.
    1333             :   size_t Size() { return size_; }
    1334             : 
    1335             :   // Returns the total allocated executable space in bytes.
    1336             :   size_t SizeExecutable() { return size_executable_; }
    1337             : 
    1338             :   // Returns the maximum number of bytes still available in the heap.
    1339             :   size_t Available() {
    1340             :     const size_t size = Size();
    1341         327 :     return capacity_ < size ? 0 : capacity_ - size;
    1342             :   }
    1343             : 
    1344             :   // Returns an indication of whether a pointer is in a space that has
    1345             :   // been allocated by this MemoryAllocator.
    1346             :   V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
    1347     3906836 :     return address < lowest_ever_allocated_ ||
    1348     1953418 :            address >= highest_ever_allocated_;
    1349             :   }
    1350             : 
    1351             :   // Returns a MemoryChunk in which the memory region from commit_area_size to
    1352             :   // reserve_area_size of the chunk area is reserved but not committed; it can
    1353             :   // be committed later by calling MemoryChunk::CommitArea.
    1354             :   MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
    1355             :                              Executability executable, Space* space);
    1356             : 
    1357             :   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
    1358             :                                 size_t alignment, Executability executable,
    1359             :                                 void* hint, VirtualMemory* controller);
    1360             : 
    1361             :   void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
    1362             : 
    1363             :   // Partially releases |bytes_to_free| bytes starting at |start_free|. Note
    1364             :   // that internally memory is freed from |start_free| to the end of the
    1365             :   // reservation. Additional memory beyond the page is not accounted for,
    1366             :   // though, so |bytes_to_free| must be computed by the caller.
    1367             :   void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
    1368             :                          size_t bytes_to_free, Address new_area_end);
    1369             : 
    1370             :   // Checks if an allocated MemoryChunk was intended to be used for executable
    1371             :   // memory.
    1372             :   bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
    1373             :     return executable_memory_.find(chunk) != executable_memory_.end();
    1374             :   }
    1375             : 
    1376             :   // Commits the memory region owned by the given reservation object. Returns
    1377             :   // true if it succeeded and false otherwise.
    1378             :   bool CommitMemory(VirtualMemory* reservation);
    1379             : 
    1380             :   // Uncommits the memory region owned by the given reservation object.
    1381             :   // Returns true if it succeeded and false otherwise.
    1382             :   bool UncommitMemory(VirtualMemory* reservation);
    1383             : 
    1384             :   // Zaps a contiguous block of memory [start..(start+size)[ with
    1385             :   // a given zap value.
    1386             :   void ZapBlock(Address start, size_t size, uintptr_t zap_value);
    1387             : 
    1388             :   V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
    1389             :                                                     Address start,
    1390             :                                                     size_t commit_size,
    1391             :                                                     size_t reserved_size);
    1392             : 
    1393             :   // Page allocator instance for allocating non-executable pages.
    1394             :   // Guaranteed to be a valid pointer.
    1395             :   v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
    1396             : 
    1397             :   // Page allocator instance for allocating executable pages.
    1398             :   // Guaranteed to be a valid pointer.
    1399             :   v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
    1400             : 
    1401             :   // Returns page allocator suitable for allocating pages with requested
    1402             :   // executability.
    1403             :   v8::PageAllocator* page_allocator(Executability executable) {
    1404             :     return executable == EXECUTABLE ? code_page_allocator_
    1405      890889 :                                     : data_page_allocator_;
    1406             :   }
    1407             : 
    1408             :   // A region of memory that may contain executable code, including a reserved
    1409             :   // OS page with read-write access at the beginning.
    1410             :   const base::AddressRegion& code_range() const {
    1411             :     // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
    1412             :     DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
    1413             :     DCHECK_IMPLIES(!code_range_.is_empty(),
    1414             :                    code_range_.contains(code_page_allocator_instance_->begin(),
    1415             :                                         code_page_allocator_instance_->size()));
    1416             :     return code_range_;
    1417             :   }
    1418             : 
    1419             :   Unmapper* unmapper() { return &unmapper_; }
    1420             : 
    1421             :  private:
    1422             :   void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
    1423             :                                    size_t requested);
    1424             : 
    1425             :   // PreFree logically frees the object, i.e., it takes care of the size
    1426             :   // bookkeeping and calls the allocation callback.
    1427             :   void PreFreeMemory(MemoryChunk* chunk);
    1428             : 
    1429             :   // FreeMemory can be called concurrently when PreFree was executed before.
    1430             :   void PerformFreeMemory(MemoryChunk* chunk);
    1431             : 
    1432             :   // See AllocatePage for public interface. Note that currently we only support
    1433             :   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
    1434             :   template <typename SpaceType>
    1435             :   MemoryChunk* AllocatePagePooled(SpaceType* owner);
    1436             : 
    1437             :   // Initializes pages in a chunk. Returns the first page address.
    1438             :   // This function and GetChunkId() are provided for the mark-compact
    1439             :   // collector to rebuild page headers in the from space, which is used as a
    1440             :   // marking stack and therefore has its page headers destroyed.
    1441             :   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
    1442             :                                PagedSpace* owner);
    1443             : 
    1444      728989 :   void UpdateAllocatedSpaceLimits(Address low, Address high) {
    1445             :     // The use of atomic primitives does not guarantee correctness (wrt.
    1446             :     // desired semantics) by default. The loop here ensures that we update the
    1447             :     // values only if they did not change in between.
    1448      728989 :     Address ptr = kNullAddress;
    1449      728989 :     do {
    1450      728989 :       ptr = lowest_ever_allocated_;
    1451      919115 :     } while ((low < ptr) &&
    1452      190126 :              !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
    1453      728989 :     do {
    1454      728989 :       ptr = highest_ever_allocated_;
    1455      859899 :     } while ((high > ptr) &&
    1456      130910 :              !highest_ever_allocated_.compare_exchange_weak(ptr, high));
    1457      728989 :   }
    1458             : 
    1459             :   void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
    1460             :     DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
    1461             :     DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
    1462             :     executable_memory_.insert(chunk);
    1463             :   }
    1464             : 
    1465      134359 :   void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
    1466             :     DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
    1467             :     executable_memory_.erase(chunk);
    1468      134359 :     chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
    1469      134359 :   }
    1470             : 
    1471             :   Isolate* isolate_;
    1472             : 
    1473             :   // This object controls the virtual space reserved for the V8 heap instance.
    1474             :   // Depending on the configuration it may contain the following:
    1475             :   // - no reservation (on 32-bit architectures)
    1476             :   // - code range reservation used by bounded code page allocator (on 64-bit
    1477             :   //   architectures without pointer compression in the V8 heap)
    1478             :   // - data + code range reservation (on 64-bit architectures with pointer
    1479             :   //   compression in the V8 heap)
    1480             :   VirtualMemory heap_reservation_;
    1481             : 
    1482             :   // Page allocator used for allocating data pages. Depending on the
    1483             :   // configuration it may be a page allocator instance provided by v8::Platform
    1484             :   // or a BoundedPageAllocator (when pointer compression is enabled).
    1485             :   v8::PageAllocator* data_page_allocator_;
    1486             : 
    1487             :   // Page allocator used for allocating code pages. Depending on the
    1488             :   // configuration it may be a page allocator instance provided by v8::Platform
    1489             :   // or a BoundedPageAllocator (when pointer compression is enabled or
    1490             :   // on those 64-bit architectures where pc-relative 32-bit displacement
    1491             :   // can be used for call and jump instructions).
    1492             :   v8::PageAllocator* code_page_allocator_;
    1493             : 
    1494             :   // A part of the |heap_reservation_| that may contain executable code,
    1495             :   // including a reserved page with read-write access at the beginning.
    1496             :   // See details below.
    1497             :   base::AddressRegion code_range_;
    1498             : 
    1499             :   // This unique pointer owns the instance of the bounded code allocator
    1500             :   // that controls executable page allocation. It does not control the
    1501             :   // optional page at the beginning of the |code_range_|.
    1502             :   // Summarizing the above, the following conditions hold:
    1503             :   // 1) |heap_reservation_| >= |code_range_|
    1504             :   // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
    1505             :   // 3) |heap_reservation_| is AllocatePageSize()-aligned
    1506             :   // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
    1507             :   // 5) |code_range_| is CommitPageSize()-aligned
    1508             :   std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
    1509             : 
    1510             :   // Maximum space size in bytes.
    1511             :   size_t capacity_;
    1512             : 
    1513             :   // Allocated space size in bytes.
    1514             :   std::atomic<size_t> size_;
    1515             :   // Allocated executable space size in bytes.
    1516             :   std::atomic<size_t> size_executable_;
    1517             : 
    1518             :   // We keep the lowest and highest addresses allocated as a quick way
    1519             :   // of determining that pointers are outside the heap. The estimate is
    1520             :   // conservative, i.e. not all addresses in 'allocated' space are allocated
    1521             :   // to our heap. The range is [lowest, highest[, inclusive on the low end
    1522             :   // and exclusive on the high end.
    1523             :   std::atomic<Address> lowest_ever_allocated_;
    1524             :   std::atomic<Address> highest_ever_allocated_;
    1525             : 
    1526             :   VirtualMemory last_chunk_;
    1527             :   Unmapper unmapper_;
    1528             : 
    1529             :   // Data structure to remember allocated executable memory chunks.
    1530             :   std::unordered_set<MemoryChunk*> executable_memory_;
    1531             : 
    1532             :   friend class heap::TestCodePageAllocatorScope;
    1533             : 
    1534             :   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
    1535             : };
    1536             : 
    1537             : extern template Page*
    1538             : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    1539             :     size_t size, PagedSpace* owner, Executability executable);
    1540             : extern template Page*
    1541             : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    1542             :     size_t size, SemiSpace* owner, Executability executable);
    1543             : extern template Page*
    1544             : MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    1545             :     size_t size, SemiSpace* owner, Executability executable);
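                     : 
                     : // Allocation round-trip sketch (illustrative; |allocator|, |space| and
                     : // |size| are assumed inputs): pooled allocation applies only to
                     : // regular-sized, NOT_EXECUTABLE pages, and freeing can queue the page for
                     : // the Unmapper:
                     : //   Page* page = allocator->AllocatePage<MemoryAllocator::kPooled>(
                     : //       size, space, NOT_EXECUTABLE);
                     : //   allocator->Free<MemoryAllocator::kPreFreeAndQueue>(page);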
    1546             : 
    1547             : // -----------------------------------------------------------------------------
    1548             : // Interface for heap object iterators, to be implemented by all object space
    1549             : // object iterators.
    1550             : //
    1551             : // NOTE: The space-specific object iterators also implement their own next()
    1552             : //       method, which is used to avoid virtual function calls when
    1553             : //       iterating a specific space.
    1554             : 
    1555       63271 : class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
    1556             :  public:
    1557       63265 :   virtual ~ObjectIterator() = default;
    1558             :   virtual HeapObject Next() = 0;
    1559             : };
    1560             : 
    1561             : template <class PAGE_TYPE>
    1562             : class PageIteratorImpl
    1563             :     : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
    1564             :  public:
    1565      267809 :   explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
    1566             :   PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
    1567             :   PAGE_TYPE* operator*() { return p_; }
    1568             :   bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    1569       84762 :     return rhs.p_ == p_;
    1570             :   }
    1571             :   bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    1572     1078278 :     return rhs.p_ != p_;
    1573             :   }
    1574             :   inline PageIteratorImpl<PAGE_TYPE>& operator++();
    1575             :   inline PageIteratorImpl<PAGE_TYPE> operator++(int);
    1576             : 
    1577             :  private:
    1578             :   PAGE_TYPE* p_;
    1579             : };
    1580             : 
    1581             : typedef PageIteratorImpl<Page> PageIterator;
    1582             : typedef PageIteratorImpl<LargePage> LargePageIterator;
    1583             : 
    1584             : class PageRange {
    1585             :  public:
    1586             :   typedef PageIterator iterator;
    1587       31859 :   PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
    1588             :   explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
    1589             :   inline PageRange(Address start, Address limit);
    1590             : 
    1591             :   iterator begin() { return iterator(begin_); }
    1592             :   iterator end() { return iterator(end_); }
    1593             : 
    1594             :  private:
    1595             :   Page* begin_;
    1596             :   Page* end_;
    1597             : };
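                     : 
                     : // Iteration sketch (illustrative): the single-page constructor builds the
                     : // range [page, page->next_page()), so the loop body runs exactly once:
                     : //   for (Page* p : PageRange(page)) {
                     : //     // visit p
                     : //   }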
    1598             : 
    1599             : // -----------------------------------------------------------------------------
    1600             : // Heap object iterator in new/old/map spaces.
    1601             : //
    1602             : // A HeapObjectIterator iterates objects from the bottom of the given space
    1603             : // to its top or from the bottom of the given page to its top.
    1604             : //
    1605             : // If objects are allocated in the page during iteration, the iterator may
    1606             : // or may not iterate over those objects. The caller must create a new
    1607             : // iterator in order to be sure to visit these new objects.
    1608       63265 : class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
    1609             :  public:
    1610             :   // Creates a new object iterator in a given space.
    1611             :   explicit HeapObjectIterator(PagedSpace* space);
    1612             :   explicit HeapObjectIterator(Page* page);
    1613             : 
    1614             :   // Advance to the next object, skipping free spaces and other fillers and
    1615             :   // skipping the special garbage section of which there is one per space.
    1616             :   // Returns nullptr when the iteration has ended.
    1617             :   inline HeapObject Next() override;
    1618             : 
    1619             :  private:
    1620             :   // Fast (inlined) path of next().
    1621             :   inline HeapObject FromCurrentPage();
    1622             : 
    1623             :   // Slow path of next(), goes into the next page.  Returns false if the
    1624             :   // iteration has ended.
    1625             :   bool AdvanceToNextPage();
    1626             : 
    1627             :   Address cur_addr_;  // Current iteration point.
    1628             :   Address cur_end_;   // End iteration point.
    1629             :   PagedSpace* space_;
    1630             :   PageRange page_range_;
    1631             :   PageRange::iterator current_page_;
    1632             : };
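                     : 
                     : // Usage sketch (illustrative; |space| assumed), matching the termination
                     : // contract of Next() documented above:
                     : //   HeapObjectIterator it(space);
                     : //   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
                     : //     // |obj| is a live object; free space and fillers were skipped.
                     : //   }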
    1633             : 
    1634             : 
    1635             : // -----------------------------------------------------------------------------
    1636             : // A space has a circular list of pages. The next page can be accessed via
    1637             : // Page::next_page() call.
    1638             : 
    1639             : // An abstraction of allocation and relocation pointers in a page-structured
    1640             : // space.
    1641             : class LinearAllocationArea {
    1642             :  public:
    1643      764108 :   LinearAllocationArea() : top_(kNullAddress), limit_(kNullAddress) {}
    1644      412116 :   LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}
    1645             : 
    1646             :   void Reset(Address top, Address limit) {
    1647             :     set_top(top);
    1648             :     set_limit(limit);
    1649             :   }
    1650             : 
    1651             :   V8_INLINE void set_top(Address top) {
    1652             :     SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
    1653   533198017 :     top_ = top;
    1654             :   }
    1655             : 
    1656             :   V8_INLINE Address top() const {
    1657             :     SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
    1658             :     return top_;
    1659             :   }
    1660             : 
    1661             :   Address* top_address() { return &top_; }
    1662             : 
    1663     4069098 :   V8_INLINE void set_limit(Address limit) { limit_ = limit; }
    1664             : 
    1665             :   V8_INLINE Address limit() const { return limit_; }
    1666             : 
    1667             :   Address* limit_address() { return &limit_; }
    1668             : 
    1669             : #ifdef DEBUG
    1670             :   bool VerifyPagedAllocation() {
    1671             :     return (Page::FromAllocationAreaAddress(top_) ==
    1672             :             Page::FromAllocationAreaAddress(limit_)) &&
    1673             :            (top_ <= limit_);
    1674             :   }
    1675             : #endif
    1676             : 
    1677             :  private:
    1678             :   // Current allocation top.
    1679             :   Address top_;
    1680             :   // Current allocation limit.
    1681             :   Address limit_;
    1682             : };
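                     : 
                     : // Bump-pointer sketch (illustrative; |area_start|, |area_end| and |size|
                     : // are assumed): allocation advances top_ toward limit_, and callers fall
                     : // back to the free list once the limit would be crossed:
                     : //   LinearAllocationArea lab(area_start, area_end);
                     : //   if (lab.top() + size <= lab.limit()) {
                     : //     Address object_address = lab.top();
                     : //     lab.set_top(lab.top() + size);
                     : //   }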
    1683             : 
    1684             : 
    1685             : // An abstraction of the accounting statistics of a page-structured space.
    1686             : //
    1687             : // The stats are only set by functions that ensure they stay balanced. These
    1688             : // functions increase or decrease one of the non-capacity stats in conjunction
    1689             : // with capacity, or else they always balance increases and decreases to the
    1690             : // non-capacity stats.
    1691             : class AllocationStats {
    1692             :  public:
    1693             :   AllocationStats() { Clear(); }
    1694             : 
    1695             :   // Zero out all the allocation statistics (i.e., no capacity).
    1696             :   void Clear() {
    1697             :     capacity_ = 0;
    1698     1493643 :     max_capacity_ = 0;
    1699             :     ClearSize();
    1700             :   }
    1701             : 
    1702             :   void ClearSize() {
    1703     1744119 :     size_ = 0;
    1704             : #ifdef DEBUG
    1705             :     allocated_on_page_.clear();
    1706             : #endif
    1707             :   }
    1708             : 
    1709             :   // Accessors for the allocation statistics.
    1710             :   size_t Capacity() { return capacity_; }
    1711             :   size_t MaxCapacity() { return max_capacity_; }
    1712             :   size_t Size() { return size_; }
    1713             : #ifdef DEBUG
    1714             :   size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
    1715             : #endif
    1716             : 
    1717             :   void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    1718             :     DCHECK_GE(size_ + bytes, size_);
    1719     2461565 :     size_ += bytes;
    1720             : #ifdef DEBUG
    1721             :     allocated_on_page_[page] += bytes;
    1722             : #endif
    1723             :   }
    1724             : 
    1725             :   void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    1726             :     DCHECK_GE(size_, bytes);
    1727     1716412 :     size_ -= bytes;
    1728             : #ifdef DEBUG
    1729             :     DCHECK_GE(allocated_on_page_[page], bytes);
    1730             :     allocated_on_page_[page] -= bytes;
    1731             : #endif
    1732             :   }
    1733             : 
    1734             :   void DecreaseCapacity(size_t bytes) {
    1735             :     DCHECK_GE(capacity_, bytes);
    1736             :     DCHECK_GE(capacity_ - bytes, size_);
    1737             :     capacity_ -= bytes;
    1738             :   }
    1739             : 
    1740      604782 :   void IncreaseCapacity(size_t bytes) {
    1741             :     DCHECK_GE(capacity_ + bytes, capacity_);
    1742             :     capacity_ += bytes;
    1743      604782 :     if (capacity_ > max_capacity_) {
    1744      528891 :       max_capacity_ = capacity_;
    1745             :     }
    1746      604782 :   }
    1747             : 
    1748             :  private:
    1749             :   // |capacity_|: The number of object-area bytes (i.e., not including page
    1750             :   // bookkeeping structures) currently in the space.
    1751             :   // During evacuation, the capacity of the main spaces is accessed from
    1752             :   // multiple threads to check the old generation hard limit.
    1753             :   std::atomic<size_t> capacity_;
    1754             : 
    1755             :   // |max_capacity_|: The maximum capacity ever observed.
    1756             :   size_t max_capacity_;
    1757             : 
    1758             :   // |size_|: The number of allocated bytes.
    1759             :   size_t size_;
    1760             : 
    1761             : #ifdef DEBUG
    1762             :   std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
    1763             : #endif
    1764             : };
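                     : 
                     : // Balanced-accounting sketch (illustrative; |page| assumed): capacity moves
                     : // with pages, size moves with objects, and max_capacity_ only ratchets up:
                     : //   AllocationStats stats;
                     : //   stats.IncreaseCapacity(256 * KB);             // capacity 256 KB
                     : //   stats.IncreaseAllocatedBytes(64 * KB, page);  // size 64 KB
                     : //   stats.DecreaseAllocatedBytes(64 * KB, page);  // size 0 again
                     : //   stats.DecreaseCapacity(256 * KB);             // MaxCapacity() stays 256 KB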
    1765             : 
    1766             : // A free list maintaining free blocks of memory. The free list is organized in
    1767             : // a way to encourage objects allocated around the same time to be near each
    1768             : // other. The normal way to allocate is intended to be by bumping a 'top'
    1769             : // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
    1770             : // find a new space to allocate from. This is done with the free list, which is
    1771             : // divided up into rough categories to cut down on waste. Having finer
    1772             : // categories would scatter allocation more.
    1773             : 
    1774             : // The free list is organized in categories as follows:
    1775             : // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
    1776             : //   allocation when categories >= small no longer have entries.
    1777             : // 11-31 words (tiny): The tiny blocks are only used for allocation when
    1778             : //   categories >= small no longer have entries.
    1779             : // 32-255 words (small): Used for allocating free space between 1-31 words in
    1780             : //   size.
    1781             : // 256-2047 words (medium): Used for allocating free space between 32-255 words
    1782             : //   in size.
    1783             : // 2048-16383 words (large): Used for allocating free space between 256-2047
    1784             : //   words in size.
    1785             : // At least 16384 words (huge): This list is for objects of 2048 words or
    1786             : //   larger. Empty pages are also added to this list.
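                     : //
                     : // Mapping sketch (illustrative; assumes kTaggedSize == 8, i.e. one word is
                     : // 8 bytes):
                     : //   FreeList::SelectFreeListCategoryType(80);      // 10 words  -> kTiniest
                     : //   FreeList::SelectFreeListCategoryType(2048);    // 256 words -> kMedium
                     : //   FreeList::SelectFreeListCategoryType(1 * MB);  //           -> kHuge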
    1787             : class V8_EXPORT_PRIVATE FreeList {
    1788             :  public:
    1789             :   // This method returns how much memory can be allocated after freeing
    1790             :   // maximum_freed memory.
    1791             :   static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
    1792      524556 :     if (maximum_freed <= kTiniestListMax) {
    1793             :       // Since we are not iterating over all list entries, we cannot guarantee
    1794             :       // that we can find the maximum freed block in that free list.
    1795             :       return 0;
    1796      496903 :     } else if (maximum_freed <= kTinyListMax) {
    1797             :       return kTinyAllocationMax;
    1798      456495 :     } else if (maximum_freed <= kSmallListMax) {
    1799             :       return kSmallAllocationMax;
    1800      401694 :     } else if (maximum_freed <= kMediumListMax) {
    1801             :       return kMediumAllocationMax;
    1802      259396 :     } else if (maximum_freed <= kLargeListMax) {
    1803             :       return kLargeAllocationMax;
    1804             :     }
    1805             :     return maximum_freed;
    1806             :   }
    1807             : 
    1808             :   static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
    1809    23215794 :     if (size_in_bytes <= kTiniestListMax) {
    1810             :       return kTiniest;
    1811    10315037 :     } else if (size_in_bytes <= kTinyListMax) {
    1812             :       return kTiny;
    1813     4870669 :     } else if (size_in_bytes <= kSmallListMax) {
    1814             :       return kSmall;
    1815     1918108 :     } else if (size_in_bytes <= kMediumListMax) {
    1816             :       return kMedium;
    1817     1408125 :     } else if (size_in_bytes <= kLargeListMax) {
    1818             :       return kLarge;
    1819             :     }
    1820             :     return kHuge;
    1821             :   }
    1822             : 
    1823             :   FreeList();
    1824             : 
    1825             :   // Adds a node on the free list. The block of size {size_in_bytes} starting
    1826             :   // at {start} is placed on the free list. The return value is the number of
    1827             :   // bytes that were not added to the free list, because they freed memory block
    1828             :   // bytes that were not added to the free list because the freed memory block
    1829             :   // its contents will be destroyed. The start address should be word aligned,
    1830             :   // and the size should be a non-zero multiple of the word size.
    1831             :   size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
    1832             : 
    1833             :   // Allocates a free space node from the free list of at least size_in_bytes
    1834             :   // bytes. Returns the actual node size in node_size, which can be bigger than
    1835             :   // size_in_bytes. This method returns null if the allocation request cannot be
    1836             :   // handled by the free list.
    1837             :   V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
    1838             :                                            size_t* node_size);
    1839             : 
    1840             :   // Clear the free list.
    1841             :   void Reset();
    1842             : 
    1843      998852 :   void ResetStats() {
    1844             :     wasted_bytes_ = 0;
    1845             :     ForAllFreeListCategories(
    1846      250476 :         [](FreeListCategory* category) { category->ResetStats(); });
    1847      998852 :   }
    1848             : 
    1849             :   // Return the number of bytes available on the free list.
    1850             :   size_t Available() {
    1851             :     size_t available = 0;
    1852     1844303 :     ForAllFreeListCategories([&available](FreeListCategory* category) {
    1853     1844303 :       available += category->available();
    1854             :     });
    1855             :     return available;
    1856             :   }
    1857             : 
    1858             :   bool IsEmpty() {
    1859             :     bool empty = true;
    1860             :     ForAllFreeListCategories([&empty](FreeListCategory* category) {
    1861             :       if (!category->is_empty()) empty = false;
    1862             :     });
    1863             :     return empty;
    1864             :   }
    1865             : 
    1866             :   // Used after booting the VM.
    1867             :   void RepairLists(Heap* heap);
    1868             : 
    1869             :   size_t EvictFreeListItems(Page* page);
    1870             :   bool ContainsPageFreeListItems(Page* page);
    1871             : 
    1872             :   size_t wasted_bytes() { return wasted_bytes_; }
    1873             : 
    1874             :   template <typename Callback>
    1875             :   void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
    1876    16650018 :     FreeListCategory* current = categories_[type];
    1877    19678064 :     while (current != nullptr) {
    1878             :       FreeListCategory* next = current->next();
    1879             :       callback(current);
    1880             :       current = next;
    1881             :     }
    1882             :   }
    1883             : 
    1884             :   template <typename Callback>
    1885      313303 :   void ForAllFreeListCategories(Callback callback) {
    1886    16712845 :     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    1887    16650018 :       ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
    1888             :     }
    1889      313303 :   }
    1890             : 
    1891             :   bool AddCategory(FreeListCategory* category);
    1892             :   void RemoveCategory(FreeListCategory* category);
    1893             :   void PrintCategories(FreeListCategoryType type);
    1894             : 
    1895             :   // Returns a page containing an entry for a given type, or nullptr otherwise.
    1896             :   inline Page* GetPageForCategoryType(FreeListCategoryType type);
    1897             : 
    1898             : #ifdef DEBUG
    1899             :   size_t SumFreeLists();
    1900             :   bool IsVeryLong();
    1901             : #endif
    1902             : 
    1903             :  private:
    1904             :   class FreeListCategoryIterator {
    1905             :    public:
    1906             :     FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
    1907     4552962 :         : current_(free_list->categories_[type]) {}
    1908             : 
    1909             :     bool HasNext() { return current_ != nullptr; }
    1910             : 
    1911             :     FreeListCategory* Next() {
    1912             :       DCHECK(HasNext());
    1913             :       FreeListCategory* tmp = current_;
    1914     1375365 :       current_ = current_->next();
    1915             :       return tmp;
    1916             :     }
    1917             : 
    1918             :    private:
    1919             :     FreeListCategory* current_;
    1920             :   };
    1921             : 
    1922             :   // The size range of blocks, in bytes.
    1923             :   static const size_t kMinBlockSize = 3 * kTaggedSize;
    1924             : 
    1925             :   // This is a conservative upper bound. The actual maximum block size takes
    1926             :   // padding and alignment of data and code pages into account.
    1927             :   static const size_t kMaxBlockSize = Page::kPageSize;
    1928             : 
    1929             :   static const size_t kTiniestListMax = 0xa * kTaggedSize;
    1930             :   static const size_t kTinyListMax = 0x1f * kTaggedSize;
    1931             :   static const size_t kSmallListMax = 0xff * kTaggedSize;
    1932             :   static const size_t kMediumListMax = 0x7ff * kTaggedSize;
    1933             :   static const size_t kLargeListMax = 0x3fff * kTaggedSize;
    1934             :   static const size_t kTinyAllocationMax = kTiniestListMax;
    1935             :   static const size_t kSmallAllocationMax = kTinyListMax;
    1936             :   static const size_t kMediumAllocationMax = kSmallListMax;
    1937             :   static const size_t kLargeAllocationMax = kMediumListMax;
    1938             : 
    1939             :   // Walks all available categories for a given |type| and tries to retrieve
    1940             :   // a node. Returns nullptr if the category is empty.
    1941             :   FreeSpace FindNodeIn(FreeListCategoryType type, size_t minimum_size,
    1942             :                        size_t* node_size);
    1943             : 
    1944             :   // Tries to retrieve a node from the first category in a given |type|.
    1945             :   // Returns nullptr if the category is empty or the top entry is smaller
    1946             :   // than minimum_size.
    1947             :   FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
    1948             :                           size_t* node_size);
    1949             : 
    1950             :   // Searches a given |type| for a node of at least |minimum_size|.
    1951             :   FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
    1952             :                                 size_t minimum_size);
    1953             : 
    1954             :   // The tiny categories are not used for fast allocation.
    1955             :   FreeListCategoryType SelectFastAllocationFreeListCategoryType(
    1956             :       size_t size_in_bytes) {
    1957     1932204 :     if (size_in_bytes <= kSmallAllocationMax) {
    1958             :       return kSmall;
    1959      627912 :     } else if (size_in_bytes <= kMediumAllocationMax) {
    1960             :       return kMedium;
    1961      500829 :     } else if (size_in_bytes <= kLargeAllocationMax) {
    1962             :       return kLarge;
    1963             :     }
    1964             :     return kHuge;
    1965             :   }
    1966             : 
    1967             :   FreeListCategory* top(FreeListCategoryType type) const {
    1968       58685 :     return categories_[type];
    1969             :   }
    1970             : 
    1971             :   std::atomic<size_t> wasted_bytes_;
    1972             :   FreeListCategory* categories_[kNumberOfCategories];
    1973             : 
    1974             :   friend class FreeListCategory;
    1975             : };
    1976             : 
    1977             : // LocalAllocationBuffer represents a linear allocation area that is created
    1978             : // from a given {AllocationResult} and can be used to allocate memory without
    1979             : // synchronization.
    1980             : //
    1981             : // The buffer is properly closed upon destruction and reassignment.
    1982             : // Example:
    1983             : //   {
    1984             : //     AllocationResult result = ...;
    1985             : //     LocalAllocationBuffer a(heap, result, size);
    1986             : //     LocalAllocationBuffer b = a;
    1987             : //     CHECK(!a.IsValid());
    1988             : //     CHECK(b.IsValid());
    1989             : //     // {a} is invalid now and cannot be used for further allocations.
    1990             : //   }
    1991             : //   // Since {b} went out of scope, the LAB is closed, resulting in creating a
    1992             : //   // filler object for the remaining area.
    1993             : class LocalAllocationBuffer {
    1994             :  public:
    1995             :   // Indicates that a buffer cannot be used for allocations anymore. Can result
    1996             :   // from either reassigning a buffer, or trying to construct it from an
    1997             :   // invalid {AllocationResult}.
    1998             :   static LocalAllocationBuffer InvalidBuffer() {
    1999             :     return LocalAllocationBuffer(
    2000      208994 :         nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
    2001             :   }
    2002             : 
    2003             :   // Creates a new LAB from a given {AllocationResult}. Results in
    2004             :   // InvalidBuffer if the result indicates a retry.
    2005             :   static inline LocalAllocationBuffer FromResult(Heap* heap,
    2006             :                                                  AllocationResult result,
    2007             :                                                  intptr_t size);
    2008             : 
    2009      615425 :   ~LocalAllocationBuffer() { Close(); }
    2010             : 
    2011             :   // Convert to C++11 move-semantics once allowed by the style guide.
    2012             :   LocalAllocationBuffer(const LocalAllocationBuffer& other) V8_NOEXCEPT;
    2013             :   LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other)
    2014             :       V8_NOEXCEPT;
    2015             : 
    2016             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
    2017             :       int size_in_bytes, AllocationAlignment alignment);
    2018             : 
    2019    91229446 :   inline bool IsValid() { return allocation_info_.top() != kNullAddress; }
    2020             : 
    2021             :   // Try to merge LABs, which is only possible when they are adjacent in memory.
    2022             :   // Returns true if the merge was successful, false otherwise.
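                     :   // Merging sketch (hypothetical caller): a freshly refilled LAB can
                     :   // absorb the unused tail of the previous one when they are adjacent:
                     :   //   LocalAllocationBuffer new_lab =
                     :   //       LocalAllocationBuffer::FromResult(heap, result, size);
                     :   //   if (new_lab.IsValid()) new_lab.TryMerge(&old_lab);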
    2023             :   inline bool TryMerge(LocalAllocationBuffer* other);
    2024             : 
    2025             :   inline bool TryFreeLast(HeapObject object, int object_size);
    2026             : 
    2027             :   // Close a LAB, effectively invalidating it. Returns the unused area.
    2028             :   LinearAllocationArea Close();
    2029             : 
    2030             :  private:
    2031             :   LocalAllocationBuffer(Heap* heap,
    2032             :                         LinearAllocationArea allocation_info) V8_NOEXCEPT;
    2033             : 
    2034             :   Heap* heap_;
    2035             :   LinearAllocationArea allocation_info_;
    2036             : };
    2037             : 
    2038      560714 : class SpaceWithLinearArea : public Space {
    2039             :  public:
    2040             :   SpaceWithLinearArea(Heap* heap, AllocationSpace id)
    2041     1121578 :       : Space(heap, id), top_on_previous_step_(0) {
    2042             :     allocation_info_.Reset(kNullAddress, kNullAddress);
    2043             :   }
    2044             : 
    2045             :   virtual bool SupportsInlineAllocation() = 0;
    2046             : 
    2047             :   // Returns the allocation pointer in this space.
    2048   367775159 :   Address top() { return allocation_info_.top(); }
    2049    70397778 :   Address limit() { return allocation_info_.limit(); }
    2050             : 
    2051             :   // The allocation top address.
    2052             :   Address* allocation_top_address() { return allocation_info_.top_address(); }
    2053             : 
    2054             :   // The allocation limit address.
    2055             :   Address* allocation_limit_address() {
    2056             :     return allocation_info_.limit_address();
    2057             :   }
    2058             : 
    2059             :   V8_EXPORT_PRIVATE void AddAllocationObserver(
    2060             :       AllocationObserver* observer) override;
    2061             :   V8_EXPORT_PRIVATE void RemoveAllocationObserver(
    2062             :       AllocationObserver* observer) override;
    2063             :   V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
    2064             :   V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
    2065             : 
    2066             :   // When allocation observers are active we may use a lower limit to allow the
    2067             :   // observers to 'interrupt' earlier than the natural limit. Given a linear
    2068             :   // area bounded by [start, end), this function computes the limit to use to
    2069             :   // allow proper observation based on existing observers. min_size specifies
    2070             :   // the minimum size that the limited area should have.
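                     :   // A sketch of the intended result (not the exact implementation): with
                     :   // no active observers the whole area is usable and the limit is |end|;
                     :   // with observers it is pulled in to approximately
                     :   //   Min(start + min_size + next_step_size, end)
                     :   // so that an observer step fires before the area is exhausted.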
    2071             :   Address ComputeLimit(Address start, Address end, size_t min_size);
    2072             :   V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
    2073             :       size_t min_size) = 0;
    2074             : 
    2075             :  protected:
    2076             :   // If we are doing inline allocation in steps, this method performs the 'step'
    2077             :   // operation. top is the memory address of the bump pointer at the last
    2078             :   // inline allocation (i.e., it determines the number of bytes actually
    2079             :   // allocated since the last step). top_for_next_step is the address of the
    2080             :   // bump pointer where the next byte is going to be allocated from. top and
    2081             :   // top_for_next_step may be different when we cross a page boundary or reset
    2082             :   // the space.
    2083             :   // TODO(ofrobots): clarify the precise difference between this and
    2084             :   // Space::AllocationStep.
    2085             :   void InlineAllocationStep(Address top, Address top_for_next_step,
    2086             :                             Address soon_object, size_t size);
    2087             :   V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
    2088             : 
    2089             :   // TODO(ofrobots): make these private after refactoring is complete.
    2090             :   LinearAllocationArea allocation_info_;
    2091             :   Address top_on_previous_step_;
    2092             : };
    2093             : 
    2094             : class V8_EXPORT_PRIVATE PagedSpace
    2095             :     : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
    2096             :  public:
    2097             :   typedef PageIterator iterator;
    2098             : 
    2099             :   static const size_t kCompactionMemoryWanted = 500 * KB;
    2100             : 
    2101             :   // Creates a space with an id.
    2102             :   PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
    2103             : 
    2104      995680 :   ~PagedSpace() override { TearDown(); }
    2105             : 
    2106             :   // Checks whether an object/address is in this space.
    2107             :   inline bool Contains(Address a);
    2108             :   inline bool Contains(Object o);
    2109             :   bool ContainsSlow(Address addr);
    2110             : 
    2111             :   // Does the space need executable memory?
    2112             :   Executability executable() { return executable_; }
    2113             : 
    2114             :   // Prepares for a mark-compact GC.
    2115             :   void PrepareForMarkCompact();
    2116             : 
    2117             :   // Current capacity without growing (Size() + Available()).
    2118             :   size_t Capacity() { return accounting_stats_.Capacity(); }
    2119             : 
    2120             :   // Approximate amount of physical memory committed for this space.
    2121             :   size_t CommittedPhysicalMemory() override;
    2122             : 
    2123             :   void ResetFreeListStatistics();
    2124             : 
    2125             :   // Sets the capacity, the available space and the wasted space to zero.
    2126             :   // The stats are rebuilt during sweeping by adding each page to the
    2127             :   // capacity and the size when it is encountered.  As free spaces are
    2128             :   // discovered during sweeping, they are subtracted from the size and added
    2129             :   // to the available and wasted totals.
    2130             :   void ClearStats() {
    2131             :     accounting_stats_.ClearSize();
    2132      250476 :     free_list_.ResetStats();
    2133      250476 :     ResetFreeListStatistics();
    2134             :   }
    2135             : 
    2136             :   // Available bytes without growing.  These are the bytes on the free list.
    2137             :   // The bytes in the linear allocation area are not included in this total
    2138             :   // because updating the stats would slow down allocation.  New pages are
    2139             :   // immediately added to the free list so they show up here.
    2140     1929894 :   size_t Available() override { return free_list_.Available(); }
    2141             : 
    2142             :   // Allocated bytes in this space.  Garbage bytes that were not found due to
    2143             :   // concurrent sweeping are counted as being allocated!  The bytes in the
    2144             :   // current linear allocation area (between top and limit) are also counted
    2145             :   // here.
    2146    11293237 :   size_t Size() override { return accounting_stats_.Size(); }
    2147             : 
    2148             :   // Like Size(), but the bytes in lazily swept pages are estimated and the bytes
    2149             :   // in the current linear allocation area are not included.
    2150             :   size_t SizeOfObjects() override;
    2151             : 
    2152             :   // Wasted bytes in this space.  These are just the bytes that were thrown away
    2153             :   // due to being too small to use for allocation.
    2154     1284792 :   virtual size_t Waste() { return free_list_.wasted_bytes(); }
    2155             : 
    2156             :   enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
    2157             : 
    2158             :   // Allocate the requested number of bytes in the space if possible, return a
    2159             :   // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
    2160             :   // to be manually updated later.
    2161             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
    2162             :       int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
    2163             : 
    2164             :   // Allocate the requested number of bytes in the space, double-aligned if
    2165             :   // possible, return a failure object if not.
    2166             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
    2167             :       int size_in_bytes, AllocationAlignment alignment);
    2168             : 
    2169             :   // Allocate the requested number of bytes in the space and consider allocation
    2170             :   // alignment if needed.
    2171             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
    2172             :       int size_in_bytes, AllocationAlignment alignment);
    2173             : 
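                     :   // Example (hypothetical values): a caller freeing a 96-byte dead area
                     :   // that is still accounted as allocated would call
                     :   //   Free(addr, 96, SpaceAccountingMode::kSpaceAccounted);
                     :   // the sweeper, which rebuilds the accounting stats itself, frees with
                     :   // SpaceAccountingMode::kSpaceUnaccounted instead.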
    2174    23656546 :   size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
    2175    23656546 :     if (size_in_bytes == 0) return 0;
    2176             :     heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
    2177    23264365 :                                  ClearRecordedSlots::kNo);
    2178    23254986 :     if (mode == SpaceAccountingMode::kSpaceAccounted) {
    2179     1542575 :       return AccountedFree(start, size_in_bytes);
    2180             :     } else {
    2181    21709188 :       return UnaccountedFree(start, size_in_bytes);
    2182             :     }
    2183             :   }
    2184             : 
    2185             :   // Gives a block of memory to the space's free list.  It might be added to
    2186             :   // the free list or accounted as waste.  AccountedFree additionally updates
    2187             :   // the space's accounting stats, while UnaccountedFree leaves them untouched;
    2188             :   // both return the number of usable bytes (the size minus the waste).
    2189             :   size_t AccountedFree(Address start, size_t size_in_bytes) {
    2190     1542577 :     size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
    2191             :     Page* page = Page::FromAddress(start);
    2192             :     accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
    2193             :     DCHECK_GE(size_in_bytes, wasted);
    2194     1542575 :     return size_in_bytes - wasted;
    2195             :   }
    2196             : 
    2197             :   size_t UnaccountedFree(Address start, size_t size_in_bytes) {
    2198    21712409 :     size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
    2199             :     DCHECK_GE(size_in_bytes, wasted);
    2200    21709188 :     return size_in_bytes - wasted;
    2201             :   }
    2202             : 
    2203             :   inline bool TryFreeLast(HeapObject object, int object_size);
    2204             : 
    2205             :   void ResetFreeList();
    2206             : 
    2207             :   // Empty space linear allocation area, returning unused area to free list.
    2208             :   void FreeLinearAllocationArea();
    2209             : 
    2210             :   void MarkLinearAllocationAreaBlack();
    2211             :   void UnmarkLinearAllocationArea();
    2212             : 
    2213             :   void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    2214             :     accounting_stats_.DecreaseAllocatedBytes(bytes, page);
    2215             :   }
    2216             :   void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    2217             :     accounting_stats_.IncreaseAllocatedBytes(bytes, page);
    2218             :   }
    2219             :   void DecreaseCapacity(size_t bytes) {
    2220             :     accounting_stats_.DecreaseCapacity(bytes);
    2221             :   }
    2222             :   void IncreaseCapacity(size_t bytes) {
    2223      604782 :     accounting_stats_.IncreaseCapacity(bytes);
    2224             :   }
    2225             : 
    2226             :   void RefineAllocatedBytesAfterSweeping(Page* page);
    2227             : 
    2228             :   Page* InitializePage(MemoryChunk* chunk, Executability executable);
    2229             : 
    2230             :   void ReleasePage(Page* page);
    2231             : 
    2232             :   // Adds the page to this space and returns the number of bytes added to the
    2233             :   // free list of the space.
    2234             :   size_t AddPage(Page* page);
    2235             :   void RemovePage(Page* page);
    2236             :   // Remove a page if it has at least |size_in_bytes| bytes available that can
    2237             :   // be used for allocation.
    2238             :   Page* RemovePageSafe(int size_in_bytes);
    2239             : 
    2240             :   void SetReadable();
    2241             :   void SetReadAndExecutable();
    2242             :   void SetReadAndWritable();
    2243             : 
    2244      465720 :   void SetDefaultCodePermissions() {
    2245      465720 :     if (FLAG_jitless) {
    2246           0 :       SetReadable();
    2247             :     } else {
    2248      465720 :       SetReadAndExecutable();
    2249             :     }
    2250      465726 :   }
    2251             : 
    2252             : #ifdef VERIFY_HEAP
    2253             :   // Verify integrity of this space.
    2254             :   virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
    2255             : 
    2256             :   void VerifyLiveBytes();
    2257             : 
    2258             :   // Overridden by subclasses to verify space-specific object
    2259             :   // properties (e.g., only maps or free-list nodes are in map space).
    2260             :   virtual void VerifyObject(HeapObject obj) {}
    2261             : #endif
    2262             : 
    2263             : #ifdef DEBUG
    2264             :   void VerifyCountersAfterSweeping();
    2265             :   void VerifyCountersBeforeConcurrentSweeping();
    2266             :   // Print meta info and objects in this space.
    2267             :   void Print() override;
    2268             : 
    2269             :   // Report code object related statistics
    2270             :   static void ReportCodeStatistics(Isolate* isolate);
    2271             :   static void ResetCodeStatistics(Isolate* isolate);
    2272             : #endif
    2273             : 
    2274             :   bool CanExpand(size_t size);
    2275             : 
    2276             :   // Returns the number of total pages in this space.
    2277             :   // Returns the total number of pages in this space.
    2278             : 
    2279             :   // Return size of allocatable area on a page in this space.
    2280     2817638 :   inline int AreaSize() { return static_cast<int>(area_size_); }
    2281             : 
    2282   150560924 :   virtual bool is_local() { return false; }
    2283             : 
    2284             :   // Merges {other} into the current space. Note that this modifies {other},
    2285             :   // e.g., removes its bump pointer area and resets statistics.
    2286             :   void MergeCompactionSpace(CompactionSpace* other);
    2287             : 
    2288             :   // Refills the free list from the corresponding free list filled by the
    2289             :   // sweeper.
    2290             :   virtual void RefillFreeList();
    2291             : 
    2292             :   FreeList* free_list() { return &free_list_; }
    2293             : 
    2294             :   base::Mutex* mutex() { return &space_mutex_; }
    2295             : 
    2296             :   inline void UnlinkFreeListCategories(Page* page);
    2297             :   inline size_t RelinkFreeListCategories(Page* page);
    2298             : 
    2299             :   Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
    2300             : 
    2301             :   iterator begin() { return iterator(first_page()); }
    2302             :   iterator end() { return iterator(nullptr); }
    2303             : 
    2304             :   // Shrink immortal immovable pages of the space to be exactly the size needed
    2305             :   // using the high water mark.
    2306             :   void ShrinkImmortalImmovablePages();
    2307             : 
    2308             :   size_t ShrinkPageToHighWaterMark(Page* page);
    2309             : 
    2310             :   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
    2311             : 
    2312             :   void SetLinearAllocationArea(Address top, Address limit);
    2313             : 
    2314             :  private:
    2315             :   // Set space linear allocation area.
    2316             :   void SetTopAndLimit(Address top, Address limit) {
    2317             :     DCHECK(top == limit ||
    2318             :            Page::FromAddress(top) == Page::FromAddress(limit - 1));
    2319     2586707 :     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    2320             :     allocation_info_.Reset(top, limit);
    2321             :   }
    2322             :   void DecreaseLimit(Address new_limit);
    2323             :   void UpdateInlineAllocationLimit(size_t min_size) override;
    2324    23304807 :   bool SupportsInlineAllocation() override {
    2325    23304807 :     return identity() == OLD_SPACE && !is_local();
    2326             :   }
    2327             : 
    2328             :  protected:
    2329             :   // PagedSpaces that should be included in snapshots have different, i.e.,
    2330             :   // smaller, initial pages.
    2331           0 :   virtual bool snapshotable() { return true; }
    2332             : 
    2333             :   bool HasPages() { return first_page() != nullptr; }
    2334             : 
    2335             :   // Cleans up the space, frees all pages in this space except those belonging
    2336             :   // to the initial chunk, uncommits addresses in the initial chunk.
    2337             :   void TearDown();
    2338             : 
    2339             :   // Expands the space by allocating a fixed number of pages. Returns false if
    2340             :   // it cannot allocate the requested number of pages from the OS, or if the
    2341             :   // hard heap size limit has been hit.
    2342             :   bool Expand();
    2343             : 
    2344             :   // Sets up a linear allocation area that fits the given number of bytes.
    2345             :   // Returns false if there is not enough space and the caller has to retry
    2346             :   // after collecting garbage.
    2347             :   inline bool EnsureLinearAllocationArea(int size_in_bytes);
    2348             :   // Allocates an object from the linear allocation area. Assumes that the
    2349             :   // linear allocation area is large enough to fit the object.
    2350             :   inline HeapObject AllocateLinearly(int size_in_bytes);
    2351             :   // Tries to allocate an aligned object from the linear allocation area.
    2352             :   // Returns nullptr if the linear allocation area does not fit the object.
    2353             :   // Otherwise, returns the object pointer and writes the allocation size
    2354             :   // (object size + alignment filler size) to |size_in_bytes|.
    2355             :   inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
    2356             :                                                AllocationAlignment alignment);
    2357             : 
    2358             :   V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
    2359             :       size_t size_in_bytes);
    2360             : 
    2361             :   // If sweeping is still in progress try to sweep unswept pages. If that is
    2362             :   // not successful, wait for the sweeper threads and retry free-list
    2363             :   // allocation. Returns false if there is not enough space and the caller
    2364             :   // has to retry after collecting garbage.
    2365             :   V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
    2366             : 
    2367             :   // Slow path of AllocateRaw. This function is space-dependent. Returns false
    2368             :   // if there is not enough space and the caller has to retry after
    2369             :   // collecting garbage.
    2370             :   V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
    2371             :       int size_in_bytes);
    2372             : 
    2373             :   // Implementation of SlowRefillLinearAllocationArea. Returns false if there
    2374             :   // is not enough space and the caller has to retry after collecting garbage.
    2375             :   V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
    2376             :       int size_in_bytes);
    2377             : 
    2378             :   Executability executable_;
    2379             : 
    2380             :   size_t area_size_;
    2381             : 
    2382             :   // Accounting information for this space.
    2383             :   AllocationStats accounting_stats_;
    2384             : 
    2385             :   // The space's free list.
    2386             :   FreeList free_list_;
    2387             : 
    2388             :   // Mutex guarding any concurrent access to the space.
    2389             :   base::Mutex space_mutex_;
    2390             : 
    2391             :   friend class IncrementalMarking;
    2392             :   friend class MarkCompactCollector;
    2393             : 
    2394             :   // Used in cctest.
    2395             :   friend class heap::HeapTester;
    2396             : };
    2397             : 
    2398             : enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
    2399             : 
    2400             : // -----------------------------------------------------------------------------
    2401             : // SemiSpace in young generation
    2402             : //
    2403             : // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
    2404             : // The mark-compact collector uses the memory of the first page in the from
    2405             : // space as a marking stack when tracing live objects.
    2406       62872 : class SemiSpace : public Space {
    2407             :  public:
    2408             :   typedef PageIterator iterator;
    2409             : 
    2410             :   static void Swap(SemiSpace* from, SemiSpace* to);
    2411             : 
    2412             :   SemiSpace(Heap* heap, SemiSpaceId semispace)
    2413             :       : Space(heap, NEW_SPACE),
    2414             :         current_capacity_(0),
    2415             :         maximum_capacity_(0),
    2416             :         minimum_capacity_(0),
    2417             :         age_mark_(kNullAddress),
    2418             :         committed_(false),
    2419             :         id_(semispace),
    2420             :         current_page_(nullptr),
    2421       62888 :         pages_used_(0) {}
    2422             : 
    2423             :   inline bool Contains(HeapObject o);
    2424             :   inline bool Contains(Object o);
    2425             :   inline bool ContainsSlow(Address a);
    2426             : 
    2427             :   void SetUp(size_t initial_capacity, size_t maximum_capacity);
    2428             :   void TearDown();
    2429             : 
    2430             :   bool Commit();
    2431             :   bool Uncommit();
    2432             :   bool is_committed() { return committed_; }
    2433             : 
    2434             :   // Grow the semispace to the new capacity.  The new capacity requested must
    2435             :   // be larger than the current capacity and less than the maximum capacity.
    2436             :   bool GrowTo(size_t new_capacity);
    2437             : 
    2438             :   // Shrinks the semispace to the new capacity.  The new capacity requested
    2439             :   // must be more than the amount of used memory in the semispace and less
    2440             :   // than the current capacity.
    2441             :   bool ShrinkTo(size_t new_capacity);
    2442             : 
    2443             :   bool EnsureCurrentCapacity();
    2444             : 
    2445             :   Address space_end() { return memory_chunk_list_.back()->area_end(); }
    2446             : 
    2447             :   // Returns the start address of the first page of the space.
    2448             :   Address space_start() {
    2449             :     DCHECK_NE(memory_chunk_list_.front(), nullptr);
    2450      281923 :     return memory_chunk_list_.front()->area_start();
    2451             :   }
    2452             : 
    2453             :   Page* current_page() { return current_page_; }
    2454             :   int pages_used() { return pages_used_; }
    2455             : 
    2456             :   // Returns the start address of the current page of the space.
    2457     1403669 :   Address page_low() { return current_page_->area_start(); }
    2458             : 
    2459             :   // Returns one past the end address of the current page of the space.
    2460     1336948 :   Address page_high() { return current_page_->area_end(); }
    2461             : 
    2462       82206 :   bool AdvancePage() {
    2463       82206 :     Page* next_page = current_page_->next_page();
    2464             :     // We cannot expand if we reached the maximum number of pages already. Note
    2465             :     // that we need to account for the next page already for this check as we
    2466             :     // could potentially fill the whole page after advancing.
    2467      164412 :     const bool reached_max_pages = (pages_used_ + 1) == max_pages();
    2468       82206 :     if (next_page == nullptr || reached_max_pages) {
    2469             :       return false;
    2470             :     }
    2471       61932 :     current_page_ = next_page;
    2472       61932 :     pages_used_++;
    2473             :     return true;
    2474             :   }
    2475             : 
    2476             :   // Resets the space to using the first page.
    2477             :   void Reset();
    2478             : 
    2479             :   void RemovePage(Page* page);
    2480             :   void PrependPage(Page* page);
    2481             : 
    2482             :   Page* InitializePage(MemoryChunk* chunk, Executability executable);
    2483             : 
    2484             :   // Age mark accessors.
    2485             :   Address age_mark() { return age_mark_; }
    2486             :   void set_age_mark(Address mark);
    2487             : 
    2488             :   // Returns the current capacity of the semispace.
    2489             :   size_t current_capacity() { return current_capacity_; }
    2490             : 
    2491             :   // Returns the maximum capacity of the semispace.
    2492             :   size_t maximum_capacity() { return maximum_capacity_; }
    2493             : 
    2494             :   // Returns the initial capacity of the semispace.
    2495             :   size_t minimum_capacity() { return minimum_capacity_; }
    2496             : 
    2497             :   SemiSpaceId id() { return id_; }
    2498             : 
    2499             :   // Approximate amount of physical memory committed for this space.
    2500             :   size_t CommittedPhysicalMemory() override;
    2501             : 
    2502             :   // If we don't have these here then SemiSpace will be abstract.  However,
    2503             :   // they should never be called:
    2504             : 
    2505           0 :   size_t Size() override {
    2506           0 :     UNREACHABLE();
    2507             :   }
    2508             : 
    2509           0 :   size_t SizeOfObjects() override { return Size(); }
    2510             : 
    2511           0 :   size_t Available() override {
    2512           0 :     UNREACHABLE();
    2513             :   }
    2514             : 
    2515             :   Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
    2516             :   Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
    2517             : 
    2518             :   iterator begin() { return iterator(first_page()); }
    2519             :   iterator end() { return iterator(nullptr); }
    2520             : 
    2521             :   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
    2522             : 
    2523             : #ifdef DEBUG
    2524             :   void Print() override;
    2525             :   // Validate a range of addresses in a SemiSpace.
    2526             :   // The "from" address must be on a page prior to the "to" address,
    2527             :   // in the linked page order, or it must be earlier on the same page.
    2528             :   static void AssertValidRange(Address from, Address to);
    2529             : #else
    2530             :   // Do nothing.
    2531             :   inline static void AssertValidRange(Address from, Address to) {}
    2532             : #endif
    2533             : 
    2534             : #ifdef VERIFY_HEAP
    2535             :   virtual void Verify();
    2536             : #endif
    2537             : 
    2538             :  private:
    2539             :   void RewindPages(int num_pages);
    2540             : 
    2541             :   inline int max_pages() {
    2542       82206 :     return static_cast<int>(current_capacity_ / Page::kPageSize);
    2543             :   }
    2544             : 
    2545             :   // Copies the flags into the masked positions on all pages in the space.
    2546             :   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
    2547             : 
    2548             :   // The currently committed space capacity.
    2549             :   size_t current_capacity_;
    2550             : 
    2551             :   // The maximum capacity that can be used by this space. A space cannot grow
    2552             :   // beyond that size.
    2553             :   size_t maximum_capacity_;
    2554             : 
    2555             :   // The minimum capacity for the space. A space cannot shrink below this size.
    2556             :   size_t minimum_capacity_;
    2557             : 
    2558             :   // Used to govern object promotion during mark-compact collection.
    2559             :   Address age_mark_;
    2560             : 
    2561             :   bool committed_;
    2562             :   SemiSpaceId id_;
    2563             : 
    2564             :   Page* current_page_;
    2565             : 
    2566             :   int pages_used_;
    2567             : 
    2568             :   friend class NewSpace;
    2569             :   friend class SemiSpaceIterator;
    2570             : };
    2571             : 
    2572             : 
    2573             : // A SemiSpaceIterator is an ObjectIterator that iterates over the active
    2574             : // semispace of the heap's new space.  It iterates over the objects in the
    2575             : // semispace from the bottom of the semispace to the top of the semispace.
    2576             : // New objects allocated after the iterator is created are not
    2577             : // iterated.
    2578       15706 : class SemiSpaceIterator : public ObjectIterator {
    2579             :  public:
    2580             :   // Create an iterator over the allocated objects in the given to-space.
    2581             :   explicit SemiSpaceIterator(NewSpace* space);
    2582             : 
    2583             :   inline HeapObject Next() override;
    2584             : 
    2585             :  private:
    2586             :   void Initialize(Address start, Address end);
    2587             : 
    2588             :   // The current iteration point.
    2589             :   Address current_;
    2590             :   // The end of iteration.
    2591             :   Address limit_;
    2592             : };
    2593             : 
    2594             : // -----------------------------------------------------------------------------
    2595             : // The young generation space.
    2596             : //
    2597             : // The new space consists of a contiguous pair of semispaces.  It simply
    2598             : // forwards most functions to the appropriate semispace.
    2599             : 
    2600             : class NewSpace : public SpaceWithLinearArea {
    2601             :  public:
    2602             :   typedef PageIterator iterator;
    2603             : 
    2604             :   NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
    2605             :            size_t initial_semispace_capacity, size_t max_semispace_capacity);
    2606             : 
    2607      314355 :   ~NewSpace() override { TearDown(); }
    2608             : 
    2609             :   inline bool ContainsSlow(Address a);
    2610             :   inline bool Contains(Object o);
    2611             :   inline bool Contains(HeapObject o);
    2612             : 
    2613             :   // Tears down the space.  Heap memory was not allocated by the space, so it
    2614             :   // is not deallocated here.
    2615             :   void TearDown();
    2616             : 
    2617             :   // Flip the pair of spaces.
    2618             :   void Flip();
    2619             : 
    2620             :   // Grow the capacity of the semispaces.  Assumes that they are not at
    2621             :   // their maximum capacity.
    2622             :   void Grow();
    2623             : 
    2624             :   // Shrink the capacity of the semispaces.
    2625             :   void Shrink();
    2626             : 
    2627             :   // Return the allocated bytes in the active semispace.
    2628     1171763 :   size_t Size() override {
    2629             :     DCHECK_GE(top(), to_space_.page_low());
    2630     2343526 :     return to_space_.pages_used() *
    2631           0 :                MemoryChunkLayout::AllocatableMemoryInDataPage() +
    2632     1171763 :            static_cast<size_t>(top() - to_space_.page_low());
    2633             :   }
    2634             : 
    2635      819574 :   size_t SizeOfObjects() override { return Size(); }
    2636             : 
    2637             :   // Return the allocatable capacity of a semispace.
    2638             :   size_t Capacity() {
    2639             :     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    2640      495876 :     return (to_space_.current_capacity() / Page::kPageSize) *
    2641      495876 :            MemoryChunkLayout::AllocatableMemoryInDataPage();
    2642             :   }
    2643             : 
    2644             :   // Return the current size of a semispace, allocatable and non-allocatable
    2645             :   // memory.
    2646             :   size_t TotalCapacity() {
    2647             :     DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    2648      396873 :     return to_space_.current_capacity();
    2649             :   }
    2650             : 
    2651             :   // Committed memory for NewSpace is the committed memory of both semispaces
    2652             :   // combined.
    2653      717619 :   size_t CommittedMemory() override {
    2654      717619 :     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
    2655             :   }
    2656             : 
    2657           0 :   size_t MaximumCommittedMemory() override {
    2658             :     return from_space_.MaximumCommittedMemory() +
    2659           0 :            to_space_.MaximumCommittedMemory();
    2660             :   }
    2661             : 
    2662             :   // Approximate amount of physical memory committed for this space.
    2663             :   size_t CommittedPhysicalMemory() override;
    2664             : 
    2665             :   // Return the available bytes without growing.
    2666      107413 :   size_t Available() override {
    2667             :     DCHECK_GE(Capacity(), Size());
    2668      107413 :     return Capacity() - Size();
    2669             :   }
    2670             : 
    2671          30 :   size_t ExternalBackingStoreBytes(
    2672             :       ExternalBackingStoreType type) const override {
    2673             :     DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
    2674          30 :     return to_space_.ExternalBackingStoreBytes(type);
    2675             :   }
    2676             : 
    2677      214753 :   size_t AllocatedSinceLastGC() {
    2678      214753 :     const Address age_mark = to_space_.age_mark();
    2679             :     DCHECK_NE(age_mark, kNullAddress);
    2680             :     DCHECK_NE(top(), kNullAddress);
    2681             :     Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
    2682             :     Page* const last_page = Page::FromAllocationAreaAddress(top());
    2683             :     Page* current_page = age_mark_page;
    2684             :     size_t allocated = 0;
    2685      214753 :     if (current_page != last_page) {
    2686             :       DCHECK_EQ(current_page, age_mark_page);
    2687             :       DCHECK_GE(age_mark_page->area_end(), age_mark);
    2688       49640 :       allocated += age_mark_page->area_end() - age_mark;
    2689             :       current_page = current_page->next_page();
    2690             :     } else {
    2691             :       DCHECK_GE(top(), age_mark);
    2692      165113 :       return top() - age_mark;
    2693             :     }
    2694      144169 :     while (current_page != last_page) {
    2695             :       DCHECK_NE(current_page, age_mark_page);
    2696       44889 :       allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
    2697             :       current_page = current_page->next_page();
    2698             :     }
    2699             :     DCHECK_GE(top(), current_page->area_start());
    2700       49640 :     allocated += top() - current_page->area_start();
    2701             :     DCHECK_LE(allocated, Size());
    2702       49640 :     return allocated;
    2703             :   }
    2704             : 
    2705             :   void MovePageFromSpaceToSpace(Page* page) {
    2706             :     DCHECK(page->InFromSpace());
    2707        1465 :     from_space_.RemovePage(page);
    2708        1465 :     to_space_.PrependPage(page);
    2709             :   }
    2710             : 
    2711             :   bool Rebalance();
    2712             : 
    2713             :   // Return the maximum capacity of a semispace.
    2714             :   size_t MaximumCapacity() {
    2715             :     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
    2716      323316 :     return to_space_.maximum_capacity();
    2717             :   }
    2718             : 
    2719             :   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
    2720             : 
    2721             :   // Returns the initial capacity of a semispace.
    2722             :   size_t InitialTotalCapacity() {
    2723             :     DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
    2724       26747 :     return to_space_.minimum_capacity();
    2725             :   }
    2726             : 
    2727     1294881 :   void ResetOriginalTop() {
    2728             :     DCHECK_GE(top(), original_top_);
    2729             :     DCHECK_LE(top(), original_limit_);
    2730             :     original_top_.store(top(), std::memory_order_release);
    2731     1294881 :   }
    2732             : 
    2733             :   Address original_top_acquire() {
    2734             :     return original_top_.load(std::memory_order_acquire);
    2735             :   }
    2736             :   Address original_limit_relaxed() {
    2737             :     return original_limit_.load(std::memory_order_relaxed);
    2738             :   }
    2739             : 
    2740             :   // Return the first allocatable address in the active
    2741             :   // semispace. This may be the address where the first object resides.
    2742             :   Address first_allocatable_address() { return to_space_.space_start(); }
    2743             : 
    2744             :   // Get the age mark of the inactive semispace.
    2745   152340062 :   Address age_mark() { return from_space_.age_mark(); }
    2746             :   // Set the age mark in the active semispace.
    2747      107086 :   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
    2748             : 
    2749             :   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
    2750             :   AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
    2751             : 
    2752             :   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
    2753             :   AllocateRawUnaligned(int size_in_bytes);
    2754             : 
    2755             :   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
    2756             :   AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
    2757             : 
    2758             :   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
    2759             :       int size_in_bytes, AllocationAlignment alignment);
    2760             : 
    2761             :   // Reset the allocation pointer to the beginning of the active semispace.
    2762             :   void ResetLinearAllocationArea();
    2763             : 
    2764             :   // When inline allocation stepping is active, whether because of incremental
    2765             :   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
    2766             :   // inline allocation every once in a while. This is done by setting
    2767             :   // allocation_info_.limit to be lower than the actual limit and and increasing
    2768             :   // allocation_info_.limit to be lower than the actual limit and increasing
    2769             :   void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
    2770             : 
    2771             :   inline bool ToSpaceContainsSlow(Address a);
    2772             :   inline bool ToSpaceContains(Object o);
    2773             :   inline bool FromSpaceContains(Object o);
    2774             : 
    2775             :   // Try to switch the active semispace to a new, empty page.
    2776             :   // Returns false if this isn't possible or reasonable (i.e., there
    2777             :   // are no pages, or the current page is already empty), or true
    2778             :   // if successful.
    2779             :   bool AddFreshPage();
    2780             :   bool AddFreshPageSynchronized();
    2781             : 
    2782             : #ifdef VERIFY_HEAP
    2783             :   // Verify the active semispace.
    2784             :   virtual void Verify(Isolate* isolate);
    2785             : #endif
    2786             : 
    2787             : #ifdef DEBUG
    2788             :   // Print the active semispace.
    2789             :   void Print() override { to_space_.Print(); }
    2790             : #endif
    2791             : 
    2792             :   // Return whether the operation succeeded.
    2793             :   bool CommitFromSpaceIfNeeded() {
    2794      107086 :     if (from_space_.is_committed()) return true;
    2795       40535 :     return from_space_.Commit();
    2796             :   }
    2797             : 
    2798             :   bool UncommitFromSpace() {
    2799       26722 :     if (!from_space_.is_committed()) return true;
    2800       25357 :     return from_space_.Uncommit();
    2801             :   }
    2802             : 
    2803           0 :   bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
    2804             : 
    2805             :   SemiSpace* active_space() { return &to_space_; }
    2806             : 
    2807             :   Page* first_page() { return to_space_.first_page(); }
    2808             :   Page* last_page() { return to_space_.last_page(); }
    2809             : 
    2810             :   iterator begin() { return to_space_.begin(); }
    2811             :   iterator end() { return to_space_.end(); }
    2812             : 
    2813             :   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
    2814             : 
    2815             :   SemiSpace& from_space() { return from_space_; }
    2816             :   SemiSpace& to_space() { return to_space_; }
    2817             : 
    2818             :  private:
    2819             :   // Update linear allocation area to match the current to-space page.
    2820             :   void UpdateLinearAllocationArea();
    2821             : 
    2822             :   base::Mutex mutex_;
    2823             : 
    2824             :   // The top and the limit at the time of setting the linear allocation area.
    2825             :   // These values can be accessed by background tasks.
    2826             :   std::atomic<Address> original_top_;
    2827             :   std::atomic<Address> original_limit_;
    2828             : 
    2829             :   // The semispaces.
    2830             :   SemiSpace to_space_;
    2831             :   SemiSpace from_space_;
    2832             :   VirtualMemory reservation_;
    2833             : 
    2834             :   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
    2835      546225 :   bool SupportsInlineAllocation() override { return true; }
    2836             : 
    2837             :   friend class SemiSpaceIterator;
    2838             : };
    2839             : 
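                     : // Pauses the allocation observers of all spaces for the lifetime of the
                     : // scope (RAII). Usage sketch:
                     : //   {
                     : //     PauseAllocationObserversScope pause_observers(heap);
                     : //     // ... allocations here trigger no observer steps ...
                     : //   }  // Observers are resumed again here.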
    2840             : class PauseAllocationObserversScope {
    2841             :  public:
    2842             :   explicit PauseAllocationObserversScope(Heap* heap);
    2843             :   ~PauseAllocationObserversScope();
    2844             : 
    2845             :  private:
    2846             :   Heap* heap_;
    2847             :   DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
    2848             : };
    2849             : 
    2850             : // -----------------------------------------------------------------------------
    2851             : // Compaction space that is used temporarily during compaction.
    2852             : 
    2853      123181 : class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
    2854             :  public:
    2855             :   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
    2856      123180 :       : PagedSpace(heap, id, executable) {}
    2857             : 
    2858    96888872 :   bool is_local() override { return true; }
    2859             : 
    2860             :  protected:
    2861             :   // The space is temporary and not included in any snapshots.
    2862           0 :   bool snapshotable() override { return false; }
    2863             : 
    2864             :   V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
    2865             :       int size_in_bytes) override;
    2866             : 
    2867             :   V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
    2868             :       int size_in_bytes) override;
    2869             : };
    2870             : 
    2871             : 
    2872             : // A collection of |CompactionSpace|s used by a single compaction task.
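                     : // Usage sketch (hypothetical compaction task): each task allocates out of
                     : // its own collection, and the main thread later merges the result back:
                     : //   CompactionSpaceCollection spaces(heap);
                     : //   AllocationResult result =
                     : //       spaces.Get(OLD_SPACE)->AllocateRawUnaligned(size_in_bytes);
                     : //   ...
                     : //   heap->old_space()->MergeCompactionSpace(spaces.Get(OLD_SPACE));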
    2873             : class CompactionSpaceCollection : public Malloced {
    2874             :  public:
    2875      123179 :   explicit CompactionSpaceCollection(Heap* heap)
    2876             :       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
    2877      123179 :         code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
    2878             : 
    2879    97392482 :   CompactionSpace* Get(AllocationSpace space) {
    2880    97392482 :     switch (space) {
    2881             :       case OLD_SPACE:
    2882    97267720 :         return &old_space_;
    2883             :       case CODE_SPACE:
    2884      124762 :         return &code_space_;
    2885             :       default:
    2886           0 :         UNREACHABLE();
    2887             :     }
    2888             :     UNREACHABLE();
    2889             :   }
    2890             : 
    2891             :  private:
    2892             :   CompactionSpace old_space_;
    2893             :   CompactionSpace code_space_;
    2894             : };
    2895             : 
    2896             : // -----------------------------------------------------------------------------
    2897             : // Old generation regular object space.
    2898             : 
    2899      125750 : class OldSpace : public PagedSpace {
    2900             :  public:
    2901             :   // Creates an old space object. The constructor does not allocate pages
    2902             :   // from the OS.
    2903       62893 :   explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
    2904             : 
    2905             :   static bool IsAtPageStart(Address addr) {
    2906             :     return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
    2907             :            MemoryChunkLayout::ObjectStartOffsetInDataPage();
    2908             :   }
    2909             : };
    2910             : 
    2911             : // -----------------------------------------------------------------------------
    2912             : // Old generation code object space.
    2913             : 
    2914      125736 : class CodeSpace : public PagedSpace {
    2915             :  public:
    2916             :   // Creates a code space object. The constructor does not allocate pages
    2917             :   // from the OS.
    2918       62883 :   explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
    2919             : };
    2920             : 
    2921             : // For contiguous spaces, top should be in the space (or at the end) and limit
    2922             : // should be the end of the space.
    2923             : #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
    2924             :   SLOW_DCHECK((space).page_low() <= (info).top() &&   \
    2925             :               (info).top() <= (space).page_high() &&  \
    2926             :               (info).limit() <= (space).page_high())
    2927             : 
    2928             : 
    2929             : // -----------------------------------------------------------------------------
    2930             : // Old space for all map objects
    2931             : 
    2932      125736 : class MapSpace : public PagedSpace {
    2933             :  public:
    2934             :   // Creates a map space object.
    2935       62883 :   explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}
    2936             : 
    2937           0 :   int RoundSizeDownToObjectAlignment(int size) override {
    2938             :     if (base::bits::IsPowerOfTwo(Map::kSize)) {
    2939             :       return RoundDown(size, Map::kSize);
    2940             :     } else {
    2941           0 :       return (size / Map::kSize) * Map::kSize;
    2942             :     }
    2943             :   }
    2944             : 
    2945             : #ifdef VERIFY_HEAP
    2946             :   void VerifyObject(HeapObject obj) override;
    2947             : #endif
    2948             : };
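// Worked example for RoundSizeDownToObjectAlignment() above (illustrative
// sizes): both branches round size down to a multiple of Map::kSize; the
// IsPowerOfTwo() test is a compile-time constant, so the compiler keeps only
// the cheaper masking form when it applies.
//
//   Map::kSize == 80 (not a power of two): (200 / 80) * 80 == 160
//   Map::kSize == 64 (a power of two):     RoundDown(200, 64) == 192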
    2949             : 
    2950             : // -----------------------------------------------------------------------------
    2951             : // Read-only space for all immortal, immovable, and immutable objects.
    2952             : 
    2953      125735 : class ReadOnlySpace : public PagedSpace {
    2954             :  public:
    2955             :   class WritableScope {
    2956             :    public:
    2957             :     explicit WritableScope(ReadOnlySpace* space) : space_(space) {
    2958             :       space_->MarkAsReadWrite();
    2959             :     }
    2960             : 
    2961         441 :     ~WritableScope() { space_->MarkAsReadOnly(); }
    2962             : 
    2963             :    private:
    2964             :     ReadOnlySpace* space_;
    2965             :   };
    2966             : 
    2967             :   explicit ReadOnlySpace(Heap* heap);
    2968             : 
    2969             :   bool writable() const { return !is_marked_read_only_; }
    2970             : 
    2971             :   void ClearStringPaddingIfNeeded();
    2972             :   void MarkAsReadOnly();
    2973             : 
    2974             :   // During boot the free_space_map is created, and afterwards we may need
    2975             :   // to write it into the free list nodes that were already created.
    2976             :   void RepairFreeListsAfterDeserialization();
    2977             : 
    2978             :  private:
    2979             :   void MarkAsReadWrite();
    2980             :   void SetPermissionsForPages(PageAllocator::Permission access);
    2981             : 
    2982             :   bool is_marked_read_only_ = false;
    2983             : 
    2984             :   // String padding must be cleared just before serialization and therefore the
    2985             :   // string padding in the space will already have been cleared if the space was
    2986             :   // deserialized.
    2987             :   bool is_string_padding_cleared_;
    2988             : };
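// A minimal sketch of how the RAII WritableScope above is meant to be used
// (hypothetical call site): the constructor makes the space writable and the
// destructor re-protects it, even on early return.
//
//   {
//     ReadOnlySpace::WritableScope scope(read_only_space);
//     // ... mutate objects in the read-only space ...
//   }  // ~WritableScope() calls MarkAsReadOnly() here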
    2989             : 
    2990             : // -----------------------------------------------------------------------------
    2991             : // Large objects (> kMaxRegularHeapObjectSize) are allocated and
    2992             : // managed by the large object space.
    2993             : // Large objects do not move during garbage collections.
    2994             : 
    2995             : class LargeObjectSpace : public Space {
    2996             :  public:
    2997             :   typedef LargePageIterator iterator;
    2998             : 
    2999             :   explicit LargeObjectSpace(Heap* heap);
    3000             :   LargeObjectSpace(Heap* heap, AllocationSpace id);
    3001             : 
    3002      502944 :   ~LargeObjectSpace() override { TearDown(); }
    3003             : 
    3004             :   // Releases internal resources and frees objects in this space.
    3005             :   void TearDown();
    3006             : 
    3007             :   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
    3008             :   AllocateRaw(int object_size);
    3009             : 
    3010             :   // Available bytes for objects in this space.
    3011             :   size_t Available() override;
    3012             : 
    3013     1555104 :   size_t Size() override { return size_; }
    3014     9655289 :   size_t SizeOfObjects() override { return objects_size_; }
    3015             : 
    3016             :   // Approximate amount of physical memory committed for this space.
    3017             :   size_t CommittedPhysicalMemory() override;
    3018             : 
    3019             :   int PageCount() { return page_count_; }
    3020             : 
    3021             :   // Finds the object for a given address; returns a Smi if it is not found.
    3022             :   // The function iterates through all objects in this space and may be slow.
    3023             :   Object FindObject(Address a);
    3024             : 
    3025             :   // Finds the large object page containing the given address; returns
    3026             :   // nullptr if no such page exists.
    3027             :   LargePage* FindPage(Address a);
    3028             : 
    3029             :   // Clears the marking state of live objects.
    3030             :   void ClearMarkingStateOfLiveObjects();
    3031             : 
    3032             :   // Frees unmarked objects.
    3033             :   void FreeUnmarkedObjects();
    3034             : 
    3035             :   void InsertChunkMapEntries(LargePage* page);
    3036             :   void RemoveChunkMapEntries(LargePage* page);
    3037             :   void RemoveChunkMapEntries(LargePage* page, Address free_start);
    3038             : 
    3039             :   void PromoteNewLargeObject(LargePage* page);
    3040             : 
    3041             :   // Checks whether a heap object is in this space; O(1).
    3042             :   bool Contains(HeapObject obj);
    3043             :   // Checks whether an address is in the object area of this space. Iterates
    3044             :   // over all objects in the space and may be slow.
    3045           0 :   bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
    3046             : 
    3047             :   // Checks whether the space is empty.
    3048           5 :   bool IsEmpty() { return first_page() == nullptr; }
    3049             : 
    3050             :   void Register(LargePage* page, size_t object_size);
    3051             :   void Unregister(LargePage* page, size_t object_size);
    3052             : 
    3053             :   LargePage* first_page() {
    3054             :     return reinterpret_cast<LargePage*>(Space::first_page());
    3055             :   }
    3056             : 
    3057             :   // Collect code statistics.
    3058             :   void CollectCodeStatistics();
    3059             : 
    3060             :   iterator begin() { return iterator(first_page()); }
    3061             :   iterator end() { return iterator(nullptr); }
    3062             : 
    3063             :   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
    3064             : 
    3065             :   base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }
    3066             : 
    3067             : #ifdef VERIFY_HEAP
    3068             :   virtual void Verify(Isolate* isolate);
    3069             : #endif
    3070             : 
    3071             : #ifdef DEBUG
    3072             :   void Print() override;
    3073             : #endif
    3074             : 
    3075             :  protected:
    3076             :   LargePage* AllocateLargePage(int object_size, Executability executable);
    3077             :   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
    3078             :                                                      Executability executable);
    3079             : 
    3080             :   size_t size_;          // allocated bytes, including page headers
    3081             :   int page_count_;       // number of chunks (large pages)
    3082             :   size_t objects_size_;  // total size of the objects themselves
    3083             : 
    3084             :  private:
    3085             :   // chunk_map_mutex_ must be held whenever the chunk map is accessed
    3086             :   // concurrently.
    3087             :   base::Mutex chunk_map_mutex_;
    3088             : 
    3089             :   // Maps page-aligned addresses to their corresponding LargePage.
    3090             :   std::unordered_map<Address, LargePage*> chunk_map_;
    3091             : 
    3092             :   friend class LargeObjectIterator;
    3093             : };
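// A sketch of how FindPage() can be answered from chunk_map_ (assumed shape;
// spaces.cc holds the authoritative definition): the map is keyed by
// page-aligned addresses, so the query address is first rounded down to its
// chunk base.
//
//   LargePage* LargeObjectSpace::FindPage(Address a) {
//     base::MutexGuard guard(&chunk_map_mutex_);
//     const Address key = MemoryChunk::FromAddress(a)->address();
//     auto it = chunk_map_.find(key);
//     return it != chunk_map_.end() ? it->second : nullptr;
//   }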
    3094             : 
    3095      125736 : class NewLargeObjectSpace : public LargeObjectSpace {
    3096             :  public:
    3097             :   explicit NewLargeObjectSpace(Heap* heap);
    3098             : 
    3099             :   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
    3100             : 
    3101             :   // Available bytes for objects in this space.
    3102             :   size_t Available() override;
    3103             : 
    3104             :   void Flip();
    3105             : 
    3106             :   void FreeAllObjects();
    3107             : };
    3108             : 
    3109      125736 : class CodeLargeObjectSpace : public LargeObjectSpace {
    3110             :  public:
    3111             :   explicit CodeLargeObjectSpace(Heap* heap);
    3112             : 
    3113             :   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
    3114             :   AllocateRaw(int object_size);
    3115             : };
    3116             : 
    3117       47118 : class LargeObjectIterator : public ObjectIterator {
    3118             :  public:
    3119             :   explicit LargeObjectIterator(LargeObjectSpace* space);
    3120             : 
    3121             :   HeapObject Next() override;
    3122             : 
    3123             :  private:
    3124             :   LargePage* current_;
    3125             : };
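// Typical traversal with the iterator above (a sketch, assuming a Heap*
// named heap): Next() yields each large object in turn and a null HeapObject
// once the space is exhausted.
//
//   LargeObjectIterator it(heap->lo_space());
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // ... visit obj ...
//   }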
    3126             : 
    3127             : // Iterates over the chunks (pages and large object pages) that can contain
    3128             : // pointers to new space or to evacuation candidates.
    3129             : class OldGenerationMemoryChunkIterator {
    3130             :  public:
    3131             :   inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
    3132             : 
    3133             :   // Returns nullptr when the iterator is done.
    3134             :   inline MemoryChunk* next();
    3135             : 
    3136             :  private:
    3137             :   enum State {
    3138             :     kOldSpaceState,
    3139             :     kMapState,
    3140             :     kCodeState,
    3141             :     kLargeObjectState,
    3142             :     kCodeLargeObjectState,
    3143             :     kFinishedState
    3144             :   };
    3145             :   Heap* heap_;
    3146             :   State state_;
    3147             :   PageIterator old_iterator_;
    3148             :   PageIterator code_iterator_;
    3149             :   PageIterator map_iterator_;
    3150             :   LargePageIterator lo_iterator_;
    3151             :   LargePageIterator code_lo_iterator_;
    3152             : };
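// A sketch of the state machine behind next() (assumed shape; the real
// definition is inline elsewhere): each state drains one per-space iterator,
// then advances to the next state, until kFinishedState yields nullptr.
//
//   MemoryChunk* OldGenerationMemoryChunkIterator::next() {
//     switch (state_) {
//       case kOldSpaceState:
//         if (old_iterator_ != heap_->old_space()->end())
//           return *(old_iterator_++);
//         state_ = kMapState;
//         V8_FALLTHROUGH;
//       case kMapState:
//         if (map_iterator_ != heap_->map_space()->end())
//           return *(map_iterator_++);
//         state_ = kCodeState;
//         V8_FALLTHROUGH;
//       // kCodeState, kLargeObjectState and kCodeLargeObjectState follow
//       // the same pattern, ending in kFinishedState.
//       case kFinishedState:
//         return nullptr;
//       default:
//         UNREACHABLE();
//     }
//   }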
    3153             : 
    3154             : }  // namespace internal
    3155             : }  // namespace v8
    3156             : 
    3157             : #endif  // V8_HEAP_SPACES_H_

Generated by: LCOV version 1.10