Line data Source code
1 : // Copyright 2011 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_SPACES_H_
6 : #define V8_HEAP_SPACES_H_
7 :
8 : #include <list>
9 : #include <map>
10 : #include <memory>
11 : #include <unordered_map>
12 : #include <unordered_set>
13 : #include <vector>
14 :
15 : #include "src/allocation.h"
16 : #include "src/base/atomic-utils.h"
17 : #include "src/base/bounded-page-allocator.h"
18 : #include "src/base/export-template.h"
19 : #include "src/base/iterator.h"
20 : #include "src/base/list.h"
21 : #include "src/base/platform/mutex.h"
22 : #include "src/cancelable-task.h"
23 : #include "src/flags.h"
24 : #include "src/globals.h"
25 : #include "src/heap/heap.h"
26 : #include "src/heap/invalidated-slots.h"
27 : #include "src/heap/marking.h"
28 : #include "src/objects.h"
29 : #include "src/objects/free-space.h"
30 : #include "src/objects/heap-object.h"
31 : #include "src/objects/map.h"
32 : #include "src/utils.h"
33 :
34 : namespace v8 {
35 : namespace internal {
36 :
37 : namespace heap {
38 : class HeapTester;
39 : class TestCodePageAllocatorScope;
40 : } // namespace heap
41 :
42 : class AllocationObserver;
43 : class CompactionSpace;
44 : class CompactionSpaceCollection;
45 : class FreeList;
46 : class Isolate;
47 : class LinearAllocationArea;
48 : class LocalArrayBufferTracker;
49 : class MemoryAllocator;
50 : class MemoryChunk;
51 : class MemoryChunkLayout;
52 : class Page;
53 : class PagedSpace;
54 : class SemiSpace;
55 : class SkipList;
56 : class SlotsBuffer;
57 : class SlotSet;
58 : class TypedSlotSet;
59 : class Space;
60 :
61 : // -----------------------------------------------------------------------------
62 : // Heap structures:
63 : //
64 : // A JS heap consists of a young generation, an old generation, and a large
65 : // object space. The young generation is divided into two semispaces. A
66 : // scavenger implements Cheney's copying algorithm. The old generation is
67 : // separated into a map space and an old object space. The map space contains
68 : // all (and only) map objects, the rest of old objects go into the old space.
69 : // The old generation is collected by a mark-sweep-compact collector.
70 : //
71 : // The semispaces of the young generation are contiguous. The old and map
72 : // spaces consist of a list of pages. A page has a page header and an object
73 : // area.
74 : //
75 : // There is a separate large object space for objects larger than
76 : // kMaxRegularHeapObjectSize, so that they do not have to move during
77 : // collection. The large object space is paged. Pages in large object space
78 : // may be larger than the page size.
79 : //
80 : // A store-buffer based write barrier is used to keep track of intergenerational
81 : // references. See heap/store-buffer.h.
82 : //
83 : // During scavenges and mark-sweep collections we sometimes (after a store
84 : // buffer overflow) iterate intergenerational pointers without decoding heap
85 : // object maps, so if the page belongs to old space or large object space
86 : // it is essential to guarantee that the page does not contain any
87 : // garbage pointers to new space: every pointer aligned word which satisfies
88 : // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
89 : // new space. Thus objects in old space and large object spaces should have a
90 : // special layout (e.g. no bare integer fields). This requirement does not
91 : // apply to map space, which is iterated in a special fashion. However, we still
92 : // require pointer fields of dead maps to be cleaned.
93 : //
94 : // To enable lazy cleaning of old space pages we can mark chunks of the page
95 : // as being garbage. Garbage sections are marked with a special map. These
96 : // sections are skipped when scanning the page, even if we are otherwise
97 : // scanning without regard for object boundaries. Garbage sections are chained
98 : // together to form a free list after a GC. Garbage sections created outside
99 : // of GCs by object truncation etc. may not be in the free list chain. Very
100 : // small free spaces are ignored; they need only be cleaned of bogus pointers
101 : // into new space.
102 : //
103 : // Each page may have up to one special garbage section. The start of this
104 : // section is denoted by the top field in the space. The end of the section
105 : // is denoted by the limit field in the space. This special garbage section
106 : // is not marked with a free space map in the data. The point of this section
107 : // is to enable linear allocation without having to constantly update the byte
108 : // array every time the top field is updated and a new object is created. The
109 : // special garbage section is not in the chain of garbage sections.
110 : //
111 : // Since the top and limit fields are in the space, not the page, only one page
112 : // has a special garbage section, and if the top and limit are equal then there
113 : // is no special garbage section.
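//
// Illustrative sketch (editor's addition, not part of the original header) of
// the linear allocation that the top/limit fields above enable, using
// hypothetical local names:
//
//   if (top + size_in_bytes <= limit) {  // fits in the special garbage section
//     Address result = top;
//     top += size_in_bytes;  // bump the pointer; no free space map has to be
//     return result;         // written while the section is open
//   }
//   // otherwise fall back to the free list or trigger a GC.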
114 :
115 : // Some assertion macros used in the debugging mode.
116 :
117 : #define DCHECK_OBJECT_SIZE(size) \
118 : DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
119 :
120 : #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
121 : DCHECK((0 < size) && (size <= code_space->AreaSize()))
122 :
123 : enum FreeListCategoryType {
124 : kTiniest,
125 : kTiny,
126 : kSmall,
127 : kMedium,
128 : kLarge,
129 : kHuge,
130 :
131 : kFirstCategory = kTiniest,
132 : kLastCategory = kHuge,
133 : kNumberOfCategories = kLastCategory + 1,
134 : kInvalidCategory
135 : };
136 :
137 : enum FreeMode { kLinkCategory, kDoNotLinkCategory };
138 :
139 : enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
140 :
141 : enum RememberedSetType {
142 : OLD_TO_NEW,
143 : OLD_TO_OLD,
144 : NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
145 : };
146 :
147 : // A free list category maintains a linked list of free memory blocks.
148 : class FreeListCategory {
149 : public:
150 : FreeListCategory(FreeList* free_list, Page* page)
151 : : free_list_(free_list),
152 : page_(page),
153 : type_(kInvalidCategory),
154 : available_(0),
155 : prev_(nullptr),
156 5100232 : next_(nullptr) {}
157 :
158 : void Initialize(FreeListCategoryType type) {
159 2550660 : type_ = type;
160 2550660 : available_ = 0;
161 2550660 : prev_ = nullptr;
162 2550660 : next_ = nullptr;
163 : }
164 :
165 : void Reset();
166 :
167 0 : void ResetStats() { Reset(); }
168 :
169 : void RepairFreeList(Heap* heap);
170 :
171 : // Relinks the category into the currently owning free list. Requires that the
172 : // category is currently unlinked.
173 : void Relink();
174 :
175 : void Free(Address address, size_t size_in_bytes, FreeMode mode);
176 :
177 : // Performs a single try to pick a node of at least |minimum_size| from the
178 : // category. Stores the actual size in |node_size|. Returns nullptr if no
179 : // node is found.
180 : FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
181 :
182 : // Picks a node of at least |minimum_size| from the category. Stores the
183 : // actual size in |node_size|. Returns nullptr if no node is found.
184 : FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
185 :
186 : inline FreeList* owner();
187 : inline Page* page() const { return page_; }
188 : inline bool is_linked();
189 : bool is_empty() { return top().is_null(); }
190 : size_t available() const { return available_; }
191 :
192 6662736 : void set_free_list(FreeList* free_list) { free_list_ = free_list; }
193 :
194 : #ifdef DEBUG
195 : size_t SumFreeList();
196 : int FreeListLength();
197 : #endif
198 :
199 : private:
200 : // For debug builds we accurately compute free list lengths up until
201 : // {kVeryLongFreeList} by manually walking the list.
202 : static const int kVeryLongFreeList = 500;
203 :
204 : FreeSpace top() { return top_; }
205 24223827 : void set_top(FreeSpace top) { top_ = top; }
206 : FreeListCategory* prev() { return prev_; }
207 3621955 : void set_prev(FreeListCategory* prev) { prev_ = prev; }
208 : FreeListCategory* next() { return next_; }
209 4990251 : void set_next(FreeListCategory* next) { next_ = next; }
210 :
211 : // This FreeListCategory is owned by the given free_list_.
212 : FreeList* free_list_;
213 :
214 : // This FreeListCategory holds free list entries of the given page_.
215 : Page* const page_;
216 :
217 : // |type_|: The type of this free list category.
218 : FreeListCategoryType type_;
219 :
220 : // |available_|: Total available bytes in all blocks of this free list
221 : // category.
222 : size_t available_;
223 :
224 : // |top_|: Points to the top FreeSpace in the free list category.
225 : FreeSpace top_;
226 :
227 : FreeListCategory* prev_;
228 : FreeListCategory* next_;
229 :
230 : friend class FreeList;
231 : friend class PagedSpace;
232 :
233 : DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
234 : };
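// Illustrative use of the node-picking API above (editor's sketch; |category|
// and |minimum| are hypothetical locals, not part of this header):
//
//   size_t node_size = 0;
//   FreeSpace node = category->PickNodeFromList(minimum, &node_size);
//   if (!node.is_null()) {
//     // A free block of at least |minimum| bytes was found; |node_size| now
//     // holds its actual size.
//   }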
235 :
236 : class MemoryChunkLayout {
237 : public:
238 : static size_t CodePageGuardStartOffset();
239 : static size_t CodePageGuardSize();
240 : static intptr_t ObjectStartOffsetInCodePage();
241 : static intptr_t ObjectEndOffsetInCodePage();
242 : static size_t AllocatableMemoryInCodePage();
243 : static intptr_t ObjectStartOffsetInDataPage();
244 : V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
245 : static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
246 : static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
247 : };
248 :
249 : // MemoryChunk represents a memory region owned by a specific space.
250 : // It is divided into the header and the body. Chunk start is always
251 : // 1MB aligned. Start of the body is aligned so it can accommodate
252 : // any heap object.
253 : class MemoryChunk {
254 : public:
255 : // Use with std data structures.
256 : struct Hasher {
257 : size_t operator()(MemoryChunk* const chunk) const {
258 432353625 : return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
259 : }
260 : };
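  // For example (illustrative): the hasher above lets chunks key standard
  // containers, e.g.
  //   std::unordered_set<MemoryChunk*, MemoryChunk::Hasher> visited_chunks;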
261 :
262 : enum Flag {
263 : NO_FLAGS = 0u,
264 : IS_EXECUTABLE = 1u << 0,
265 : POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
266 : POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
267 : // A page in the from-space or a young large page that was not scavenged
268 : // yet.
269 : FROM_PAGE = 1u << 3,
270 : // A page in the to-space or a young large page that was scavenged.
271 : TO_PAGE = 1u << 4,
272 : LARGE_PAGE = 1u << 5,
273 : EVACUATION_CANDIDATE = 1u << 6,
274 : NEVER_EVACUATE = 1u << 7,
275 :
276 : // Large objects can have a progress bar in their page header. These objects
277 : // are scanned in increments and will be kept black while being scanned.
278 : // Even if the mutator writes to them they will be kept black and a white
279 : // to grey transition is performed in the value.
280 : HAS_PROGRESS_BAR = 1u << 8,
281 :
282 : // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
283 : // from new to old space during evacuation.
284 : PAGE_NEW_OLD_PROMOTION = 1u << 9,
285 :
286 : // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
287 : // within the new space during evacuation.
288 : PAGE_NEW_NEW_PROMOTION = 1u << 10,
289 :
290 : // This flag is intended to be used for testing. Works only when both
291 : // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
292 : // are set. It forces the page to become an evacuation candidate at next
293 : // candidates selection cycle.
294 : FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
295 :
296 : // This flag is intended to be used for testing.
297 : NEVER_ALLOCATE_ON_PAGE = 1u << 12,
298 :
299 : // The memory chunk is already logically freed; however, the actual freeing
300 : // still has to be performed.
301 : PRE_FREED = 1u << 13,
302 :
303 : // |POOLED|: When actually freeing this chunk, only uncommit and do not
304 : // give up the reservation as we still reuse the chunk at some point.
305 : POOLED = 1u << 14,
306 :
307 : // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
308 : // has been aborted and needs special handling by the sweeper.
309 : COMPACTION_WAS_ABORTED = 1u << 15,
310 :
311 : // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
312 : // on pages is sometimes aborted. The flag is used to avoid repeatedly
313 : // triggering on the same page.
314 : COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
315 :
316 : // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
317 : // to iterate the page.
318 : SWEEP_TO_ITERATE = 1u << 17,
319 :
320 : // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
321 : // enabled.
322 : INCREMENTAL_MARKING = 1u << 18,
323 : NEW_SPACE_BELOW_AGE_MARK = 1u << 19
324 : };
325 :
326 : using Flags = uintptr_t;
327 :
328 : static const Flags kPointersToHereAreInterestingMask =
329 : POINTERS_TO_HERE_ARE_INTERESTING;
330 :
331 : static const Flags kPointersFromHereAreInterestingMask =
332 : POINTERS_FROM_HERE_ARE_INTERESTING;
333 :
334 : static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
335 :
336 : static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
337 :
338 : static const Flags kIsLargePageMask = LARGE_PAGE;
339 :
340 : static const Flags kSkipEvacuationSlotsRecordingMask =
341 : kEvacuationCandidateMask | kIsInYoungGenerationMask;
342 :
343 : // |kSweepingDone|: The page state when sweeping is complete or sweeping must
344 : // not be performed on that page. Sweeper threads that are done with their
345 : // work will set this value and not touch the page anymore.
346 : // |kSweepingPending|: This page is ready for parallel sweeping.
347 : // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
348 : enum ConcurrentSweepingState {
349 : kSweepingDone,
350 : kSweepingPending,
351 : kSweepingInProgress,
352 : };
353 :
354 : static const intptr_t kAlignment =
355 : (static_cast<uintptr_t>(1) << kPageSizeBits);
356 :
357 : static const intptr_t kAlignmentMask = kAlignment - 1;
358 :
359 : static const intptr_t kSizeOffset = 0;
360 : static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
361 : static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
362 : static const intptr_t kReservationOffset =
363 : kMarkBitmapOffset + kSystemPointerSize;
364 : static const intptr_t kHeapOffset =
365 : kReservationOffset + 3 * kSystemPointerSize;
366 : static const intptr_t kHeaderSentinelOffset =
367 : kHeapOffset + kSystemPointerSize;
368 : static const intptr_t kOwnerOffset =
369 : kHeaderSentinelOffset + kSystemPointerSize;
370 :
371 : static const size_t kHeaderSize =
372 : kSizeOffset // NOLINT
373 : + kSizetSize // size_t size
374 : + kUIntptrSize // uintptr_t flags_
375 : + kSystemPointerSize // Bitmap* marking_bitmap_
376 : + 3 * kSystemPointerSize // VirtualMemory reservation_
377 : + kSystemPointerSize // Heap* heap_
378 : + kSystemPointerSize // Address header_sentinel_
379 : + kSystemPointerSize // Address area_start_
380 : + kSystemPointerSize // Address area_end_
381 : + kSystemPointerSize // Address owner_
382 : + kSizetSize // size_t progress_bar_
383 : + kIntptrSize // intptr_t live_byte_count_
384 : + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
385 : + kSystemPointerSize *
386 : NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
387 : + kSystemPointerSize // InvalidatedSlots* invalidated_slots_
388 : + kSystemPointerSize // SkipList* skip_list_
389 : + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
390 : + kSystemPointerSize // base::Mutex* mutex_
391 : + kSystemPointerSize // std::atomic<ConcurrentSweepingState>
392 : // concurrent_sweeping_
393 : + kSystemPointerSize // base::Mutex* page_protection_change_mutex_
394 : + kSystemPointerSize // uintptr_t write_unprotect_counter_
395 : + kSizetSize * ExternalBackingStoreType::kNumTypes
396 : // std::atomic<size_t> external_backing_store_bytes_
397 : + kSizetSize // size_t allocated_bytes_
398 : + kSizetSize // size_t wasted_memory_
399 : + kSystemPointerSize * 2 // base::ListNode
400 : + kSystemPointerSize * kNumberOfCategories
401 : // FreeListCategory categories_[kNumberOfCategories]
402 : + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
403 : + kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
404 : + kSystemPointerSize; // Bitmap* young_generation_bitmap_
405 :
406 : // Page size in bytes. This must be a multiple of the OS page size.
407 : static const int kPageSize = 1 << kPageSizeBits;
408 :
409 : // Maximum number of nested code memory modification scopes.
410 : // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
411 : static const int kMaxWriteUnprotectCounter = 4;
412 :
413 8496018006 : static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
414 :
415 : // Only works if the pointer is in the first kPageSize of the MemoryChunk.
416 : static MemoryChunk* FromAddress(Address a) {
417 379136627 : return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
418 : }
419 : // Only works if the object is in the first kPageSize of the MemoryChunk.
420 2638785 : static MemoryChunk* FromHeapObject(const HeapObject o) {
421 7879709633 : return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
422 : }
423 :
424 : void SetOldGenerationPageFlags(bool is_marking);
425 : void SetYoungGenerationPageFlags(bool is_marking);
426 :
427 : static inline MemoryChunk* FromAnyPointerAddress(Address addr);
428 :
429 2892346 : static inline void UpdateHighWaterMark(Address mark) {
430 4227704 : if (mark == kNullAddress) return;
431 : // Need to subtract one from the mark because when a chunk is full the
432 : // top points to the next address after the chunk, which effectively belongs
433 : // to another chunk. See the comment to Page::FromAllocationAreaAddress.
434 1556988 : MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
435 1556988 : intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
436 1556988 : intptr_t old_mark = 0;
437 1556988 : do {
438 1556988 : old_mark = chunk->high_water_mark_;
439 : } while (
440 2229558 : (new_mark > old_mark) &&
441 : !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
442 : }
443 :
444 : static inline void MoveExternalBackingStoreBytes(
445 : ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
446 : size_t amount);
447 :
448 : void DiscardUnusedMemory(Address addr, size_t size);
449 :
450 : Address address() const {
451 1012345706 : return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
452 : }
453 :
454 : base::Mutex* mutex() { return mutex_; }
455 :
456 : bool Contains(Address addr) {
457 778986 : return addr >= area_start() && addr < area_end();
458 : }
459 :
460 : // Checks whether |addr| can be a limit of addresses in this page. It's a
461 : // limit if it's in the page, or if it's just after the last byte of the page.
462 : bool ContainsLimit(Address addr) {
463 61435708 : return addr >= area_start() && addr <= area_end();
464 : }
465 :
466 : void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
467 : concurrent_sweeping_ = state;
468 : }
469 :
470 : ConcurrentSweepingState concurrent_sweeping_state() {
471 : return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
472 : }
473 :
474 498008 : bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
475 :
476 : size_t size() const { return size_; }
477 : void set_size(size_t size) { size_ = size; }
478 :
479 : inline Heap* heap() const { return heap_; }
480 :
481 : Heap* synchronized_heap();
482 :
483 0 : inline SkipList* skip_list() { return skip_list_; }
484 :
485 89404 : inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
486 :
487 : template <RememberedSetType type>
488 490 : bool ContainsSlots() {
489 : return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
490 741 : invalidated_slots() != nullptr;
491 : }
492 :
493 : template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
494 : SlotSet* slot_set() {
495 : if (access_mode == AccessMode::ATOMIC)
496 198828724 : return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
497 : return slot_set_[type];
498 : }
499 :
500 : template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
501 : TypedSlotSet* typed_slot_set() {
502 : if (access_mode == AccessMode::ATOMIC)
503 3219786 : return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
504 : return typed_slot_set_[type];
505 : }
506 :
507 : template <RememberedSetType type>
508 : SlotSet* AllocateSlotSet();
509 : // Not safe to be called concurrently.
510 : template <RememberedSetType type>
511 : void ReleaseSlotSet();
512 : template <RememberedSetType type>
513 : TypedSlotSet* AllocateTypedSlotSet();
514 : // Not safe to be called concurrently.
515 : template <RememberedSetType type>
516 : void ReleaseTypedSlotSet();
517 :
518 : InvalidatedSlots* AllocateInvalidatedSlots();
519 : void ReleaseInvalidatedSlots();
520 : void RegisterObjectWithInvalidatedSlots(HeapObject object, int size);
521 : // Updates invalidated_slots after array left-trimming.
522 : void MoveObjectWithInvalidatedSlots(HeapObject old_start,
523 : HeapObject new_start);
524 : bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
525 : InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
526 :
527 : void ReleaseLocalTracker();
528 :
529 : void AllocateYoungGenerationBitmap();
530 : void ReleaseYoungGenerationBitmap();
531 :
532 : void AllocateMarkingBitmap();
533 : void ReleaseMarkingBitmap();
534 :
535 : Address area_start() { return area_start_; }
536 : Address area_end() { return area_end_; }
537 8938532 : size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
538 :
539 : // Approximate amount of physical memory committed for this chunk.
540 : size_t CommittedPhysicalMemory();
541 :
542 184299 : Address HighWaterMark() { return address() + high_water_mark_; }
543 :
544 : size_t ProgressBar() {
545 : DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
546 : return progress_bar_.load(std::memory_order_acquire);
547 : }
548 :
549 : bool TrySetProgressBar(size_t old_value, size_t new_value) {
550 : DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
551 : return progress_bar_.compare_exchange_strong(old_value, new_value,
552 : std::memory_order_acq_rel);
553 : }
554 :
555 : void ResetProgressBar() {
556 58391 : if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
557 : progress_bar_.store(0, std::memory_order_release);
558 : }
559 : }
560 :
561 : inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
562 : size_t amount);
563 :
564 : inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
565 : size_t amount);
566 :
567 : size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
568 1370789 : return external_backing_store_bytes_[type];
569 : }
570 :
571 : // Some callers rely on the fact that this can operate on both
572 : // tagged and aligned object addresses.
573 2638785 : inline uint32_t AddressToMarkbitIndex(Address addr) const {
574 8761492589 : return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
575 : }
576 :
577 : inline Address MarkbitIndexToAddress(uint32_t index) const {
578 : return this->address() + (index << kTaggedSizeLog2);
579 : }
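  // Worked example (illustrative, assuming kTaggedSizeLog2 == 3 on a 64-bit
  // build without pointer compression): an address 64 bytes past the chunk
  // start yields markbit index 64 >> 3 == 8, and MarkbitIndexToAddress(8)
  // maps back to chunk start + 64.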
580 :
581 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
582 : void SetFlag(Flag flag) {
583 : if (access_mode == AccessMode::NON_ATOMIC) {
584 5097158 : flags_ |= flag;
585 : } else {
586 11395 : base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
587 : }
588 : }
589 :
590 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
591 : bool IsFlagSet(Flag flag) {
592 6545451438 : return (GetFlags<access_mode>() & flag) != 0;
593 : }
594 :
595 3275613 : void ClearFlag(Flag flag) { flags_ &= ~flag; }
596 : // Set or clear multiple flags at a time. The flags in the mask are set to
597 : // the value in "flags", the rest retain the current value in |flags_|.
598 : void SetFlags(uintptr_t flags, uintptr_t mask) {
599 1059204 : flags_ = (flags_ & ~mask) | (flags & mask);
600 : }
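  // For example (illustrative): SetFlags(0, kIsInYoungGenerationMask) clears
  // both FROM_PAGE and TO_PAGE at once while leaving every other flag at its
  // current value.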
601 :
602 : // Return all current flags.
603 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
604 : uintptr_t GetFlags() {
605 : if (access_mode == AccessMode::NON_ATOMIC) {
606 : return flags_;
607 : } else {
608 6063189537 : return base::AsAtomicWord::Relaxed_Load(&flags_);
609 : }
610 : }
611 :
612 : bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
613 :
614 : void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
615 :
616 : bool CanAllocate() {
617 229796 : return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
618 : }
619 :
620 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
621 10634766 : bool IsEvacuationCandidate() {
622 : DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
623 : IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
624 10634766 : return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
625 : }
626 :
627 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
628 537791 : bool ShouldSkipEvacuationSlotRecording() {
629 : uintptr_t flags = GetFlags<access_mode>();
630 : return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
631 30742911 : ((flags & COMPACTION_WAS_ABORTED) == 0);
632 : }
633 :
634 : Executability executable() {
635 3468797 : return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
636 : }
637 :
638 1717851077 : bool IsFromPage() const { return (flags_ & FROM_PAGE) != 0; }
639 42193405 : bool IsToPage() const { return (flags_ & TO_PAGE) != 0; }
640 100305071 : bool IsLargePage() const { return (flags_ & LARGE_PAGE) != 0; }
641 :
642 : bool InYoungGeneration() const {
643 1544910385 : return (flags_ & kIsInYoungGenerationMask) != 0;
644 : }
645 4058 : bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
646 : bool InNewLargeObjectSpace() const {
647 188800538 : return InYoungGeneration() && IsLargePage();
648 : }
649 : bool InOldSpace() const;
650 : bool InLargeObjectSpace() const;
651 :
652 :
653 : Space* owner() const { return owner_; }
654 :
655 : void set_owner(Space* space) { owner_ = space; }
656 :
657 : static inline bool HasHeaderSentinel(Address slot_addr);
658 :
659 : // Emits a memory barrier. For TSAN builds the other thread needs to perform
660 : // MemoryChunk::synchronized_heap() to simulate the barrier.
661 : void InitializationMemoryFence();
662 :
663 : void SetReadable();
664 : void SetReadAndExecutable();
665 : void SetReadAndWritable();
666 :
667 2633089 : void SetDefaultCodePermissions() {
668 2633089 : if (FLAG_jitless) {
669 71923 : SetReadable();
670 : } else {
671 2561166 : SetReadAndExecutable();
672 : }
673 2633088 : }
674 :
675 : base::ListNode<MemoryChunk>& list_node() { return list_node_; }
676 :
677 : protected:
678 : static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
679 : Address area_start, Address area_end,
680 : Executability executable, Space* owner,
681 : VirtualMemory reservation);
682 :
683 : // Should be called when memory chunk is about to be freed.
684 : void ReleaseAllocatedMemory();
685 :
686 : // Sets the requested page permissions only if the write unprotect counter
687 : // has reached 0.
688 : void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
689 : PageAllocator::Permission permission);
690 :
691 1089612 : VirtualMemory* reserved_memory() { return &reservation_; }
692 :
693 : template <AccessMode mode>
694 : ConcurrentBitmap<mode>* marking_bitmap() const {
695 : return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
696 : }
697 :
698 : template <AccessMode mode>
699 : ConcurrentBitmap<mode>* young_generation_bitmap() const {
700 : return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
701 : }
702 :
703 : size_t size_;
704 : uintptr_t flags_;
705 :
706 : Bitmap* marking_bitmap_;
707 :
708 : // If the chunk needs to remember its memory reservation, it is stored here.
709 : VirtualMemory reservation_;
710 :
711 : Heap* heap_;
712 :
713 : // This is used to distinguish the memory chunk header from the interior of a
714 : // large page. The memory chunk header stores here an impossible tagged
715 : // pointer: the tagged pointer of the page start. A field in a large object is
716 : // guaranteed to not contain such a pointer.
717 : Address header_sentinel_;
718 :
719 : // The space owning this memory chunk.
720 : std::atomic<Space*> owner_;
721 :
722 : // Start and end of allocatable memory on this chunk.
723 : Address area_start_;
724 : Address area_end_;
725 :
726 : // Used by the incremental marker to keep track of the scanning progress in
727 : // large objects that have a progress bar and are scanned in increments.
728 : std::atomic<size_t> progress_bar_;
729 :
730 : // Count of bytes marked black on page.
731 : intptr_t live_byte_count_;
732 :
733 : // A single slot set for small pages (of size kPageSize) or an array of slot
734 : // sets for large pages. In the latter case the number of entries in the array
735 : // is ceil(size() / kPageSize).
736 : SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
737 : TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
738 : InvalidatedSlots* invalidated_slots_;
739 :
740 : SkipList* skip_list_;
741 :
742 : // Assuming the initial allocation on a page is sequential,
743 : // this counts the highest number of bytes ever allocated on the page.
744 : std::atomic<intptr_t> high_water_mark_;
745 :
746 : base::Mutex* mutex_;
747 :
748 : std::atomic<intptr_t> concurrent_sweeping_;
749 :
750 : base::Mutex* page_protection_change_mutex_;
751 :
752 : // This field is only relevant for code pages. It depicts the number of
753 : // times a component requested this page to be read+writeable. The
754 : // counter is decremented when a component resets to read+executable.
755 : // If Value() == 0 => The memory is read and executable.
756 : // If Value() >= 1 => The memory is read and writable (and maybe executable).
757 : // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
758 : // excessive nesting of scopes.
759 : // All executable MemoryChunks are allocated rw based on the assumption that
760 : // they will be used immediately for an allocation. They are initialized
761 : // with the number of open CodeSpaceMemoryModificationScopes. The caller
762 : // that triggers the page allocation is responsible for decrementing the
763 : // counter.
764 : uintptr_t write_unprotect_counter_;
765 :
766 : // Bytes allocated on the page, including all objects on the page
767 : // and the linear allocation area.
768 : size_t allocated_bytes_;
769 :
770 : // Tracks off-heap memory used by this memory chunk.
771 : std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
772 :
773 : // Freed memory that was not added to the free list.
774 : size_t wasted_memory_;
775 :
776 : base::ListNode<MemoryChunk> list_node_;
777 :
778 : FreeListCategory* categories_[kNumberOfCategories];
779 :
780 : LocalArrayBufferTracker* local_tracker_;
781 :
782 : std::atomic<intptr_t> young_generation_live_byte_count_;
783 : Bitmap* young_generation_bitmap_;
784 :
785 : private:
786 927908 : void InitializeReservedMemory() { reservation_.Reset(); }
787 :
788 : friend class ConcurrentMarkingState;
789 : friend class IncrementalMarkingState;
790 : friend class MajorAtomicMarkingState;
791 : friend class MajorMarkingState;
792 : friend class MajorNonAtomicMarkingState;
793 : friend class MemoryAllocator;
794 : friend class MemoryChunkValidator;
795 : friend class MinorMarkingState;
796 : friend class MinorNonAtomicMarkingState;
797 : friend class PagedSpace;
798 : };
799 :
800 : static_assert(sizeof(std::atomic<intptr_t>) == kSystemPointerSize,
801 : "sizeof(std::atomic<intptr_t>) == kSystemPointerSize");
802 :
803 : // -----------------------------------------------------------------------------
804 : // A page is a memory chunk of size 512K. Large object pages may be larger.
805 : //
806 : // The only way to get a page pointer is by calling factory methods:
807 : // Page* p = Page::FromAddress(addr); or
808 : // Page* p = Page::FromAllocationAreaAddress(address);
809 : class Page : public MemoryChunk {
810 : public:
811 : static const intptr_t kCopyAllFlags = ~0;
812 :
813 : // Page flags copied from from-space to to-space when flipping semispaces.
814 : static const intptr_t kCopyOnFlipFlagsMask =
815 : static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
816 : static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
817 : static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
818 :
819 : // Returns the page containing a given address. The address ranges
820 : // from [page_addr .. page_addr + kPageSize[. This only works if the object
821 : // is in fact in a page.
822 0 : static Page* FromAddress(Address addr) {
823 272645021 : return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
824 : }
825 0 : static Page* FromHeapObject(const HeapObject o) {
826 6076858469 : return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
827 : }
828 :
829 : // Returns the page containing the address provided. The address can
830 : // potentially point right after the page. To also be safe for tagged values
831 : // we subtract a hole word. The valid address ranges from
832 : // [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
833 : static Page* FromAllocationAreaAddress(Address address) {
834 1046567 : return Page::FromAddress(address - kTaggedSize);
835 : }
836 :
837 : // Checks if address1 and address2 are on the same new space page.
838 : static bool OnSamePage(Address address1, Address address2) {
839 : return Page::FromAddress(address1) == Page::FromAddress(address2);
840 : }
841 :
842 : // Checks whether an address is page aligned.
843 : static bool IsAlignedToPageSize(Address addr) {
844 2232814 : return (addr & kPageAlignmentMask) == 0;
845 : }
846 :
847 : static Page* ConvertNewToOld(Page* old_page);
848 :
849 : inline void MarkNeverAllocateForTesting();
850 : inline void MarkEvacuationCandidate();
851 : inline void ClearEvacuationCandidate();
852 :
853 : Page* next_page() { return static_cast<Page*>(list_node_.next()); }
854 : Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
855 :
856 : template <typename Callback>
857 1110456 : inline void ForAllFreeListCategories(Callback callback) {
858 17080819 : for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
859 7883454 : callback(categories_[i]);
860 : }
861 1110456 : }
862 :
863 : // Returns the offset of a given address to this page.
864 580 : inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
865 :
866 : // Returns the address for a given offset to this page.
867 : Address OffsetToAddress(size_t offset) {
868 314 : Address address_in_page = address() + offset;
869 : DCHECK_GE(address_in_page, area_start_);
870 : DCHECK_LT(address_in_page, area_end_);
871 : return address_in_page;
872 : }
873 :
874 : // WaitUntilSweepingCompleted only works when concurrent sweeping is in
875 : // progress. In particular, when we know that right before this call a
876 : // sweeper thread was sweeping this page.
877 : void WaitUntilSweepingCompleted() {
878 0 : mutex_->Lock();
879 0 : mutex_->Unlock();
880 : DCHECK(SweepingDone());
881 : }
882 :
883 : void AllocateLocalTracker();
884 : inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
885 : bool contains_array_buffers();
886 :
887 : void ResetFreeListStatistics();
888 :
889 : size_t AvailableInFreeList();
890 :
891 : size_t AvailableInFreeListFromAllocatedBytes() {
892 : DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
893 : return area_size() - wasted_memory() - allocated_bytes();
894 : }
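  // Worked example (illustrative numbers): with a 500 KB usable area, 4 KB of
  // wasted memory and 400 KB of allocated bytes, the bytes available through
  // the free list are 500 KB - 4 KB - 400 KB = 96 KB.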
895 :
896 : FreeListCategory* free_list_category(FreeListCategoryType type) {
897 20696092 : return categories_[type];
898 : }
899 :
900 : size_t wasted_memory() { return wasted_memory_; }
901 419984 : void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
902 : size_t allocated_bytes() { return allocated_bytes_; }
903 : void IncreaseAllocatedBytes(size_t bytes) {
904 : DCHECK_LE(bytes, area_size());
905 1273106 : allocated_bytes_ += bytes;
906 : }
907 : void DecreaseAllocatedBytes(size_t bytes) {
908 : DCHECK_LE(bytes, area_size());
909 : DCHECK_GE(allocated_bytes(), bytes);
910 21117797 : allocated_bytes_ -= bytes;
911 : }
912 :
913 : void ResetAllocatedBytes();
914 :
915 : size_t ShrinkToHighWaterMark();
916 :
917 : V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
918 : void DestroyBlackArea(Address start, Address end);
919 :
920 : void InitializeFreeListCategories();
921 : void AllocateFreeListCategories();
922 : void ReleaseFreeListCategories();
923 :
924 : #ifdef DEBUG
925 : void Print();
926 : #endif // DEBUG
927 :
928 : private:
929 : enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
930 :
931 : friend class MemoryAllocator;
932 : };
933 :
934 : class ReadOnlyPage : public Page {
935 : public:
936 : // Clears any pointers in the header that point out of the page that would
937 : // otherwise make the header non-relocatable.
938 : void MakeHeaderRelocatable();
939 :
940 : private:
941 : friend class ReadOnlySpace;
942 : };
943 :
944 : class LargePage : public MemoryChunk {
945 : public:
946 : // A limit to guarantee that we do not overflow typed slot offset in
947 : // the old to old remembered set.
948 : // Note that this limit is higher than what assembler already imposes on
949 : // x64 and ia32 architectures.
950 : static const int kMaxCodePageSize = 512 * MB;
951 :
952 : static LargePage* FromHeapObject(const HeapObject o) {
953 : return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
954 : }
955 :
956 : inline HeapObject GetObject();
957 :
958 0 : inline LargePage* next_page() {
959 0 : return static_cast<LargePage*>(list_node_.next());
960 : }
961 :
962 : // Uncommit memory that is not in use anymore by the object. If the object
963 : // cannot be shrunk, 0 is returned.
964 : Address GetAddressToShrink(Address object_address, size_t object_size);
965 :
966 : void ClearOutOfLiveRangeSlots(Address free_start);
967 :
968 : private:
969 : static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
970 : Executability executable);
971 :
972 : friend class MemoryAllocator;
973 : };
974 :
975 :
976 : // ----------------------------------------------------------------------------
977 : // Space is the abstract superclass for all allocation spaces.
978 : class Space : public Malloced {
979 : public:
980 837461 : Space(Heap* heap, AllocationSpace id)
981 : : allocation_observers_paused_(false),
982 : heap_(heap),
983 : id_(id),
984 : committed_(0),
985 1674922 : max_committed_(0) {
986 : external_backing_store_bytes_ =
987 837461 : new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
988 : external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
989 837462 : external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
990 : 0;
991 : CheckOffsetsAreConsistent();
992 837462 : }
993 :
994 : void CheckOffsetsAreConsistent() const;
995 :
996 : static inline void MoveExternalBackingStoreBytes(
997 : ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
998 :
999 1674616 : virtual ~Space() {
1000 837305 : delete[] external_backing_store_bytes_;
1001 837311 : external_backing_store_bytes_ = nullptr;
1002 837311 : }
1003 :
1004 : Heap* heap() const { return heap_; }
1005 :
1006 : // Identity used in error reporting.
1007 0 : AllocationSpace identity() { return id_; }
1008 :
1009 0 : const char* name() { return AllocationSpaceName(id_); }
1010 :
1011 : V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
1012 : AllocationObserver* observer);
1013 :
1014 : V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
1015 : AllocationObserver* observer);
1016 :
1017 : V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
1018 :
1019 : V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
1020 :
1021 161330 : V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
1022 :
1023 : void AllocationStep(int bytes_since_last, Address soon_object, int size);
1024 :
1025 : // Return the total amount of committed memory for this space, i.e., allocatable
1026 : // memory and page headers.
1027 4673017 : virtual size_t CommittedMemory() { return committed_; }
1028 :
1029 0 : virtual size_t MaximumCommittedMemory() { return max_committed_; }
1030 :
1031 : // Returns allocated size.
1032 : virtual size_t Size() = 0;
1033 :
1034 : // Returns size of objects. Can differ from the allocated size
1035 : // (e.g. see LargeObjectSpace).
1036 0 : virtual size_t SizeOfObjects() { return Size(); }
1037 :
1038 : // Approximate amount of physical memory committed for this space.
1039 : virtual size_t CommittedPhysicalMemory() = 0;
1040 :
1041 : // Return the available bytes without growing.
1042 : virtual size_t Available() = 0;
1043 :
1044 21797318 : virtual int RoundSizeDownToObjectAlignment(int size) {
1045 21797318 : if (id_ == CODE_SPACE) {
1046 0 : return RoundDown(size, kCodeAlignment);
1047 : } else {
1048 21797318 : return RoundDown(size, kTaggedSize);
1049 : }
1050 : }
1051 :
1052 : virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
1053 :
1054 : void AccountCommitted(size_t bytes) {
1055 : DCHECK_GE(committed_ + bytes, committed_);
1056 731373 : committed_ += bytes;
1057 731373 : if (committed_ > max_committed_) {
1058 637347 : max_committed_ = committed_;
1059 : }
1060 : }
1061 :
1062 : void AccountUncommitted(size_t bytes) {
1063 : DCHECK_GE(committed_, committed_ - bytes);
1064 436341 : committed_ -= bytes;
1065 : }
1066 :
1067 : inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1068 : size_t amount);
1069 :
1070 : inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1071 : size_t amount);
1072 :
1073 : // Returns amount of off-heap memory in-use by objects in this Space.
1074 65 : virtual size_t ExternalBackingStoreBytes(
1075 : ExternalBackingStoreType type) const {
1076 160 : return external_backing_store_bytes_[type];
1077 : }
1078 :
1079 : V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
1080 :
1081 112 : MemoryChunk* first_page() { return memory_chunk_list_.front(); }
1082 : MemoryChunk* last_page() { return memory_chunk_list_.back(); }
1083 :
1084 : base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
1085 :
1086 : #ifdef DEBUG
1087 : virtual void Print() = 0;
1088 : #endif
1089 :
1090 : protected:
1091 : intptr_t GetNextInlineAllocationStepSize();
1092 : bool AllocationObserversActive() {
1093 273940931 : return !allocation_observers_paused_ && !allocation_observers_.empty();
1094 : }
1095 :
1096 : std::vector<AllocationObserver*> allocation_observers_;
1097 :
1098 : // The List manages the pages that belong to the given space.
1099 : base::List<MemoryChunk> memory_chunk_list_;
1100 :
1101 : // Tracks off-heap memory used by this space.
1102 : std::atomic<size_t>* external_backing_store_bytes_;
1103 :
1104 : private:
1105 : static const intptr_t kIdOffset = 9 * kSystemPointerSize;
1106 :
1107 : bool allocation_observers_paused_;
1108 : Heap* heap_;
1109 : AllocationSpace id_;
1110 :
1111 : // Keeps track of committed memory in a space.
1112 : size_t committed_;
1113 : size_t max_committed_;
1114 :
1115 : DISALLOW_COPY_AND_ASSIGN(Space);
1116 : };
1117 :
1118 :
1119 : class MemoryChunkValidator {
1120 : // Computed offsets should match the compiler generated ones.
1121 : STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
1122 :
1123 : // Validate our estimates on the header size.
1124 : STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
1125 : STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
1126 : STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
1127 : };
1128 :
1129 :
1130 : // The process-wide singleton that keeps track of code range regions with the
1131 : // intention to reuse free code range regions as a workaround for CFG memory
1132 : // leaks (see crbug.com/870054).
1133 58267 : class CodeRangeAddressHint {
1134 : public:
1135 : // Returns the most recently freed code range start address for the given
1136 : // size. If there is no such entry, then a random address is returned.
1137 : V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
1138 :
1139 : V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
1140 : size_t code_range_size);
1141 :
1142 : private:
1143 : base::Mutex mutex_;
1144 : // A map from code range size to an array of recently freed code range
1145 : // addresses. There should be O(1) different code range sizes.
1146 : // The length of each array is limited by the peak number of code ranges,
1147 : // which should be also O(1).
1148 : std::unordered_map<size_t, std::vector<Address>> recently_freed_;
1149 : };
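// Illustrative call sequence (editor's sketch): when a code range is released,
// NotifyFreedCodeRange(start, size) records |start| for that size; a later
// GetAddressHint(size) request for the same size then hands back |start|
// instead of a fresh random address, allowing the region to be reused.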
1150 :
1151 : class SkipList {
1152 : public:
1153 0 : SkipList() { Clear(); }
1154 :
1155 0 : void Clear() {
1156 12315226 : for (int idx = 0; idx < kSize; idx++) {
1157 6062880 : starts_[idx] = static_cast<Address>(-1);
1158 : }
1159 0 : }
1160 :
1161 532093 : Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
1162 :
1163 0 : void AddObject(Address addr, int size) {
1164 0 : int start_region = RegionNumber(addr);
1165 95219681 : int end_region = RegionNumber(addr + size - kTaggedSize);
1166 293531813 : for (int idx = start_region; idx <= end_region; idx++) {
1167 98424184 : if (starts_[idx] > addr) {
1168 2763680 : starts_[idx] = addr;
1169 : } else {
1170 : // In the first region, there may already be an object closer to the
1171 : // start of the region. Do not change the start in that case. If this
1172 : // is not the first region, you probably added overlapping objects.
1173 : DCHECK_EQ(start_region, idx);
1174 : }
1175 : }
1176 0 : }
1177 :
1178 0 : static inline int RegionNumber(Address addr) {
1179 416362907 : return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
1180 : }
1181 :
1182 95219681 : static void Update(Address addr, int size) {
1183 0 : Page* page = Page::FromAddress(addr);
1184 0 : SkipList* list = page->skip_list();
1185 95219681 : if (list == nullptr) {
1186 89404 : list = new SkipList();
1187 0 : page->set_skip_list(list);
1188 : }
1189 :
1190 0 : list->AddObject(addr, size);
1191 95219681 : }
1192 :
1193 : private:
1194 : static const int kRegionSizeLog2 = 13;
1195 : static const int kRegionSize = 1 << kRegionSizeLog2;
1196 : static const int kSize = Page::kPageSize / kRegionSize;
1197 :
1198 : STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1199 :
1200 : Address starts_[kSize];
1201 : };
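// Illustrative walk-through (editor's sketch): with kRegionSizeLog2 == 13, a
// 512K page is covered by 64 regions of 8K each. SkipList::Update(addr, size)
// records |addr| as the lowest object start for every region the object
// overlaps, so a later StartFor(interior_addr) yields an object boundary from
// which a linear scan towards |interior_addr| can safely begin.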
1202 :
1203 :
1204 : // ----------------------------------------------------------------------------
1205 : // A space acquires chunks of memory from the operating system. The memory
1206 : // allocator allocates and deallocates pages for the paged heap spaces and large
1207 : // pages for large object space.
1208 187602 : class V8_EXPORT_PRIVATE MemoryAllocator {
1209 : public:
1210 : // Unmapper takes care of concurrently unmapping and uncommitting memory
1211 : // chunks.
1212 125068 : class Unmapper {
1213 : public:
1214 : class UnmapFreeMemoryTask;
1215 :
1216 62549 : Unmapper(Heap* heap, MemoryAllocator* allocator)
1217 : : heap_(heap),
1218 : allocator_(allocator),
1219 : pending_unmapping_tasks_semaphore_(0),
1220 : pending_unmapping_tasks_(0),
1221 250190 : active_unmapping_tasks_(0) {
1222 62547 : chunks_[kRegular].reserve(kReservedQueueingSlots);
1223 62549 : chunks_[kPooled].reserve(kReservedQueueingSlots);
1224 62549 : }
1225 :
1226 450603 : void AddMemoryChunkSafe(MemoryChunk* chunk) {
1227 891691 : if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
1228 439417 : AddMemoryChunkSafe<kRegular>(chunk);
1229 : } else {
1230 11186 : AddMemoryChunkSafe<kNonRegular>(chunk);
1231 : }
1232 450602 : }
1233 :
1234 432380 : MemoryChunk* TryGetPooledMemoryChunkSafe() {
1235 : // Procedure:
1236 : // (1) Try to get a chunk that was declared as pooled and already has
1237 : // been uncommitted.
1238 : // (2) Try to steal any memory chunk of kPageSize that would've been
1239 : // unmapped.
1240 432380 : MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1241 432380 : if (chunk == nullptr) {
1242 396037 : chunk = GetMemoryChunkSafe<kRegular>();
1243 396037 : if (chunk != nullptr) {
1244 : // For stolen chunks we need to manually free any allocated memory.
1245 22533 : chunk->ReleaseAllocatedMemory();
1246 : }
1247 : }
1248 432380 : return chunk;
1249 : }
1250 :
1251 : V8_EXPORT_PRIVATE void FreeQueuedChunks();
1252 : void CancelAndWaitForPendingTasks();
1253 : void PrepareForGC();
1254 : void EnsureUnmappingCompleted();
1255 : V8_EXPORT_PRIVATE void TearDown();
1256 : size_t NumberOfCommittedChunks();
1257 : int NumberOfChunks();
1258 : size_t CommittedBufferedMemory();
1259 :
1260 : private:
1261 : static const int kReservedQueueingSlots = 64;
1262 : static const int kMaxUnmapperTasks = 4;
1263 :
1264 : enum ChunkQueueType {
1265 : kRegular, // Pages of kPageSize that do not live in a CodeRange and
1266 : // can thus be used for stealing.
1267 : kNonRegular, // Large chunks and executable chunks.
1268 : kPooled, // Pooled chunks, already uncommitted and ready for reuse.
1269 : kNumberOfChunkQueues,
1270 : };
1271 :
1272 : enum class FreeMode {
1273 : kUncommitPooled,
1274 : kReleasePooled,
1275 : };
1276 :
1277 : template <ChunkQueueType type>
1278 859467 : void AddMemoryChunkSafe(MemoryChunk* chunk) {
1279 859467 : base::MutexGuard guard(&mutex_);
1280 859496 : chunks_[type].push_back(chunk);
1281 859497 : }
1282 :
1283 : template <ChunkQueueType type>
1284 2611562 : MemoryChunk* GetMemoryChunkSafe() {
1285 2611562 : base::MutexGuard guard(&mutex_);
1286 2612003 : if (chunks_[type].empty()) return nullptr;
1287 859498 : MemoryChunk* chunk = chunks_[type].back();
1288 : chunks_[type].pop_back();
1289 859498 : return chunk;
1290 : }
1291 :
1292 : bool MakeRoomForNewTasks();
1293 :
1294 : template <FreeMode mode>
1295 : void PerformFreeMemoryOnQueuedChunks();
1296 :
1297 : void PerformFreeMemoryOnQueuedNonRegularChunks();
1298 :
1299 : Heap* const heap_;
1300 : MemoryAllocator* const allocator_;
1301 : base::Mutex mutex_;
1302 : std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1303 : CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
1304 : base::Semaphore pending_unmapping_tasks_semaphore_;
1305 : intptr_t pending_unmapping_tasks_;
1306 : std::atomic<intptr_t> active_unmapping_tasks_;
1307 :
1308 : friend class MemoryAllocator;
1309 : };
1310 :
1311 : enum AllocationMode {
1312 : kRegular,
1313 : kPooled,
1314 : };
1315 :
1316 : enum FreeMode {
1317 : kFull,
1318 : kAlreadyPooled,
1319 : kPreFreeAndQueue,
1320 : kPooledAndQueue,
1321 : };
1322 :
1323 : static intptr_t GetCommitPageSize();
1324 :
1325 : // Computes the memory area of discardable memory within a given memory area
1326 : // [addr, addr+size) and returns the result as base::AddressRegion. If the
1327 : // memory is not discardable, the returned base::AddressRegion is empty.
1328 : static base::AddressRegion ComputeDiscardMemoryArea(Address addr,
1329 : size_t size);
1330 :
1331 : MemoryAllocator(Isolate* isolate, size_t max_capacity,
1332 : size_t code_range_size);
1333 :
1334 : void TearDown();
1335 :
1336 : // Allocates a Page from the allocator. AllocationMode is used to indicate
1337 : // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1338 : // should be tried first.
1339 : template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1340 : typename SpaceType>
1341 : EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
1342 : Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
1343 :
1344 : LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
1345 : Executability executable);
1346 :
1347 : template <MemoryAllocator::FreeMode mode = kFull>
1348 : EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
1349 : void Free(MemoryChunk* chunk);
1350 :
1351 : // Returns allocated spaces in bytes.
1352 : size_t Size() { return size_; }
1353 :
1354 : // Returns allocated executable spaces in bytes.
1355 : size_t SizeExecutable() { return size_executable_; }
1356 :
1357 : // Returns the maximum available bytes of heaps.
1358 : size_t Available() {
1359 : const size_t size = Size();
1360 325 : return capacity_ < size ? 0 : capacity_ - size;
1361 : }
1362 :
1363 : // Returns an indication of whether a pointer is in a space that has
1364 : // been allocated by this MemoryAllocator.
1365 : V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
1366 3868574 : return address < lowest_ever_allocated_ ||
1367 : address >= highest_ever_allocated_;
1368 : }
1369 :
1370 : // Returns a MemoryChunk in which the memory region from commit_area_size to
1371 : // reserve_area_size of the chunk area is reserved but not committed; it
1372 : // could be committed later by calling MemoryChunk::CommitArea.
1373 : MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
1374 : Executability executable, Space* space);
1375 :
1376 : Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1377 : size_t alignment, Executability executable,
1378 : void* hint, VirtualMemory* controller);
1379 :
1380 : void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
1381 :
1382 : // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
1383 : // internally memory is freed from |start_free| to the end of the reservation.
1384 : // Additional memory beyond the page is not accounted for, though, so
1385 : // |bytes_to_free| is computed by the caller.
1386 : void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
1387 : size_t bytes_to_free, Address new_area_end);
1388 :
1389 : // Checks if an allocated MemoryChunk was intended to be used for executable
1390 : // memory.
1391 0 : bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
1392 0 : return executable_memory_.find(chunk) != executable_memory_.end();
1393 : }
1394 :
1395 : // Commit memory region owned by given reservation object. Returns true if
1396 : // it succeeded and false otherwise.
1397 : bool CommitMemory(VirtualMemory* reservation);
1398 :
1399 : // Uncommit memory region owned by given reservation object. Returns true if
1400 : // it succeeded and false otherwise.
1401 : bool UncommitMemory(VirtualMemory* reservation);
1402 :
1403 : // Zaps a contiguous block of memory [start..(start+size)[ with
1404 : // a given zap value.
1405 : void ZapBlock(Address start, size_t size, uintptr_t zap_value);
1406 :
1407 : V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
1408 : Address start,
1409 : size_t commit_size,
1410 : size_t reserved_size);
1411 :
1412 : // Page allocator instance for allocating non-executable pages.
1413 : // Guaranteed to be a valid pointer.
1414 : v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
1415 :
1416 : // Page allocator instance for allocating executable pages.
1417 : // Guaranteed to be a valid pointer.
1418 : v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
1419 :
1420 : // Returns page allocator suitable for allocating pages with requested
1421 : // executability.
1422 : v8::PageAllocator* page_allocator(Executability executable) {
1423 : return executable == EXECUTABLE ? code_page_allocator_
1424 1114910 : : data_page_allocator_;
1425 : }
1426 :
1427 : // A region of memory that may contain executable code including reserved
1428 : // OS page with read-write access in the beginning.
1429 84840 : const base::AddressRegion& code_range() const {
1430 : // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
1431 : DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
1432 : DCHECK_IMPLIES(!code_range_.is_empty(),
1433 : code_range_.contains(code_page_allocator_instance_->begin(),
1434 : code_page_allocator_instance_->size()));
1435 84840 : return code_range_;
1436 : }
1437 :
1438 1360542 : Unmapper* unmapper() { return &unmapper_; }
1439 :
1440 : // PreFree logically frees the object, i.e., it takes care of the size
1441 : // bookkeeping and calls the allocation callback.
1442 : void PreFreeMemory(MemoryChunk* chunk);
1443 :
1444 : private:
1445 : void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
1446 : size_t requested);
1447 :
1448 : // FreeMemory can be called concurrently when PreFree was executed before.
1449 : void PerformFreeMemory(MemoryChunk* chunk);
1450 :
1451 : // See AllocatePage for public interface. Note that currently we only support
1452 : // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1453 : template <typename SpaceType>
1454 : MemoryChunk* AllocatePagePooled(SpaceType* owner);
1455 :
1456 : // Initializes pages in a chunk. Returns the first page address.
1457 : // This function and GetChunkId() are provided for the mark-compact
1458 : // collector to rebuild page headers in the from space, which is
1459            : // used as a marking stack and whose page headers are destroyed.
1460 : Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1461 : PagedSpace* owner);
1462 :
1463 927908 : void UpdateAllocatedSpaceLimits(Address low, Address high) {
1464            : // The use of atomic primitives alone does not guarantee the desired
1465            : // min/max semantics. The loops below ensure that we only update the
1466            : // values if they did not change in between.
1467 927908 : Address ptr = kNullAddress;
1468 927908 : do {
1469 927908 : ptr = lowest_ever_allocated_;
1470 1051986 : } while ((low < ptr) &&
1471 : !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
1472 927908 : do {
1473 927908 : ptr = highest_ever_allocated_;
1474 1621330 : } while ((high > ptr) &&
1475 : !highest_ever_allocated_.compare_exchange_weak(ptr, high));
1476 927908 : }
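// The two loops above are instances of a general lock-free "monotonic update"
// pattern. A minimal standalone sketch of the same technique (illustration
// only, not V8 API; plain uintptr_t stands in for Address, and UpdateMax is a
// hypothetical helper) that keeps an atomic maximum current across threads:
//
//   #include <atomic>
//   #include <cstdint>
//
//   void UpdateMax(std::atomic<uintptr_t>* max, uintptr_t candidate) {
//     uintptr_t current = max->load(std::memory_order_relaxed);
//     // compare_exchange_weak reloads |current| on failure, so the loop ends
//     // as soon as the stored value is already >= |candidate| or our update
//     // lands without interference.
//     while (candidate > current &&
//            !max->compare_exchange_weak(current, candidate)) {
//     }
//   }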
1477 :
1478 : void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
1479 : DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
1480 : DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
1481 : executable_memory_.insert(chunk);
1482 : }
1483 :
1484 129739 : void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
1485 : DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
1486 : executable_memory_.erase(chunk);
1487 129739 : chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
1488 129738 : }
1489 :
1490 : Isolate* isolate_;
1491 :
1492 : // This object controls virtual space reserved for V8 heap instance.
1493 : // Depending on the configuration it may contain the following:
1494 : // - no reservation (on 32-bit architectures)
1495 : // - code range reservation used by bounded code page allocator (on 64-bit
1496 : // architectures without pointers compression in V8 heap)
1497            : //   architectures without pointer compression in V8 heap)
1498            : // - data + code range reservation (on 64-bit architectures with pointer
1499            : //   compression in V8 heap)
1500 :
1501 : // Page allocator used for allocating data pages. Depending on the
1502 : // configuration it may be a page allocator instance provided by v8::Platform
1503 : // or a BoundedPageAllocator (when pointer compression is enabled).
1504 : v8::PageAllocator* data_page_allocator_;
1505 :
1506 : // Page allocator used for allocating code pages. Depending on the
1507 : // configuration it may be a page allocator instance provided by v8::Platform
1508 : // or a BoundedPageAllocator (when pointer compression is enabled or
1509 : // on those 64-bit architectures where pc-relative 32-bit displacement
1510 : // can be used for call and jump instructions).
1511 : v8::PageAllocator* code_page_allocator_;
1512 :
1513            : // A part of the |heap_reservation_| that may contain executable code,
1514            : // including a reserved page with read-write access at the beginning.
1515 : // See details below.
1516 : base::AddressRegion code_range_;
1517 :
1518 : // This unique pointer owns the instance of bounded code allocator
1519            : // that controls executable page allocation. It does not control the
1520            : // optional page at the beginning of the |code_range_|.
1521 : // So, summarizing all above, the following conditions hold:
1522 : // 1) |heap_reservation_| >= |code_range_|
1523 : // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
1524 : // 3) |heap_reservation_| is AllocatePageSize()-aligned
1525 : // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
1526 : // 5) |code_range_| is CommitPageSize()-aligned
1527 : std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
1528 :
1529 : // Maximum space size in bytes.
1530 : size_t capacity_;
1531 :
1532 : // Allocated space size in bytes.
1533 : std::atomic<size_t> size_;
1534 : // Allocated executable space size in bytes.
1535 : std::atomic<size_t> size_executable_;
1536 :
1537 : // We keep the lowest and highest addresses allocated as a quick way
1538 : // of determining that pointers are outside the heap. The estimate is
1539 : // conservative, i.e. not all addresses in 'allocated' space are allocated
1540 : // to our heap. The range is [lowest, highest[, inclusive on the low end
1541 : // and exclusive on the high end.
1542 : std::atomic<Address> lowest_ever_allocated_;
1543 : std::atomic<Address> highest_ever_allocated_;
1544 :
1545 : VirtualMemory last_chunk_;
1546 : Unmapper unmapper_;
1547 :
1548 : // Data structure to remember allocated executable memory chunks.
1549 : std::unordered_set<MemoryChunk*> executable_memory_;
1550 :
1551 : friend class heap::TestCodePageAllocatorScope;
1552 :
1553 : DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1554 : };
1555 :
1556 : extern template Page*
1557 : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1558 : size_t size, PagedSpace* owner, Executability executable);
1559 : extern template Page*
1560 : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1561 : size_t size, SemiSpace* owner, Executability executable);
1562 : extern template Page*
1563 : MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1564 : size_t size, SemiSpace* owner, Executability executable);
1565 :
1566 : // -----------------------------------------------------------------------------
1567 : // Interface for heap object iterator to be implemented by all object space
1568 : // object iterators.
1569 : //
1570            : // NOTE: The space-specific object iterators also implement their own next()
1571            : // method, which is used to avoid virtual function calls when
1572            : // iterating over a specific space.
1573 :
1574 63014 : class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
1575 : public:
1576 63008 : virtual ~ObjectIterator() = default;
1577 : virtual HeapObject Next() = 0;
1578 : };
1579 :
1580 : template <class PAGE_TYPE>
1581 : class PageIteratorImpl
1582 : : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1583 : public:
1584 73718 : explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
1585 : PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1586 : PAGE_TYPE* operator*() { return p_; }
1587 : bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1588 85641 : return rhs.p_ == p_;
1589 : }
1590 : bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1591 730783 : return rhs.p_ != p_;
1592 : }
1593 : inline PageIteratorImpl<PAGE_TYPE>& operator++();
1594 : inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1595 :
1596 : private:
1597 : PAGE_TYPE* p_;
1598 : };
1599 :
1600 : typedef PageIteratorImpl<Page> PageIterator;
1601 : typedef PageIteratorImpl<LargePage> LargePageIterator;
1602 :
1603 : class PageRange {
1604 : public:
1605 : typedef PageIterator iterator;
1606 31734 : PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
1607 : explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
1608 : inline PageRange(Address start, Address limit);
1609 :
1610 : iterator begin() { return iterator(begin_); }
1611 : iterator end() { return iterator(end_); }
1612 :
1613 : private:
1614 : Page* begin_;
1615 : Page* end_;
1616 : };
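// Example (illustrative sketch only): PageRange and the page-owning spaces
// below all expose begin()/end() returning a PageIterator, so pages can be
// walked with a range-based for loop; operator* yields the Page pointer.
// SumObjectAreaBytes is a hypothetical helper; area_start()/area_end() are the
// chunk accessors used elsewhere in this header.
//
//   size_t SumObjectAreaBytes(PagedSpace* space) {
//     size_t bytes = 0;
//     for (Page* page : *space) {
//       bytes += page->area_end() - page->area_start();
//     }
//     return bytes;
//   }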
1617 :
1618 : // -----------------------------------------------------------------------------
1619 : // Heap object iterator in new/old/map spaces.
1620 : //
1621 : // A HeapObjectIterator iterates objects from the bottom of the given space
1622 : // to its top or from the bottom of the given page to its top.
1623 : //
1624 : // If objects are allocated in the page during iteration the iterator may
1625 : // or may not iterate over those objects. The caller must create a new
1626 : // iterator in order to be sure to visit these new objects.
1627 94736 : class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
1628 : public:
1629 : // Creates a new object iterator in a given space.
1630 : explicit HeapObjectIterator(PagedSpace* space);
1631 : explicit HeapObjectIterator(Page* page);
1632 :
1633 : // Advance to the next object, skipping free spaces and other fillers and
1634 : // skipping the special garbage section of which there is one per space.
1635 : // Returns nullptr when the iteration has ended.
1636 : inline HeapObject Next() override;
1637 :
1638 : private:
1639 : // Fast (inlined) path of next().
1640 : inline HeapObject FromCurrentPage();
1641 :
1642 : // Slow path of next(), goes into the next page. Returns false if the
1643 : // iteration has ended.
1644 : bool AdvanceToNextPage();
1645 :
1646 : Address cur_addr_; // Current iteration point.
1647 : Address cur_end_; // End iteration point.
1648 : PagedSpace* space_;
1649 : PageRange page_range_;
1650 : PageRange::iterator current_page_;
1651 : };
1652 :
1653 :
1654 : // -----------------------------------------------------------------------------
1655 : // A space has a circular list of pages. The next page can be accessed via
1656 : // Page::next_page() call.
1657 :
1658 : // An abstraction of allocation and relocation pointers in a page-structured
1659 : // space.
1660 : class LinearAllocationArea {
1661 : public:
1662 677829 : LinearAllocationArea() : top_(kNullAddress), limit_(kNullAddress) {}
1663 336300 : LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}
1664 :
1665 : void Reset(Address top, Address limit) {
1666 : set_top(top);
1667 : set_limit(limit);
1668 : }
1669 :
1670 : V8_INLINE void set_top(Address top) {
1671 : SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
1672 508912075 : top_ = top;
1673 : }
1674 :
1675 : V8_INLINE Address top() const {
1676 : SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
1677 1047865294 : return top_;
1678 : }
1679 :
1680 393788 : Address* top_address() { return &top_; }
1681 :
1682 3771108 : V8_INLINE void set_limit(Address limit) { limit_ = limit; }
1683 :
1684 576210963 : V8_INLINE Address limit() const { return limit_; }
1685 :
1686 383510 : Address* limit_address() { return &limit_; }
1687 :
1688 : #ifdef DEBUG
1689 : bool VerifyPagedAllocation() {
1690 : return (Page::FromAllocationAreaAddress(top_) ==
1691 : Page::FromAllocationAreaAddress(limit_)) &&
1692 : (top_ <= limit_);
1693 : }
1694 : #endif
1695 :
1696 : private:
1697 : // Current allocation top.
1698 : Address top_;
1699 : // Current allocation limit.
1700 : Address limit_;
1701 : };
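// Example (illustrative sketch only, not the actual V8 fast path): bump-pointer
// allocation on top of this class advances top() towards limit(). The helper
// name and the assumption that |size_in_bytes| is already kTaggedSize-aligned
// are hypothetical.
//
//   Address TryBumpAllocate(LinearAllocationArea* lab, size_t size_in_bytes) {
//     Address top = lab->top();
//     if (lab->limit() - top < size_in_bytes) {
//       return kNullAddress;  // Not enough room; the caller must refill.
//     }
//     lab->set_top(top + size_in_bytes);
//     return top;  // Start of the newly allocated region.
//   }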
1702 :
1703 :
1704 : // An abstraction of the accounting statistics of a page-structured space.
1705 : //
1706 : // The stats are only set by functions that ensure they stay balanced. These
1707 : // functions increase or decrease one of the non-capacity stats in conjunction
1708 : // with capacity, or else they always balance increases and decreases to the
1709 : // non-capacity stats.
1710 : class AllocationStats {
1711 : public:
1712 : AllocationStats() { Clear(); }
1713 :
1714 : // Zero out all the allocation statistics (i.e., no capacity).
1715 : void Clear() {
1716 : capacity_ = 0;
1717 1404666 : max_capacity_ = 0;
1718 : ClearSize();
1719 : }
1720 :
1721 : void ClearSize() {
1722 1626531 : size_ = 0;
1723 : #ifdef DEBUG
1724 : allocated_on_page_.clear();
1725 : #endif
1726 : }
1727 :
1728 : // Accessors for the allocation statistics.
1729 : size_t Capacity() { return capacity_; }
1730 : size_t MaxCapacity() { return max_capacity_; }
1731 : size_t Size() { return size_; }
1732 : #ifdef DEBUG
1733 : size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
1734 : #endif
1735 :
1736 : void IncreaseAllocatedBytes(size_t bytes, Page* page) {
1737 : DCHECK_GE(size_ + bytes, size_);
1738 2296115 : size_ += bytes;
1739 : #ifdef DEBUG
1740 : allocated_on_page_[page] += bytes;
1741 : #endif
1742 : }
1743 :
1744 : void DecreaseAllocatedBytes(size_t bytes, Page* page) {
1745 : DCHECK_GE(size_, bytes);
1746 1602881 : size_ -= bytes;
1747 : #ifdef DEBUG
1748 : DCHECK_GE(allocated_on_page_[page], bytes);
1749 : allocated_on_page_[page] -= bytes;
1750 : #endif
1751 : }
1752 :
1753 : void DecreaseCapacity(size_t bytes) {
1754 : DCHECK_GE(capacity_, bytes);
1755 : DCHECK_GE(capacity_ - bytes, size_);
1756 : capacity_ -= bytes;
1757 : }
1758 :
1759 553187 : void IncreaseCapacity(size_t bytes) {
1760 : DCHECK_GE(capacity_ + bytes, capacity_);
1761 : capacity_ += bytes;
1762 553187 : if (capacity_ > max_capacity_) {
1763 492111 : max_capacity_ = capacity_;
1764 : }
1765 553187 : }
1766 :
1767 : private:
1768 : // |capacity_|: The number of object-area bytes (i.e., not including page
1769 : // bookkeeping structures) currently in the space.
1770 : // During evacuation capacity of the main spaces is accessed from multiple
1771 : // threads to check the old generation hard limit.
1772 : std::atomic<size_t> capacity_;
1773 :
1774 : // |max_capacity_|: The maximum capacity ever observed.
1775 : size_t max_capacity_;
1776 :
1777 : // |size_|: The number of allocated bytes.
1778 : size_t size_;
1779 :
1780 : #ifdef DEBUG
1781 : std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
1782 : #endif
1783 : };
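// Example (hypothetical sketch of the balancing contract described above):
// code that adds a page pairs the capacity increase with a matching
// allocated-bytes increase and undoes both when the page is released.
// |page| and |usable_bytes| are placeholders, not the real call sites.
//
//   void OnPageAdded(AllocationStats* stats, Page* page, size_t usable_bytes) {
//     stats->IncreaseCapacity(usable_bytes);
//     stats->IncreaseAllocatedBytes(usable_bytes, page);
//   }
//
//   void OnPageReleased(AllocationStats* stats, Page* page,
//                       size_t usable_bytes) {
//     // Shrink the size before the capacity so that the DCHECK in
//     // DecreaseCapacity() (capacity_ - bytes >= size_) keeps holding.
//     stats->DecreaseAllocatedBytes(usable_bytes, page);
//     stats->DecreaseCapacity(usable_bytes);
//   }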
1784 :
1785 : // A free list maintaining free blocks of memory. The free list is organized in
1786 : // a way to encourage objects allocated around the same time to be near each
1787 : // other. The normal way to allocate is intended to be by bumping a 'top'
1788 : // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1789 : // find a new space to allocate from. This is done with the free list, which is
1790 : // divided up into rough categories to cut down on waste. Having finer
1791 : // categories would scatter allocation more.
1792 :
1793 : // The free list is organized in categories as follows:
1794            : // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1795            : //   allocation when categories >= small do not have entries anymore.
1796            : // 11-31 words (tiny): The tiny blocks are only used for allocation when
1797            : //   categories >= small do not have entries anymore.
1798            : // 32-255 words (small): Used for allocating free space between 1-31 words in
1799            : //   size.
1800            : // 256-2047 words (medium): Used for allocating free space between 32-255 words
1801            : //   in size.
1802            : // 2048-8191 words (large): Used for allocating free space between 256-2047
1803            : //   words in size.
1804            : // At least 8192 words (huge): This list is for objects of 2048 words or
1805 : // larger. Empty pages are also added to this list.
1806 : class V8_EXPORT_PRIVATE FreeList {
1807 : public:
1808 : // This method returns how much memory can be allocated after freeing
1809 : // maximum_freed memory.
1810 : static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
1811 469552 : if (maximum_freed <= kTiniestListMax) {
1812 : // Since we are not iterating over all list entries, we cannot guarantee
1813 : // that we can find the maximum freed block in that free list.
1814 : return 0;
1815 451413 : } else if (maximum_freed <= kTinyListMax) {
1816 : return kTinyAllocationMax;
1817 431258 : } else if (maximum_freed <= kSmallListMax) {
1818 : return kSmallAllocationMax;
1819 396453 : } else if (maximum_freed <= kMediumListMax) {
1820 : return kMediumAllocationMax;
1821 235943 : } else if (maximum_freed <= kLargeListMax) {
1822 : return kLargeAllocationMax;
1823 : }
1824 : return maximum_freed;
1825 : }
1826 :
1827 : static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1828 21136953 : if (size_in_bytes <= kTiniestListMax) {
1829 : return kTiniest;
1830 11121495 : } else if (size_in_bytes <= kTinyListMax) {
1831 : return kTiny;
1832 4959707 : } else if (size_in_bytes <= kSmallListMax) {
1833 : return kSmall;
1834 1929004 : } else if (size_in_bytes <= kMediumListMax) {
1835 : return kMedium;
1836 1321678 : } else if (size_in_bytes <= kLargeListMax) {
1837 : return kLarge;
1838 : }
1839 : return kHuge;
1840 : }
1841 :
1842 : FreeList();
1843 :
1844            : // Adds a node to the free list. The block of size {size_in_bytes} starting
1845            : // at {start} is placed on the free list. The return value is the number of
1846            : // bytes that were not added to the free list because the freed memory block
1847 : // was too small. Bookkeeping information will be written to the block, i.e.,
1848 : // its contents will be destroyed. The start address should be word aligned,
1849 : // and the size should be a non-zero multiple of the word size.
1850 : size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
1851 :
1852            : // Allocates a free space node from the free list of at least size_in_bytes
1853 : // bytes. Returns the actual node size in node_size which can be bigger than
1854 : // size_in_bytes. This method returns null if the allocation request cannot be
1855 : // handled by the free list.
1856 : V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
1857 : size_t* node_size);
1858 :
1859 : // Clear the free list.
1860 : void Reset();
1861 :
1862 690107 : void ResetStats() {
1863 : wasted_bytes_ = 0;
1864 : ForAllFreeListCategories(
1865 221865 : [](FreeListCategory* category) { category->ResetStats(); });
1866 690107 : }
1867 :
1868 : // Return the number of bytes available on the free list.
1869 : size_t Available() {
1870 : size_t available = 0;
1871 : ForAllFreeListCategories([&available](FreeListCategory* category) {
1872 1363864 : available += category->available();
1873 : });
1874 : return available;
1875 : }
1876 :
1877 : bool IsEmpty() {
1878 : bool empty = true;
1879 : ForAllFreeListCategories([&empty](FreeListCategory* category) {
1880 : if (!category->is_empty()) empty = false;
1881 : });
1882 : return empty;
1883 : }
1884 :
1885 : // Used after booting the VM.
1886 : void RepairLists(Heap* heap);
1887 :
1888 : size_t EvictFreeListItems(Page* page);
1889 : bool ContainsPageFreeListItems(Page* page);
1890 :
1891 : size_t wasted_bytes() { return wasted_bytes_; }
1892 :
1893 : template <typename Callback>
1894 : void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1895 15114444 : FreeListCategory* current = categories_[type];
1896 17575913 : while (current != nullptr) {
1897 : FreeListCategory* next = current->next();
1898 : callback(current);
1899 : current = next;
1900 : }
1901 : }
1902 :
1903 : template <typename Callback>
1904 283343 : void ForAllFreeListCategories(Callback callback) {
1905 32747962 : for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1906 15114444 : ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1907 : }
1908 283343 : }
1909 :
1910 : bool AddCategory(FreeListCategory* category);
1911 : void RemoveCategory(FreeListCategory* category);
1912 : void PrintCategories(FreeListCategoryType type);
1913 :
1914 : // Returns a page containing an entry for a given type, or nullptr otherwise.
1915 : inline Page* GetPageForCategoryType(FreeListCategoryType type);
1916 :
1917 : #ifdef DEBUG
1918 : size_t SumFreeLists();
1919 : bool IsVeryLong();
1920 : #endif
1921 :
1922 : private:
1923 : class FreeListCategoryIterator {
1924 : public:
1925 : FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1926 4219773 : : current_(free_list->categories_[type]) {}
1927 :
1928 : bool HasNext() { return current_ != nullptr; }
1929 :
1930 : FreeListCategory* Next() {
1931 : DCHECK(HasNext());
1932 : FreeListCategory* tmp = current_;
1933 : current_ = current_->next();
1934 : return tmp;
1935 : }
1936 :
1937 : private:
1938 : FreeListCategory* current_;
1939 : };
1940 :
1941 : // The size range of blocks, in bytes.
1942 : static const size_t kMinBlockSize = 3 * kTaggedSize;
1943 :
1944 : // This is a conservative upper bound. The actual maximum block size takes
1945 : // padding and alignment of data and code pages into account.
1946 : static const size_t kMaxBlockSize = Page::kPageSize;
1947 :
1948 : static const size_t kTiniestListMax = 0xa * kTaggedSize;
1949 : static const size_t kTinyListMax = 0x1f * kTaggedSize;
1950 : static const size_t kSmallListMax = 0xff * kTaggedSize;
1951 : static const size_t kMediumListMax = 0x7ff * kTaggedSize;
1952 : static const size_t kLargeListMax = 0x1fff * kTaggedSize;
1953 : static const size_t kTinyAllocationMax = kTiniestListMax;
1954 : static const size_t kSmallAllocationMax = kTinyListMax;
1955 : static const size_t kMediumAllocationMax = kSmallListMax;
1956 : static const size_t kLargeAllocationMax = kMediumListMax;
1957 :
1958 : // Walks all available categories for a given |type| and tries to retrieve
1959 : // a node. Returns nullptr if the category is empty.
1960 : FreeSpace FindNodeIn(FreeListCategoryType type, size_t minimum_size,
1961 : size_t* node_size);
1962 :
1963 : // Tries to retrieve a node from the first category in a given |type|.
1964 : // Returns nullptr if the category is empty or the top entry is smaller
1965 : // than minimum_size.
1966 : FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
1967 : size_t* node_size);
1968 :
1969 : // Searches a given |type| for a node of at least |minimum_size|.
1970 : FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
1971 : size_t minimum_size);
1972 :
1973 : // The tiny categories are not used for fast allocation.
1974 : FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1975 : size_t size_in_bytes) {
1976 1853598 : if (size_in_bytes <= kSmallAllocationMax) {
1977 : return kSmall;
1978 645499 : } else if (size_in_bytes <= kMediumAllocationMax) {
1979 : return kMedium;
1980 526315 : } else if (size_in_bytes <= kLargeAllocationMax) {
1981 : return kLarge;
1982 : }
1983 : return kHuge;
1984 : }
1985 :
1986 : FreeListCategory* top(FreeListCategoryType type) const {
1987 127885 : return categories_[type];
1988 : }
1989 :
1990 : std::atomic<size_t> wasted_bytes_;
1991 : FreeListCategory* categories_[kNumberOfCategories];
1992 :
1993 : friend class FreeListCategory;
1994 : };
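// Example (illustrative sketch only): returning a block to the free list and
// taking a node back out through the interface declared above. RecycleBlock
// and the 64-byte request size are hypothetical; |start| must be word aligned
// and |size_in_bytes| a non-zero multiple of the word size, as documented on
// Free().
//
//   size_t RecycleBlock(FreeList* free_list, Address start,
//                       size_t size_in_bytes) {
//     // Bytes too small to track come back as waste.
//     size_t wasted = free_list->Free(start, size_in_bytes, kLinkCategory);
//
//     // The node handed out may be larger than requested; its actual size is
//     // reported through |node_size|.
//     size_t node_size = 0;
//     FreeSpace node = free_list->Allocate(64, &node_size);
//     USE(node);
//     return size_in_bytes - wasted;
//   }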
1995 :
1996 : // LocalAllocationBuffer represents a linear allocation area that is created
1997 : // from a given {AllocationResult} and can be used to allocate memory without
1998 : // synchronization.
1999 : //
2000 : // The buffer is properly closed upon destruction and reassignment.
2001 : // Example:
2002 : // {
2003 : // AllocationResult result = ...;
2004 : // LocalAllocationBuffer a(heap, result, size);
2005 : // LocalAllocationBuffer b = a;
2006 : // CHECK(!a.IsValid());
2007 : // CHECK(b.IsValid());
2008 : // // {a} is invalid now and cannot be used for further allocations.
2009 : // }
2010 : // // Since {b} went out of scope, the LAB is closed, resulting in creating a
2011 : // // filler object for the remaining area.
2012 : class LocalAllocationBuffer {
2013 : public:
2014 : // Indicates that a buffer cannot be used for allocations anymore. Can result
2015 : // from either reassigning a buffer, or trying to construct it from an
2016 : // invalid {AllocationResult}.
2017 : static LocalAllocationBuffer InvalidBuffer() {
2018 : return LocalAllocationBuffer(
2019 188380 : nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
2020 : }
2021 :
2022 : // Creates a new LAB from a given {AllocationResult}. Results in
2023 : // InvalidBuffer if the result indicates a retry.
2024 : static inline LocalAllocationBuffer FromResult(Heap* heap,
2025 : AllocationResult result,
2026 : intptr_t size);
2027 :
2028 484355 : ~LocalAllocationBuffer() { Close(); }
2029 :
2030 : // Convert to C++11 move-semantics once allowed by the style guide.
2031 : LocalAllocationBuffer(const LocalAllocationBuffer& other) V8_NOEXCEPT;
2032 : LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other)
2033 : V8_NOEXCEPT;
2034 :
2035 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
2036 : int size_in_bytes, AllocationAlignment alignment);
2037 :
2038 10 : inline bool IsValid() { return allocation_info_.top() != kNullAddress; }
2039 :
2040 : // Try to merge LABs, which is only possible when they are adjacent in memory.
2041 : // Returns true if the merge was successful, false otherwise.
2042 : inline bool TryMerge(LocalAllocationBuffer* other);
2043 :
2044 : inline bool TryFreeLast(HeapObject object, int object_size);
2045 :
2046 : // Close a LAB, effectively invalidating it. Returns the unused area.
2047 : LinearAllocationArea Close();
2048 :
2049 : private:
2050 : LocalAllocationBuffer(Heap* heap,
2051 : LinearAllocationArea allocation_info) V8_NOEXCEPT;
2052 :
2053 : Heap* heap_;
2054 : LinearAllocationArea allocation_info_;
2055 : };
2056 :
2057 529705 : class SpaceWithLinearArea : public Space {
2058 : public:
2059 : SpaceWithLinearArea(Heap* heap, AllocationSpace id)
2060 1059563 : : Space(heap, id), top_on_previous_step_(0) {
2061 : allocation_info_.Reset(kNullAddress, kNullAddress);
2062 : }
2063 :
2064 : virtual bool SupportsInlineAllocation() = 0;
2065 :
2066 : // Returns the allocation pointer in this space.
2067 0 : Address top() { return allocation_info_.top(); }
2068 : Address limit() { return allocation_info_.limit(); }
2069 :
2070 : // The allocation top address.
2071 : Address* allocation_top_address() { return allocation_info_.top_address(); }
2072 :
2073 : // The allocation limit address.
2074 : Address* allocation_limit_address() {
2075 : return allocation_info_.limit_address();
2076 : }
2077 :
2078 : V8_EXPORT_PRIVATE void AddAllocationObserver(
2079 : AllocationObserver* observer) override;
2080 : V8_EXPORT_PRIVATE void RemoveAllocationObserver(
2081 : AllocationObserver* observer) override;
2082 : V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
2083 : V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
2084 :
2085 : // When allocation observers are active we may use a lower limit to allow the
2086 : // observers to 'interrupt' earlier than the natural limit. Given a linear
2087 : // area bounded by [start, end), this function computes the limit to use to
2088 : // allow proper observation based on existing observers. min_size specifies
2089 : // the minimum size that the limited area should have.
2090 : Address ComputeLimit(Address start, Address end, size_t min_size);
2091 : V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
2092 : size_t min_size) = 0;
2093 :
2094 : protected:
2095 : // If we are doing inline allocation in steps, this method performs the 'step'
2096 : // operation. top is the memory address of the bump pointer at the last
2097            : // inline allocation (i.e. it determines the number of bytes actually
2098 : // allocated since the last step.) top_for_next_step is the address of the
2099 : // bump pointer where the next byte is going to be allocated from. top and
2100 : // top_for_next_step may be different when we cross a page boundary or reset
2101 : // the space.
2102 : // TODO(ofrobots): clarify the precise difference between this and
2103 : // Space::AllocationStep.
2104 : void InlineAllocationStep(Address top, Address top_for_next_step,
2105 : Address soon_object, size_t size);
2106 : V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
2107 :
2108 : // TODO(ofrobots): make these private after refactoring is complete.
2109 : LinearAllocationArea allocation_info_;
2110 : Address top_on_previous_step_;
2111 : };
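// Conceptual sketch only (not the actual ComputeLimit() implementation): with
// an observer that wants to be stepped roughly every |step_bytes|, the limit
// exposed to the bump-pointer allocator is pulled below the real end of the
// linear area, but never below start + min_size, so the slow path (and with
// it InlineAllocationStep()) is reached periodically. |step_bytes| is a
// hypothetical input.
//
//   #include <algorithm>
//
//   Address SketchComputeLimit(Address start, Address end, size_t min_size,
//                              size_t step_bytes) {
//     return std::min(end, std::max(start + min_size, start + step_bytes));
//   }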
2112 :
2113 : class V8_EXPORT_PRIVATE PagedSpace
2114 : : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
2115 : public:
2116 : typedef PageIterator iterator;
2117 :
2118 : static const size_t kCompactionMemoryWanted = 500 * KB;
2119 :
2120 : // Creates a space with an id.
2121 : PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
2122 :
2123 936363 : ~PagedSpace() override { TearDown(); }
2124 :
2125 : // Checks whether an object/address is in this space.
2126 : inline bool Contains(Address a);
2127 : inline bool Contains(Object o);
2128 : bool ContainsSlow(Address addr);
2129 :
2130 : // Does the space need executable memory?
2131 : Executability executable() { return executable_; }
2132 :
2133 : // Prepares for a mark-compact GC.
2134 : void PrepareForMarkCompact();
2135 :
2136 : // Current capacity without growing (Size() + Available()).
2137 : size_t Capacity() { return accounting_stats_.Capacity(); }
2138 :
2139 : // Approximate amount of physical memory committed for this space.
2140 : size_t CommittedPhysicalMemory() override;
2141 :
2142 : void ResetFreeListStatistics();
2143 :
2144 : // Sets the capacity, the available space and the wasted space to zero.
2145 : // The stats are rebuilt during sweeping by adding each page to the
2146 : // capacity and the size when it is encountered. As free spaces are
2147 : // discovered during the sweeping they are subtracted from the size and added
2148 : // to the available and wasted totals.
2149 221865 : void ClearStats() {
2150 : accounting_stats_.ClearSize();
2151 221865 : free_list_.ResetStats();
2152 221865 : ResetFreeListStatistics();
2153 221865 : }
2154 :
2155 : // Available bytes without growing. These are the bytes on the free list.
2156 : // The bytes in the linear allocation area are not included in this total
2157 : // because updating the stats would slow down allocation. New pages are
2158 : // immediately added to the free list so they show up here.
2159 1711034 : size_t Available() override { return free_list_.Available(); }
2160 :
2161 : // Allocated bytes in this space. Garbage bytes that were not found due to
2162 : // concurrent sweeping are counted as being allocated! The bytes in the
2163 : // current linear allocation area (between top and limit) are also counted
2164 : // here.
2165 10431150 : size_t Size() override { return accounting_stats_.Size(); }
2166 :
2167 : // As size, but the bytes in lazily swept pages are estimated and the bytes
2168 : // in the current linear allocation area are not included.
2169 : size_t SizeOfObjects() override;
2170 :
2171 : // Wasted bytes in this space. These are just the bytes that were thrown away
2172 : // due to being too small to use for allocation.
2173 1138896 : virtual size_t Waste() { return free_list_.wasted_bytes(); }
2174 :
2175 : enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2176 :
2177 : // Allocate the requested number of bytes in the space if possible, return a
2178 : // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2179 : // to be manually updated later.
2180 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
2181 : int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
2182 :
2183 : // Allocate the requested number of bytes in the space double aligned if
2184 : // possible, return a failure object if not.
2185 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
2186 : int size_in_bytes, AllocationAlignment alignment);
2187 :
2188 : // Allocate the requested number of bytes in the space and consider allocation
2189 : // alignment if needed.
2190 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
2191 : int size_in_bytes, AllocationAlignment alignment);
2192 :
2193 21470441 : size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
2194 21470441 : if (size_in_bytes == 0) return 0;
2195 : heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
2196 21123998 : ClearRecordedSlots::kNo);
2197 21111755 : if (mode == SpaceAccountingMode::kSpaceAccounted) {
2198 1461823 : return AccountedFree(start, size_in_bytes);
2199 : } else {
2200 19635282 : return UnaccountedFree(start, size_in_bytes);
2201 : }
2202 : }
2203 :
2204            : // Give a block of memory to the space's free list. It might be added to
2205            : // the free list or accounted as waste.
2206            : // AccountedFree() additionally updates the allocated-bytes accounting stats,
2207            : // while UnaccountedFree() only touches the free list itself.
2208 : size_t AccountedFree(Address start, size_t size_in_bytes) {
2209 1461829 : size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
2210 : Page* page = Page::FromAddress(start);
2211 : accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
2212 : DCHECK_GE(size_in_bytes, wasted);
2213 1461823 : return size_in_bytes - wasted;
2214 : }
2215 :
2216 : size_t UnaccountedFree(Address start, size_t size_in_bytes) {
2217 19649926 : size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2218 : DCHECK_GE(size_in_bytes, wasted);
2219 19635282 : return size_in_bytes - wasted;
2220 : }
2221 :
2222 : inline bool TryFreeLast(HeapObject object, int object_size);
2223 :
2224 : void ResetFreeList();
2225 :
2226 : // Empty space linear allocation area, returning unused area to free list.
2227 : void FreeLinearAllocationArea();
2228 :
2229 : void MarkLinearAllocationAreaBlack();
2230 : void UnmarkLinearAllocationArea();
2231 :
2232 : void DecreaseAllocatedBytes(size_t bytes, Page* page) {
2233 : accounting_stats_.DecreaseAllocatedBytes(bytes, page);
2234 : }
2235 : void IncreaseAllocatedBytes(size_t bytes, Page* page) {
2236 : accounting_stats_.IncreaseAllocatedBytes(bytes, page);
2237 : }
2238 : void DecreaseCapacity(size_t bytes) {
2239 : accounting_stats_.DecreaseCapacity(bytes);
2240 : }
2241 : void IncreaseCapacity(size_t bytes) {
2242 553188 : accounting_stats_.IncreaseCapacity(bytes);
2243 : }
2244 :
2245 : void RefineAllocatedBytesAfterSweeping(Page* page);
2246 :
2247 : Page* InitializePage(MemoryChunk* chunk, Executability executable);
2248 :
2249 : void ReleasePage(Page* page);
2250 :
2251 : // Adds the page to this space and returns the number of bytes added to the
2252 : // free list of the space.
2253 : size_t AddPage(Page* page);
2254 : void RemovePage(Page* page);
2255 : // Remove a page if it has at least |size_in_bytes| bytes available that can
2256 : // be used for allocation.
2257 : Page* RemovePageSafe(int size_in_bytes);
2258 :
2259 : void SetReadable();
2260 : void SetReadAndExecutable();
2261 : void SetReadAndWritable();
2262 :
2263 285798 : void SetDefaultCodePermissions() {
2264 285798 : if (FLAG_jitless) {
2265 14985 : SetReadable();
2266 : } else {
2267 270813 : SetReadAndExecutable();
2268 : }
2269 285799 : }
2270 :
2271 : #ifdef VERIFY_HEAP
2272 : // Verify integrity of this space.
2273 : virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
2274 :
2275 : void VerifyLiveBytes();
2276 :
2277 : // Overridden by subclasses to verify space-specific object
2278 : // properties (e.g., only maps or free-list nodes are in map space).
2279 : virtual void VerifyObject(HeapObject obj) {}
2280 : #endif
2281 :
2282 : #ifdef DEBUG
2283 : void VerifyCountersAfterSweeping();
2284 : void VerifyCountersBeforeConcurrentSweeping();
2285 : // Print meta info and objects in this space.
2286 : void Print() override;
2287 :
2288 : // Report code object related statistics
2289 : static void ReportCodeStatistics(Isolate* isolate);
2290 : static void ResetCodeStatistics(Isolate* isolate);
2291 : #endif
2292 :
2293 : bool CanExpand(size_t size);
2294 :
2295 : // Returns the number of total pages in this space.
2296 : int CountTotalPages();
2297 :
2298 : // Return size of allocatable area on a page in this space.
2299 2507592 : inline int AreaSize() { return static_cast<int>(area_size_); }
2300 :
2301 149745893 : virtual bool is_local() { return false; }
2302 :
2303 : // Merges {other} into the current space. Note that this modifies {other},
2304 : // e.g., removes its bump pointer area and resets statistics.
2305 : void MergeCompactionSpace(CompactionSpace* other);
2306 :
2307 : // Refills the free list from the corresponding free list filled by the
2308 : // sweeper.
2309 : virtual void RefillFreeList();
2310 :
2311 2557361 : FreeList* free_list() { return &free_list_; }
2312 :
2313 1164128 : base::Mutex* mutex() { return &space_mutex_; }
2314 :
2315 : inline void UnlinkFreeListCategories(Page* page);
2316 : inline size_t RelinkFreeListCategories(Page* page);
2317 :
2318 : Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
2319 :
2320 : iterator begin() { return iterator(first_page()); }
2321 : iterator end() { return iterator(nullptr); }
2322 :
2323 : // Shrink immortal immovable pages of the space to be exactly the size needed
2324 : // using the high water mark.
2325 : void ShrinkImmortalImmovablePages();
2326 :
2327 : size_t ShrinkPageToHighWaterMark(Page* page);
2328 :
2329 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2330 :
2331 : void SetLinearAllocationArea(Address top, Address limit);
2332 :
2333 : private:
2334 : // Set space linear allocation area.
2335 : void SetTopAndLimit(Address top, Address limit) {
2336 : DCHECK(top == limit ||
2337 : Page::FromAddress(top) == Page::FromAddress(limit - 1));
2338 2452344 : MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2339 : allocation_info_.Reset(top, limit);
2340 : }
2341 : void DecreaseLimit(Address new_limit);
2342 : void UpdateInlineAllocationLimit(size_t min_size) override;
2343 23221167 : bool SupportsInlineAllocation() override {
2344 23221167 : return identity() == OLD_SPACE && !is_local();
2345 : }
2346 :
2347 : protected:
2348 : // PagedSpaces that should be included in snapshots have different, i.e.,
2349 : // smaller, initial pages.
2350 0 : virtual bool snapshotable() { return true; }
2351 :
2352 : bool HasPages() { return first_page() != nullptr; }
2353 :
2354 : // Cleans up the space, frees all pages in this space except those belonging
2355 : // to the initial chunk, uncommits addresses in the initial chunk.
2356 : void TearDown();
2357 :
2358 : // Expands the space by allocating a fixed number of pages. Returns false if
2359            : // it cannot allocate the requested number of pages from the OS, or if the
2360            : // hard heap size limit has been hit.
2361 : bool Expand();
2362 :
2363 : // Sets up a linear allocation area that fits the given number of bytes.
2364 : // Returns false if there is not enough space and the caller has to retry
2365 : // after collecting garbage.
2366 : inline bool EnsureLinearAllocationArea(int size_in_bytes);
2367 : // Allocates an object from the linear allocation area. Assumes that the
2368            : // linear allocation area is large enough to fit the object.
2369 : inline HeapObject AllocateLinearly(int size_in_bytes);
2370 : // Tries to allocate an aligned object from the linear allocation area.
2371 : // Returns nullptr if the linear allocation area does not fit the object.
2372 : // Otherwise, returns the object pointer and writes the allocation size
2373 : // (object size + alignment filler size) to the size_in_bytes.
2374 : inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
2375 : AllocationAlignment alignment);
2376 :
2377 : V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
2378 : size_t size_in_bytes);
2379 :
2380 : // If sweeping is still in progress try to sweep unswept pages. If that is
2381 : // not successful, wait for the sweeper threads and retry free-list
2382 : // allocation. Returns false if there is not enough space and the caller
2383 : // has to retry after collecting garbage.
2384 : V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
2385 :
2386 : // Slow path of AllocateRaw. This function is space-dependent. Returns false
2387 : // if there is not enough space and the caller has to retry after
2388 : // collecting garbage.
2389 : V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
2390 : int size_in_bytes);
2391 :
2392 : // Implementation of SlowAllocateRaw. Returns false if there is not enough
2393 : // space and the caller has to retry after collecting garbage.
2394 : V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
2395 : int size_in_bytes);
2396 :
2397 : Executability executable_;
2398 :
2399 : size_t area_size_;
2400 :
2401 : // Accounting information for this space.
2402 : AllocationStats accounting_stats_;
2403 :
2404 : // The space's free list.
2405 : FreeList free_list_;
2406 :
2407 : // Mutex guarding any concurrent access to the space.
2408 : base::Mutex space_mutex_;
2409 :
2410 : friend class IncrementalMarking;
2411 : friend class MarkCompactCollector;
2412 :
2413 : // Used in cctest.
2414 : friend class heap::HeapTester;
2415 : };
2416 :
2417 : enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2418 :
2419 : // -----------------------------------------------------------------------------
2420 : // SemiSpace in young generation
2421 : //
2422 : // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2423 : // The mark-compact collector uses the memory of the first page in the from
2424 : // space as a marking stack when tracing live objects.
2425 61523 : class SemiSpace : public Space {
2426 : public:
2427 : typedef PageIterator iterator;
2428 :
2429 : static void Swap(SemiSpace* from, SemiSpace* to);
2430 :
2431 : SemiSpace(Heap* heap, SemiSpaceId semispace)
2432 : : Space(heap, NEW_SPACE),
2433 : current_capacity_(0),
2434 : maximum_capacity_(0),
2435 : minimum_capacity_(0),
2436 : age_mark_(kNullAddress),
2437 : committed_(false),
2438 : id_(semispace),
2439 : current_page_(nullptr),
2440 61539 : pages_used_(0) {}
2441 :
2442 : inline bool Contains(HeapObject o);
2443 : inline bool Contains(Object o);
2444 : inline bool ContainsSlow(Address a);
2445 :
2446 : void SetUp(size_t initial_capacity, size_t maximum_capacity);
2447 : void TearDown();
2448 :
2449 : bool Commit();
2450 : bool Uncommit();
2451 : bool is_committed() { return committed_; }
2452 :
2453 : // Grow the semispace to the new capacity. The new capacity requested must
2454 : // be larger than the current capacity and less than the maximum capacity.
2455 : bool GrowTo(size_t new_capacity);
2456 :
2457 : // Shrinks the semispace to the new capacity. The new capacity requested
2458 : // must be more than the amount of used memory in the semispace and less
2459 : // than the current capacity.
2460 : bool ShrinkTo(size_t new_capacity);
2461 :
2462 : bool EnsureCurrentCapacity();
2463 :
2464 : Address space_end() { return memory_chunk_list_.back()->area_end(); }
2465 :
2466 : // Returns the start address of the first page of the space.
2467 : Address space_start() {
2468 : DCHECK_NE(memory_chunk_list_.front(), nullptr);
2469 : return memory_chunk_list_.front()->area_start();
2470 : }
2471 :
2472 : Page* current_page() { return current_page_; }
2473 : int pages_used() { return pages_used_; }
2474 :
2475 : // Returns the start address of the current page of the space.
2476 : Address page_low() { return current_page_->area_start(); }
2477 :
2478 : // Returns one past the end address of the current page of the space.
2479 : Address page_high() { return current_page_->area_end(); }
2480 :
2481 : bool AdvancePage() {
2482 113524 : Page* next_page = current_page_->next_page();
2483 : // We cannot expand if we reached the maximum number of pages already. Note
2484            : // that for this check we already need to account for the next page, as we
2485 : // could potentially fill the whole page after advancing.
2486 227048 : const bool reached_max_pages = (pages_used_ + 1) == max_pages();
2487 113524 : if (next_page == nullptr || reached_max_pages) {
2488 : return false;
2489 : }
2490 98011 : current_page_ = next_page;
2491 98011 : pages_used_++;
2492 : return true;
2493 : }
2494 :
2495 : // Resets the space to using the first page.
2496 : void Reset();
2497 :
2498 : void RemovePage(Page* page);
2499 : void PrependPage(Page* page);
2500 :
2501 : Page* InitializePage(MemoryChunk* chunk, Executability executable);
2502 :
2503 : // Age mark accessors.
2504 : Address age_mark() { return age_mark_; }
2505 : void set_age_mark(Address mark);
2506 :
2507 : // Returns the current capacity of the semispace.
2508 : size_t current_capacity() { return current_capacity_; }
2509 :
2510 : // Returns the maximum capacity of the semispace.
2511 : size_t maximum_capacity() { return maximum_capacity_; }
2512 :
2513 : // Returns the initial capacity of the semispace.
2514 : size_t minimum_capacity() { return minimum_capacity_; }
2515 :
2516 : SemiSpaceId id() { return id_; }
2517 :
2518 : // Approximate amount of physical memory committed for this space.
2519 : size_t CommittedPhysicalMemory() override;
2520 :
2521 : // If we don't have these here then SemiSpace will be abstract. However
2522 : // they should never be called:
2523 :
2524 0 : size_t Size() override {
2525 0 : UNREACHABLE();
2526 : }
2527 :
2528 0 : size_t SizeOfObjects() override { return Size(); }
2529 :
2530 0 : size_t Available() override {
2531 0 : UNREACHABLE();
2532 : }
2533 :
2534 : Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
2535 : Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
2536 :
2537 : iterator begin() { return iterator(first_page()); }
2538 : iterator end() { return iterator(nullptr); }
2539 :
2540 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2541 :
2542 : #ifdef DEBUG
2543 : void Print() override;
2544            : // Validate a range of addresses in a SemiSpace.
2545 : // The "from" address must be on a page prior to the "to" address,
2546 : // in the linked page order, or it must be earlier on the same page.
2547 : static void AssertValidRange(Address from, Address to);
2548 : #else
2549 : // Do nothing.
2550 : inline static void AssertValidRange(Address from, Address to) {}
2551 : #endif
2552 :
2553 : #ifdef VERIFY_HEAP
2554 : virtual void Verify();
2555 : #endif
2556 :
2557 : private:
2558 : void RewindPages(int num_pages);
2559 :
2560 : inline int max_pages() {
2561 113524 : return static_cast<int>(current_capacity_ / Page::kPageSize);
2562 : }
2563 :
2564 : // Copies the flags into the masked positions on all pages in the space.
2565 : void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2566 :
2567 : // The currently committed space capacity.
2568 : size_t current_capacity_;
2569 :
2570 : // The maximum capacity that can be used by this space. A space cannot grow
2571 : // beyond that size.
2572 : size_t maximum_capacity_;
2573 :
2574 : // The minimum capacity for the space. A space cannot shrink below this size.
2575 : size_t minimum_capacity_;
2576 :
2577 : // Used to govern object promotion during mark-compact collection.
2578 : Address age_mark_;
2579 :
2580 : bool committed_;
2581 : SemiSpaceId id_;
2582 :
2583 : Page* current_page_;
2584 :
2585 : int pages_used_;
2586 :
2587 : friend class NewSpace;
2588 : friend class SemiSpaceIterator;
2589 : };
2590 :
2591 :
2592 : // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2593 : // semispace of the heap's new space. It iterates over the objects in the
2594 : // semispace from a given start address (defaulting to the bottom of the
2595 : // semispace) to the top of the semispace. New objects allocated after the
2596 : // iterator is created are not iterated.
2597 23460 : class SemiSpaceIterator : public ObjectIterator {
2598 : public:
2599 : // Create an iterator over the allocated objects in the given to-space.
2600 : explicit SemiSpaceIterator(NewSpace* space);
2601 :
2602 : inline HeapObject Next() override;
2603 :
2604 : private:
2605 : void Initialize(Address start, Address end);
2606 :
2607 : // The current iteration point.
2608 : Address current_;
2609 : // The end of iteration.
2610 : Address limit_;
2611 : };
2612 :
2613 : // -----------------------------------------------------------------------------
2614 : // The young generation space.
2615 : //
2616 : // The new space consists of a contiguous pair of semispaces. It simply
2617 : // forwards most functions to the appropriate semispace.
2618 :
2619 : class NewSpace : public SpaceWithLinearArea {
2620 : public:
2621 : typedef PageIterator iterator;
2622 :
2623 : NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
2624 : size_t initial_semispace_capacity, size_t max_semispace_capacity);
2625 :
2626 307608 : ~NewSpace() override { TearDown(); }
2627 :
2628 : inline bool ContainsSlow(Address a);
2629 : inline bool Contains(Object o);
2630 : inline bool Contains(HeapObject o);
2631 :
2632 : // Tears down the space. Heap memory was not allocated by the space, so it
2633 : // is not deallocated here.
2634 : void TearDown();
2635 :
2636 : // Flip the pair of spaces.
2637 : void Flip();
2638 :
2639 : // Grow the capacity of the semispaces. Assumes that they are not at
2640 : // their maximum capacity.
2641 : void Grow();
2642 :
2643 : // Shrink the capacity of the semispaces.
2644 : void Shrink();
2645 :
2646 : // Return the allocated bytes in the active semispace.
2647 1040636 : size_t Size() override {
2648 : DCHECK_GE(top(), to_space_.page_low());
2649 2054954 : return to_space_.pages_used() *
2650 1014318 : MemoryChunkLayout::AllocatableMemoryInDataPage() +
2651 1040636 : static_cast<size_t>(top() - to_space_.page_low());
2652 : }
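  // For example (illustrative numbers only): with two fully used to-space
  // pages behind the current one and top() sitting 4 KB past page_low(),
  // Size() reports 2 * MemoryChunkLayout::AllocatableMemoryInDataPage() + 4 KB.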
2653 :
2654 726338 : size_t SizeOfObjects() override { return Size(); }
2655 :
2656 : // Return the allocatable capacity of a semispace.
2657 : size_t Capacity() {
2658 : SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2659 587582 : return (to_space_.current_capacity() / Page::kPageSize) *
2660 587582 : MemoryChunkLayout::AllocatableMemoryInDataPage();
2661 : }
2662 :
2663 : // Return the current size of a semispace, allocatable and non-allocatable
2664 : // memory.
2665 : size_t TotalCapacity() {
2666 : DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2667 : return to_space_.current_capacity();
2668 : }
2669 :
2670 : // Committed memory for NewSpace is the committed memory of both semi-spaces
2671 : // combined.
2672 641536 : size_t CommittedMemory() override {
2673 641536 : return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2674 : }
2675 :
2676 0 : size_t MaximumCommittedMemory() override {
2677 : return from_space_.MaximumCommittedMemory() +
2678 0 : to_space_.MaximumCommittedMemory();
2679 : }
2680 :
2681 : // Approximate amount of physical memory committed for this space.
2682 : size_t CommittedPhysicalMemory() override;
2683 :
2684 : // Return the available bytes without growing.
2685 95263 : size_t Available() override {
2686 : DCHECK_GE(Capacity(), Size());
2687 95263 : return Capacity() - Size();
2688 : }
2689 :
2690 30 : size_t ExternalBackingStoreBytes(
2691 : ExternalBackingStoreType type) const override {
2692 : DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
2693 30 : return to_space_.ExternalBackingStoreBytes(type);
2694 : }
2695 :
2696 190448 : size_t AllocatedSinceLastGC() {
2697 : const Address age_mark = to_space_.age_mark();
2698 : DCHECK_NE(age_mark, kNullAddress);
2699 : DCHECK_NE(top(), kNullAddress);
2700 : Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
2701 : Page* const last_page = Page::FromAllocationAreaAddress(top());
2702 : Page* current_page = age_mark_page;
2703 : size_t allocated = 0;
2704 190448 : if (current_page != last_page) {
2705 : DCHECK_EQ(current_page, age_mark_page);
2706 : DCHECK_GE(age_mark_page->area_end(), age_mark);
2707 44212 : allocated += age_mark_page->area_end() - age_mark;
2708 : current_page = current_page->next_page();
2709 : } else {
2710 : DCHECK_GE(top(), age_mark);
2711 146236 : return top() - age_mark;
2712 : }
2713 279264 : while (current_page != last_page) {
2714 : DCHECK_NE(current_page, age_mark_page);
2715 117526 : allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
2716 : current_page = current_page->next_page();
2717 : }
2718 : DCHECK_GE(top(), current_page->area_start());
2719 44212 : allocated += top() - current_page->area_start();
2720 : DCHECK_LE(allocated, Size());
2721 44212 : return allocated;
2722 : }
2723 :
2724 : void MovePageFromSpaceToSpace(Page* page) {
2725 : DCHECK(page->IsFromPage());
2726 1544 : from_space_.RemovePage(page);
2727 1544 : to_space_.PrependPage(page);
2728 : }
2729 :
2730 : bool Rebalance();
2731 :
2732 : // Return the maximum capacity of a semispace.
2733 : size_t MaximumCapacity() {
2734 : DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2735 : return to_space_.maximum_capacity();
2736 : }
2737 :
2738 : bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2739 :
2740 : // Returns the initial capacity of a semispace.
2741 : size_t InitialTotalCapacity() {
2742 : DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2743 : return to_space_.minimum_capacity();
2744 : }
2745 :
2746 : void ResetOriginalTop() {
2747 : DCHECK_GE(top(), original_top_);
2748 : DCHECK_LE(top(), original_limit_);
2749 : original_top_.store(top(), std::memory_order_release);
2750 : }
2751 :
2752 : Address original_top_acquire() {
2753 : return original_top_.load(std::memory_order_acquire);
2754 : }
2755 : Address original_limit_relaxed() {
2756 : return original_limit_.load(std::memory_order_relaxed);
2757 : }
2758 :
2759            : // Return the first allocatable address in the active
2760 : // semispace. This may be the address where the first object resides.
2761 : Address first_allocatable_address() { return to_space_.space_start(); }
2762 :
2763 : // Get the age mark of the inactive semispace.
2764 : Address age_mark() { return from_space_.age_mark(); }
2765 : // Set the age mark in the active semispace.
2766 94928 : void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2767 :
2768 : V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2769 : AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
2770 :
2771 : V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2772 : AllocateRawUnaligned(int size_in_bytes);
2773 :
2774 : V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2775 : AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
2776 :
2777 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
2778 : int size_in_bytes, AllocationAlignment alignment);
2779 :
2780 : // Reset the allocation pointer to the beginning of the active semispace.
2781 : void ResetLinearAllocationArea();
2782 :
2783 : // When inline allocation stepping is active, either because of incremental
2784 : // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2785 : // inline allocation every once in a while. This is done by setting
2786            : // allocation_info_.limit to be lower than the actual limit and increasing
2787 : // it in steps to guarantee that the observers are notified periodically.
2788 : void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
2789 :
2790 : inline bool ToSpaceContainsSlow(Address a);
2791 : inline bool ToSpaceContains(Object o);
2792 : inline bool FromSpaceContains(Object o);
2793 :
2794            : // Try to switch the active semispace to a new, empty page.
2795 : // Returns false if this isn't possible or reasonable (i.e., there
2796 : // are no pages, or the current page is already empty), or true
2797 : // if successful.
2798 : bool AddFreshPage();
2799 : bool AddFreshPageSynchronized();
2800 :
2801 : #ifdef VERIFY_HEAP
2802 : // Verify the active semispace.
2803 : virtual void Verify(Isolate* isolate);
2804 : #endif
2805 :
2806 : #ifdef DEBUG
2807 : // Print the active semispace.
2808 : void Print() override { to_space_.Print(); }
2809 : #endif
2810 :
2811 : // Return whether the operation succeeded.
2812 : bool CommitFromSpaceIfNeeded() {
2813 94928 : if (from_space_.is_committed()) return true;
2814 37965 : return from_space_.Commit();
2815 : }
2816 :
2817 : bool UncommitFromSpace() {
2818 26288 : if (!from_space_.is_committed()) return true;
2819 25034 : return from_space_.Uncommit();
2820 : }
2821 :
2822 : bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2823 :
2824 : SemiSpace* active_space() { return &to_space_; }
2825 :
2826 : Page* first_page() { return to_space_.first_page(); }
2827 : Page* last_page() { return to_space_.last_page(); }
2828 :
2829 : iterator begin() { return to_space_.begin(); }
2830 : iterator end() { return to_space_.end(); }
2831 :
2832 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2833 :
2834 937 : SemiSpace& from_space() { return from_space_; }
2835 : SemiSpace& to_space() { return to_space_; }
2836 :
2837 : private:
2838 : // Update linear allocation area to match the current to-space page.
2839 : void UpdateLinearAllocationArea();
2840 :
2841 : base::Mutex mutex_;
2842 :
2843 : // The top and the limit at the time of setting the linear allocation area.
2844 : // These values can be accessed by background tasks.
2845 : std::atomic<Address> original_top_;
2846 : std::atomic<Address> original_limit_;
2847 :
2848 : // The semispaces.
2849 : SemiSpace to_space_;
2850 : SemiSpace from_space_;
2851 : VirtualMemory reservation_;
2852 :
2853 : bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
2854 507651 : bool SupportsInlineAllocation() override { return true; }
2855 :
2856 : friend class SemiSpaceIterator;
2857 : };
2858 :
2859 : class PauseAllocationObserversScope {
2860 : public:
2861 : explicit PauseAllocationObserversScope(Heap* heap);
2862 : ~PauseAllocationObserversScope();
2863 :
2864 : private:
2865 : Heap* heap_;
2866 : DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
2867 : };
2868 :
2869 : // -----------------------------------------------------------------------------
2870 : // Compaction space that is used temporarily during compaction.
2871 :
2872 111050 : class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
2873 : public:
2874 : CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2875 111049 : : PagedSpace(heap, id, executable) {}
2876 :
2877 67665697 : bool is_local() override { return true; }
2878 :
2879 : protected:
2880 : // The space is temporary and not included in any snapshots.
2881 0 : bool snapshotable() override { return false; }
2882 :
2883 : V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
2884 : int size_in_bytes) override;
2885 :
2886 : V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
2887 : int size_in_bytes) override;
2888 : };
2889 :
2890 :
2891 : // A collection of |CompactionSpace|s used by a single compaction task.
2892 111048 : class CompactionSpaceCollection : public Malloced {
2893 : public:
2894 111048 : explicit CompactionSpaceCollection(Heap* heap)
2895 : : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
2896 111048 : code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
2897 :
2898 : CompactionSpace* Get(AllocationSpace space) {
2899 : switch (space) {
2900 : case OLD_SPACE:
2901 111048 : return &old_space_;
2902 : case CODE_SPACE:
2903 111048 : return &code_space_;
2904 : default:
2905 : UNREACHABLE();
2906 : }
2907 : UNREACHABLE();
2908 : }
2909 :
2910 : private:
2911 : CompactionSpace old_space_;
2912 : CompactionSpace code_space_;
2913 : };
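//
// Usage sketch for CompactionSpaceCollection (hedged; |heap| and |object_size|
// are hypothetical, error handling elided):
//
//   CompactionSpaceCollection compaction_spaces(heap);
//   CompactionSpace* old_space = compaction_spaces.Get(OLD_SPACE);
//   CompactionSpace* code_space = compaction_spaces.Get(CODE_SPACE);
//   AllocationResult result =
//       old_space->AllocateRaw(object_size, kWordAligned);
//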
2914 :
2915 : // -----------------------------------------------------------------------------
2916 : // Old generation regular object space.
2917 :
2918 123051 : class OldSpace : public PagedSpace {
2919 : public:
2920 : // Creates an old space object. The constructor does not allocate pages
2921 : // from the OS.
2922 61544 : explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
2923 :
2924 : static bool IsAtPageStart(Address addr) {
2925 : return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
2926 : MemoryChunkLayout::ObjectStartOffsetInDataPage();
2927 : }
2928 : };
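//
// Illustrative check of IsAtPageStart (hedged; |page| is a hypothetical Page*
// belonging to this space):
//
//   Address first_object =
//       page->address() + MemoryChunkLayout::ObjectStartOffsetInDataPage();
//   DCHECK(OldSpace::IsAtPageStart(first_object));
//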
2929 :
2930 : // -----------------------------------------------------------------------------
2931 : // Old generation code object space.
2932 :
2933 123037 : class CodeSpace : public PagedSpace {
2934 : public:
2935 : // Creates a code space object. The constructor does not allocate pages
2936 : // from the OS.
2937 61534 : explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
2938 : };
2939 :
2940 : // For contiguous spaces, top should be in the space (or at the end) and limit
2941 : // should be the end of the space.
2942 : #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2943 : SLOW_DCHECK((space).page_low() <= (info).top() && \
2944 : (info).top() <= (space).page_high() && \
2945 : (info).limit() <= (space).page_high())
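//
// Usage sketch (hedged; |allocation_info| is a LinearAllocationArea and
// |to_space| the active SemiSpace, both hypothetical names):
//
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info, to_space);
//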
2946 :
2947 :
2948 : // -----------------------------------------------------------------------------
2949 : // Old generation space for all map objects.
2950 :
2951 123037 : class MapSpace : public PagedSpace {
2952 : public:
2953 : // Creates a map space object.
2954 61534 : explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}
2955 :
2956 0 : int RoundSizeDownToObjectAlignment(int size) override {
2957 : if (base::bits::IsPowerOfTwo(Map::kSize)) {
2958 : return RoundDown(size, Map::kSize);
2959 : } else {
2960 0 : return (size / Map::kSize) * Map::kSize;
2961 : }
2962 : }
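//
// Worked example for the rounding above (illustrative only; the real
// Map::kSize depends on the build configuration):
//
//   if Map::kSize were 80 (not a power of two), a size of 500 would round
//   down to (500 / 80) * 80 == 480;
//   if Map::kSize were 64 (a power of two), RoundDown(500, 64) == 448.
//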
2963 :
2964 : #ifdef VERIFY_HEAP
2965 : void VerifyObject(HeapObject obj) override;
2966 : #endif
2967 : };
2968 :
2969 : // -----------------------------------------------------------------------------
2970 : // Read-only space for all immortal, immovable, and immutable objects.
2971 :
2972 : class ReadOnlySpace : public PagedSpace {
2973 : public:
2974 : class WritableScope {
2975 : public:
2976 448 : explicit WritableScope(ReadOnlySpace* space) : space_(space) {
2977 : space_->MarkAsReadWrite();
2978 : }
2979 :
2980 896 : ~WritableScope() { space_->MarkAsReadOnly(); }
2981 :
2982 : private:
2983 : ReadOnlySpace* space_;
2984 : };
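//
// Usage sketch for WritableScope (hedged; |read_only_space| is a hypothetical
// ReadOnlySpace*):
//
//   {
//     ReadOnlySpace::WritableScope scope(read_only_space);
//     // Mutations of read-only objects are allowed here.
//   }  // The destructor re-marks the space as read-only.
//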
2985 :
2986 : explicit ReadOnlySpace(Heap* heap);
2987 :
2988 : // TODO(v8:7464): Remove this once PagedSpace::TearDown no longer writes to
2989 : // memory_chunk_list_.
2990 123038 : ~ReadOnlySpace() override { MarkAsReadWrite(); }
2991 :
2992 : bool writable() const { return !is_marked_read_only_; }
2993 :
2994 : void ClearStringPaddingIfNeeded();
2995 : void MarkAsReadOnly();
2996 : // Make the heap forget the space for memory bookkeeping purposes
2997 : // (e.g., prevent the space's memory from registering as leaked).
2998 : void Forget();
2999 :
3000 : // During boot the free_space_map is created, and afterwards we may need
3001 : // to write it into the free list nodes that were already created.
3002 : void RepairFreeListsAfterDeserialization();
3003 :
3004 : private:
3005 : void MarkAsReadWrite();
3006 : void SetPermissionsForPages(PageAllocator::Permission access);
3007 :
3008 : bool is_marked_read_only_ = false;
3009 : //
3010 : // String padding must be cleared just before serialization and therefore the
3011 : // string padding in the space will already have been cleared if the space was
3012 : // deserialized.
3013 : bool is_string_padding_cleared_;
3014 : };
3015 :
3016 : // -----------------------------------------------------------------------------
3017 : // Large objects (> kMaxRegularHeapObjectSize) are allocated and
3018 : // managed by the large object space.
3019 : // Large objects do not move during garbage collections.
3020 :
3021 : class LargeObjectSpace : public Space {
3022 : public:
3023 : typedef LargePageIterator iterator;
3024 :
3025 : explicit LargeObjectSpace(Heap* heap);
3026 : LargeObjectSpace(Heap* heap, AllocationSpace id);
3027 :
3028 246075 : ~LargeObjectSpace() override { TearDown(); }
3029 :
3030 : // Releases internal resources, frees objects in this space.
3031 : void TearDown();
3032 :
3033 : V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
3034 : AllocateRaw(int object_size);
3035 :
3036 : // Available bytes for objects in this space.
3037 : size_t Available() override;
3038 :
3039 2051635 : size_t Size() override { return size_; }
3040 9291873 : size_t SizeOfObjects() override { return objects_size_; }
3041 :
3042 : // Approximate amount of physical memory committed for this space.
3043 : size_t CommittedPhysicalMemory() override;
3044 :
3045 : int PageCount() { return page_count_; }
3046 :
3047 : // Clears the marking state of live objects.
3048 : void ClearMarkingStateOfLiveObjects();
3049 :
3050 : // Frees unmarked objects.
3051 : void FreeUnmarkedObjects();
3052 :
3053 : void PromoteNewLargeObject(LargePage* page);
3054 :
3055 : // Checks whether a heap object is in this space; O(1).
3056 : bool Contains(HeapObject obj);
3057 : // Checks whether an address is in the object area in this space. Iterates
3058 : // all objects in the space. May be slow.
3059 : bool ContainsSlow(Address addr);
3060 :
3061 : // Checks whether the space is empty.
3062 5 : bool IsEmpty() { return first_page() == nullptr; }
3063 :
3064 : virtual void AddPage(LargePage* page, size_t object_size);
3065 : virtual void RemovePage(LargePage* page, size_t object_size);
3066 :
3067 112 : LargePage* first_page() {
3068 112 : return reinterpret_cast<LargePage*>(Space::first_page());
3069 : }
3070 :
3071 : // Collect code statistics.
3072 : void CollectCodeStatistics();
3073 :
3074 : iterator begin() { return iterator(first_page()); }
3075 : iterator end() { return iterator(nullptr); }
3076 :
3077 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
3078 :
3079 : #ifdef VERIFY_HEAP
3080 : virtual void Verify(Isolate* isolate);
3081 : #endif
3082 :
3083 : #ifdef DEBUG
3084 : void Print() override;
3085 : #endif
3086 :
3087 : protected:
3088 : LargePage* AllocateLargePage(int object_size, Executability executable);
3089 : V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
3090 : Executability executable);
3091 :
3092 : size_t size_; // allocated bytes
3093 : int page_count_; // number of chunks
3094 : size_t objects_size_; // size of objects
3095 :
3096 : private:
3097 : friend class LargeObjectIterator;
3098 : };
3099 :
3100 184554 : class NewLargeObjectSpace : public LargeObjectSpace {
3101 : public:
3102 : NewLargeObjectSpace(Heap* heap, size_t capacity);
3103 :
3104 : V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
3105 :
3106 : // Available bytes for objects in this space.
3107 : size_t Available() override;
3108 :
3109 : void Flip();
3110 :
3111 : void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
3112 :
3113 : void SetCapacity(size_t capacity);
3114 :
3115 : // The last allocated object that is not guaranteed to be initialized when
3116 : // the concurrent marker visits it.
3117 : Address pending_object() {
3118 : return pending_object_.load(std::memory_order_relaxed);
3119 : }
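//
// Concurrent-marker sketch for pending_object() (hedged; not the actual
// marker logic, |object| and |space| are hypothetical):
//
//   if (object.address() == space->pending_object()) {
//     // The object may not be fully initialized yet; revisit it later.
//   }
//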
3120 :
3121 : void ResetPendingObject() { pending_object_.store(kNullAddress); }
3122 :
3123 : private:
3124 : std::atomic<Address> pending_object_;
3125 : size_t capacity_;
3126 : };
3127 :
3128 307592 : class CodeLargeObjectSpace : public LargeObjectSpace {
3129 : public:
3130 : explicit CodeLargeObjectSpace(Heap* heap);
3131 :
3132 : V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
3133 : AllocateRaw(int object_size);
3134 :
3135 : // Finds the large object page containing the given address; returns nullptr
3136 : // if no such page exists.
3137 : LargePage* FindPage(Address a);
3138 :
3139 : protected:
3140 : void AddPage(LargePage* page, size_t object_size) override;
3141 : void RemovePage(LargePage* page, size_t object_size) override;
3142 :
3143 : private:
3144 : static const size_t kInitialChunkMapCapacity = 1024;
3145 : void InsertChunkMapEntries(LargePage* page);
3146 : void RemoveChunkMapEntries(LargePage* page);
3147 :
3148 : // Maps page-aligned addresses to their corresponding LargePage.
3149 : std::unordered_map<Address, LargePage*> chunk_map_;
3150 : };
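//
// Plausible lookup over |chunk_map_| (hedged; a sketch of the idea, not
// necessarily how FindPage is implemented):
//
//   Address key = address & ~kPageAlignmentMask;
//   auto it = chunk_map_.find(key);
//   LargePage* page = (it != chunk_map_.end()) ? it->second : nullptr;
//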
3151 :
3152 70380 : class LargeObjectIterator : public ObjectIterator {
3153 : public:
3154 : explicit LargeObjectIterator(LargeObjectSpace* space);
3155 :
3156 : HeapObject Next() override;
3157 :
3158 : private:
3159 : LargePage* current_;
3160 : };
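//
// Iteration sketch (hedged; assumes Next() returns a null HeapObject once the
// space is exhausted, |lo_space| is hypothetical):
//
//   LargeObjectIterator it(lo_space);
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // Visit |obj|.
//   }
//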
3161 :
3162 : // Iterates over the chunks (pages and large object pages) that can contain
3163 : // pointers to new space or to evacuation candidates.
3164 : class OldGenerationMemoryChunkIterator {
3165 : public:
3166 : inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
3167 :
3168 : // Returns nullptr when the iterator is done.
3169 : inline MemoryChunk* next();
3170 :
3171 : private:
3172 : enum State {
3173 : kOldSpaceState,
3174 : kMapState,
3175 : kCodeState,
3176 : kLargeObjectState,
3177 : kCodeLargeObjectState,
3178 : kFinishedState
3179 : };
3180 : Heap* heap_;
3181 : State state_;
3182 : PageIterator old_iterator_;
3183 : PageIterator code_iterator_;
3184 : PageIterator map_iterator_;
3185 : LargePageIterator lo_iterator_;
3186 : LargePageIterator code_lo_iterator_;
3187 : };
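//
// Usage sketch (hedged; |heap| is a hypothetical Heap*):
//
//   OldGenerationMemoryChunkIterator it(heap);
//   while (MemoryChunk* chunk = it.next()) {
//     // Visit |chunk|; the loop ends when next() returns nullptr.
//   }
//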
3188 :
3189 : } // namespace internal
3190 : } // namespace v8
3191 :
3192 : #endif // V8_HEAP_SPACES_H_
|