Line data Source code
1 : // Copyright 2011 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_SPACES_H_
6 : #define V8_HEAP_SPACES_H_
7 :
8 : #include <list>
9 : #include <map>
10 : #include <memory>
11 : #include <unordered_map>
12 : #include <unordered_set>
13 : #include <vector>
14 :
15 : #include "src/allocation.h"
16 : #include "src/base/atomic-utils.h"
17 : #include "src/base/bounded-page-allocator.h"
18 : #include "src/base/export-template.h"
19 : #include "src/base/iterator.h"
20 : #include "src/base/list.h"
21 : #include "src/base/platform/mutex.h"
22 : #include "src/cancelable-task.h"
23 : #include "src/flags.h"
24 : #include "src/globals.h"
25 : #include "src/heap/heap.h"
26 : #include "src/heap/invalidated-slots.h"
27 : #include "src/heap/marking.h"
28 : #include "src/objects.h"
29 : #include "src/objects/free-space.h"
30 : #include "src/objects/heap-object.h"
31 : #include "src/objects/map.h"
32 : #include "src/utils.h"
33 :
34 : namespace v8 {
35 : namespace internal {
36 :
37 : namespace heap {
38 : class HeapTester;
39 : class TestCodePageAllocatorScope;
40 : } // namespace heap
41 :
42 : class AllocationObserver;
43 : class CompactionSpace;
44 : class CompactionSpaceCollection;
45 : class FreeList;
46 : class Isolate;
47 : class LinearAllocationArea;
48 : class LocalArrayBufferTracker;
49 : class MemoryAllocator;
50 : class MemoryChunk;
51 : class MemoryChunkLayout;
52 : class Page;
53 : class PagedSpace;
54 : class SemiSpace;
55 : class SkipList;
56 : class SlotsBuffer;
57 : class SlotSet;
58 : class TypedSlotSet;
59 : class Space;
60 :
61 : // -----------------------------------------------------------------------------
62 : // Heap structures:
63 : //
64 : // A JS heap consists of a young generation, an old generation, and a large
65 : // object space. The young generation is divided into two semispaces. A
66 : // scavenger implements Cheney's copying algorithm. The old generation is
67 : // separated into a map space and an old object space. The map space contains
68 : // all (and only) map objects; the rest of the old objects go into the old space.
69 : // The old generation is collected by a mark-sweep-compact collector.
70 : //
71 : // The semispaces of the young generation are contiguous. The old and map
72 : // spaces consist of a list of pages. A page has a page header and an object
73 : // area.
74 : //
75 : // There is a separate large object space for objects larger than
76 : // kMaxRegularHeapObjectSize, so that they do not have to move during
77 : // collection. The large object space is paged. Pages in large object space
78 : // may be larger than the page size.
79 : //
80 : // A store-buffer based write barrier is used to keep track of intergenerational
81 : // references. See heap/store-buffer.h.
82 : //
83 : // During scavenges and mark-sweep collections we sometimes (after a store
84 : // buffer overflow) iterate intergenerational pointers without decoding heap
85 : // object maps, so if the page belongs to old space or large object space,
86 : // it is essential to guarantee that the page does not contain any
87 : // garbage pointers to new space: every pointer-aligned word which satisfies
88 : // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
89 : // new space. Thus objects in old space and large object spaces should have a
90 : // special layout (e.g. no bare integer fields). This requirement does not
91 : // apply to map space, which is iterated in a special fashion. However, we still
92 : // require pointer fields of dead maps to be cleaned.
93 : //
94 : // To enable lazy cleaning of old space pages we can mark chunks of the page
95 : // as being garbage. Garbage sections are marked with a special map. These
96 : // sections are skipped when scanning the page, even if we are otherwise
97 : // scanning without regard for object boundaries. Garbage sections are chained
98 : // together to form a free list after a GC. Garbage sections created outside
99 : // of GCs by object truncation etc. may not be in the free list chain. Very
100 : // small free spaces are ignored; they need only be cleaned of bogus pointers
101 : // into new space.
102 : //
103 : // Each page may have up to one special garbage section. The start of this
104 : // section is denoted by the top field in the space. The end of the section
105 : // is denoted by the limit field in the space. This special garbage section
106 : // is not marked with a free space map in the data. The point of this section
107 : // is to enable linear allocation without having to constantly update the byte
108 : // array every time the top field is updated and a new object is created. The
109 : // special garbage section is not in the chain of garbage sections.
110 : //
111 : // Since the top and limit fields are in the space, not the page, only one page
112 : // has a special garbage section, and if the top and limit are equal then there
113 : // is no special garbage section.
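//
// As an illustrative sketch (|object| is a hypothetical HeapObject), the
// chunk-level predicates declared further below can be used to tell which
// part of the heap an object lives in:
//
//   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
//   if (chunk->InYoungGeneration()) {
//     // New space or a young large page.
//   } else if (chunk->IsLargePage()) {
//     // Old large object space.
//   } else {
//     // A paged old-generation space (e.g. old, code or map space).
//   }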
114 :
115 : // Some assertion macros used in the debugging mode.
116 :
117 : #define DCHECK_OBJECT_SIZE(size) \
118 : DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
119 :
120 : #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
121 : DCHECK((0 < size) && (size <= code_space->AreaSize()))
122 :
123 : enum FreeListCategoryType {
124 : kTiniest,
125 : kTiny,
126 : kSmall,
127 : kMedium,
128 : kLarge,
129 : kHuge,
130 :
131 : kFirstCategory = kTiniest,
132 : kLastCategory = kHuge,
133 : kNumberOfCategories = kLastCategory + 1,
134 : kInvalidCategory
135 : };
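// Illustrative sketch: the sentinel values above allow iterating all regular
// categories, mirroring the loop used by Page::ForAllFreeListCategories()
// further below:
//
//   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
//     FreeListCategoryType type = static_cast<FreeListCategoryType>(i);
//     // ... handle |type| ...
//   }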
136 :
137 : enum FreeMode { kLinkCategory, kDoNotLinkCategory };
138 :
139 : enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
140 :
141 : enum RememberedSetType {
142 : OLD_TO_NEW,
143 : OLD_TO_OLD,
144 : NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
145 : };
146 :
147 : // A free list category maintains a linked list of free memory blocks.
148 : class FreeListCategory {
149 : public:
150 : FreeListCategory(FreeList* free_list, Page* page)
151 : : free_list_(free_list),
152 : page_(page),
153 : type_(kInvalidCategory),
154 : available_(0),
155 : prev_(nullptr),
156 2574593 : next_(nullptr) {}
157 :
158 : void Initialize(FreeListCategoryType type) {
159 2575212 : type_ = type;
160 2575212 : available_ = 0;
161 2575212 : prev_ = nullptr;
162 2575212 : next_ = nullptr;
163 : }
164 :
165 : void Reset();
166 :
167 0 : void ResetStats() { Reset(); }
168 :
169 : void RepairFreeList(Heap* heap);
170 :
171 : // Relinks the category into the currently owning free list. Requires that the
172 : // category is currently unlinked.
173 : void Relink();
174 :
175 : void Free(Address address, size_t size_in_bytes, FreeMode mode);
176 :
177 : // Makes a single attempt to pick a node of at least |minimum_size| from the
178 : // category. Stores the actual size in |node_size|. Returns nullptr if no
179 : // node is found.
180 : FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
181 :
182 : // Picks a node of at least |minimum_size| from the category. Stores the
183 : // actual size in |node_size|. Returns nullptr if no node is found.
184 : FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
185 :
186 : inline FreeList* owner();
187 : inline Page* page() const { return page_; }
188 : inline bool is_linked();
189 : bool is_empty() { return top().is_null(); }
190 : size_t available() const { return available_; }
191 :
192 6937579 : void set_free_list(FreeList* free_list) { free_list_ = free_list; }
193 :
194 : #ifdef DEBUG
195 : size_t SumFreeList();
196 : int FreeListLength();
197 : #endif
198 :
199 : private:
200 : // For debug builds we accurately compute free list lengths up until
201 : // {kVeryLongFreeList} by manually walking the list.
202 : static const int kVeryLongFreeList = 500;
203 :
204 : FreeSpace top() { return top_; }
205 24800690 : void set_top(FreeSpace top) { top_ = top; }
206 : FreeListCategory* prev() { return prev_; }
207 3738598 : void set_prev(FreeListCategory* prev) { prev_ = prev; }
208 : FreeListCategory* next() { return next_; }
209 5128359 : void set_next(FreeListCategory* next) { next_ = next; }
210 :
211 : // This FreeListCategory is owned by the given free_list_.
212 : FreeList* free_list_;
213 :
214 : // This FreeListCategory holds free list entries of the given page_.
215 : Page* const page_;
216 :
217 : // |type_|: The type of this free list category.
218 : FreeListCategoryType type_;
219 :
220 : // |available_|: Total available bytes in all blocks of this free list
221 : // category.
222 : size_t available_;
223 :
224 : // |top_|: Points to the top FreeSpace in the free list category.
225 : FreeSpace top_;
226 :
227 : FreeListCategory* prev_;
228 : FreeListCategory* next_;
229 :
230 : friend class FreeList;
231 : friend class PagedSpace;
232 :
233 : DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
234 : };
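// Usage sketch (illustrative; |category| and |size| are hypothetical):
// allocating from a category via PickNodeFromList(). The returned FreeSpace is
// null when no block of at least |size| bytes is available:
//
//   size_t node_size = 0;
//   FreeSpace node = category->PickNodeFromList(size, &node_size);
//   if (!node.is_null()) {
//     // |node_size| holds the actual size of the returned block.
//   }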
235 :
236 : class MemoryChunkLayout {
237 : public:
238 : static size_t CodePageGuardStartOffset();
239 : static size_t CodePageGuardSize();
240 : static intptr_t ObjectStartOffsetInCodePage();
241 : static intptr_t ObjectEndOffsetInCodePage();
242 : static size_t AllocatableMemoryInCodePage();
243 : static intptr_t ObjectStartOffsetInDataPage();
244 : V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
245 : static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
246 : static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
247 : };
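// Usage sketch (illustrative): querying the layout, e.g. the allocatable
// payload and the object start offset of a chunk in a given space:
//
//   size_t payload =
//       MemoryChunkLayout::AllocatableMemoryInMemoryChunk(CODE_SPACE);
//   size_t start_offset =
//       MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(CODE_SPACE);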
248 :
249 : // MemoryChunk represents a memory region owned by a specific space.
250 : // It is divided into the header and the body. The chunk start is always
251 : // aligned to kAlignment. The start of the body is aligned so it can accommodate
252 : // any heap object.
253 : class MemoryChunk {
254 : public:
255 : // Use with std data structures.
256 : struct Hasher {
257 : size_t operator()(MemoryChunk* const chunk) const {
258 489110803 : return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
259 : }
260 : };
261 :
262 : enum Flag {
263 : NO_FLAGS = 0u,
264 : IS_EXECUTABLE = 1u << 0,
265 : POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
266 : POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
267 : // A page in the from-space or a young large page that was not scavenged
268 : // yet.
269 : FROM_PAGE = 1u << 3,
270 : // A page in the to-space or a young large page that was scavenged.
271 : TO_PAGE = 1u << 4,
272 : LARGE_PAGE = 1u << 5,
273 : EVACUATION_CANDIDATE = 1u << 6,
274 : NEVER_EVACUATE = 1u << 7,
275 :
276 : // Large objects can have a progress bar in their page header. These objects
277 : // are scanned in increments and will be kept black while being scanned.
278 : // Even if the mutator writes to them, they will be kept black and a
279 : // white-to-grey transition is performed on the value.
280 : HAS_PROGRESS_BAR = 1u << 8,
281 :
282 : // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
283 : // from new to old space during evacuation.
284 : PAGE_NEW_OLD_PROMOTION = 1u << 9,
285 :
286 : // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
287 : // within the new space during evacuation.
288 : PAGE_NEW_NEW_PROMOTION = 1u << 10,
289 :
290 : // This flag is intended to be used for testing. Works only when both
291 : // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
292 : // are set. It forces the page to become an evacuation candidate at next
293 : // candidates selection cycle.
294 : FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
295 :
296 : // This flag is intended to be used for testing.
297 : NEVER_ALLOCATE_ON_PAGE = 1u << 12,
298 :
299 : // The memory chunk is already logically freed; however, the actual freeing
300 : // still has to be performed.
301 : PRE_FREED = 1u << 13,
302 :
303 : // |POOLED|: When actually freeing this chunk, only uncommit and do not
304 : // give up the reservation as we will still reuse the chunk at some point.
305 : POOLED = 1u << 14,
306 :
307 : // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
308 : // has been aborted and needs special handling by the sweeper.
309 : COMPACTION_WAS_ABORTED = 1u << 15,
310 :
311 : // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
312 : // on pages is sometimes aborted. The flag is used to avoid repeatedly
313 : // triggering on the same page.
314 : COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
315 :
316 : // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
317 : // to iterate the page.
318 : SWEEP_TO_ITERATE = 1u << 17,
319 :
320 : // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
321 : // enabled.
322 : INCREMENTAL_MARKING = 1u << 18,
323 : NEW_SPACE_BELOW_AGE_MARK = 1u << 19
324 : };
325 :
326 : using Flags = uintptr_t;
327 :
328 : static const Flags kPointersToHereAreInterestingMask =
329 : POINTERS_TO_HERE_ARE_INTERESTING;
330 :
331 : static const Flags kPointersFromHereAreInterestingMask =
332 : POINTERS_FROM_HERE_ARE_INTERESTING;
333 :
334 : static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
335 :
336 : static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
337 :
338 : static const Flags kIsLargePageMask = LARGE_PAGE;
339 :
340 : static const Flags kSkipEvacuationSlotsRecordingMask =
341 : kEvacuationCandidateMask | kIsInYoungGenerationMask;
342 :
343 : // |kSweepingDone|: The page state when sweeping is complete or sweeping must
344 : // not be performed on that page. Sweeper threads that are done with their
345 : // work will set this value and not touch the page anymore.
346 : // |kSweepingPending|: This page is ready for parallel sweeping.
347 : // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
348 : enum ConcurrentSweepingState {
349 : kSweepingDone,
350 : kSweepingPending,
351 : kSweepingInProgress,
352 : };
353 :
354 : static const intptr_t kAlignment =
355 : (static_cast<uintptr_t>(1) << kPageSizeBits);
356 :
357 : static const intptr_t kAlignmentMask = kAlignment - 1;
358 :
359 : static const intptr_t kSizeOffset = 0;
360 : static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
361 : static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
362 : static const intptr_t kReservationOffset =
363 : kMarkBitmapOffset + kSystemPointerSize;
364 : static const intptr_t kHeapOffset =
365 : kReservationOffset + 3 * kSystemPointerSize;
366 : static const intptr_t kHeaderSentinelOffset =
367 : kHeapOffset + kSystemPointerSize;
368 : static const intptr_t kOwnerOffset =
369 : kHeaderSentinelOffset + kSystemPointerSize;
370 :
371 : static const size_t kHeaderSize =
372 : kSizeOffset // NOLINT
373 : + kSizetSize // size_t size
374 : + kUIntptrSize // uintptr_t flags_
375 : + kSystemPointerSize // Bitmap* marking_bitmap_
376 : + 3 * kSystemPointerSize // VirtualMemory reservation_
377 : + kSystemPointerSize // Heap* heap_
378 : + kSystemPointerSize // Address header_sentinel_
379 : + kSystemPointerSize // Address area_start_
380 : + kSystemPointerSize // Address area_end_
381 : + kSystemPointerSize // Address owner_
382 : + kIntptrSize // intptr_t progress_bar_
383 : + kIntptrSize // intptr_t live_byte_count_
384 : + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
385 : + kSystemPointerSize *
386 : NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
387 : + kSystemPointerSize // InvalidatedSlots* invalidated_slots_
388 : + kSystemPointerSize // SkipList* skip_list_
389 : + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
390 : + kSystemPointerSize // base::Mutex* mutex_
391 : + kSystemPointerSize // std::atomic<ConcurrentSweepingState>
392 : // concurrent_sweeping_
393 : + kSystemPointerSize // base::Mutex* page_protection_change_mutex_
394 : + kSystemPointerSize // uintptr_t write_unprotect_counter_
395 : + kSizetSize * ExternalBackingStoreType::kNumTypes
396 : // std::atomic<size_t> external_backing_store_bytes_
397 : + kSizetSize // size_t allocated_bytes_
398 : + kSizetSize // size_t wasted_memory_
399 : + kSystemPointerSize * 2 // base::ListNode
400 : + kSystemPointerSize * kNumberOfCategories
401 : // FreeListCategory categories_[kNumberOfCategories]
402 : + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
403 : + kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
404 : + kSystemPointerSize; // Bitmap* young_generation_bitmap_
405 :
406 : // Page size in bytes. This must be a multiple of the OS page size.
407 : static const int kPageSize = 1 << kPageSizeBits;
408 :
409 : // Maximum number of nested code memory modification scopes.
410 : // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
411 : static const int kMaxWriteUnprotectCounter = 4;
412 :
413 10617071925 : static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
414 :
415 : // Only works if the pointer is in the first kPageSize of the MemoryChunk.
416 : static MemoryChunk* FromAddress(Address a) {
417 134883638 : return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
418 : }
419 : // Only works if the object is in the first kPageSize of the MemoryChunk.
420 8152761845 : static MemoryChunk* FromHeapObject(const HeapObject o) {
421 10218454241 : return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
422 : }
423 :
424 : void SetOldGenerationPageFlags(bool is_marking);
425 : void SetYoungGenerationPageFlags(bool is_marking);
426 :
427 : static inline MemoryChunk* FromAnyPointerAddress(Address addr);
428 :
429 2792392 : static inline void UpdateHighWaterMark(Address mark) {
430 4085999 : if (mark == kNullAddress) return;
431 : // Need to subtract one from the mark because when a chunk is full the
432 : // top points to the next address after the chunk, which effectively belongs
433 : // to another chunk. See the comment to Page::FromAllocationAreaAddress.
434 1498785 : MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
435 1498785 : intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
436 1498785 : intptr_t old_mark = 0;
437 1498785 : do {
438 1498785 : old_mark = chunk->high_water_mark_;
439 : } while (
440 2173696 : (new_mark > old_mark) &&
441 674911 : !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
442 : }
443 :
444 : static inline void MoveExternalBackingStoreBytes(
445 : ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
446 : size_t amount);
447 :
448 : void DiscardUnusedMemory(Address addr, size_t size);
449 :
450 : Address address() const {
451 9231686667 : return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
452 : }
453 :
454 : base::Mutex* mutex() { return mutex_; }
455 :
456 1802220 : bool Contains(Address addr) {
457 1878757 : return addr >= area_start() && addr < area_end();
458 : }
459 :
460 : // Checks whether |addr| can be a limit of addresses in this page. It's a
461 : // limit if it's in the page, or if it's just after the last byte of the page.
462 149163978 : bool ContainsLimit(Address addr) {
463 149163978 : return addr >= area_start() && addr <= area_end();
464 : }
465 :
466 : void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
467 : concurrent_sweeping_ = state;
468 : }
469 :
470 : ConcurrentSweepingState concurrent_sweeping_state() {
471 : return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
472 : }
473 :
474 488628 : bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
475 :
476 : size_t size() const { return size_; }
477 : void set_size(size_t size) { size_ = size; }
478 :
479 : inline Heap* heap() const { return heap_; }
480 :
481 : Heap* synchronized_heap();
482 :
483 0 : inline SkipList* skip_list() { return skip_list_; }
484 :
485 89290 : inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
486 :
487 : template <RememberedSetType type>
488 1233 : bool ContainsSlots() {
489 : return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
490 1730 : invalidated_slots() != nullptr;
491 : }
492 :
493 : template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
494 : SlotSet* slot_set() {
495 : if (access_mode == AccessMode::ATOMIC)
496 227015912 : return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
497 : return slot_set_[type];
498 : }
499 :
500 : template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
501 : TypedSlotSet* typed_slot_set() {
502 : if (access_mode == AccessMode::ATOMIC)
503 3712579 : return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
504 : return typed_slot_set_[type];
505 : }
506 :
507 : template <RememberedSetType type>
508 : SlotSet* AllocateSlotSet();
509 : // Not safe to be called concurrently.
510 : template <RememberedSetType type>
511 : void ReleaseSlotSet();
512 : template <RememberedSetType type>
513 : TypedSlotSet* AllocateTypedSlotSet();
514 : // Not safe to be called concurrently.
515 : template <RememberedSetType type>
516 : void ReleaseTypedSlotSet();
517 :
518 : InvalidatedSlots* AllocateInvalidatedSlots();
519 : void ReleaseInvalidatedSlots();
520 : void RegisterObjectWithInvalidatedSlots(HeapObject object, int size);
521 : // Updates invalidated_slots after array left-trimming.
522 : void MoveObjectWithInvalidatedSlots(HeapObject old_start,
523 : HeapObject new_start);
524 : bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
525 : InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
526 :
527 : void ReleaseLocalTracker();
528 :
529 : void AllocateYoungGenerationBitmap();
530 : void ReleaseYoungGenerationBitmap();
531 :
532 : void AllocateMarkingBitmap();
533 : void ReleaseMarkingBitmap();
534 :
535 : Address area_start() { return area_start_; }
536 : Address area_end() { return area_end_; }
537 10050699 : size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
538 :
539 : // Approximate amount of physical memory committed for this chunk.
540 : size_t CommittedPhysicalMemory();
541 :
542 182844 : Address HighWaterMark() { return address() + high_water_mark_; }
543 :
544 240565 : int progress_bar() {
545 : DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
546 255377 : return static_cast<int>(progress_bar_.load(std::memory_order_relaxed));
547 : }
548 :
549 240560 : void set_progress_bar(int progress_bar) {
550 : DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
551 : progress_bar_.store(progress_bar, std::memory_order_relaxed);
552 240560 : }
553 :
554 81172 : void ResetProgressBar() {
555 81172 : if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
556 : set_progress_bar(0);
557 : }
558 81172 : }
559 :
560 : inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
561 : size_t amount);
562 :
563 : inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
564 : size_t amount);
565 :
566 : size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
567 1443465 : return external_backing_store_bytes_[type];
568 : }
569 :
570 : // Some callers rely on the fact that this can operate on both
571 : // tagged and aligned object addresses.
572 8156126780 : inline uint32_t AddressToMarkbitIndex(Address addr) const {
573 17646845169 : return static_cast<uint32_t>(addr - this->address()) >>
574 8823569309 : kSystemPointerSizeLog2;
575 : }
576 :
577 : inline Address MarkbitIndexToAddress(uint32_t index) const {
578 : return this->address() + (index << kSystemPointerSizeLog2);
579 : }
580 :
581 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
582 : void SetFlag(Flag flag) {
583 : if (access_mode == AccessMode::NON_ATOMIC) {
584 3582024 : flags_ |= flag;
585 : } else {
586 7139 : base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
587 : }
588 : }
589 :
590 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
591 172902600 : bool IsFlagSet(Flag flag) {
592 6696282105 : return (GetFlags<access_mode>() & flag) != 0;
593 : }
594 :
595 2368434 : void ClearFlag(Flag flag) { flags_ &= ~flag; }
596 : // Set or clear multiple flags at a time. The flags in the mask are set to
597 : // the value in "flags", the rest retain the current value in |flags_|.
598 : void SetFlags(uintptr_t flags, uintptr_t mask) {
599 591323 : flags_ = (flags_ & ~mask) | (flags & mask);
600 : }
601 :
602 : // Return all current flags.
603 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
604 : uintptr_t GetFlags() {
605 : if (access_mode == AccessMode::NON_ATOMIC) {
606 : return flags_;
607 : } else {
608 6462645924 : return base::AsAtomicWord::Relaxed_Load(&flags_);
609 : }
610 : }
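// Illustrative sketch (|chunk| is a hypothetical MemoryChunk*): typical
// non-atomic flag manipulation using the accessors above:
//
//   chunk->SetFlag(MemoryChunk::NEVER_ALLOCATE_ON_PAGE);
//   if (chunk->IsFlagSet(MemoryChunk::NEVER_ALLOCATE_ON_PAGE)) {
//     chunk->ClearFlag(MemoryChunk::NEVER_ALLOCATE_ON_PAGE);
//   }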
611 :
612 : bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
613 :
614 : void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
615 :
616 : bool CanAllocate() {
617 249806 : return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
618 : }
619 :
620 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
621 6403753130 : bool IsEvacuationCandidate() {
622 : DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
623 : IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
624 6403753130 : return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
625 : }
626 :
627 : template <AccessMode access_mode = AccessMode::NON_ATOMIC>
628 50313171 : bool ShouldSkipEvacuationSlotRecording() {
629 : uintptr_t flags = GetFlags<access_mode>();
630 : return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
631 50313171 : ((flags & COMPACTION_WAS_ABORTED) == 0);
632 : }
633 :
634 : Executability executable() {
635 2643764 : return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
636 : }
637 :
638 371219766 : bool IsFromPage() const { return (flags_ & FROM_PAGE) != 0; }
639 42601152 : bool IsToPage() const { return (flags_ & TO_PAGE) != 0; }
640 5485826 : bool IsLargePage() const { return (flags_ & LARGE_PAGE) != 0; }
641 :
642 : bool InYoungGeneration() const {
643 1256191151 : return (flags_ & kIsInYoungGenerationMask) != 0;
644 : }
645 3039 : bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
646 0 : bool InNewLargeObjectSpace() const {
647 0 : return InYoungGeneration() && IsLargePage();
648 : }
649 : bool InOldSpace() const;
650 : bool InLargeObjectSpace() const;
651 :
652 :
653 : Space* owner() const { return owner_; }
654 :
655 : void set_owner(Space* space) { owner_ = space; }
656 :
657 : static inline bool HasHeaderSentinel(Address slot_addr);
658 :
659 : // Emits a memory barrier. For TSAN builds the other thread needs to perform
660 : // MemoryChunk::synchronized_heap() to simulate the barrier.
661 : void InitializationMemoryFence();
662 :
663 : void SetReadable();
664 : void SetReadAndExecutable();
665 : void SetReadAndWritable();
666 :
667 3014257 : void SetDefaultCodePermissions() {
668 3014257 : if (FLAG_jitless) {
669 71547 : SetReadable();
670 : } else {
671 2942710 : SetReadAndExecutable();
672 : }
673 3014256 : }
674 :
675 : base::ListNode<MemoryChunk>& list_node() { return list_node_; }
676 :
677 : protected:
678 : static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
679 : Address area_start, Address area_end,
680 : Executability executable, Space* owner,
681 : VirtualMemory reservation);
682 :
683 : // Should be called when the memory chunk is about to be freed.
684 : void ReleaseAllocatedMemory();
685 :
686 : // Sets the requested page permissions only if the write unprotect counter
687 : // has reached 0.
688 : void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
689 : PageAllocator::Permission permission);
690 :
691 : VirtualMemory* reserved_memory() { return &reservation_; }
692 :
693 : size_t size_;
694 : uintptr_t flags_;
695 :
696 : Bitmap* marking_bitmap_;
697 :
698 : // If the chunk needs to remember its memory reservation, it is stored here.
699 : VirtualMemory reservation_;
700 :
701 : Heap* heap_;
702 :
703 : // This is used to distinguish the memory chunk header from the interior of a
704 : // large page. The memory chunk header stores an impossible tagged pointer
705 : // here: the tagged pointer of the page start. A field in a large object is
706 : // guaranteed to not contain such a pointer.
707 : Address header_sentinel_;
708 :
709 : // The space owning this memory chunk.
710 : std::atomic<Space*> owner_;
711 :
712 : // Start and end of allocatable memory on this chunk.
713 : Address area_start_;
714 : Address area_end_;
715 :
716 : // Used by the incremental marker to keep track of the scanning progress in
717 : // large objects that have a progress bar and are scanned in increments.
718 : std::atomic<intptr_t> progress_bar_;
719 :
720 : // Count of bytes marked black on page.
721 : intptr_t live_byte_count_;
722 :
723 : // A single slot set for small pages (of size kPageSize) or an array of slot
724 : // sets for large pages. In the latter case, the number of entries in the array
725 : // is ceil(size() / kPageSize).
726 : SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
727 : TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
728 : InvalidatedSlots* invalidated_slots_;
729 :
730 : SkipList* skip_list_;
731 :
732 : // Assuming the initial allocation on a page is sequential,
733 : // this counts the highest number of bytes ever allocated on the page.
734 : std::atomic<intptr_t> high_water_mark_;
735 :
736 : base::Mutex* mutex_;
737 :
738 : std::atomic<intptr_t> concurrent_sweeping_;
739 :
740 : base::Mutex* page_protection_change_mutex_;
741 :
742 : // This field is only relevant for code pages. It depicts the number of
743 : // times a component requested this page to be read+writable. The
744 : // counter is decremented when a component resets to read+executable.
745 : // If Value() == 0 => The memory is read and executable.
746 : // If Value() >= 1 => The memory is read and writable (and maybe executable).
747 : // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
748 : // excessive nesting of scopes.
749 : // All executable MemoryChunks are allocated rw based on the assumption that
750 : // they will be used immediately for an allocation. They are initialized
751 : // with the number of open CodeSpaceMemoryModificationScopes. The caller
752 : // that triggers the page allocation is responsible for decrementing the
753 : // counter.
754 : uintptr_t write_unprotect_counter_;
755 :
756 : // Bytes allocated on the page, which include all objects on the page
757 : // and the linear allocation area.
758 : size_t allocated_bytes_;
759 :
760 : // Tracks off-heap memory used by this memory chunk.
761 : std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
762 :
763 : // Freed memory that was not added to the free list.
764 : size_t wasted_memory_;
765 :
766 : base::ListNode<MemoryChunk> list_node_;
767 :
768 : FreeListCategory* categories_[kNumberOfCategories];
769 :
770 : LocalArrayBufferTracker* local_tracker_;
771 :
772 : std::atomic<intptr_t> young_generation_live_byte_count_;
773 : Bitmap* young_generation_bitmap_;
774 :
775 : private:
776 706877 : void InitializeReservedMemory() { reservation_.Reset(); }
777 :
778 : friend class ConcurrentMarkingState;
779 : friend class IncrementalMarkingState;
780 : friend class MajorAtomicMarkingState;
781 : friend class MajorMarkingState;
782 : friend class MajorNonAtomicMarkingState;
783 : friend class MemoryAllocator;
784 : friend class MemoryChunkValidator;
785 : friend class MinorMarkingState;
786 : friend class MinorNonAtomicMarkingState;
787 : friend class PagedSpace;
788 : };
789 :
790 : static_assert(sizeof(std::atomic<intptr_t>) == kSystemPointerSize,
791 : "sizeof(std::atomic<intptr_t>) == kSystemPointerSize");
792 :
793 : // -----------------------------------------------------------------------------
794 : // A page is a memory chunk of size 512K. Large object pages may be larger.
795 : //
796 : // The only way to get a page pointer is by calling factory methods:
797 : // Page* p = Page::FromAddress(addr); or
798 : // Page* p = Page::FromAllocationAreaAddress(address);
799 : class Page : public MemoryChunk {
800 : public:
801 : static const intptr_t kCopyAllFlags = ~0;
802 :
803 : // Page flags copied from from-space to to-space when flipping semispaces.
804 : static const intptr_t kCopyOnFlipFlagsMask =
805 : static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
806 : static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
807 : static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
808 :
809 : // Returns the page containing a given address. The address ranges
810 : // from [page_addr .. page_addr + kPageSize[. This only works if the object
811 : // is in fact in a page.
812 0 : static Page* FromAddress(Address addr) {
813 298384544 : return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
814 : }
815 12703088167 : static Page* FromHeapObject(const HeapObject o) {
816 12768682985 : return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
817 : }
818 :
819 : // Returns the page containing the address provided. The address can
820 : // potentially point right after the page. To also be safe for tagged values,
821 : // we subtract a hole word. The valid address ranges from
822 : // [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
823 : static Page* FromAllocationAreaAddress(Address address) {
824 1114527 : return Page::FromAddress(address - kTaggedSize);
825 : }
826 :
827 : // Checks if address1 and address2 are on the same new space page.
828 : static bool OnSamePage(Address address1, Address address2) {
829 : return Page::FromAddress(address1) == Page::FromAddress(address2);
830 : }
831 :
832 : // Checks whether an address is page aligned.
833 : static bool IsAlignedToPageSize(Address addr) {
834 1677098 : return (addr & kPageAlignmentMask) == 0;
835 : }
836 :
837 : static Page* ConvertNewToOld(Page* old_page);
838 :
839 : inline void MarkNeverAllocateForTesting();
840 : inline void MarkEvacuationCandidate();
841 : inline void ClearEvacuationCandidate();
842 :
843 7627749 : Page* next_page() { return static_cast<Page*>(list_node_.next()); }
844 341 : Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
845 :
846 : template <typename Callback>
847 0 : inline void ForAllFreeListCategories(Callback callback) {
848 8205659 : for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
849 8205660 : callback(categories_[i]);
850 : }
851 0 : }
852 :
853 : // Returns the offset of a given address to this page.
854 370 : inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
855 :
856 : // Returns the address for a given offset in this page.
857 : Address OffsetToAddress(size_t offset) {
858 158 : Address address_in_page = address() + offset;
859 : DCHECK_GE(address_in_page, area_start_);
860 : DCHECK_LT(address_in_page, area_end_);
861 : return address_in_page;
862 : }
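// Illustrative sketch (|object| is a hypothetical HeapObject on this page):
// Offset() and OffsetToAddress() are inverses of each other for addresses
// within the page:
//
//   Page* page = Page::FromHeapObject(object);
//   size_t offset = page->Offset(object.address());
//   DCHECK_EQ(object.address(), page->OffsetToAddress(offset));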
863 :
864 : // WaitUntilSweepingCompleted only works when concurrent sweeping is in
865 : // progress. In particular, when we know that right before this call a
866 : // sweeper thread was sweeping this page.
867 : void WaitUntilSweepingCompleted() {
868 0 : mutex_->Lock();
869 0 : mutex_->Unlock();
870 : DCHECK(SweepingDone());
871 : }
872 :
873 : void AllocateLocalTracker();
874 : inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
875 : bool contains_array_buffers();
876 :
877 : void ResetFreeListStatistics();
878 :
879 : size_t AvailableInFreeList();
880 :
881 : size_t AvailableInFreeListFromAllocatedBytes() {
882 : DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
883 : return area_size() - wasted_memory() - allocated_bytes();
884 : }
885 :
886 : FreeListCategory* free_list_category(FreeListCategoryType type) {
887 21266078 : return categories_[type];
888 : }
889 :
890 : size_t wasted_memory() { return wasted_memory_; }
891 482899 : void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
892 : size_t allocated_bytes() { return allocated_bytes_; }
893 : void IncreaseAllocatedBytes(size_t bytes) {
894 : DCHECK_LE(bytes, area_size());
895 1231789 : allocated_bytes_ += bytes;
896 : }
897 : void DecreaseAllocatedBytes(size_t bytes) {
898 : DCHECK_LE(bytes, area_size());
899 : DCHECK_GE(allocated_bytes(), bytes);
900 21750926 : allocated_bytes_ -= bytes;
901 : }
902 :
903 : void ResetAllocatedBytes();
904 :
905 : size_t ShrinkToHighWaterMark();
906 :
907 : V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
908 : void DestroyBlackArea(Address start, Address end);
909 :
910 : void InitializeFreeListCategories();
911 : void AllocateFreeListCategories();
912 : void ReleaseFreeListCategories();
913 :
914 : #ifdef DEBUG
915 : void Print();
916 : #endif // DEBUG
917 :
918 : private:
919 : enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
920 :
921 : friend class MemoryAllocator;
922 : };
923 :
924 : class ReadOnlyPage : public Page {
925 : public:
926 : // Clears any pointers in the header that point out of the page that would
927 : // otherwise make the header non-relocatable.
928 : void MakeHeaderRelocatable();
929 :
930 : private:
931 : friend class ReadOnlySpace;
932 : };
933 :
934 : class LargePage : public MemoryChunk {
935 : public:
936 : // A limit to guarantee that we do not overflow the typed slot offset in
937 : // the old-to-old remembered set.
938 : // Note that this limit is higher than what the assembler already imposes on
939 : // x64 and ia32 architectures.
940 : static const int kMaxCodePageSize = 512 * MB;
941 :
942 : static LargePage* FromHeapObject(const HeapObject o) {
943 : return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
944 : }
945 :
946 : inline HeapObject GetObject();
947 :
948 6622 : inline LargePage* next_page() {
949 1836430 : return static_cast<LargePage*>(list_node_.next());
950 : }
951 :
952 : // Uncommit memory that is no longer in use by the object. If the object
953 : // cannot be shrunk, 0 is returned.
954 : Address GetAddressToShrink(Address object_address, size_t object_size);
955 :
956 : void ClearOutOfLiveRangeSlots(Address free_start);
957 :
958 : private:
959 : static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
960 : Executability executable);
961 :
962 : friend class MemoryAllocator;
963 : };
964 :
965 :
966 : // ----------------------------------------------------------------------------
967 : // Space is the abstract superclass for all allocation spaces.
968 : class Space : public Malloced {
969 : public:
970 840721 : Space(Heap* heap, AllocationSpace id)
971 : : allocation_observers_paused_(false),
972 : heap_(heap),
973 : id_(id),
974 : committed_(0),
975 1681442 : max_committed_(0) {
976 : external_backing_store_bytes_ =
977 840721 : new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
978 : external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
979 840721 : external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
980 : 0;
981 : CheckOffsetsAreConsistent();
982 840721 : }
983 :
984 : void CheckOffsetsAreConsistent() const;
985 :
986 : static inline void MoveExternalBackingStoreBytes(
987 : ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
988 :
989 840574 : virtual ~Space() {
990 840574 : delete[] external_backing_store_bytes_;
991 840574 : external_backing_store_bytes_ = nullptr;
992 840574 : }
993 :
994 0 : Heap* heap() const { return heap_; }
995 :
996 : // Identity used in error reporting.
997 0 : AllocationSpace identity() { return id_; }
998 :
999 0 : const char* name() { return AllocationSpaceName(id_); }
1000 :
1001 : V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
1002 : AllocationObserver* observer);
1003 :
1004 : V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
1005 : AllocationObserver* observer);
1006 :
1007 : V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
1008 :
1009 : V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
1010 :
1011 166196 : V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
1012 :
1013 : void AllocationStep(int bytes_since_last, Address soon_object, int size);
1014 :
1015 : // Return the total amount of committed memory for this space, i.e., allocatable
1016 : // memory and page headers.
1017 4783901 : virtual size_t CommittedMemory() { return committed_; }
1018 :
1019 0 : virtual size_t MaximumCommittedMemory() { return max_committed_; }
1020 :
1021 : // Returns allocated size.
1022 : virtual size_t Size() = 0;
1023 :
1024 : // Returns size of objects. Can differ from the allocated size
1025 : // (e.g. see LargeObjectSpace).
1026 0 : virtual size_t SizeOfObjects() { return Size(); }
1027 :
1028 : // Approximate amount of physical memory committed for this space.
1029 : virtual size_t CommittedPhysicalMemory() = 0;
1030 :
1031 : // Return the available bytes without growing.
1032 : virtual size_t Available() = 0;
1033 :
1034 21564474 : virtual int RoundSizeDownToObjectAlignment(int size) {
1035 21564474 : if (id_ == CODE_SPACE) {
1036 0 : return RoundDown(size, kCodeAlignment);
1037 : } else {
1038 21564474 : return RoundDown(size, kTaggedSize);
1039 : }
1040 : }
1041 :
1042 : virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
1043 :
1044 : void AccountCommitted(size_t bytes) {
1045 : DCHECK_GE(committed_ + bytes, committed_);
1046 736214 : committed_ += bytes;
1047 736214 : if (committed_ > max_committed_) {
1048 642471 : max_committed_ = committed_;
1049 : }
1050 : }
1051 :
1052 : void AccountUncommitted(size_t bytes) {
1053 : DCHECK_GE(committed_, committed_ - bytes);
1054 445999 : committed_ -= bytes;
1055 : }
1056 :
1057 : inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1058 : size_t amount);
1059 :
1060 : inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1061 : size_t amount);
1062 :
1063 : // Returns the amount of off-heap memory in use by objects in this Space.
1064 65 : virtual size_t ExternalBackingStoreBytes(
1065 : ExternalBackingStoreType type) const {
1066 160 : return external_backing_store_bytes_[type];
1067 : }
1068 :
1069 : V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
1070 :
1071 5841716 : MemoryChunk* first_page() { return memory_chunk_list_.front(); }
1072 17833 : MemoryChunk* last_page() { return memory_chunk_list_.back(); }
1073 :
1074 : base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
1075 :
1076 : #ifdef DEBUG
1077 : virtual void Print() = 0;
1078 : #endif
1079 :
1080 : protected:
1081 : intptr_t GetNextInlineAllocationStepSize();
1082 : bool AllocationObserversActive() {
1083 526324591 : return !allocation_observers_paused_ && !allocation_observers_.empty();
1084 : }
1085 :
1086 : std::vector<AllocationObserver*> allocation_observers_;
1087 :
1088 : // The List manages the pages that belong to the given space.
1089 : base::List<MemoryChunk> memory_chunk_list_;
1090 :
1091 : // Tracks off-heap memory used by this space.
1092 : std::atomic<size_t>* external_backing_store_bytes_;
1093 :
1094 : private:
1095 : static const intptr_t kIdOffset = 9 * kSystemPointerSize;
1096 :
1097 : bool allocation_observers_paused_;
1098 : Heap* heap_;
1099 : AllocationSpace id_;
1100 :
1101 : // Keeps track of committed memory in a space.
1102 : size_t committed_;
1103 : size_t max_committed_;
1104 :
1105 : DISALLOW_COPY_AND_ASSIGN(Space);
1106 : };
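// Usage sketch (illustrative; |space| and |chunk_size| are hypothetical): how
// the committed-memory accounting above is meant to be used around a
// reservation:
//
//   space->AccountCommitted(chunk_size);
//   DCHECK_GE(space->CommittedMemory(), chunk_size);
//   // ... later, when the memory is released ...
//   space->AccountUncommitted(chunk_size);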
1107 :
1108 :
1109 : class MemoryChunkValidator {
1110 : // Computed offsets should match the compiler generated ones.
1111 : STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
1112 :
1113 : // Validate our estimates on the header size.
1114 : STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
1115 : STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
1116 : STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
1117 : };
1118 :
1119 :
1120 : // The process-wide singleton that keeps track of code range regions with the
1121 : // intention to reuse free code range regions as a workaround for CFG memory
1122 : // leaks (see crbug.com/870054).
1123 115564 : class CodeRangeAddressHint {
1124 : public:
1125 : // Returns the most recently freed code range start address for the given
1126 : // size. If there is no such entry, then a random address is returned.
1127 : V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
1128 :
1129 : V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
1130 : size_t code_range_size);
1131 :
1132 : private:
1133 : base::Mutex mutex_;
1134 : // A map from code range size to an array of recently freed code range
1135 : // addresses. There should be O(1) different code range sizes.
1136 : // The length of each array is limited by the peak number of code ranges,
1137 : // which should be also O(1).
1138 : std::unordered_map<size_t, std::vector<Address>> recently_freed_;
1139 : };
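// Usage sketch (illustrative; |hint|, |start| and |size| are hypothetical):
// freed code ranges are recorded and later offered as placement hints for
// reservations of the same size:
//
//   hint.NotifyFreedCodeRange(start, size);
//   Address candidate = hint.GetAddressHint(size);  // Likely |start| again.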
1140 :
1141 : class SkipList {
1142 : public:
1143 0 : SkipList() { Clear(); }
1144 :
1145 0 : void Clear() {
1146 12469504 : for (int idx = 0; idx < kSize; idx++) {
1147 12469504 : starts_[idx] = static_cast<Address>(-1);
1148 : }
1149 0 : }
1150 :
1151 529728 : Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
1152 :
1153 0 : void AddObject(Address addr, int size) {
1154 0 : int start_region = RegionNumber(addr);
1155 93865909 : int end_region = RegionNumber(addr + size - kTaggedSize);
1156 96890234 : for (int idx = start_region; idx <= end_region; idx++) {
1157 96890234 : if (starts_[idx] > addr) {
1158 2649215 : starts_[idx] = addr;
1159 : } else {
1160 : // In the first region, there may already be an object closer to the
1161 : // start of the region. Do not change the start in that case. If this
1162 : // is not the first region, you probably added overlapping objects.
1163 : DCHECK_EQ(start_region, idx);
1164 : }
1165 : }
1166 0 : }
1167 :
1168 0 : static inline int RegionNumber(Address addr) {
1169 413807836 : return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
1170 : }
1171 :
1172 93865909 : static void Update(Address addr, int size) {
1173 0 : Page* page = Page::FromAddress(addr);
1174 93865909 : SkipList* list = page->skip_list();
1175 93865909 : if (list == nullptr) {
1176 89290 : list = new SkipList();
1177 0 : page->set_skip_list(list);
1178 : }
1179 :
1180 0 : list->AddObject(addr, size);
1181 93865909 : }
1182 :
1183 : private:
1184 : static const int kRegionSizeLog2 = 13;
1185 : static const int kRegionSize = 1 << kRegionSizeLog2;
1186 : static const int kSize = Page::kPageSize / kRegionSize;
1187 :
1188 : STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1189 :
1190 : Address starts_[kSize];
1191 : };
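// Usage sketch (illustrative; |addr| and |size| are hypothetical): registering
// a freshly allocated code object and querying the scan start of its region
// afterwards:
//
//   SkipList::Update(addr, size);
//   SkipList* list = Page::FromAddress(addr)->skip_list();
//   Address scan_start = list->StartFor(addr);
//   DCHECK_LE(scan_start, addr);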
1192 :
1193 :
1194 : // ----------------------------------------------------------------------------
1195 : // A space acquires chunks of memory from the operating system. The memory
1196 : // allocator allocates and deallocates pages for the paged heap spaces and large
1197 : // pages for large object space.
1198 186147 : class V8_EXPORT_PRIVATE MemoryAllocator {
1199 : public:
1200 : // Unmapper takes care of concurrently unmapping and uncommitting memory
1201 : // chunks.
1202 124098 : class Unmapper {
1203 : public:
1204 : class UnmapFreeMemoryTask;
1205 :
1206 62063 : Unmapper(Heap* heap, MemoryAllocator* allocator)
1207 : : heap_(heap),
1208 : allocator_(allocator),
1209 : pending_unmapping_tasks_semaphore_(0),
1210 : pending_unmapping_tasks_(0),
1211 248252 : active_unmapping_tasks_(0) {
1212 62063 : chunks_[kRegular].reserve(kReservedQueueingSlots);
1213 62064 : chunks_[kPooled].reserve(kReservedQueueingSlots);
1214 62064 : }
1215 :
1216 236394 : void AddMemoryChunkSafe(MemoryChunk* chunk) {
1217 467715 : if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
1218 229286 : AddMemoryChunkSafe<kRegular>(chunk);
1219 : } else {
1220 7108 : AddMemoryChunkSafe<kNonRegular>(chunk);
1221 : }
1222 236394 : }
1223 :
1224 216320 : MemoryChunk* TryGetPooledMemoryChunkSafe() {
1225 : // Procedure:
1226 : // (1) Try to get a chunk that was declared as pooled and already has
1227 : // been uncommitted.
1228 : // (2) Try to steal any memory chunk of kPageSize that would've been
1229 : // unmapped.
1230 216320 : MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1231 216320 : if (chunk == nullptr) {
1232 197322 : chunk = GetMemoryChunkSafe<kRegular>();
1233 197322 : if (chunk != nullptr) {
1234 : // For stolen chunks we need to manually free any allocated memory.
1235 8872 : chunk->ReleaseAllocatedMemory();
1236 : }
1237 : }
1238 216320 : return chunk;
1239 : }
1240 :
1241 : V8_EXPORT_PRIVATE void FreeQueuedChunks();
1242 : void CancelAndWaitForPendingTasks();
1243 : void PrepareForMarkCompact();
1244 : void EnsureUnmappingCompleted();
1245 : V8_EXPORT_PRIVATE void TearDown();
1246 : size_t NumberOfCommittedChunks();
1247 : int NumberOfChunks();
1248 : size_t CommittedBufferedMemory();
1249 :
1250 : private:
1251 : static const int kReservedQueueingSlots = 64;
1252 : static const int kMaxUnmapperTasks = 4;
1253 :
1254 : enum ChunkQueueType {
1255 : kRegular, // Pages of kPageSize that do not live in a CodeRange and
1256 : // can thus be used for stealing.
1257 : kNonRegular, // Large chunks and executable chunks.
1258 : kPooled, // Pooled chunks, already uncommitted and ready for reuse.
1259 : kNumberOfChunkQueues,
1260 : };
1261 :
1262 : enum class FreeMode {
1263 : kUncommitPooled,
1264 : kReleasePooled,
1265 : };
1266 :
1267 : template <ChunkQueueType type>
1268 443272 : void AddMemoryChunkSafe(MemoryChunk* chunk) {
1269 443272 : base::MutexGuard guard(&mutex_);
1270 443274 : chunks_[type].push_back(chunk);
1271 443274 : }
1272 :
1273 : template <ChunkQueueType type>
1274 1777379 : MemoryChunk* GetMemoryChunkSafe() {
1275 1777379 : base::MutexGuard guard(&mutex_);
1276 1777599 : if (chunks_[type].empty()) return nullptr;
1277 443274 : MemoryChunk* chunk = chunks_[type].back();
1278 : chunks_[type].pop_back();
1279 443274 : return chunk;
1280 : }
1281 :
1282 : bool MakeRoomForNewTasks();
1283 :
1284 : template <FreeMode mode>
1285 : void PerformFreeMemoryOnQueuedChunks();
1286 :
1287 : void PerformFreeMemoryOnQueuedNonRegularChunks();
1288 :
1289 : Heap* const heap_;
1290 : MemoryAllocator* const allocator_;
1291 : base::Mutex mutex_;
1292 : std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1293 : CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
1294 : base::Semaphore pending_unmapping_tasks_semaphore_;
1295 : intptr_t pending_unmapping_tasks_;
1296 : std::atomic<intptr_t> active_unmapping_tasks_;
1297 :
1298 : friend class MemoryAllocator;
1299 : };
1300 :
1301 : enum AllocationMode {
1302 : kRegular,
1303 : kPooled,
1304 : };
1305 :
1306 : enum FreeMode {
1307 : kFull,
1308 : kAlreadyPooled,
1309 : kPreFreeAndQueue,
1310 : kPooledAndQueue,
1311 : };
1312 :
1313 : static intptr_t GetCommitPageSize();
1314 :
1315 : // Computes the memory area of discardable memory within a given memory area
1316 : // [addr, addr+size) and returns the result as base::AddressRegion. If the
1317 : // memory is not discardable, the returned base::AddressRegion is empty.
1318 : static base::AddressRegion ComputeDiscardMemoryArea(Address addr,
1319 : size_t size);
1320 :
1321 : MemoryAllocator(Isolate* isolate, size_t max_capacity,
1322 : size_t code_range_size);
1323 :
1324 : void TearDown();
1325 :
1326 : // Allocates a Page from the allocator. AllocationMode is used to indicate
1327 : // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1328 : // should be tried first.
1329 : template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1330 : typename SpaceType>
1331 : EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
1332 : Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
1333 :
1334 : LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
1335 : Executability executable);
1336 :
1337 : template <MemoryAllocator::FreeMode mode = kFull>
1338 : EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
1339 187882 : void Free(MemoryChunk* chunk);
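// Usage sketch (illustrative; |allocator| and |space| are hypothetical): a
// regular, non-pooled page allocation followed by a full free:
//
//   Page* page = allocator->AllocatePage(
//       MemoryChunkLayout::AllocatableMemoryInDataPage(), space,
//       NOT_EXECUTABLE);
//   // ... use the page ...
//   allocator->Free<MemoryAllocator::kFull>(page);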
1340 :
1341 : // Returns the allocated space in bytes.
1342 : size_t Size() { return size_; }
1343 :
1344 : // Returns allocated executable spaces in bytes.
1345 : size_t SizeExecutable() { return size_executable_; }
1346 :
1347 : // Returns the maximum available bytes of the heap.
1348 : size_t Available() {
1349 : const size_t size = Size();
1350 326 : return capacity_ < size ? 0 : capacity_ - size;
1351 : }
1352 :
1353 : // Returns an indication of whether a pointer is in a space that has
1354 : // been allocated by this MemoryAllocator.
1355 : V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
1356 3873818 : return address < lowest_ever_allocated_ ||
1357 1936909 : address >= highest_ever_allocated_;
1358 : }
1359 :
1360 : // Returns a MemoryChunk in which the memory region from commit_area_size to
1361 : // reserve_area_size of the chunk area is reserved but not committed; it
1362 : // can be committed later by calling MemoryChunk::CommitArea.
1363 : MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
1364 : Executability executable, Space* space);
1365 :
1366 : Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1367 : size_t alignment, Executability executable,
1368 : void* hint, VirtualMemory* controller);
1369 :
1370 : void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
1371 :
1372 : // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
1373 : // internally memory is freed from |start_free| to the end of the reservation.
1374 : // Additional memory beyond the page is not accounted though, so
1375 : // |bytes_to_free| is computed by the caller.
1376 : void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
1377 : size_t bytes_to_free, Address new_area_end);
1378 :
1379 : // Checks if an allocated MemoryChunk was intended to be used for executable
1380 : // memory.
1381 6622 : bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
1382 6622 : return executable_memory_.find(chunk) != executable_memory_.end();
1383 : }
1384 :
1385 : // Commit memory region owned by given reservation object. Returns true if
1386 : // it succeeded and false otherwise.
1387 : bool CommitMemory(VirtualMemory* reservation);
1388 :
1389 : // Uncommit memory region owned by given reservation object. Returns true if
1390 : // it succeeded and false otherwise.
1391 : bool UncommitMemory(VirtualMemory* reservation);
1392 :
1393 : // Zaps a contiguous block of memory [start..(start+size)[ with
1394 : // a given zap value.
1395 : void ZapBlock(Address start, size_t size, uintptr_t zap_value);
1396 :
1397 : V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
1398 : Address start,
1399 : size_t commit_size,
1400 : size_t reserved_size);
1401 :
1402 : // Page allocator instance for allocating non-executable pages.
1403 : // Guaranteed to be a valid pointer.
1404 : v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
1405 :
1406 : // Page allocator instance for allocating executable pages.
1407 : // Guaranteed to be a valid pointer.
1408 : v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
1409 :
1410 : // Returns page allocator suitable for allocating pages with requested
1411 : // executability.
1412 : v8::PageAllocator* page_allocator(Executability executable) {
1413 : return executable == EXECUTABLE ? code_page_allocator_
1414 918712 : : data_page_allocator_;
1415 : }
1416 :
1417 : // A region of memory that may contain executable code, including a reserved
1418 : // OS page with read-write access at the beginning.
1419 84280 : const base::AddressRegion& code_range() const {
1420 : // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
1421 : DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
1422 : DCHECK_IMPLIES(!code_range_.is_empty(),
1423 : code_range_.contains(code_page_allocator_instance_->begin(),
1424 : code_page_allocator_instance_->size()));
1425 84280 : return code_range_;
1426 : }
1427 :
1428 : Unmapper* unmapper() { return &unmapper_; }
1429 :
1430 : private:
1431 : void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
1432 : size_t requested);
1433 :
1434 : // PreFree logically frees the object, i.e., it takes care of the size
1435 : // bookkeeping and calls the allocation callback.
1436 : void PreFreeMemory(MemoryChunk* chunk);
1437 :
1438 : // FreeMemory can be called concurrently when PreFree was executed before.
1439 : void PerformFreeMemory(MemoryChunk* chunk);
1440 :
1441 : // See AllocatePage for public interface. Note that currently we only support
1442 : // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1443 : template <typename SpaceType>
1444 : MemoryChunk* AllocatePagePooled(SpaceType* owner);
1445 :
1446 : // Initializes pages in a chunk. Returns the first page address.
1447 : // This function and GetChunkId() are provided for the mark-compact
1448 : // collector to rebuild page headers in the from space, which is
1449 : // used as a marking stack and whose page headers are destroyed.
1450 : Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1451 : PagedSpace* owner);
1452 :
1453 706878 : void UpdateAllocatedSpaceLimits(Address low, Address high) {
1454 : // The use of atomic primitives alone does not guarantee the desired
1455 : // semantics. The loop here ensures that we update the values only if they
1456 : // did not change in between.
1457 706878 : Address ptr = kNullAddress;
1458 706879 : do {
1459 706878 : ptr = lowest_ever_allocated_;
1460 908226 : } while ((low < ptr) &&
1461 201347 : !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
1462 706879 : do {
1463 706879 : ptr = highest_ever_allocated_;
1464 778675 : } while ((high > ptr) &&
1465 71796 : !highest_ever_allocated_.compare_exchange_weak(ptr, high));
1466 706879 : }
1467 :
1468 : void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
1469 : DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
1470 : DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
1471 : executable_memory_.insert(chunk);
1472 : }
1473 :
1474 133742 : void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
1475 : DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
1476 : executable_memory_.erase(chunk);
1477 133742 : chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
1478 133742 : }
1479 :
1480 : Isolate* isolate_;
1481 :
1482 : // This object controls the virtual space reserved for the V8 heap instance.
1483 : // Depending on the configuration it may contain the following:
1484 : // - no reservation (on 32-bit architectures)
1485 : // - code range reservation used by the bounded code page allocator (on 64-bit
1486 : //   architectures without pointer compression in the V8 heap)
1487 : // - data + code range reservation (on 64-bit architectures with pointer
1488 : //   compression in the V8 heap)
1489 : VirtualMemory heap_reservation_;
1490 :
1491 : // Page allocator used for allocating data pages. Depending on the
1492 : // configuration it may be a page allocator instance provided by v8::Platform
1493 : // or a BoundedPageAllocator (when pointer compression is enabled).
1494 : v8::PageAllocator* data_page_allocator_;
1495 :
1496 : // Page allocator used for allocating code pages. Depending on the
1497 : // configuration it may be a page allocator instance provided by v8::Platform
1498 : // or a BoundedPageAllocator (when pointer compression is enabled or
1499 : // on those 64-bit architectures where pc-relative 32-bit displacement
1500 : // can be used for call and jump instructions).
1501 : v8::PageAllocator* code_page_allocator_;
1502 :
1503 : // A part of the |heap_reservation_| that may contain executable code
1504 : // including a reserved page with read-write access at the beginning.
1505 : // See details below.
1506 : base::AddressRegion code_range_;
1507 :
1508 : // This unique pointer owns the instance of the bounded code allocator
1509 : // that controls executable page allocation. It does not control the
1510 : // optional page at the beginning of the |code_range_|.
1511 : // Summarizing the above, the following conditions hold:
1512 : // 1) |heap_reservation_| >= |code_range_|
1513 : // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
1514 : // 3) |heap_reservation_| is AllocatePageSize()-aligned
1515 : // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
1516 : // 5) |code_range_| is CommitPageSize()-aligned
1517 : std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
1518 :
1519 : // Maximum space size in bytes.
1520 : size_t capacity_;
1521 :
1522 : // Allocated space size in bytes.
1523 : std::atomic<size_t> size_;
1524 : // Allocated executable space size in bytes.
1525 : std::atomic<size_t> size_executable_;
1526 :
1527 : // We keep the lowest and highest addresses allocated as a quick way
1528 : // of determining that pointers are outside the heap. The estimate is
1529 : // conservative, i.e. not all addresses in 'allocated' space are allocated
1530 : // to our heap. The range is [lowest, highest[, inclusive on the low end
1531 : // and exclusive on the high end.
1532 : std::atomic<Address> lowest_ever_allocated_;
1533 : std::atomic<Address> highest_ever_allocated_;
1534 :
1535 : VirtualMemory last_chunk_;
1536 : Unmapper unmapper_;
1537 :
1538 : // Data structure to remember allocated executable memory chunks.
1539 : std::unordered_set<MemoryChunk*> executable_memory_;
1540 :
1541 : friend class heap::TestCodePageAllocatorScope;
1542 :
1543 : DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1544 : };
1545 :
1546 : extern template Page*
1547 : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1548 : size_t size, PagedSpace* owner, Executability executable);
1549 : extern template Page*
1550 : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1551 : size_t size, SemiSpace* owner, Executability executable);
1552 : extern template Page*
1553 : MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1554 : size_t size, SemiSpace* owner, Executability executable);
1555 :
1556 : // -----------------------------------------------------------------------------
1557 : // Interface for heap object iterator to be implemented by all object space
1558 : // object iterators.
1559 : //
1560 : // NOTE: The space-specific object iterators also implement their own next()
1561 : // method, which is used to avoid virtual function calls when
1562 : // iterating a specific space.
1563 :
1564 61129 : class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
1565 : public:
1566 61123 : virtual ~ObjectIterator() = default;
1567 : virtual HeapObject Next() = 0;
1568 : };
1569 :
1570 : template <class PAGE_TYPE>
1571 : class PageIteratorImpl
1572 : : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1573 : public:
1574 265869 : explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
1575 : PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1576 : PAGE_TYPE* operator*() { return p_; }
1577 : bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1578 81934 : return rhs.p_ == p_;
1579 : }
1580 : bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1581 1089773 : return rhs.p_ != p_;
1582 : }
1583 : inline PageIteratorImpl<PAGE_TYPE>& operator++();
1584 : inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1585 :
1586 : private:
1587 : PAGE_TYPE* p_;
1588 : };
1589 :
1590 : typedef PageIteratorImpl<Page> PageIterator;
1591 : typedef PageIteratorImpl<LargePage> LargePageIterator;
1592 :
1593 : class PageRange {
1594 : public:
1595 : typedef PageIterator iterator;
1596 30789 : PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
1597 : explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
1598 : inline PageRange(Address start, Address limit);
1599 :
1600 : iterator begin() { return iterator(begin_); }
1601 : iterator end() { return iterator(end_); }
1602 :
1603 : private:
1604 : Page* begin_;
1605 : Page* end_;
1606 : };
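 : // Usage sketch (illustrative addition, not part of the original header): a
 : // PageRange can be used with a range-based for loop, e.g. to visit exactly
 : // one page:
 : //   for (Page* p : PageRange(page)) {
 : //     // |p| is |page|; iteration stops at |page->next_page()|.
 : //   }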
1607 :
1608 : // -----------------------------------------------------------------------------
1609 : // Heap object iterator in new/old/map spaces.
1610 : //
1611 : // A HeapObjectIterator iterates objects from the bottom of the given space
1612 : // to its top or from the bottom of the given page to its top.
1613 : //
1614 : // If objects are allocated in the page during iteration the iterator may
1615 : // or may not iterate over those objects. The caller must create a new
1616 : // iterator in order to be sure to visit these new objects.
1617 61123 : class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
1618 : public:
1619 : // Creates a new object iterator in a given space.
1620 : explicit HeapObjectIterator(PagedSpace* space);
1621 : explicit HeapObjectIterator(Page* page);
1622 :
1623 : // Advance to the next object, skipping free spaces and other fillers and
1624 : // skipping the special garbage section of which there is one per space.
1625 : // Returns nullptr when the iteration has ended.
1626 : inline HeapObject Next() override;
1627 :
1628 : private:
1629 : // Fast (inlined) path of next().
1630 : inline HeapObject FromCurrentPage();
1631 :
1632 : // Slow path of next(), goes into the next page. Returns false if the
1633 : // iteration has ended.
1634 : bool AdvanceToNextPage();
1635 :
1636 : Address cur_addr_; // Current iteration point.
1637 : Address cur_end_; // End iteration point.
1638 : PagedSpace* space_;
1639 : PageRange page_range_;
1640 : PageRange::iterator current_page_;
1641 : };
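 : // Usage sketch (illustrative addition, not part of the original header):
 : //   HeapObjectIterator it(space);  // |space| is assumed to be a PagedSpace*.
 : //   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
 : //     // ... visit |obj| ...
 : //   }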
1642 :
1643 :
1644 : // -----------------------------------------------------------------------------
1645 : // A space has a circular list of pages. The next page can be accessed via
1646 : // a Page::next_page() call.
1647 :
1648 : // An abstraction of allocation and relocation pointers in a page-structured
1649 : // space.
1650 : class LinearAllocationArea {
1651 : public:
1652 729652 : LinearAllocationArea() : top_(kNullAddress), limit_(kNullAddress) {}
1653 386626 : LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}
1654 :
1655 : void Reset(Address top, Address limit) {
1656 : set_top(top);
1657 : set_limit(limit);
1658 : }
1659 :
1660 : V8_INLINE void set_top(Address top) {
1661 : SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
1662 524078833 : top_ = top;
1663 : }
1664 :
1665 : V8_INLINE Address top() const {
1666 : SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
1667 0 : return top_;
1668 : }
1669 :
1670 : Address* top_address() { return &top_; }
1671 :
1672 3832353 : V8_INLINE void set_limit(Address limit) { limit_ = limit; }
1673 :
1674 0 : V8_INLINE Address limit() const { return limit_; }
1675 :
1676 : Address* limit_address() { return &limit_; }
1677 :
1678 : #ifdef DEBUG
1679 : bool VerifyPagedAllocation() {
1680 : return (Page::FromAllocationAreaAddress(top_) ==
1681 : Page::FromAllocationAreaAddress(limit_)) &&
1682 : (top_ <= limit_);
1683 : }
1684 : #endif
1685 :
1686 : private:
1687 : // Current allocation top.
1688 : Address top_;
1689 : // Current allocation limit.
1690 : Address limit_;
1691 : };
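 : // Bump-pointer sketch (illustrative addition, not part of the original
 : // header), assuming |info| was Reset() to a valid [top, limit) range and
 : // |size| is suitably aligned:
 : //   Address result = info.top();
 : //   if (result + size <= info.limit()) {
 : //     info.set_top(result + size);
 : //     // |result| now refers to |size| freshly reserved bytes.
 : //   }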
1692 :
1693 :
1694 : // An abstraction of the accounting statistics of a page-structured space.
1695 : //
1696 : // The stats are only set by functions that ensure they stay balanced. These
1697 : // functions increase or decrease one of the non-capacity stats in conjunction
1698 : // with capacity, or else they always balance increases and decreases to the
1699 : // non-capacity stats.
1700 : class AllocationStats {
1701 : public:
1702 : AllocationStats() { Clear(); }
1703 :
1704 : // Zero out all the allocation statistics (i.e., no capacity).
1705 : void Clear() {
1706 : capacity_ = 0;
1707 1423185 : max_capacity_ = 0;
1708 : ClearSize();
1709 : }
1710 :
1711 : void ClearSize() {
1712 1646715 : size_ = 0;
1713 : #ifdef DEBUG
1714 : allocated_on_page_.clear();
1715 : #endif
1716 : }
1717 :
1718 : // Accessors for the allocation statistics.
1719 : size_t Capacity() { return capacity_; }
1720 : size_t MaxCapacity() { return max_capacity_; }
1721 : size_t Size() { return size_; }
1722 : #ifdef DEBUG
1723 : size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
1724 : #endif
1725 :
1726 : void IncreaseAllocatedBytes(size_t bytes, Page* page) {
1727 : DCHECK_GE(size_ + bytes, size_);
1728 2283602 : size_ += bytes;
1729 : #ifdef DEBUG
1730 : allocated_on_page_[page] += bytes;
1731 : #endif
1732 : }
1733 :
1734 : void DecreaseAllocatedBytes(size_t bytes, Page* page) {
1735 : DCHECK_GE(size_, bytes);
1736 1606703 : size_ -= bytes;
1737 : #ifdef DEBUG
1738 : DCHECK_GE(allocated_on_page_[page], bytes);
1739 : allocated_on_page_[page] -= bytes;
1740 : #endif
1741 : }
1742 :
1743 : void DecreaseCapacity(size_t bytes) {
1744 : DCHECK_GE(capacity_, bytes);
1745 : DCHECK_GE(capacity_ - bytes, size_);
1746 : capacity_ -= bytes;
1747 : }
1748 :
1749 573337 : void IncreaseCapacity(size_t bytes) {
1750 : DCHECK_GE(capacity_ + bytes, capacity_);
1751 : capacity_ += bytes;
1752 573337 : if (capacity_ > max_capacity_) {
1753 504178 : max_capacity_ = capacity_;
1754 : }
1755 573337 : }
1756 :
1757 : private:
1758 : // |capacity_|: The number of object-area bytes (i.e., not including page
1759 : // bookkeeping structures) currently in the space.
1760 : // During evacuation capacity of the main spaces is accessed from multiple
1761 : // threads to check the old generation hard limit.
1762 : std::atomic<size_t> capacity_;
1763 :
1764 : // |max_capacity_|: The maximum capacity ever observed.
1765 : size_t max_capacity_;
1766 :
1767 : // |size_|: The number of allocated bytes.
1768 : size_t size_;
1769 :
1770 : #ifdef DEBUG
1771 : std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
1772 : #endif
1773 : };
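 : // Accounting sketch (illustrative addition, not part of the original
 : // header): updates stay balanced by pairing capacity and size changes, e.g.
 : // when a page with |bytes| of usable area is added and later removed:
 : //   stats.IncreaseCapacity(bytes);
 : //   stats.IncreaseAllocatedBytes(bytes, page);
 : //   ...
 : //   stats.DecreaseAllocatedBytes(bytes, page);
 : //   stats.DecreaseCapacity(bytes);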
1774 :
1775 : // A free list maintaining free blocks of memory. The free list is organized in
1776 : // a way to encourage objects allocated around the same time to be near each
1777 : // other. The normal way to allocate is intended to be by bumping a 'top'
1778 : // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1779 : // find a new space to allocate from. This is done with the free list, which is
1780 : // divided up into rough categories to cut down on waste. Having finer
1781 : // categories would scatter allocation more.
1782 :
1783 : // The free list is organized in categories as follows:
1784 : // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1785 : // allocation, when categories >= small do not have entries anymore.
1786 : // 11-31 words (tiny): The tiny blocks are only used for allocation, when
1787 : // categories >= small do not have entries anymore.
1788 : // 32-255 words (small): Used for allocating free space between 1-31 words in
1789 : // size.
1790 : // 256-2047 words (medium): Used for allocating free space between 32-255 words
1791 : // in size.
1792 : // 2048-16383 words (large): Used for allocating free space between 256-2047
1793 : // words in size.
1794 : // At least 16384 words (huge): This list is for objects of 2048 words or
1795 : // larger. Empty pages are also added to this list.
1796 : class V8_EXPORT_PRIVATE FreeList {
1797 : public:
1798 : // This method returns how much memory can be allocated after freeing
1799 : // maximum_freed memory.
1800 : static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
1801 478276 : if (maximum_freed <= kTiniestListMax) {
1802 : // Since we are not iterating over all list entries, we cannot guarantee
1803 : // that we can find the maximum freed block in that free list.
1804 : return 0;
1805 371683 : } else if (maximum_freed <= kTinyListMax) {
1806 : return kTinyAllocationMax;
1807 352607 : } else if (maximum_freed <= kSmallListMax) {
1808 : return kSmallAllocationMax;
1809 315101 : } else if (maximum_freed <= kMediumListMax) {
1810 : return kMediumAllocationMax;
1811 240012 : } else if (maximum_freed <= kLargeListMax) {
1812 : return kLargeAllocationMax;
1813 : }
1814 : return maximum_freed;
1815 : }
1816 :
1817 : static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1818 21702169 : if (size_in_bytes <= kTiniestListMax) {
1819 : return kTiniest;
1820 10541887 : } else if (size_in_bytes <= kTinyListMax) {
1821 : return kTiny;
1822 4420318 : } else if (size_in_bytes <= kSmallListMax) {
1823 : return kSmall;
1824 1782432 : } else if (size_in_bytes <= kMediumListMax) {
1825 : return kMedium;
1826 1349265 : } else if (size_in_bytes <= kLargeListMax) {
1827 : return kLarge;
1828 : }
1829 : return kHuge;
1830 : }
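 : // Illustrative examples (not part of the original header), following the
 : // category bounds defined further below:
 : //   SelectFreeListCategoryType(8 * kTaggedSize)     == kTiniest
 : //   SelectFreeListCategoryType(0x100 * kTaggedSize) == kMedium
 : //   GuaranteedAllocatable(0x100 * kTaggedSize)      == kMediumAllocationMax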
1831 :
1832 : FreeList();
1833 :
1834 : // Adds a node on the free list. The block of size {size_in_bytes} starting
1835 : // at {start} is placed on the free list. The return value is the number of
1836 : // bytes that were not added to the free list, because they freed memory block
1837 : // was too small. Bookkeeping information will be written to the block, i.e.,
1838 : // its contents will be destroyed. The start address should be word aligned,
1839 : // and the size should be a non-zero multiple of the word size.
1840 : size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
1841 :
1842 : // Allocates a free space node from the free list of at least size_in_bytes
1843 : // bytes. Returns the actual node size in node_size which can be bigger than
1844 : // size_in_bytes. This method returns null if the allocation request cannot be
1845 : // handled by the free list.
1846 : V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
1847 : size_t* node_size);
1848 :
1849 : // Clear the free list.
1850 : void Reset();
1851 :
1852 921475 : void ResetStats() {
1853 : wasted_bytes_ = 0;
1854 : ForAllFreeListCategories(
1855 223530 : [](FreeListCategory* category) { category->ResetStats(); });
1856 921475 : }
1857 :
1858 : // Return the number of bytes available on the free list.
1859 : size_t Available() {
1860 : size_t available = 0;
1861 1800182 : ForAllFreeListCategories([&available](FreeListCategory* category) {
1862 1800182 : available += category->available();
1863 : });
1864 : return available;
1865 : }
1866 :
1867 : bool IsEmpty() {
1868 : bool empty = true;
1869 : ForAllFreeListCategories([&empty](FreeListCategory* category) {
1870 : if (!category->is_empty()) empty = false;
1871 : });
1872 : return empty;
1873 : }
1874 :
1875 : // Used after booting the VM.
1876 : void RepairLists(Heap* heap);
1877 :
1878 : size_t EvictFreeListItems(Page* page);
1879 : bool ContainsPageFreeListItems(Page* page);
1880 :
1881 : size_t wasted_bytes() { return wasted_bytes_; }
1882 :
1883 : template <typename Callback>
1884 : void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1885 15381492 : FreeListCategory* current = categories_[type];
1886 18276282 : while (current != nullptr) {
1887 : FreeListCategory* next = current->next();
1888 : callback(current);
1889 : current = next;
1890 : }
1891 : }
1892 :
1893 : template <typename Callback>
1894 284523 : void ForAllFreeListCategories(Callback callback) {
1895 15442485 : for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1896 15381492 : ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1897 : }
1898 284523 : }
1899 :
1900 : bool AddCategory(FreeListCategory* category);
1901 : void RemoveCategory(FreeListCategory* category);
1902 : void PrintCategories(FreeListCategoryType type);
1903 :
1904 : // Returns a page containing an entry for a given type, or nullptr otherwise.
1905 : inline Page* GetPageForCategoryType(FreeListCategoryType type);
1906 :
1907 : #ifdef DEBUG
1908 : size_t SumFreeLists();
1909 : bool IsVeryLong();
1910 : #endif
1911 :
1912 : private:
1913 : class FreeListCategoryIterator {
1914 : public:
1915 : FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1916 4280872 : : current_(free_list->categories_[type]) {}
1917 :
1918 : bool HasNext() { return current_ != nullptr; }
1919 :
1920 : FreeListCategory* Next() {
1921 : DCHECK(HasNext());
1922 : FreeListCategory* tmp = current_;
1923 1274999 : current_ = current_->next();
1924 : return tmp;
1925 : }
1926 :
1927 : private:
1928 : FreeListCategory* current_;
1929 : };
1930 :
1931 : // The size range of blocks, in bytes.
1932 : static const size_t kMinBlockSize = 3 * kTaggedSize;
1933 :
1934 : // This is a conservative upper bound. The actual maximum block size takes
1935 : // padding and alignment of data and code pages into account.
1936 : static const size_t kMaxBlockSize = Page::kPageSize;
1937 :
1938 : static const size_t kTiniestListMax = 0xa * kTaggedSize;
1939 : static const size_t kTinyListMax = 0x1f * kTaggedSize;
1940 : static const size_t kSmallListMax = 0xff * kTaggedSize;
1941 : static const size_t kMediumListMax = 0x7ff * kTaggedSize;
1942 : static const size_t kLargeListMax = 0x3fff * kTaggedSize;
1943 : static const size_t kTinyAllocationMax = kTiniestListMax;
1944 : static const size_t kSmallAllocationMax = kTinyListMax;
1945 : static const size_t kMediumAllocationMax = kSmallListMax;
1946 : static const size_t kLargeAllocationMax = kMediumListMax;
1947 :
1948 : // Walks all available categories for a given |type| and tries to retrieve
1949 : // a node. Returns nullptr if the category is empty.
1950 : FreeSpace FindNodeIn(FreeListCategoryType type, size_t minimum_size,
1951 : size_t* node_size);
1952 :
1953 : // Tries to retrieve a node from the first category in a given |type|.
1954 : // Returns nullptr if the category is empty or the top entry is smaller
1955 : // than minimum_size.
1956 : FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
1957 : size_t* node_size);
1958 :
1959 : // Searches a given |type| for a node of at least |minimum_size|.
1960 : FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
1961 : size_t minimum_size);
1962 :
1963 : // The tiny categories are not used for fast allocation.
1964 : FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1965 : size_t size_in_bytes) {
1966 1796864 : if (size_in_bytes <= kSmallAllocationMax) {
1967 : return kSmall;
1968 590842 : } else if (size_in_bytes <= kMediumAllocationMax) {
1969 : return kMedium;
1970 481904 : } else if (size_in_bytes <= kLargeAllocationMax) {
1971 : return kLarge;
1972 : }
1973 : return kHuge;
1974 : }
1975 :
1976 : FreeListCategory* top(FreeListCategoryType type) const {
1977 59491 : return categories_[type];
1978 : }
1979 :
1980 : std::atomic<size_t> wasted_bytes_;
1981 : FreeListCategory* categories_[kNumberOfCategories];
1982 :
1983 : friend class FreeListCategory;
1984 : };
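 : // Usage sketch (illustrative addition, not part of the original header):
 : //   size_t wasted = free_list->Free(start, size_in_bytes, kLinkCategory);
 : //   size_t node_size = 0;
 : //   FreeSpace node = free_list->Allocate(size_in_bytes, &node_size);
 : //   // On success |node_size| >= size_in_bytes; otherwise |node| is null.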
1985 :
1986 : // LocalAllocationBuffer represents a linear allocation area that is created
1987 : // from a given {AllocationResult} and can be used to allocate memory without
1988 : // synchronization.
1989 : //
1990 : // The buffer is properly closed upon destruction and reassignment.
1991 : // Example:
1992 : // {
1993 : // AllocationResult result = ...;
1994 : // LocalAllocationBuffer a(heap, result, size);
1995 : // LocalAllocationBuffer b = a;
1996 : // CHECK(!a.IsValid());
1997 : // CHECK(b.IsValid());
1998 : // // {a} is invalid now and cannot be used for further allocations.
1999 : // }
2000 : // // Since {b} went out of scope, the LAB is closed, resulting in creating a
2001 : // // filler object for the remaining area.
2002 : class LocalAllocationBuffer {
2003 : public:
2004 : // Indicates that a buffer cannot be used for allocations anymore. Can result
2005 : // from either reassigning a buffer, or trying to construct it from an
2006 : // invalid {AllocationResult}.
2007 : static LocalAllocationBuffer InvalidBuffer() {
2008 : return LocalAllocationBuffer(
2009 192594 : nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
2010 : }
2011 :
2012 : // Creates a new LAB from a given {AllocationResult}. Results in
2013 : // InvalidBuffer if the result indicates a retry.
2014 : static inline LocalAllocationBuffer FromResult(Heap* heap,
2015 : AllocationResult result,
2016 : intptr_t size);
2017 :
2018 580812 : ~LocalAllocationBuffer() { Close(); }
2019 :
2020 : // Convert to C++11 move-semantics once allowed by the style guide.
2021 : LocalAllocationBuffer(const LocalAllocationBuffer& other) V8_NOEXCEPT;
2022 : LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other)
2023 : V8_NOEXCEPT;
2024 :
2025 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
2026 : int size_in_bytes, AllocationAlignment alignment);
2027 :
2028 90585793 : inline bool IsValid() { return allocation_info_.top() != kNullAddress; }
2029 :
2030 : // Try to merge LABs, which is only possible when they are adjacent in memory.
2031 : // Returns true if the merge was successful, false otherwise.
2032 : inline bool TryMerge(LocalAllocationBuffer* other);
2033 :
2034 : inline bool TryFreeLast(HeapObject object, int object_size);
2035 :
2036 : // Close a LAB, effectively invalidating it. Returns the unused area.
2037 : LinearAllocationArea Close();
2038 :
2039 : private:
2040 : LocalAllocationBuffer(Heap* heap,
2041 : LinearAllocationArea allocation_info) V8_NOEXCEPT;
2042 :
2043 : Heap* heap_;
2044 : LinearAllocationArea allocation_info_;
2045 : };
2046 :
2047 535394 : class SpaceWithLinearArea : public Space {
2048 : public:
2049 : SpaceWithLinearArea(Heap* heap, AllocationSpace id)
2050 1070938 : : Space(heap, id), top_on_previous_step_(0) {
2051 : allocation_info_.Reset(kNullAddress, kNullAddress);
2052 : }
2053 :
2054 : virtual bool SupportsInlineAllocation() = 0;
2055 :
2056 : // Returns the allocation pointer in this space.
2057 362660910 : Address top() { return allocation_info_.top(); }
2058 68022736 : Address limit() { return allocation_info_.limit(); }
2059 :
2060 : // The allocation top address.
2061 : Address* allocation_top_address() { return allocation_info_.top_address(); }
2062 :
2063 : // The allocation limit address.
2064 : Address* allocation_limit_address() {
2065 : return allocation_info_.limit_address();
2066 : }
2067 :
2068 : V8_EXPORT_PRIVATE void AddAllocationObserver(
2069 : AllocationObserver* observer) override;
2070 : V8_EXPORT_PRIVATE void RemoveAllocationObserver(
2071 : AllocationObserver* observer) override;
2072 : V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
2073 : V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
2074 :
2075 : // When allocation observers are active we may use a lower limit to allow the
2076 : // observers to 'interrupt' earlier than the natural limit. Given a linear
2077 : // area bounded by [start, end), this function computes the limit to use to
2078 : // allow proper observation based on existing observers. min_size specifies
2079 : // the minimum size that the limited area should have.
2080 : Address ComputeLimit(Address start, Address end, size_t min_size);
2081 : V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
2082 : size_t min_size) = 0;
2083 :
2084 : protected:
2085 : // If we are doing inline allocation in steps, this method performs the 'step'
2086 : // operation. top is the memory address of the bump pointer at the last
2087 : // inline allocation (i.e. it determines the number of bytes actually
2088 : // allocated since the last step.) top_for_next_step is the address of the
2089 : // bump pointer where the next byte is going to be allocated from. top and
2090 : // top_for_next_step may be different when we cross a page boundary or reset
2091 : // the space.
2092 : // TODO(ofrobots): clarify the precise difference between this and
2093 : // Space::AllocationStep.
2094 : void InlineAllocationStep(Address top, Address top_for_next_step,
2095 : Address soon_object, size_t size);
2096 : V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
2097 :
2098 : // TODO(ofrobots): make these private after refactoring is complete.
2099 : LinearAllocationArea allocation_info_;
2100 : Address top_on_previous_step_;
2101 : };
2102 :
2103 : class V8_EXPORT_PRIVATE PagedSpace
2104 : : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
2105 : public:
2106 : typedef PageIterator iterator;
2107 :
2108 : static const size_t kCompactionMemoryWanted = 500 * KB;
2109 :
2110 : // Creates a space with an id.
2111 : PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
2112 :
2113 948710 : ~PagedSpace() override { TearDown(); }
2114 :
2115 : // Checks whether an object/address is in this space.
2116 : inline bool Contains(Address a);
2117 : inline bool Contains(Object o);
2118 : bool ContainsSlow(Address addr);
2119 :
2120 : // Does the space need executable memory?
2121 : Executability executable() { return executable_; }
2122 :
2123 : // Prepares for a mark-compact GC.
2124 : void PrepareForMarkCompact();
2125 :
2126 : // Current capacity without growing (Size() + Available()).
2127 : size_t Capacity() { return accounting_stats_.Capacity(); }
2128 :
2129 : // Approximate amount of physical memory committed for this space.
2130 : size_t CommittedPhysicalMemory() override;
2131 :
2132 : void ResetFreeListStatistics();
2133 :
2134 : // Sets the capacity, the available space and the wasted space to zero.
2135 : // The stats are rebuilt during sweeping by adding each page to the
2136 : // capacity and the size when it is encountered. As free spaces are
2137 : // discovered during the sweeping they are subtracted from the size and added
2138 : // to the available and wasted totals.
2139 : void ClearStats() {
2140 : accounting_stats_.ClearSize();
2141 223530 : free_list_.ResetStats();
2142 223530 : ResetFreeListStatistics();
2143 : }
2144 :
2145 : // Available bytes without growing. These are the bytes on the free list.
2146 : // The bytes in the linear allocation area are not included in this total
2147 : // because updating the stats would slow down allocation. New pages are
2148 : // immediately added to the free list so they show up here.
2149 1766338 : size_t Available() override { return free_list_.Available(); }
2150 :
2151 : // Allocated bytes in this space. Garbage bytes that were not found due to
2152 : // concurrent sweeping are counted as being allocated! The bytes in the
2153 : // current linear allocation area (between top and limit) are also counted
2154 : // here.
2155 10491366 : size_t Size() override { return accounting_stats_.Size(); }
2156 :
2157 : // As size, but the bytes in lazily swept pages are estimated and the bytes
2158 : // in the current linear allocation area are not included.
2159 : size_t SizeOfObjects() override;
2160 :
2161 : // Wasted bytes in this space. These are just the bytes that were thrown away
2162 : // due to being too small to use for allocation.
2163 1175760 : virtual size_t Waste() { return free_list_.wasted_bytes(); }
2164 :
2165 : enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2166 :
2167 : // Allocate the requested number of bytes in the space if possible, return a
2168 : // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2169 : // to be manually updated later.
2170 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
2171 : int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
2172 :
2173 : // Allocate the requested number of bytes in the space double aligned if
2174 : // possible, return a failure object if not.
2175 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
2176 : int size_in_bytes, AllocationAlignment alignment);
2177 :
2178 : // Allocate the requested number of bytes in the space and consider allocation
2179 : // alignment if needed.
2180 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
2181 : int size_in_bytes, AllocationAlignment alignment);
2182 :
2183 22129904 : size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
2184 22129904 : if (size_in_bytes == 0) return 0;
2185 : heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
2186 21760905 : ClearRecordedSlots::kNo);
2187 21743630 : if (mode == SpaceAccountingMode::kSpaceAccounted) {
2188 1450021 : return AccountedFree(start, size_in_bytes);
2189 : } else {
2190 20291470 : return UnaccountedFree(start, size_in_bytes);
2191 : }
2192 : }
2193 :
2194 : // Give a block of memory to the space's free list. It might be added to
2195 : // the free list or accounted as waste.
2196 : // If add_to_freelist is false then only the accounting stats are updated and
2197 : // no attempt to add the area to the free list is made.
2198 : size_t AccountedFree(Address start, size_t size_in_bytes) {
2199 1450023 : size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
2200 : Page* page = Page::FromAddress(start);
2201 : accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
2202 : DCHECK_GE(size_in_bytes, wasted);
2203 1450021 : return size_in_bytes - wasted;
2204 : }
2205 :
2206 : size_t UnaccountedFree(Address start, size_t size_in_bytes) {
2207 20293607 : size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2208 : DCHECK_GE(size_in_bytes, wasted);
2209 20291470 : return size_in_bytes - wasted;
2210 : }
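 : // Illustrative note (not part of the original header): callers pick the
 : // accounting mode depending on whether the freed bytes are still counted in
 : // |accounting_stats_|, e.g.
 : //   space->Free(start, size, SpaceAccountingMode::kSpaceAccounted);
 : // keeps the allocated-bytes counter balanced, while the unaccounted mode
 : // (the other SpaceAccountingMode enumerator) only returns memory to the
 : // free list.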
2211 :
2212 : inline bool TryFreeLast(HeapObject object, int object_size);
2213 :
2214 : void ResetFreeList();
2215 :
2216 : // Empty space linear allocation area, returning unused area to free list.
2217 : void FreeLinearAllocationArea();
2218 :
2219 : void MarkLinearAllocationAreaBlack();
2220 : void UnmarkLinearAllocationArea();
2221 :
2222 : void DecreaseAllocatedBytes(size_t bytes, Page* page) {
2223 : accounting_stats_.DecreaseAllocatedBytes(bytes, page);
2224 : }
2225 : void IncreaseAllocatedBytes(size_t bytes, Page* page) {
2226 : accounting_stats_.IncreaseAllocatedBytes(bytes, page);
2227 : }
2228 : void DecreaseCapacity(size_t bytes) {
2229 : accounting_stats_.DecreaseCapacity(bytes);
2230 : }
2231 : void IncreaseCapacity(size_t bytes) {
2232 573336 : accounting_stats_.IncreaseCapacity(bytes);
2233 : }
2234 :
2235 : void RefineAllocatedBytesAfterSweeping(Page* page);
2236 :
2237 : Page* InitializePage(MemoryChunk* chunk, Executability executable);
2238 :
2239 : void ReleasePage(Page* page);
2240 :
2241 : // Adds the page to this space and returns the number of bytes added to the
2242 : // free list of the space.
2243 : size_t AddPage(Page* page);
2244 : void RemovePage(Page* page);
2245 : // Remove a page if it has at least |size_in_bytes| bytes available that can
2246 : // be used for allocation.
2247 : Page* RemovePageSafe(int size_in_bytes);
2248 :
2249 : void SetReadable();
2250 : void SetReadAndExecutable();
2251 : void SetReadAndWritable();
2252 :
2253 420616 : void SetDefaultCodePermissions() {
2254 420616 : if (FLAG_jitless) {
2255 15067 : SetReadable();
2256 : } else {
2257 405549 : SetReadAndExecutable();
2258 : }
2259 420619 : }
2260 :
2261 : #ifdef VERIFY_HEAP
2262 : // Verify integrity of this space.
2263 : virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
2264 :
2265 : void VerifyLiveBytes();
2266 :
2267 : // Overridden by subclasses to verify space-specific object
2268 : // properties (e.g., only maps or free-list nodes are in map space).
2269 : virtual void VerifyObject(HeapObject obj) {}
2270 : #endif
2271 :
2272 : #ifdef DEBUG
2273 : void VerifyCountersAfterSweeping();
2274 : void VerifyCountersBeforeConcurrentSweeping();
2275 : // Print meta info and objects in this space.
2276 : void Print() override;
2277 :
2278 : // Report code object related statistics
2279 : static void ReportCodeStatistics(Isolate* isolate);
2280 : static void ResetCodeStatistics(Isolate* isolate);
2281 : #endif
2282 :
2283 : bool CanExpand(size_t size);
2284 :
2285 : // Returns the number of total pages in this space.
2286 : int CountTotalPages();
2287 :
2288 : // Return size of allocatable area on a page in this space.
2289 2487197 : inline int AreaSize() { return static_cast<int>(area_size_); }
2290 :
2291 144291364 : virtual bool is_local() { return false; }
2292 :
2293 : // Merges {other} into the current space. Note that this modifies {other},
2294 : // e.g., removes its bump pointer area and resets statistics.
2295 : void MergeCompactionSpace(CompactionSpace* other);
2296 :
2297 : // Refills the free list from the corresponding free list filled by the
2298 : // sweeper.
2299 : virtual void RefillFreeList();
2300 :
2301 : FreeList* free_list() { return &free_list_; }
2302 :
2303 : base::Mutex* mutex() { return &space_mutex_; }
2304 :
2305 : inline void UnlinkFreeListCategories(Page* page);
2306 : inline size_t RelinkFreeListCategories(Page* page);
2307 :
2308 : Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
2309 :
2310 : iterator begin() { return iterator(first_page()); }
2311 : iterator end() { return iterator(nullptr); }
2312 :
2313 : // Shrink immortal immovable pages of the space to be exactly the size needed
2314 : // using the high water mark.
2315 : void ShrinkImmortalImmovablePages();
2316 :
2317 : size_t ShrinkPageToHighWaterMark(Page* page);
2318 :
2319 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2320 :
2321 : void SetLinearAllocationArea(Address top, Address limit);
2322 :
2323 : private:
2324 : // Set space linear allocation area.
2325 : void SetTopAndLimit(Address top, Address limit) {
2326 : DCHECK(top == limit ||
2327 : Page::FromAddress(top) == Page::FromAddress(limit - 1));
2328 2386362 : MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2329 : allocation_info_.Reset(top, limit);
2330 : }
2331 : void DecreaseLimit(Address new_limit);
2332 : void UpdateInlineAllocationLimit(size_t min_size) override;
2333 22914299 : bool SupportsInlineAllocation() override {
2334 22914299 : return identity() == OLD_SPACE && !is_local();
2335 : }
2336 :
2337 : protected:
2338 : // PagedSpaces that should be included in snapshots have different, i.e.,
2339 : // smaller, initial pages.
2340 0 : virtual bool snapshotable() { return true; }
2341 :
2342 : bool HasPages() { return first_page() != nullptr; }
2343 :
2344 : // Cleans up the space, frees all pages in this space except those belonging
2345 : // to the initial chunk, uncommits addresses in the initial chunk.
2346 : void TearDown();
2347 :
2348 : // Expands the space by allocating a fixed number of pages. Returns false if
2349 : // it cannot allocate the requested number of pages from the OS, or if the
2350 : // hard heap size limit has been hit.
2351 : bool Expand();
2352 :
2353 : // Sets up a linear allocation area that fits the given number of bytes.
2354 : // Returns false if there is not enough space and the caller has to retry
2355 : // after collecting garbage.
2356 : inline bool EnsureLinearAllocationArea(int size_in_bytes);
2357 : // Allocates an object from the linear allocation area. Assumes that the
2358 : // linear allocation area is large enough to fit the object.
2359 : inline HeapObject AllocateLinearly(int size_in_bytes);
2360 : // Tries to allocate an aligned object from the linear allocation area.
2361 : // Returns nullptr if the linear allocation area does not fit the object.
2362 : // Otherwise, returns the object pointer and writes the allocation size
2363 : // (object size + alignment filler size) to the size_in_bytes.
2364 : inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
2365 : AllocationAlignment alignment);
2366 :
2367 : V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
2368 : size_t size_in_bytes);
2369 :
2370 : // If sweeping is still in progress try to sweep unswept pages. If that is
2371 : // not successful, wait for the sweeper threads and retry free-list
2372 : // allocation. Returns false if there is not enough space and the caller
2373 : // has to retry after collecting garbage.
2374 : V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
2375 :
2376 : // Slow path of AllocateRaw. This function is space-dependent. Returns false
2377 : // if there is not enough space and the caller has to retry after
2378 : // collecting garbage.
2379 : V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
2380 : int size_in_bytes);
2381 :
2382 : // Implementation of SlowAllocateRaw. Returns false if there is not enough
2383 : // space and the caller has to retry after collecting garbage.
2384 : V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
2385 : int size_in_bytes);
2386 :
2387 : Executability executable_;
2388 :
2389 : size_t area_size_;
2390 :
2391 : // Accounting information for this space.
2392 : AllocationStats accounting_stats_;
2393 :
2394 : // The space's free list.
2395 : FreeList free_list_;
2396 :
2397 : // Mutex guarding any concurrent access to the space.
2398 : base::Mutex space_mutex_;
2399 :
2400 : friend class IncrementalMarking;
2401 : friend class MarkCompactCollector;
2402 :
2403 : // Used in cctest.
2404 : friend class heap::HeapTester;
2405 : };
2406 :
2407 : enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2408 :
2409 : // -----------------------------------------------------------------------------
2410 : // SemiSpace in young generation
2411 : //
2412 : // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2413 : // The mark-compact collector uses the memory of the first page in the from
2414 : // space as a marking stack when tracing live objects.
2415 61039 : class SemiSpace : public Space {
2416 : public:
2417 : typedef PageIterator iterator;
2418 :
2419 : static void Swap(SemiSpace* from, SemiSpace* to);
2420 :
2421 : SemiSpace(Heap* heap, SemiSpaceId semispace)
2422 : : Space(heap, NEW_SPACE),
2423 : current_capacity_(0),
2424 : maximum_capacity_(0),
2425 : minimum_capacity_(0),
2426 : age_mark_(kNullAddress),
2427 : committed_(false),
2428 : id_(semispace),
2429 : current_page_(nullptr),
2430 61054 : pages_used_(0) {}
2431 :
2432 : inline bool Contains(HeapObject o);
2433 : inline bool Contains(Object o);
2434 : inline bool ContainsSlow(Address a);
2435 :
2436 : void SetUp(size_t initial_capacity, size_t maximum_capacity);
2437 : void TearDown();
2438 :
2439 : bool Commit();
2440 : bool Uncommit();
2441 : bool is_committed() { return committed_; }
2442 :
2443 : // Grow the semispace to the new capacity. The new capacity requested must
2444 : // be larger than the current capacity and less than the maximum capacity.
2445 : bool GrowTo(size_t new_capacity);
2446 :
2447 : // Shrinks the semispace to the new capacity. The new capacity requested
2448 : // must be more than the amount of used memory in the semispace and less
2449 : // than the current capacity.
2450 : bool ShrinkTo(size_t new_capacity);
2451 :
2452 : bool EnsureCurrentCapacity();
2453 :
2454 : Address space_end() { return memory_chunk_list_.back()->area_end(); }
2455 :
2456 : // Returns the start address of the first page of the space.
2457 : Address space_start() {
2458 : DCHECK_NE(memory_chunk_list_.front(), nullptr);
2459 254605 : return memory_chunk_list_.front()->area_start();
2460 : }
2461 :
2462 : Page* current_page() { return current_page_; }
2463 : int pages_used() { return pages_used_; }
2464 :
2465 : // Returns the start address of the current page of the space.
2466 1290704 : Address page_low() { return current_page_->area_start(); }
2467 :
2468 : // Returns one past the end address of the current page of the space.
2469 1333021 : Address page_high() { return current_page_->area_end(); }
2470 :
2471 83014 : bool AdvancePage() {
2472 83014 : Page* next_page = current_page_->next_page();
2473 : // We cannot expand if we reached the maximum number of pages already. Note
2474 : // that we need to account for the next page already for this check as we
2475 : // could potentially fill the whole page after advancing.
2476 166028 : const bool reached_max_pages = (pages_used_ + 1) == max_pages();
2477 83014 : if (next_page == nullptr || reached_max_pages) {
2478 : return false;
2479 : }
2480 62911 : current_page_ = next_page;
2481 62911 : pages_used_++;
2482 : return true;
2483 : }
2484 :
2485 : // Resets the space to using the first page.
2486 : void Reset();
2487 :
2488 : void RemovePage(Page* page);
2489 : void PrependPage(Page* page);
2490 :
2491 : Page* InitializePage(MemoryChunk* chunk, Executability executable);
2492 :
2493 : // Age mark accessors.
2494 : Address age_mark() { return age_mark_; }
2495 : void set_age_mark(Address mark);
2496 :
2497 : // Returns the current capacity of the semispace.
2498 : size_t current_capacity() { return current_capacity_; }
2499 :
2500 : // Returns the maximum capacity of the semispace.
2501 : size_t maximum_capacity() { return maximum_capacity_; }
2502 :
2503 : // Returns the initial capacity of the semispace.
2504 : size_t minimum_capacity() { return minimum_capacity_; }
2505 :
2506 : SemiSpaceId id() { return id_; }
2507 :
2508 : // Approximate amount of physical memory committed for this space.
2509 : size_t CommittedPhysicalMemory() override;
2510 :
2511 : // If we don't have these here then SemiSpace will be abstract. However,
2512 : // they should never be called:
2513 :
2514 0 : size_t Size() override {
2515 0 : UNREACHABLE();
2516 : }
2517 :
2518 0 : size_t SizeOfObjects() override { return Size(); }
2519 :
2520 0 : size_t Available() override {
2521 0 : UNREACHABLE();
2522 : }
2523 :
2524 : Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
2525 : Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
2526 :
2527 : iterator begin() { return iterator(first_page()); }
2528 : iterator end() { return iterator(nullptr); }
2529 :
2530 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2531 :
2532 : #ifdef DEBUG
2533 : void Print() override;
2534 : // Validate a range of addresses in a SemiSpace.
2535 : // The "from" address must be on a page prior to the "to" address,
2536 : // in the linked page order, or it must be earlier on the same page.
2537 : static void AssertValidRange(Address from, Address to);
2538 : #else
2539 : // Do nothing.
2540 : inline static void AssertValidRange(Address from, Address to) {}
2541 : #endif
2542 :
2543 : #ifdef VERIFY_HEAP
2544 : virtual void Verify();
2545 : #endif
2546 :
2547 : private:
2548 : void RewindPages(int num_pages);
2549 :
2550 : inline int max_pages() {
2551 83014 : return static_cast<int>(current_capacity_ / Page::kPageSize);
2552 : }
2553 :
2554 : // Copies the flags into the masked positions on all pages in the space.
2555 : void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2556 :
2557 : // The currently committed space capacity.
2558 : size_t current_capacity_;
2559 :
2560 : // The maximum capacity that can be used by this space. A space cannot grow
2561 : // beyond that size.
2562 : size_t maximum_capacity_;
2563 :
2564 : // The minimum capacity for the space. A space cannot shrink below this size.
2565 : size_t minimum_capacity_;
2566 :
2567 : // Used to govern object promotion during mark-compact collection.
2568 : Address age_mark_;
2569 :
2570 : bool committed_;
2571 : SemiSpaceId id_;
2572 :
2573 : Page* current_page_;
2574 :
2575 : int pages_used_;
2576 :
2577 : friend class NewSpace;
2578 : friend class SemiSpaceIterator;
2579 : };
2580 :
2581 :
2582 : // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2583 : // semispace of the heap's new space. It iterates over the objects in the
2584 : // semispace from a given start address (defaulting to the bottom of the
2585 : // semispace) to the top of the semispace. New objects allocated after the
2586 : // iterator is created are not iterated.
2587 15170 : class SemiSpaceIterator : public ObjectIterator {
2588 : public:
2589 : // Create an iterator over the allocated objects in the given to-space.
2590 : explicit SemiSpaceIterator(NewSpace* space);
2591 :
2592 : inline HeapObject Next() override;
2593 :
2594 : private:
2595 : void Initialize(Address start, Address end);
2596 :
2597 : // The current iteration point.
2598 : Address current_;
2599 : // The end of iteration.
2600 : Address limit_;
2601 : };
2602 :
2603 : // -----------------------------------------------------------------------------
2604 : // The young generation space.
2605 : //
2606 : // The new space consists of a contiguous pair of semispaces. It simply
2607 : // forwards most functions to the appropriate semispace.
2608 :
2609 : class NewSpace : public SpaceWithLinearArea {
2610 : public:
2611 : typedef PageIterator iterator;
2612 :
2613 : NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
2614 : size_t initial_semispace_capacity, size_t max_semispace_capacity);
2615 :
2616 305185 : ~NewSpace() override { TearDown(); }
2617 :
2618 : inline bool ContainsSlow(Address a);
2619 : inline bool Contains(Object o);
2620 : inline bool Contains(HeapObject o);
2621 :
2622 : // Tears down the space. Heap memory was not allocated by the space, so it
2623 : // is not deallocated here.
2624 : void TearDown();
2625 :
2626 : // Flip the pair of spaces.
2627 : void Flip();
2628 :
2629 : // Grow the capacity of the semispaces. Assumes that they are not at
2630 : // their maximum capacity.
2631 : void Grow();
2632 :
2633 : // Shrink the capacity of the semispaces.
2634 : void Shrink();
2635 :
2636 : // Return the allocated bytes in the active semispace.
2637 1068739 : size_t Size() override {
2638 : DCHECK_GE(top(), to_space_.page_low());
2639 2137478 : return to_space_.pages_used() *
2640 0 : MemoryChunkLayout::AllocatableMemoryInDataPage() +
2641 1068739 : static_cast<size_t>(top() - to_space_.page_low());
2642 : }
2643 :
2644 745985 : size_t SizeOfObjects() override { return Size(); }
2645 :
2646 : // Return the allocatable capacity of a semispace.
2647 : size_t Capacity() {
2648 : SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2649 647282 : return (to_space_.current_capacity() / Page::kPageSize) *
2650 647282 : MemoryChunkLayout::AllocatableMemoryInDataPage();
2651 : }
2652 :
2653 : // Return the current size of a semispace, allocatable and non-allocatable
2654 : // memory.
2655 : size_t TotalCapacity() {
2656 : DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2657 367146 : return to_space_.current_capacity();
2658 : }
2659 :
2660 : // Committed memory for NewSpace is the committed memory of both semi-spaces
2661 : // combined.
2662 659678 : size_t CommittedMemory() override {
2663 659678 : return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2664 : }
2665 :
2666 0 : size_t MaximumCommittedMemory() override {
2667 : return from_space_.MaximumCommittedMemory() +
2668 0 : to_space_.MaximumCommittedMemory();
2669 : }
2670 :
2671 : // Approximate amount of physical memory committed for this space.
2672 : size_t CommittedPhysicalMemory() override;
2673 :
2674 : // Return the available bytes without growing.
2675 98326 : size_t Available() override {
2676 : DCHECK_GE(Capacity(), Size());
2677 98326 : return Capacity() - Size();
2678 : }
2679 :
2680 30 : size_t ExternalBackingStoreBytes(
2681 : ExternalBackingStoreType type) const override {
2682 : DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
2683 30 : return to_space_.ExternalBackingStoreBytes(type);
2684 : }
2685 :
2686 196616 : size_t AllocatedSinceLastGC() {
2687 196616 : const Address age_mark = to_space_.age_mark();
2688 : DCHECK_NE(age_mark, kNullAddress);
2689 : DCHECK_NE(top(), kNullAddress);
2690 : Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
2691 : Page* const last_page = Page::FromAllocationAreaAddress(top());
2692 : Page* current_page = age_mark_page;
2693 : size_t allocated = 0;
2694 196616 : if (current_page != last_page) {
2695 : DCHECK_EQ(current_page, age_mark_page);
2696 : DCHECK_GE(age_mark_page->area_end(), age_mark);
2697 49564 : allocated += age_mark_page->area_end() - age_mark;
2698 : current_page = current_page->next_page();
2699 : } else {
2700 : DCHECK_GE(top(), age_mark);
2701 147052 : return top() - age_mark;
2702 : }
2703 145993 : while (current_page != last_page) {
2704 : DCHECK_NE(current_page, age_mark_page);
2705 46865 : allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
2706 : current_page = current_page->next_page();
2707 : }
2708 : DCHECK_GE(top(), current_page->area_start());
2709 49564 : allocated += top() - current_page->area_start();
2710 : DCHECK_LE(allocated, Size());
2711 49564 : return allocated;
2712 : }
2713 :
2714 : void MovePageFromSpaceToSpace(Page* page) {
2715 : DCHECK(page->IsFromPage());
2716 1779 : from_space_.RemovePage(page);
2717 1779 : to_space_.PrependPage(page);
2718 : }
2719 :
2720 : bool Rebalance();
2721 :
2722 : // Return the maximum capacity of a semispace.
2723 : size_t MaximumCapacity() {
2724 : DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2725 296061 : return to_space_.maximum_capacity();
2726 : }
2727 :
2728 : bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2729 :
2730 : // Returns the initial capacity of a semispace.
2731 : size_t InitialTotalCapacity() {
2732 : DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2733 24484 : return to_space_.minimum_capacity();
2734 : }
2735 :
2736 1062067 : void ResetOriginalTop() {
2737 : DCHECK_GE(top(), original_top_);
2738 : DCHECK_LE(top(), original_limit_);
2739 : original_top_.store(top(), std::memory_order_release);
2740 1062067 : }
2741 :
2742 : Address original_top_acquire() {
2743 : return original_top_.load(std::memory_order_acquire);
2744 : }
2745 : Address original_limit_relaxed() {
2746 : return original_limit_.load(std::memory_order_relaxed);
2747 : }
2748 :
2749 : // Return the first allocatable address in the active semispace. This may
2750 : // be the address where the first object resides.
2751 : Address first_allocatable_address() { return to_space_.space_start(); }
2752 :
2753 : // Get the age mark of the inactive semispace.
2754 150715791 : Address age_mark() { return from_space_.age_mark(); }
2755 : // Set the age mark in the active semispace.
2756 98000 : void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2757 :
2758 : V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2759 : AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
2760 :
2761 : V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2762 : AllocateRawUnaligned(int size_in_bytes);
2763 :
2764 : V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2765 : AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
2766 :
2767 : V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
2768 : int size_in_bytes, AllocationAlignment alignment);
2769 :
2770 : // Reset the allocation pointer to the beginning of the active semispace.
2771 : void ResetLinearAllocationArea();
2772 :
2773 : // When inline allocation stepping is active, either because of incremental
2774 : // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2775 : // inline allocation every once in a while. This is done by setting
2776 : // allocation_info_.limit to be lower than the actual limit and increasing
2777 : // it in steps to guarantee that the observers are notified periodically.
2778 : void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
2779 :
2780 : inline bool ToSpaceContainsSlow(Address a);
2781 : inline bool ToSpaceContains(Object o);
2782 : inline bool FromSpaceContains(Object o);
2783 :
2784 : // Try to switch the active semispace to a new, empty page.
2785 : // Returns false if this isn't possible or reasonable (i.e., there
2786 : // are no pages, or the current page is already empty), or true
2787 : // if successful.
2788 : bool AddFreshPage();
2789 : bool AddFreshPageSynchronized();
2790 :
2791 : #ifdef VERIFY_HEAP
2792 : // Verify the active semispace.
2793 : virtual void Verify(Isolate* isolate);
2794 : #endif
2795 :
2796 : #ifdef DEBUG
2797 : // Print the active semispace.
2798 : void Print() override { to_space_.Print(); }
2799 : #endif
2800 :
2801 : // Return whether the operation succeeded.
2802 : bool CommitFromSpaceIfNeeded() {
2803 98000 : if (from_space_.is_committed()) return true;
2804 36765 : return from_space_.Commit();
2805 : }
2806 :
2807 : bool UncommitFromSpace() {
2808 24454 : if (!from_space_.is_committed()) return true;
2809 23198 : return from_space_.Uncommit();
2810 : }
2811 :
2812 0 : bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2813 :
2814 : SemiSpace* active_space() { return &to_space_; }
2815 :
2816 : Page* first_page() { return to_space_.first_page(); }
2817 : Page* last_page() { return to_space_.last_page(); }
2818 :
2819 : iterator begin() { return to_space_.begin(); }
2820 : iterator end() { return to_space_.end(); }
2821 :
2822 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2823 :
2824 : SemiSpace& from_space() { return from_space_; }
2825 : SemiSpace& to_space() { return to_space_; }
2826 :
2827 : private:
2828 : // Update linear allocation area to match the current to-space page.
2829 : void UpdateLinearAllocationArea();
2830 :
2831 : base::Mutex mutex_;
2832 :
2833 : // The top and the limit at the time of setting the linear allocation area.
2834 : // These values can be accessed by background tasks.
2835 : std::atomic<Address> original_top_;
2836 : std::atomic<Address> original_limit_;
2837 :
2838 : // The semispaces.
2839 : SemiSpace to_space_;
2840 : SemiSpace from_space_;
2841 : VirtualMemory reservation_;
2842 :
2843 : bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
2844 538395 : bool SupportsInlineAllocation() override { return true; }
2845 :
2846 : friend class SemiSpaceIterator;
2847 : };
2848 :
2849 : class PauseAllocationObserversScope {
2850 : public:
2851 : explicit PauseAllocationObserversScope(Heap* heap);
2852 : ~PauseAllocationObserversScope();
2853 :
2854 : private:
2855 : Heap* heap_;
2856 : DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
2857 : };
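// Illustrative sketch (not part of the original header): the scope above is a
// plain RAII guard, so callers bracket work that must not advance allocation
// observers. The callback-based wrapper below is hypothetical.
template <typename Callback>
void WithAllocationObserversPaused(Heap* heap, Callback work) {
  PauseAllocationObserversScope pause(heap);  // observers paused here
  work();
}  // observers resume when |pause| goes out of scope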
2858 :
2859 : // -----------------------------------------------------------------------------
2860 : // Compaction space that is used temporarily during compaction.
2861 :
2862 115106 : class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
2863 : public:
2864 : CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2865 115105 : : PagedSpace(heap, id, executable) {}
2866 :
2867 96084067 : bool is_local() override { return true; }
2868 :
2869 : protected:
2870 : // The space is temporary and not included in any snapshots.
2871 0 : bool snapshotable() override { return false; }
2872 :
2873 : V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
2874 : int size_in_bytes) override;
2875 :
2876 : V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
2877 : int size_in_bytes) override;
2878 : };
2879 :
2880 :
2881 : // A collection of |CompactionSpace|s used by a single compaction task.
2882 : class CompactionSpaceCollection : public Malloced {
2883 : public:
2884 115104 : explicit CompactionSpaceCollection(Heap* heap)
2885 : : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
2886 115104 : code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
2887 :
2888 96752260 : CompactionSpace* Get(AllocationSpace space) {
2889 96752260 : switch (space) {
2890 : case OLD_SPACE:
2891 96635238 : return &old_space_;
2892 : case CODE_SPACE:
2893 117022 : return &code_space_;
2894 : default:
2895 0 : UNREACHABLE();
2896 : }
2897 : UNREACHABLE();
2898 : }
2899 :
2900 : private:
2901 : CompactionSpace old_space_;
2902 : CompactionSpace code_space_;
2903 : };
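// Illustrative sketch (not part of the original header): each compaction task
// owns one CompactionSpaceCollection and routes evacuated objects to the
// per-task space matching their target. The helper name and the is_code flag
// are hypothetical; real evacuation goes through the PagedSpace allocation
// path of the returned space.
inline CompactionSpace* SpaceForEvacuation(CompactionSpaceCollection* spaces,
                                           bool is_code) {
  return spaces->Get(is_code ? CODE_SPACE : OLD_SPACE);
}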
2904 :
2905 : // -----------------------------------------------------------------------------
2906 : // Old generation regular object space.
2907 :
2908 122083 : class OldSpace : public PagedSpace {
2909 : public:
2910 : // Creates an old space object. The constructor does not allocate pages
2911 : // from the OS.
2912 61059 : explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
2913 :
2914 : static bool IsAtPageStart(Address addr) {
2915 : return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
2916 : MemoryChunkLayout::ObjectStartOffsetInDataPage();
2917 : }
2918 : };
2919 :
2920 : // -----------------------------------------------------------------------------
2921 : // Old generation code object space.
2922 :
2923 122068 : class CodeSpace : public PagedSpace {
2924 : public:
2925 : // Creates a code space object. The constructor does not allocate pages
2926 : // from the OS.
2927 61049 : explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
2928 : };
2929 :
2930 : // For contiguous spaces, top should be in the space (or at the end) and limit
2931 : // should be the end of the space.
2932 : #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2933 : SLOW_DCHECK((space).page_low() <= (info).top() && \
2934 : (info).top() <= (space).page_high() && \
2935 : (info).limit() <= (space).page_high())
2936 :
2937 :
2938 : // -----------------------------------------------------------------------------
2939 : // Old space for all map objects
2940 :
2941 122068 : class MapSpace : public PagedSpace {
2942 : public:
2943 : // Creates a map space object.
2944 61049 : explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}
2945 :
2946 0 : int RoundSizeDownToObjectAlignment(int size) override {
2947 : if (base::bits::IsPowerOfTwo(Map::kSize)) {
2948 : return RoundDown(size, Map::kSize);
2949 : } else {
2950 0 : return (size / Map::kSize) * Map::kSize;
2951 : }
2952 : }
2953 :
2954 : #ifdef VERIFY_HEAP
2955 : void VerifyObject(HeapObject obj) override;
2956 : #endif
2957 : };
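// Illustrative sketch (not part of the original header): both branches of
// RoundSizeDownToObjectAlignment above round down to a multiple of Map::kSize;
// the power-of-two branch is merely the cheaper bitwise form. The helper and
// the example alignment of 80 are hypothetical, chosen only to show the
// arithmetic.
constexpr int RoundDownToMultiple(int size, int alignment) {
  return (size / alignment) * alignment;
}
static_assert(RoundDownToMultiple(1000, 80) == 960,
              "e.g. 1000 bytes rounds down to 12 * 80 = 960");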
2958 :
2959 : // -----------------------------------------------------------------------------
2960 : // Read-only space for all immortal, immovable, and immutable objects.
2961 :
2962 : class ReadOnlySpace : public PagedSpace {
2963 : public:
2964 : class WritableScope {
2965 : public:
2966 : explicit WritableScope(ReadOnlySpace* space) : space_(space) {
2967 : space_->MarkAsReadWrite();
2968 : }
2969 :
2970 443 : ~WritableScope() { space_->MarkAsReadOnly(); }
2971 :
2972 : private:
2973 : ReadOnlySpace* space_;
2974 : };
2975 :
2976 : explicit ReadOnlySpace(Heap* heap);
2977 :
2978 : // TODO(v8:7464): Remove this once PagedSpace::TearDown no longer writes to
2979 : // memory_chunk_list_.
2980 244136 : ~ReadOnlySpace() override { MarkAsReadWrite(); }
2981 :
2982 : bool writable() const { return !is_marked_read_only_; }
2983 :
2984 : void ClearStringPaddingIfNeeded();
2985 : void MarkAsReadOnly();
2986 :
2987 : // During boot the free_space_map is created, and afterwards we may need
2988 : // to write it into the free list nodes that were already created.
2989 : void RepairFreeListsAfterDeserialization();
2990 :
2991 : private:
2992 : void MarkAsReadWrite();
2993 : void SetPermissionsForPages(PageAllocator::Permission access);
2994 :
2995 : bool is_marked_read_only_ = false;
2996 :
2997 : // String padding must be cleared just before serialization and therefore the
2998 : // string padding in the space will already have been cleared if the space was
2999 : // deserialized.
3000 : bool is_string_padding_cleared_;
3001 : };
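// Illustrative sketch (not part of the original header): WritableScope is an
// RAII guard, so a mutation of read-only memory is wrapped in a scope whose
// destructor restores the read-only protection. The mutation itself is
// hypothetical.
inline void MutateReadOnlySpace(ReadOnlySpace* space) {
  ReadOnlySpace::WritableScope writable(space);  // pages become read-write
  // ... patch objects in the read-only space here ...
}  // ~WritableScope() re-protects the pages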
3002 :
3003 : // -----------------------------------------------------------------------------
3004 : // Large objects (> kMaxRegularHeapObjectSize) are allocated and
3005 : // managed by the large object space.
3006 : // Large objects do not move during garbage collections.
3007 :
3008 : class LargeObjectSpace : public Space {
3009 : public:
3010 : typedef LargePageIterator iterator;
3011 :
3012 : explicit LargeObjectSpace(Heap* heap);
3013 : LargeObjectSpace(Heap* heap, AllocationSpace id);
3014 :
3015 244136 : ~LargeObjectSpace() override { TearDown(); }
3016 :
3017 : // Releases internal resources, frees objects in this space.
3018 : void TearDown();
3019 :
3020 : V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
3021 : AllocateRaw(int object_size);
3022 :
3023 : // Available bytes for objects in this space.
3024 : size_t Available() override;
3025 :
3026 2104678 : size_t Size() override { return size_; }
3027 9634379 : size_t SizeOfObjects() override { return objects_size_; }
3028 :
3029 : // Approximate amount of physical memory committed for this space.
3030 : size_t CommittedPhysicalMemory() override;
3031 :
3032 : int PageCount() { return page_count_; }
3033 :
3034 : // Clears the marking state of live objects.
3035 : void ClearMarkingStateOfLiveObjects();
3036 :
3037 : // Frees unmarked objects.
3038 : void FreeUnmarkedObjects();
3039 :
3040 : void PromoteNewLargeObject(LargePage* page);
3041 :
3042 : // Checks whether a heap object is in this space; O(1).
3043 : bool Contains(HeapObject obj);
3044 : // Checks whether an address is in the object area in this space. Iterates
3045 : // all objects in the space. May be slow.
3046 : // over all objects in the space, so it may be slow.
3047 :
3048 : // Checks whether the space is empty.
3049 5 : bool IsEmpty() { return first_page() == nullptr; }
3050 :
3051 : virtual void AddPage(LargePage* page, size_t object_size);
3052 : virtual void RemovePage(LargePage* page, size_t object_size);
3053 :
3054 20991 : LargePage* first_page() {
3055 20991 : return reinterpret_cast<LargePage*>(Space::first_page());
3056 : }
3057 :
3058 : // Collect code statistics.
3059 : void CollectCodeStatistics();
3060 :
3061 : iterator begin() { return iterator(first_page()); }
3062 : iterator end() { return iterator(nullptr); }
3063 :
3064 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
3065 :
3066 : #ifdef VERIFY_HEAP
3067 : virtual void Verify(Isolate* isolate);
3068 : #endif
3069 :
3070 : #ifdef DEBUG
3071 : void Print() override;
3072 : #endif
3073 :
3074 : protected:
3075 : LargePage* AllocateLargePage(int object_size, Executability executable);
3076 : V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
3077 : Executability executable);
3078 :
3079 : size_t size_; // allocated bytes
3080 : int page_count_; // number of chunks
3081 : size_t objects_size_; // size of objects
3082 :
3083 : private:
3084 : friend class LargeObjectIterator;
3085 : };
3086 :
3087 122068 : class NewLargeObjectSpace : public LargeObjectSpace {
3088 : public:
3089 : NewLargeObjectSpace(Heap* heap, size_t capacity);
3090 :
3091 : V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
3092 :
3093 : // Available bytes for objects in this space.
3094 : size_t Available() override;
3095 :
3096 : void Flip();
3097 :
3098 : void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
3099 :
3100 : void SetCapacity(size_t capacity);
3101 :
3102 : // The last allocated object that is not guaranteed to be initialized when
3103 : // the concurrent marker visits it.
3104 : Address pending_object() {
3105 : return pending_object_.load(std::memory_order_relaxed);
3106 : }
3107 :
3108 2124134 : void ResetPendingObject() { pending_object_.store(0); }
3109 :
3110 : private:
3111 : std::atomic<Address> pending_object_;
3112 : size_t capacity_;
3113 : };
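// Illustrative sketch (not part of the original header): a concurrent marker
// can compare an object's address against pending_object() and skip the one
// object that may still be under construction. The helper name is
// hypothetical.
inline bool SafeToVisitConcurrently(NewLargeObjectSpace* space,
                                    Address object_address) {
  return object_address != space->pending_object();
}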
3114 :
3115 305170 : class CodeLargeObjectSpace : public LargeObjectSpace {
3116 : public:
3117 : explicit CodeLargeObjectSpace(Heap* heap);
3118 :
3119 : V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
3120 : AllocateRaw(int object_size);
3121 :
3122 : // Finds a large object page containing the given address, returns nullptr
3123 : // Finds the large object page containing the given address; returns nullptr
3124 : // if no such page exists.
3125 :
3126 : protected:
3127 : void AddPage(LargePage* page, size_t object_size) override;
3128 : void RemovePage(LargePage* page, size_t object_size) override;
3129 :
3130 : private:
3131 : static const size_t kInitialChunkMapCapacity = 1024;
3132 : void InsertChunkMapEntries(LargePage* page);
3133 : void RemoveChunkMapEntries(LargePage* page);
3134 :
3135 : // Page-aligned addresses to their corresponding LargePage.
3136 : std::unordered_map<Address, LargePage*> chunk_map_;
3137 : };
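// Illustrative sketch (not part of the original header): FindPage keys the
// internal chunk_map_ by page-aligned addresses, so callers may pass any
// interior code address and get back the owning LargePage or nullptr. The
// helper name is hypothetical.
inline bool IsInCodeLargeObjectSpace(CodeLargeObjectSpace* space, Address pc) {
  return space->FindPage(pc) != nullptr;
}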
3138 :
3139 45510 : class LargeObjectIterator : public ObjectIterator {
3140 : public:
3141 : explicit LargeObjectIterator(LargeObjectSpace* space);
3142 :
3143 : HeapObject Next() override;
3144 :
3145 : private:
3146 : LargePage* current_;
3147 : };
3148 :
3149 : // Iterates over the chunks (pages and large object pages) that can contain
3150 : // pointers to new space or to evacuation candidates.
3151 : class OldGenerationMemoryChunkIterator {
3152 : public:
3153 : inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
3154 :
3155 : // Return nullptr when the iterator is done.
3156 : inline MemoryChunk* next();
3157 :
3158 : private:
3159 : enum State {
3160 : kOldSpaceState,
3161 : kMapState,
3162 : kCodeState,
3163 : kLargeObjectState,
3164 : kCodeLargeObjectState,
3165 : kFinishedState
3166 : };
3167 : Heap* heap_;
3168 : State state_;
3169 : PageIterator old_iterator_;
3170 : PageIterator code_iterator_;
3171 : PageIterator map_iterator_;
3172 : LargePageIterator lo_iterator_;
3173 : LargePageIterator code_lo_iterator_;
3174 : };
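// Illustrative sketch (not part of the original header): next() yields the
// old-generation chunks in turn and returns nullptr when done, so the
// canonical use is a simple loop. The counting helper is hypothetical.
inline int CountOldGenerationChunks(Heap* heap) {
  OldGenerationMemoryChunkIterator it(heap);
  int count = 0;
  while (it.next() != nullptr) ++count;
  return count;
}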
3175 :
3176 : } // namespace internal
3177 : } // namespace v8
3178 :
3179 : #endif // V8_HEAP_SPACES_H_