1 : // Copyright 2011 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_SPACES_H_
6 : #define V8_HEAP_SPACES_H_
7 :
8 : #include <list>
9 : #include <memory>
10 : #include <unordered_set>
11 :
12 : #include "src/allocation.h"
13 : #include "src/base/atomic-utils.h"
14 : #include "src/base/atomicops.h"
15 : #include "src/base/bits.h"
16 : #include "src/base/hashmap.h"
17 : #include "src/base/platform/mutex.h"
18 : #include "src/flags.h"
19 : #include "src/globals.h"
20 : #include "src/heap/heap.h"
21 : #include "src/heap/marking.h"
22 : #include "src/list.h"
23 : #include "src/objects.h"
24 : #include "src/utils.h"
25 :
26 : namespace v8 {
27 : namespace internal {
28 :
29 : class AllocationInfo;
30 : class AllocationObserver;
31 : class CompactionSpace;
32 : class CompactionSpaceCollection;
33 : class FreeList;
34 : class Isolate;
35 : class LocalArrayBufferTracker;
36 : class MemoryAllocator;
37 : class MemoryChunk;
38 : class Page;
39 : class PagedSpace;
40 : class SemiSpace;
41 : class SkipList;
42 : class SlotsBuffer;
43 : class SlotSet;
44 : class TypedSlotSet;
45 : class Space;
46 :
47 : // -----------------------------------------------------------------------------
48 : // Heap structures:
49 : //
50 : // A JS heap consists of a young generation, an old generation, and a large
51 : // object space. The young generation is divided into two semispaces. A
52 : // scavenger implements Cheney's copying algorithm. The old generation is
53 : // separated into a map space and an old object space. The map space contains
54 : // all (and only) map objects; the rest of the old objects go into the old space.
55 : // The old generation is collected by a mark-sweep-compact collector.
56 : //
57 : // The semispaces of the young generation are contiguous. The old and map
58 : // spaces consist of lists of pages. A page has a page header and an object
59 : // area.
60 : //
61 : // There is a separate large object space for objects larger than
62 : // kMaxRegularHeapObjectSize, so that they do not have to move during
63 : // collection. The large object space is paged. Pages in large object space
64 : // may be larger than the page size.
65 : //
66 : // A store-buffer based write barrier is used to keep track of intergenerational
67 : // references. See heap/store-buffer.h.
68 : //
69 : // During scavenges and mark-sweep collections we sometimes (after a store
70 : // buffer overflow) iterate intergenerational pointers without decoding heap
71 : // object maps, so if the page belongs to old space or large object space
72 : // it is essential to guarantee that the page does not contain any
73 : // garbage pointers to new space: every pointer-aligned word which satisfies
74 : // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
75 : // new space. Thus objects in old space and large object spaces should have a
76 : // special layout (e.g. no bare integer fields). This requirement does not
77 : // apply to map space which is iterated in a special fashion. However we still
78 : // require pointer fields of dead maps to be cleaned.
79 : //
80 : // To enable lazy cleaning of old space pages we can mark chunks of the page
81 : // as being garbage. Garbage sections are marked with a special map. These
82 : // sections are skipped when scanning the page, even if we are otherwise
83 : // scanning without regard for object boundaries. Garbage sections are chained
84 : // together to form a free list after a GC. Garbage sections created outside
85 : // of GCs by object truncation etc. may not be in the free list chain. Very
86 : // small free spaces are ignored; they need only be cleaned of bogus pointers
87 : // into new space.
88 : //
89 : // Each page may have up to one special garbage section. The start of this
90 : // section is denoted by the top field in the space. The end of the section
91 : // is denoted by the limit field in the space. This special garbage section
92 : // is not marked with a free space map in the data. The point of this section
93 : // is to enable linear allocation without having to constantly update the byte
94 : // array every time the top field is updated and a new object is created. The
95 : // special garbage section is not in the chain of garbage sections.
96 : //
97 : // Since the top and limit fields are in the space, not the page, only one page
98 : // has a special garbage section, and if the top and limit are equal then there
99 : // is no special garbage section.
100 :
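// As a hedged illustration of the top/limit scheme described above (a sketch,
// not code from this header), a bump-pointer allocation into the special
// garbage section conceptually looks like:
//
//   Address top = allocation_info.top();
//   if (top + size_in_bytes <= allocation_info.limit()) {
//     allocation_info.set_top(top + size_in_bytes);
//     return HeapObject::FromAddress(top);
//   }
//   // Otherwise the space must refill [top, limit) from its free list.
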
101 : // Some assertion macros used in the debugging mode.
102 :
103 : #define DCHECK_PAGE_ALIGNED(address) \
104 : DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
105 :
106 : #define DCHECK_OBJECT_ALIGNED(address) \
107 : DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
108 :
109 : #define DCHECK_OBJECT_SIZE(size) \
110 : DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
111 :
112 : #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
113 : DCHECK((0 < size) && (size <= code_space->AreaSize()))
114 :
115 : #define DCHECK_PAGE_OFFSET(offset) \
116 : DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
117 :
118 : enum FreeListCategoryType {
119 : kTiniest,
120 : kTiny,
121 : kSmall,
122 : kMedium,
123 : kLarge,
124 : kHuge,
125 :
126 : kFirstCategory = kTiniest,
127 : kLastCategory = kHuge,
128 : kNumberOfCategories = kLastCategory + 1,
129 : kInvalidCategory
130 : };
131 :
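// A hedged sketch (not the actual FreeList logic, which lives outside this
// excerpt) of how a free-list implementation might bucket a block size into
// one of the categories above; the function name and all thresholds here are
// purely illustrative:
//
//   FreeListCategoryType SelectCategory(size_t size_in_bytes) {
//     if (size_in_bytes <= 24) return kTiniest;    // hypothetical bound
//     if (size_in_bytes <= 64) return kTiny;       // hypothetical bound
//     if (size_in_bytes <= 256) return kSmall;     // hypothetical bound
//     if (size_in_bytes <= 1024) return kMedium;   // hypothetical bound
//     if (size_in_bytes <= 8192) return kLarge;    // hypothetical bound
//     return kHuge;
//   }
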
132 : enum FreeMode { kLinkCategory, kDoNotLinkCategory };
133 :
134 : enum RememberedSetType {
135 : OLD_TO_NEW,
136 : OLD_TO_OLD,
137 : NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
138 : };
139 :
140 : // A free list category maintains a linked list of free memory blocks.
141 : class FreeListCategory {
142 : public:
143 : static const int kSize = kIntSize + // FreeListCategoryType type_
144 : kIntSize + // padding for type_
145 : kSizetSize + // size_t available_
146 : kPointerSize + // FreeSpace* top_
147 : kPointerSize + // FreeListCategory* prev_
148 : kPointerSize; // FreeListCategory* next_
149 :
150 : FreeListCategory()
151 : : type_(kInvalidCategory),
152 : available_(0),
153 : top_(nullptr),
154 : prev_(nullptr),
155 2534670 : next_(nullptr) {}
156 :
157 : void Initialize(FreeListCategoryType type) {
158 3036378 : type_ = type;
159 3036378 : available_ = 0;
160 3036378 : top_ = nullptr;
161 3036378 : prev_ = nullptr;
162 3036378 : next_ = nullptr;
163 : }
164 :
165 : void Invalidate();
166 :
167 : void Reset();
168 :
169 0 : void ResetStats() { Reset(); }
170 :
171 : void RepairFreeList(Heap* heap);
172 :
173 : // Relinks the category into the currently owning free list. Requires that the
174 : // category is currently unlinked.
175 : void Relink();
176 :
177 : bool Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
178 :
179 : // Picks a node from the list and stores its size in |node_size|. Returns
180 : // nullptr if the category is empty.
181 : FreeSpace* PickNodeFromList(size_t* node_size);
182 :
183 : // Performs a single try to pick a node of at least |minimum_size| from the
184 : // category. Stores the actual size in |node_size|. Returns nullptr if no
185 : // node is found.
186 : FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);
187 :
188 : // Picks a node of at least |minimum_size| from the category. Stores the
189 : // actual size in |node_size|. Returns nullptr if no node is found.
190 : FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
191 :
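// Illustrative caller pattern (an assumption, not code from this header):
// first attempt the cheap single-node check, then fall back to walking the
// whole category:
//
//   size_t node_size;
//   FreeSpace* node = category->TryPickNodeFromList(minimum_size, &node_size);
//   if (node == nullptr) {
//     node = category->SearchForNodeInList(minimum_size, &node_size);
//   }
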
192 : inline FreeList* owner();
193 : inline bool is_linked();
194 4402308 : bool is_empty() { return top() == nullptr; }
195 : size_t available() const { return available_; }
196 :
197 : #ifdef DEBUG
198 : size_t SumFreeList();
199 : int FreeListLength();
200 : #endif
201 :
202 : private:
203 : // For debug builds we accurately compute free list lengths up until
204 : // {kVeryLongFreeList} by manually walking the list.
205 : static const int kVeryLongFreeList = 500;
206 :
207 : inline Page* page();
208 :
209 : FreeSpace* top() { return top_; }
210 38732274 : void set_top(FreeSpace* top) { top_ = top; }
211 : FreeListCategory* prev() { return prev_; }
212 2587706 : void set_prev(FreeListCategory* prev) { prev_ = prev; }
213 : FreeListCategory* next() { return next_; }
214 3949683 : void set_next(FreeListCategory* next) { next_ = next; }
215 :
216 : // |type_|: The type of this free list category.
217 : FreeListCategoryType type_;
218 :
219 : // |available_|: Total available bytes in all blocks of this free list
220 : // category.
221 : size_t available_;
222 :
223 : // |top_|: Points to the top FreeSpace* in the free list category.
224 : FreeSpace* top_;
225 :
226 : FreeListCategory* prev_;
227 : FreeListCategory* next_;
228 :
229 : friend class FreeList;
230 : friend class PagedSpace;
231 : };
232 :
233 : // MemoryChunk represents a memory region owned by a specific space.
234 : // It is divided into the header and the body. Chunk start is always
235 : // 1MB aligned. The start of the body is aligned so that it can accommodate
236 : // any heap object.
237 5974416 : class MemoryChunk {
238 : public:
239 : enum Flag {
240 : NO_FLAGS = 0u,
241 : IS_EXECUTABLE = 1u << 0,
242 : POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
243 : POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
244 : // A page in new space has one of the next two flags set.
245 : IN_FROM_SPACE = 1u << 3,
246 : IN_TO_SPACE = 1u << 4,
247 : NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
248 : EVACUATION_CANDIDATE = 1u << 6,
249 : NEVER_EVACUATE = 1u << 7,
250 :
251 : // Large objects can have a progress bar in their page header. These objects
252 : // are scanned in increments and will be kept black while being scanned.
253 : // Even if the mutator writes to them they will be kept black, and a
254 : // white-to-grey transition is performed on the value.
255 : HAS_PROGRESS_BAR = 1u << 8,
256 :
257 : // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
258 : // from new to old space during evacuation.
259 : PAGE_NEW_OLD_PROMOTION = 1u << 9,
260 :
261 : // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
262 : // within the new space during evacuation.
263 : PAGE_NEW_NEW_PROMOTION = 1u << 10,
264 :
265 : // This flag is intended to be used for testing. Works only when both
266 : // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
267 : // are set. It forces the page to become an evacuation candidate at next
268 : // candidates selection cycle.
269 : FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
270 :
271 : // This flag is intended to be used for testing.
272 : NEVER_ALLOCATE_ON_PAGE = 1u << 12,
273 :
274 : // The memory chunk is already logically freed, however the actual freeing
275 : // still has to be performed.
276 : PRE_FREED = 1u << 13,
277 :
278 : // |POOLED|: When actually freeing this chunk, only uncommit and do not
279 : // give up the reservation as we still reuse the chunk at some point.
280 : POOLED = 1u << 14,
281 :
282 : // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
283 : // has been aborted and needs special handling by the sweeper.
284 : COMPACTION_WAS_ABORTED = 1u << 15,
285 :
286 : // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
287 : // on pages is sometimes aborted. The flag is used to avoid repeatedly
288 : // triggering on the same page.
289 : COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
290 :
291 : // |ANCHOR|: Flag is set if page is an anchor.
292 : ANCHOR = 1u << 17,
293 : };
294 : typedef base::Flags<Flag, uintptr_t> Flags;
295 :
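// Illustrative usage of the flag accessors declared below (assuming a valid
// chunk pointer; a sketch, not code from this header):
//
//   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
//   chunk->SetFlag(MemoryChunk::NEVER_EVACUATE);
//   DCHECK(chunk->IsFlagSet(MemoryChunk::NEVER_EVACUATE));
//   chunk->ClearFlag(MemoryChunk::NEVER_EVACUATE);
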
296 : static const int kPointersToHereAreInterestingMask =
297 : POINTERS_TO_HERE_ARE_INTERESTING;
298 :
299 : static const int kPointersFromHereAreInterestingMask =
300 : POINTERS_FROM_HERE_ARE_INTERESTING;
301 :
302 : static const int kEvacuationCandidateMask = EVACUATION_CANDIDATE;
303 :
304 : static const int kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
305 :
306 : static const int kSkipEvacuationSlotsRecordingMask =
307 : kEvacuationCandidateMask | kIsInNewSpaceMask;
308 :
309 : // |kSweepingDone|: The page state when sweeping is complete or sweeping must
310 : // not be performed on that page. Sweeper threads that are done with their
311 : // work will set this value and not touch the page anymore.
312 : // |kSweepingPending|: This page is ready for parallel sweeping.
313 : // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
314 : enum ConcurrentSweepingState {
315 : kSweepingDone,
316 : kSweepingPending,
317 : kSweepingInProgress,
318 : };
319 :
320 : static const intptr_t kAlignment =
321 : (static_cast<uintptr_t>(1) << kPageSizeBits);
322 :
323 : static const intptr_t kAlignmentMask = kAlignment - 1;
324 :
325 : static const intptr_t kSizeOffset = 0;
326 : static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
327 : static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
328 : static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
329 : static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
330 : static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;
331 :
332 : static const size_t kMinHeaderSize =
333 : kSizeOffset // NOLINT
334 : + kSizetSize // size_t size
335 : + kIntptrSize // Flags flags_
336 : + kPointerSize // Address area_start_
337 : + kPointerSize // Address area_end_
338 : + 2 * kPointerSize // base::VirtualMemory reservation_
339 : + kPointerSize // Address owner_
340 : + kPointerSize // Heap* heap_
341 : + kIntptrSize // intptr_t progress_bar_
342 : + kIntptrSize // intptr_t live_byte_count_
343 : + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
344 : + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
345 : + kPointerSize // SkipList* skip_list_
346 : + kPointerSize // AtomicValue high_water_mark_
347 : + kPointerSize // base::RecursiveMutex* mutex_
348 : + kPointerSize // base::AtomicWord concurrent_sweeping_
349 : + 2 * kSizetSize // AtomicNumber free-list statistics
350 : + kPointerSize // AtomicValue next_chunk_
351 : + kPointerSize // AtomicValue prev_chunk_
352 : + FreeListCategory::kSize * kNumberOfCategories
353 : // FreeListCategory categories_[kNumberOfCategories]
354 : + kPointerSize // LocalArrayBufferTracker* local_tracker_
355 : + kIntptrSize // intptr_t young_generation_live_byte_count_
356 : + kPointerSize; // Bitmap* young_generation_bitmap_
357 :
358 : // We add some more space to the computed header size to account for missing
359 : // alignment requirements in our computation.
360 : // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
361 : static const size_t kHeaderSize = kMinHeaderSize;
362 :
363 : static const int kBodyOffset =
364 : CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
365 :
366 : // The start offset of the object area in a page. Aligned to both maps and
367 : // code alignment to be suitable for both. Also aligned to 32 words because
368 : // the marking bitmap is arranged in 32 bit chunks.
369 : static const int kObjectStartAlignment = 32 * kPointerSize;
370 : static const int kObjectStartOffset =
371 : kBodyOffset - 1 +
372 : (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
373 :
374 : // Page size in bytes. This must be a multiple of the OS page size.
375 : static const int kPageSize = 1 << kPageSizeBits;
376 : static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
377 :
378 : static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
379 :
380 : // Only works if the pointer is in the first kPageSize of the MemoryChunk.
381 7961657451 : static MemoryChunk* FromAddress(Address a) {
382 26748366586 : return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
383 : }
384 :
385 : static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
386 :
387 4801957 : static inline void UpdateHighWaterMark(Address mark) {
388 9603915 : if (mark == nullptr) return;
389 : // Need to subtract one from the mark because when a chunk is full the
390 : // top points to the next address after the chunk, which effectively belongs
391 : // to another chunk. See the comment to Page::FromTopOrLimit.
392 2513234 : MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
393 2513234 : intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
394 : intptr_t old_mark = 0;
395 2513235 : do {
396 : old_mark = chunk->high_water_mark_.Value();
397 3080924 : } while ((new_mark > old_mark) &&
398 : !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
399 : }
400 :
401 : static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
402 :
403 : Address address() const {
404 : return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
405 : }
406 :
407 : base::RecursiveMutex* mutex() { return mutex_; }
408 :
409 108763755 : bool Contains(Address addr) {
410 108817966 : return addr >= area_start() && addr < area_end();
411 : }
412 :
413 : // Checks whether |addr| can be a limit of addresses in this page. It's a
414 : // limit if it's in the page, or if it's just after the last byte of the page.
415 113859532 : bool ContainsLimit(Address addr) {
416 113859532 : return addr >= area_start() && addr <= area_end();
417 : }
418 :
419 : base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
420 : return concurrent_sweeping_;
421 : }
422 :
423 6821469 : bool SweepingDone() {
424 6821457 : return concurrent_sweeping_state().Value() == kSweepingDone;
425 : }
426 :
427 : size_t size() const { return size_; }
428 37 : void set_size(size_t size) { size_ = size; }
429 :
430 : inline Heap* heap() const { return heap_; }
431 :
432 : inline SkipList* skip_list() { return skip_list_; }
433 :
434 220676 : inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
435 :
436 : template <RememberedSetType type>
437 : SlotSet* slot_set() {
438 : return slot_set_[type].Value();
439 : }
440 :
441 : template <RememberedSetType type>
442 : TypedSlotSet* typed_slot_set() {
443 : return typed_slot_set_[type].Value();
444 : }
445 :
446 : inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
447 :
448 : template <RememberedSetType type>
449 : SlotSet* AllocateSlotSet();
450 : template <RememberedSetType type>
451 : void ReleaseSlotSet();
452 : template <RememberedSetType type>
453 : TypedSlotSet* AllocateTypedSlotSet();
454 : template <RememberedSetType type>
455 : void ReleaseTypedSlotSet();
456 : void AllocateLocalTracker();
457 : void ReleaseLocalTracker();
458 : void AllocateYoungGenerationBitmap();
459 : void ReleaseYoungGenerationBitmap();
460 :
461 : Address area_start() { return area_start_; }
462 : Address area_end() { return area_end_; }
463 1627593 : size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
464 :
465 : bool CommitArea(size_t requested);
466 :
467 : // Approximate amount of physical memory committed for this chunk.
468 : size_t CommittedPhysicalMemory();
469 :
470 607133 : Address HighWaterMark() { return address() + high_water_mark_.Value(); }
471 :
472 : int progress_bar() {
473 : DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
474 43634 : return static_cast<int>(progress_bar_);
475 : }
476 :
477 : void set_progress_bar(int progress_bar) {
478 : DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
479 44661 : progress_bar_ = progress_bar;
480 : }
481 :
482 : void ResetProgressBar() {
483 1588 : if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
484 : set_progress_bar(0);
485 : }
486 : }
487 :
488 7961659165 : inline uint32_t AddressToMarkbitIndex(Address addr) const {
489 8705155803 : return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
490 : }
491 :
492 : inline Address MarkbitIndexToAddress(uint32_t index) const {
493 : return this->address() + (index << kPointerSizeLog2);
494 : }
495 :
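// Worked example (illustrative): on a 64-bit build with kPointerSizeLog2 == 3,
// an address 0x40 bytes past the chunk start has markbit index 0x40 >> 3 == 8,
// and MarkbitIndexToAddress(8) maps back to chunk->address() + 0x40.
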
496 : void SetFlag(Flag flag) { flags_ |= flag; }
497 : void ClearFlag(Flag flag) { flags_ &= ~Flags(flag); }
498 4423942975 : bool IsFlagSet(Flag flag) { return (flags_ & flag) != 0; }
499 :
500 : // Set or clear multiple flags at a time. The flags in the mask are set to
501 : // the value in |flags|; the rest retain their current value in |flags_|.
502 : void SetFlags(uintptr_t flags, uintptr_t mask) {
503 1091774 : flags_ = (flags_ & ~Flags(mask)) | (Flags(flags) & Flags(mask));
504 : }
505 :
506 : // Return all current flags.
507 : uintptr_t GetFlags() { return flags_; }
508 :
509 : bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
510 :
511 : void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
512 :
513 4419814526 : bool IsEvacuationCandidate() {
514 : DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
515 4419814526 : return IsFlagSet(EVACUATION_CANDIDATE);
516 : }
517 :
518 : bool CanAllocate() {
519 70213878 : return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
520 : }
521 :
522 12772303 : bool ShouldSkipEvacuationSlotRecording() {
523 18657697 : return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
524 12772303 : !IsFlagSet(COMPACTION_WAS_ABORTED);
525 : }
526 :
527 : Executability executable() {
528 1471090 : return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
529 : }
530 :
531 4355939872 : bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
532 :
533 : bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
534 :
535 : bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
536 :
537 : MemoryChunk* next_chunk() { return next_chunk_.Value(); }
538 :
539 : MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
540 :
541 : void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
542 :
543 : void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
544 :
545 374138022 : Space* owner() const {
546 : intptr_t owner_value = base::NoBarrierAtomicValue<intptr_t>::FromAddress(
547 : const_cast<Address*>(&owner_))
548 : ->Value();
549 374134276 : if ((owner_value & kPageHeaderTagMask) == kPageHeaderTag) {
550 321058293 : return reinterpret_cast<Space*>(owner_value - kPageHeaderTag);
551 : } else {
552 : return nullptr;
553 : }
554 : }
555 :
556 : void set_owner(Space* space) {
557 : DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
558 2285009 : owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
559 : DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
560 : kPageHeaderTag);
561 : }
562 :
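// Illustrative round trip through the tagging scheme above (stated for
// exposition, not code from this header): set_owner() stores
// |space| + kPageHeaderTag in owner_, and owner() strips the tag again, so
// for a correctly tagged chunk, chunk->owner() == space and HasPageHeader()
// returns true.
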
563 289607914 : bool HasPageHeader() { return owner() != nullptr; }
564 :
565 : void InsertAfter(MemoryChunk* other);
566 : void Unlink();
567 :
568 : protected:
569 : static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
570 : Address area_start, Address area_end,
571 : Executability executable, Space* owner,
572 : base::VirtualMemory* reservation);
573 :
574 : // Should be called when memory chunk is about to be freed.
575 : void ReleaseAllocatedMemory();
576 :
577 : base::VirtualMemory* reserved_memory() { return &reservation_; }
578 :
579 : size_t size_;
580 : Flags flags_;
581 :
582 : // Start and end of allocatable memory on this chunk.
583 : Address area_start_;
584 : Address area_end_;
585 :
586 : // If the chunk needs to remember its memory reservation, it is stored here.
587 : base::VirtualMemory reservation_;
588 :
589 : // The identity of the owning space. This is tagged as a failure pointer, but
590 : // no failure can be in an object, so this can be distinguished from any entry
591 : // in a fixed array.
592 : Address owner_;
593 :
594 : Heap* heap_;
595 :
596 : // Used by the incremental marker to keep track of the scanning progress in
597 : // large objects that have a progress bar and are scanned in increments.
598 : intptr_t progress_bar_;
599 :
600 : // Count of bytes marked black on page.
601 : intptr_t live_byte_count_;
602 :
603 : // A single slot set for small pages (of size kPageSize) or an array of slot
604 : // sets for large pages. In the latter case the number of entries in the array
605 : // is ceil(size() / kPageSize).
606 : base::AtomicValue<SlotSet*> slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
607 : base::AtomicValue<TypedSlotSet*>
608 : typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
609 :
610 : SkipList* skip_list_;
611 :
612 : // Assuming the initial allocation on a page is sequential,
613 : // counts the highest number of bytes ever allocated on the page.
614 : base::AtomicValue<intptr_t> high_water_mark_;
615 :
616 : base::RecursiveMutex* mutex_;
617 :
618 : base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
619 :
620 : // PagedSpace free-list statistics.
621 : base::AtomicNumber<intptr_t> available_in_free_list_;
622 : base::AtomicNumber<intptr_t> wasted_memory_;
623 :
624 : // next_chunk_ holds a pointer of type MemoryChunk
625 : base::AtomicValue<MemoryChunk*> next_chunk_;
626 : // prev_chunk_ holds a pointer of type MemoryChunk
627 : base::AtomicValue<MemoryChunk*> prev_chunk_;
628 :
629 : FreeListCategory categories_[kNumberOfCategories];
630 :
631 : LocalArrayBufferTracker* local_tracker_;
632 :
633 : intptr_t young_generation_live_byte_count_;
634 : Bitmap* young_generation_bitmap_;
635 :
636 : private:
637 900355 : void InitializeReservedMemory() { reservation_.Reset(); }
638 :
639 : friend class MarkingState;
640 : friend class MemoryAllocator;
641 : friend class MemoryChunkValidator;
642 : };
643 :
644 : DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags)
645 :
646 : static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
647 : "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
648 :
649 : class MarkingState {
650 : public:
651 0 : static MarkingState External(HeapObject* object) {
652 0 : return External(MemoryChunk::FromAddress(object->address()));
653 : }
654 :
655 : static MarkingState External(MemoryChunk* chunk) {
656 : return MarkingState(chunk->young_generation_bitmap_,
657 0 : &chunk->young_generation_live_byte_count_);
658 : }
659 :
660 5883172923 : static MarkingState Internal(HeapObject* object) {
661 13059771406 : return Internal(MemoryChunk::FromAddress(object->address()));
662 : }
663 :
664 : static MarkingState Internal(MemoryChunk* chunk) {
665 : return MarkingState(
666 : Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize),
667 7180143832 : &chunk->live_byte_count_);
668 : }
669 :
670 : MarkingState(Bitmap* bitmap, intptr_t* live_bytes)
671 : : bitmap_(bitmap), live_bytes_(live_bytes) {}
672 :
673 : template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
674 : inline void IncrementLiveBytes(intptr_t by) const;
675 :
676 : void SetLiveBytes(intptr_t value) const {
677 336223 : *live_bytes_ = static_cast<int>(value);
678 : }
679 :
680 : void ClearLiveness() const {
681 79622 : bitmap_->Clear();
682 1946976 : *live_bytes_ = 0;
683 : }
684 :
685 7176618696 : Bitmap* bitmap() const { return bitmap_; }
686 1650182 : intptr_t live_bytes() const { return *live_bytes_; }
687 :
688 : private:
689 : Bitmap* bitmap_;
690 : intptr_t* live_bytes_;
691 : };
692 :
693 : template <>
694 709249639 : inline void MarkingState::IncrementLiveBytes<MarkBit::NON_ATOMIC>(
695 : intptr_t by) const {
696 709345086 : *live_bytes_ += by;
697 709249639 : }
698 :
699 : template <>
700 : inline void MarkingState::IncrementLiveBytes<MarkBit::ATOMIC>(
701 : intptr_t by) const {
702 : reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Increment(by);
703 : }
704 :
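// Illustrative usage (a sketch, not code from this header): bump the
// live-byte counter of the page holding |object|, using the non-atomic
// default access mode:
//
//   MarkingState::Internal(object).IncrementLiveBytes(object->Size());
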
705 : // -----------------------------------------------------------------------------
706 : // A page is a memory chunk of size 1MB. Large object pages may be larger.
707 : //
708 : // The only way to get a page pointer is by calling factory methods:
709 : // Page* p = Page::FromAddress(addr); or
710 : // Page* p = Page::FromTopOrLimit(top);
711 : class Page : public MemoryChunk {
712 : public:
713 : static const intptr_t kCopyAllFlags = ~0;
714 :
715 : // Page flags copied from from-space to to-space when flipping semispaces.
716 : static const intptr_t kCopyOnFlipFlagsMask =
717 : static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
718 : static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
719 :
720 : static inline Page* ConvertNewToOld(Page* old_page);
721 :
722 : // Returns the page containing a given address. The address ranges
723 : // from [page_addr .. page_addr + kPageSize[. This only works if the object
724 : // is in fact in a page.
725 8852395479 : static Page* FromAddress(Address addr) {
726 14632892366 : return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
727 : }
728 :
729 : // Returns the page containing the address provided. The address can
730 : // potentially point right after the page. To also be safe for tagged values
731 : // we subtract one word. The valid address range is
732 : // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
733 : static Page* FromAllocationAreaAddress(Address address) {
734 141488740 : return Page::FromAddress(address - kPointerSize);
735 : }
736 :
737 : // Checks if address1 and address2 are on the same new space page.
738 : static bool OnSamePage(Address address1, Address address2) {
739 : return Page::FromAddress(address1) == Page::FromAddress(address2);
740 : }
741 :
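// Illustrative use of the predicate above (an assumed caller, not code from
// this header): a barrier-style check can cheaply skip work when source and
// target share a page:
//
//   if (!Page::OnSamePage(slot_addr, value_addr)) {
//     // e.g. record the slot in a remembered set
//   }
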
742 : // Checks whether an address is page aligned.
743 : static bool IsAlignedToPageSize(Address addr) {
744 87746845 : return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
745 : }
746 :
747 : static bool IsAtObjectStart(Address addr) {
748 : return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
749 : kObjectStartOffset;
750 : }
751 :
752 : inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
753 :
754 : // Create a Page object that is only used as anchor for the doubly-linked
755 : // list of real pages.
756 422445 : explicit Page(Space* owner) { InitializeAsAnchor(owner); }
757 :
758 : inline void MarkNeverAllocateForTesting();
759 : inline void MarkEvacuationCandidate();
760 : inline void ClearEvacuationCandidate();
761 :
762 : Page* next_page() { return static_cast<Page*>(next_chunk()); }
763 : Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
764 : void set_next_page(Page* page) { set_next_chunk(page); }
765 : void set_prev_page(Page* page) { set_prev_chunk(page); }
766 :
767 : template <typename Callback>
768 514870 : inline void ForAllFreeListCategories(Callback callback) {
769 3955756 : for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
770 3440886 : callback(&categories_[i]);
771 : }
772 514870 : }
773 :
774 : // Returns the offset of a given address to this page.
775 : inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
776 :
777 : // Returns the address for a given offset in this page.
778 : Address OffsetToAddress(size_t offset) {
779 : DCHECK_PAGE_OFFSET(offset);
780 : return address() + offset;
781 : }
782 :
783 : // WaitUntilSweepingCompleted only works when concurrent sweeping is in
784 : // progress, in particular when we know that right before this call a
785 : // sweeper thread was sweeping this page.
786 : void WaitUntilSweepingCompleted() {
787 0 : mutex_->Lock();
788 0 : mutex_->Unlock();
789 : DCHECK(SweepingDone());
790 : }
791 :
792 : void ResetFreeListStatistics();
793 :
794 : size_t AvailableInFreeList();
795 :
796 121363 : size_t LiveBytesFromFreeList() {
797 : DCHECK_GE(area_size(), wasted_memory() + available_in_free_list());
798 242726 : return area_size() - wasted_memory() - available_in_free_list();
799 : }
800 :
801 : FreeListCategory* free_list_category(FreeListCategoryType type) {
802 35089758 : return &categories_[type];
803 : }
804 :
805 : bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
806 :
807 904746 : size_t wasted_memory() { return wasted_memory_.Value(); }
808 942076 : void add_wasted_memory(size_t waste) { wasted_memory_.Increment(waste); }
809 121363 : size_t available_in_free_list() { return available_in_free_list_.Value(); }
810 : void add_available_in_free_list(size_t available) {
811 : DCHECK_LE(available, area_size());
812 35110520 : available_in_free_list_.Increment(available);
813 : }
814 : void remove_available_in_free_list(size_t available) {
815 : DCHECK_LE(available, area_size());
816 : DCHECK_GE(available_in_free_list(), available);
817 2368287 : available_in_free_list_.Decrement(available);
818 : }
819 :
820 : size_t ShrinkToHighWaterMark();
821 :
822 : V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
823 :
824 : #ifdef DEBUG
825 : void Print();
826 : #endif // DEBUG
827 :
828 : private:
829 : enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
830 :
831 : template <InitializationMode mode = kFreeMemory>
832 : static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
833 : Executability executable, PagedSpace* owner);
834 : static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
835 : Executability executable, SemiSpace* owner);
836 :
837 : inline void InitializeFreeListCategories();
838 :
839 : void InitializeAsAnchor(Space* owner);
840 :
841 : friend class MemoryAllocator;
842 : };
843 :
844 : class LargePage : public MemoryChunk {
845 : public:
846 32494 : HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
847 :
848 : inline LargePage* next_page() {
849 : return static_cast<LargePage*>(next_chunk());
850 : }
851 :
852 : inline void set_next_page(LargePage* page) { set_next_chunk(page); }
853 :
854 : // Uncommit memory that is no longer in use by the object. If the object
855 : // cannot be shrunk, 0 is returned.
856 : Address GetAddressToShrink();
857 :
858 : void ClearOutOfLiveRangeSlots(Address free_start);
859 :
860 : // A limit to guarantee that we do not overflow the typed slot offset in
861 : // the old-to-old remembered set.
862 : // Note that this limit is higher than what the assembler already imposes on
863 : // x64 and ia32 architectures.
864 : static const int kMaxCodePageSize = 512 * MB;
865 :
866 : private:
867 : static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
868 : Executability executable, Space* owner);
869 :
870 : friend class MemoryAllocator;
871 : };
872 :
873 :
874 : // ----------------------------------------------------------------------------
875 : // Space is the abstract superclass for all allocation spaces.
876 : class Space : public Malloced {
877 : public:
878 : Space(Heap* heap, AllocationSpace id, Executability executable)
879 : : allocation_observers_(new List<AllocationObserver*>()),
880 : allocation_observers_paused_(false),
881 : heap_(heap),
882 : id_(id),
883 : executable_(executable),
884 : committed_(0),
885 1088018 : max_committed_(0) {}
886 :
887 478666 : virtual ~Space() {}
888 :
889 : Heap* heap() const { return heap_; }
890 :
891 : // Does the space need executable memory?
892 : Executability executable() { return executable_; }
893 :
894 : // Identity used in error reporting.
895 : AllocationSpace identity() { return id_; }
896 :
897 : V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
898 : AllocationObserver* observer);
899 :
900 : V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
901 : AllocationObserver* observer);
902 :
903 : V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
904 :
905 : V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
906 :
907 : void AllocationStep(Address soon_object, int size);
908 :
909 : // Return the total amount of committed memory for this space, i.e., allocatable
910 : // memory and page headers.
911 7716933 : virtual size_t CommittedMemory() { return committed_; }
912 :
913 0 : virtual size_t MaximumCommittedMemory() { return max_committed_; }
914 :
915 : // Returns allocated size.
916 : virtual size_t Size() = 0;
917 :
918 : // Returns size of objects. Can differ from the allocated size
919 : // (e.g. see LargeObjectSpace).
920 0 : virtual size_t SizeOfObjects() { return Size(); }
921 :
922 : // Approximate amount of physical memory committed for this space.
923 : virtual size_t CommittedPhysicalMemory() = 0;
924 :
925 : // Return the available bytes without growing.
926 : virtual size_t Available() = 0;
927 :
928 6057 : virtual int RoundSizeDownToObjectAlignment(int size) {
929 6057 : if (id_ == CODE_SPACE) {
930 769 : return RoundDown(size, kCodeAlignment);
931 : } else {
932 5288 : return RoundDown(size, kPointerSize);
933 : }
934 : }
935 :
936 : virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
937 :
938 : void AccountCommitted(size_t bytes) {
939 : DCHECK_GE(committed_ + bytes, committed_);
940 734610 : committed_ += bytes;
941 734610 : if (committed_ > max_committed_) {
942 594447 : max_committed_ = committed_;
943 : }
944 : }
945 :
946 : void AccountUncommitted(size_t bytes) {
947 : DCHECK_GE(committed_, committed_ - bytes);
948 407145 : committed_ -= bytes;
949 : }
950 :
951 : #ifdef DEBUG
952 : virtual void Print() = 0;
953 : #endif
954 :
955 : protected:
956 : std::unique_ptr<List<AllocationObserver*>> allocation_observers_;
957 : bool allocation_observers_paused_;
958 :
959 : private:
960 : Heap* heap_;
961 : AllocationSpace id_;
962 : Executability executable_;
963 :
964 : // Keeps track of committed memory in a space.
965 : size_t committed_;
966 : size_t max_committed_;
967 :
968 : DISALLOW_COPY_AND_ASSIGN(Space);
969 : };
970 :
971 :
972 : class MemoryChunkValidator {
973 : // Computed offsets should match the compiler generated ones.
974 : STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
975 :
976 : // Validate our estimates on the header size.
977 : STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
978 : STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
979 : STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
980 : };
981 :
982 :
983 : // ----------------------------------------------------------------------------
984 : // All heap objects containing executable code (code objects) must be allocated
985 : // from a 2 GB range of memory, so that they can call each other using 32-bit
986 : // displacements. This happens automatically on 32-bit platforms, where 32-bit
987 : // displacements cover the entire 4GB virtual address space. On 64-bit
988 : // platforms, we support this using the CodeRange object, which reserves and
989 : // manages a range of virtual memory.
990 : class CodeRange {
991 : public:
992 : explicit CodeRange(Isolate* isolate);
993 124226 : ~CodeRange() { TearDown(); }
994 :
995 : // Reserves a range of virtual memory, but does not commit any of it.
996 : // Can only be called once, at heap initialization time.
997 : // Returns false on failure.
998 : bool SetUp(size_t requested_size);
999 :
1000 : bool valid() { return code_range_ != NULL; }
1001 : Address start() {
1002 : DCHECK(valid());
1003 13459314 : return static_cast<Address>(code_range_->address());
1004 : }
1005 : size_t size() {
1006 : DCHECK(valid());
1007 0 : return code_range_->size();
1008 : }
1009 561420 : bool contains(Address address) {
1010 561420 : if (!valid()) return false;
1011 1113550 : Address start = static_cast<Address>(code_range_->address());
1012 1113550 : return start <= address && address < start + code_range_->size();
1013 : }
1014 :
1015 : // Allocates a chunk of memory from the large-object portion of
1016 : // the code range. On platforms with no separate code range, this should
1017 : // not be called.
1018 : MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
1019 : const size_t commit_size,
1020 : size_t* allocated);
1021 : bool CommitRawMemory(Address start, size_t length);
1022 : bool UncommitRawMemory(Address start, size_t length);
1023 : void FreeRawMemory(Address buf, size_t length);
1024 :
1025 : private:
1026 : class FreeBlock {
1027 : public:
1028 405174 : FreeBlock() : start(0), size(0) {}
1029 : FreeBlock(Address start_arg, size_t size_arg)
1030 459130 : : start(start_arg), size(size_arg) {
1031 : DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1032 : DCHECK(size >= static_cast<size_t>(Page::kPageSize));
1033 : }
1034 : FreeBlock(void* start_arg, size_t size_arg)
1035 : : start(static_cast<Address>(start_arg)), size(size_arg) {
1036 : DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1037 : DCHECK(size >= static_cast<size_t>(Page::kPageSize));
1038 : }
1039 :
1040 : Address start;
1041 : size_t size;
1042 : };
1043 :
1044 : // Frees the range of virtual memory, and frees the data structures used to
1045 : // manage it.
1046 : void TearDown();
1047 :
1048 : // Finds a block on the allocation list that contains at least the
1049 : // requested amount of memory. If none is found, sorts and merges
1050 : // the existing free memory blocks, and searches again.
1051 : // If none can be found, returns false.
1052 : bool GetNextAllocationBlock(size_t requested);
1053 : // Compares the start addresses of two free blocks.
1054 : static int CompareFreeBlockAddress(const FreeBlock* left,
1055 : const FreeBlock* right);
1056 : bool ReserveBlock(const size_t requested_size, FreeBlock* block);
1057 : void ReleaseBlock(const FreeBlock* block);
1058 :
1059 : Isolate* isolate_;
1060 :
1061 : // The reserved range of virtual memory that all code objects are put in.
1062 : base::VirtualMemory* code_range_;
1063 :
1064 : // The global mutex guards free_list_ and allocation_list_ as GC threads may
1065 : // access both lists concurrently with the main thread.
1066 : base::Mutex code_range_mutex_;
1067 :
1068 : // Freed blocks of memory are added to the free list. When the allocation
1069 : // list is exhausted, the free list is sorted and merged to make the new
1070 : // allocation list.
1071 : List<FreeBlock> free_list_;
1072 :
1073 : // Memory is allocated from the free blocks on the allocation list.
1074 : // The block at current_allocation_block_index_ is the current block.
1075 : List<FreeBlock> allocation_list_;
1076 : int current_allocation_block_index_;
1077 :
1078 : DISALLOW_COPY_AND_ASSIGN(CodeRange);
1079 : };
1080 :
1081 :
1082 : class SkipList {
1083 : public:
1084 : SkipList() { Clear(); }
1085 :
1086 : void Clear() {
1087 26859776 : for (int idx = 0; idx < kSize; idx++) {
1088 26859776 : starts_[idx] = reinterpret_cast<Address>(-1);
1089 : }
1090 : }
1091 :
1092 5662996 : Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
1093 :
1094 : void AddObject(Address addr, int size) {
1095 : int start_region = RegionNumber(addr);
1096 85707833 : int end_region = RegionNumber(addr + size - kPointerSize);
1097 109314174 : for (int idx = start_region; idx <= end_region; idx++) {
1098 109314174 : if (starts_[idx] > addr) {
1099 17196000 : starts_[idx] = addr;
1100 : } else {
1101 : // In the first region, there may already be an object closer to the
1102 : // start of the region. Do not change the start in that case. If this
1103 : // is not the first region, you probably added overlapping objects.
1104 : DCHECK_EQ(start_region, idx);
1105 : }
1106 : }
1107 : }
1108 :
1109 : static inline int RegionNumber(Address addr) {
1110 325557302 : return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1111 : }
1112 :
1113 85707833 : static void Update(Address addr, int size) {
1114 : Page* page = Page::FromAddress(addr);
1115 85707833 : SkipList* list = page->skip_list();
1116 85707833 : if (list == NULL) {
1117 220676 : list = new SkipList();
1118 : page->set_skip_list(list);
1119 : }
1120 :
1121 : list->AddObject(addr, size);
1122 85707833 : }
1123 :
1124 : private:
1125 : static const int kRegionSizeLog2 = 13;
1126 : static const int kRegionSize = 1 << kRegionSizeLog2;
1127 : static const int kSize = Page::kPageSize / kRegionSize;
1128 :
1129 : STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1130 :
1131 : Address starts_[kSize];
1132 : };
1133 :
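// A hedged usage sketch (illustrative; the real consumers live outside this
// excerpt): instead of scanning a page from area_start(), a lookup can begin
// at the lowest object start recorded for |addr|'s 8KB region:
//
//   Page* page = Page::FromAddress(addr);
//   SkipList* list = page->skip_list();
//   Address scan_start =
//       (list != NULL) ? list->StartFor(addr) : page->area_start();
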
1134 :
1135 : // ----------------------------------------------------------------------------
1136 : // A space acquires chunks of memory from the operating system. The memory
1137 : // allocator allocates and deallocates pages for the paged heap spaces and large
1138 : // pages for large object space.
1139 59285 : class V8_EXPORT_PRIVATE MemoryAllocator {
1140 : public:
1141 : // Unmapper takes care of concurrently unmapping and uncommitting memory
1142 : // chunks.
1143 177855 : class Unmapper {
1144 : public:
1145 : class UnmapFreeMemoryTask;
1146 :
1147 63610 : explicit Unmapper(MemoryAllocator* allocator)
1148 : : allocator_(allocator),
1149 : pending_unmapping_tasks_semaphore_(0),
1150 318050 : concurrent_unmapping_tasks_active_(0) {
1151 63610 : chunks_[kRegular].reserve(kReservedQueueingSlots);
1152 63610 : chunks_[kPooled].reserve(kReservedQueueingSlots);
1153 63610 : }
1154 :
1155 204171 : void AddMemoryChunkSafe(MemoryChunk* chunk) {
1156 401489 : if ((chunk->size() == Page::kPageSize) &&
1157 : (chunk->executable() != EXECUTABLE)) {
1158 196299 : AddMemoryChunkSafe<kRegular>(chunk);
1159 : } else {
1160 7872 : AddMemoryChunkSafe<kNonRegular>(chunk);
1161 : }
1162 204171 : }
1163 :
1164 188999 : MemoryChunk* TryGetPooledMemoryChunkSafe() {
1165 : // Procedure:
1166 : // (1) Try to get a chunk that was declared as pooled and already has
1167 : // been uncommitted.
1168 : // (2) Try to steal any memory chunk of kPageSize that would've been
1169 : // unmapped.
1170 188999 : MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1171 188999 : if (chunk == nullptr) {
1172 173753 : chunk = GetMemoryChunkSafe<kRegular>();
1173 173753 : if (chunk != nullptr) {
1174 : // For stolen chunks we need to manually free any allocated memory.
1175 734 : chunk->ReleaseAllocatedMemory();
1176 : }
1177 : }
1178 188999 : return chunk;
1179 : }
1180 :
1181 : void FreeQueuedChunks();
1182 : bool WaitUntilCompleted();
1183 : void TearDown();
1184 :
1185 : bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
1186 :
1187 : private:
1188 : static const int kReservedQueueingSlots = 64;
1189 :
1190 : enum ChunkQueueType {
1191 : kRegular, // Pages of kPageSize that do not live in a CodeRange and
1192 : // can thus be used for stealing.
1193 : kNonRegular, // Large chunks and executable chunks.
1194 : kPooled, // Pooled chunks, already uncommitted and ready for reuse.
1195 : kNumberOfChunkQueues,
1196 : };
1197 :
1198 : enum class FreeMode {
1199 : kUncommitPooled,
1200 : kReleasePooled,
1201 : };
1202 :
1203 : template <ChunkQueueType type>
1204 465499 : void AddMemoryChunkSafe(MemoryChunk* chunk) {
1205 465499 : base::LockGuard<base::Mutex> guard(&mutex_);
1206 549030 : if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
1207 387235 : chunks_[type].push_back(chunk);
1208 : } else {
1209 : DCHECK_EQ(type, kRegular);
1210 78264 : delayed_regular_chunks_.push_back(chunk);
1211 : }
1212 465499 : }
1213 :
1214 : template <ChunkQueueType type>
1215 1332596 : MemoryChunk* GetMemoryChunkSafe() {
1216 1332596 : base::LockGuard<base::Mutex> guard(&mutex_);
1217 1332686 : if (chunks_[type].empty()) return nullptr;
1218 386845 : MemoryChunk* chunk = chunks_[type].back();
1219 : chunks_[type].pop_back();
1220 386845 : return chunk;
1221 : }
1222 :
1223 : void ReconsiderDelayedChunks();
1224 : template <FreeMode mode>
1225 : void PerformFreeMemoryOnQueuedChunks();
1226 :
1227 : base::Mutex mutex_;
1228 : MemoryAllocator* allocator_;
1229 : std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1230 : // Delayed chunks cannot be processed in the current unmapping cycle because
1231 : // of dependencies such as an active sweeper.
1232 : // See MemoryAllocator::CanFreeMemoryChunk.
1233 : std::list<MemoryChunk*> delayed_regular_chunks_;
1234 : base::Semaphore pending_unmapping_tasks_semaphore_;
1235 : intptr_t concurrent_unmapping_tasks_active_;
1236 :
1237 : friend class MemoryAllocator;
1238 : };
1239 :
1240 : enum AllocationMode {
1241 : kRegular,
1242 : kPooled,
1243 : };
1244 :
1245 : enum FreeMode {
1246 : kFull,
1247 : kAlreadyPooled,
1248 : kPreFreeAndQueue,
1249 : kPooledAndQueue,
1250 : };
1251 :
1252 : static size_t CodePageGuardStartOffset();
1253 :
1254 : static size_t CodePageGuardSize();
1255 :
1256 : static size_t CodePageAreaStartOffset();
1257 :
1258 : static size_t CodePageAreaEndOffset();
1259 :
1260 120042 : static size_t CodePageAreaSize() {
1261 120889 : return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1262 : }
1263 :
1264 2541 : static size_t PageAreaSize(AllocationSpace space) {
1265 : DCHECK_NE(LO_SPACE, space);
1266 : return (space == CODE_SPACE) ? CodePageAreaSize()
1267 305963 : : Page::kAllocatableMemory;
1268 : }
1269 :
1270 : static intptr_t GetCommitPageSize();
1271 :
1272 : explicit MemoryAllocator(Isolate* isolate);
1273 :
1274 : // Initializes its internal bookkeeping structures.
1275 : // Max capacity of the total space and executable memory limit.
1276 : bool SetUp(size_t max_capacity, size_t capacity_executable,
1277 : size_t code_range_size);
1278 :
1279 : void TearDown();
1280 :
1281 : // Allocates a Page from the allocator. AllocationMode is used to indicate
1282 : // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1283 : // should be tried first.
1284 : template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1285 : typename SpaceType>
1286 : Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
1287 :
1288 : LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
1289 : Executability executable);
1290 :
1291 : template <MemoryAllocator::FreeMode mode = kFull>
1292 : void Free(MemoryChunk* chunk);
1293 :
1294 : bool CanFreeMemoryChunk(MemoryChunk* chunk);
1295 :
1296 : // Returns allocated spaces in bytes.
1297 : size_t Size() { return size_.Value(); }
1298 :
1299 : // Returns allocated executable spaces in bytes.
1300 : size_t SizeExecutable() { return size_executable_.Value(); }
1301 :
1302 : // Returns the maximum available bytes of heaps.
1303 : size_t Available() {
1304 : const size_t size = Size();
1305 211320 : return capacity_ < size ? 0 : capacity_ - size;
1306 : }
1307 :
1308 : // Returns the maximum available executable bytes of heaps.
1309 : size_t AvailableExecutable() {
1310 : const size_t executable_size = SizeExecutable();
1311 : if (capacity_executable_ < executable_size) return 0;
1312 : return capacity_executable_ - executable_size;
1313 : }
1314 :
1315 : // Returns maximum available bytes that the old space can have.
1316 69189 : size_t MaxAvailable() {
1317 69189 : return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
1318 : }
1319 :
1320 : // Returns an indication of whether a pointer is in a space that has
1321 : // been allocated by this MemoryAllocator.
1322 : V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1323 6257530 : return address < lowest_ever_allocated_.Value() ||
1324 : address >= highest_ever_allocated_.Value();
1325 : }
1326 :
1327 : // Returns a MemoryChunk in which the memory region from commit_area_size to
1328 : // reserve_area_size of the chunk area is reserved but not committed; it
1329 : // can be committed later by calling MemoryChunk::CommitArea.
1330 : MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
1331 : Executability executable, Space* space);
1332 :
1333 : void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
1334 :
1335 : Address ReserveAlignedMemory(size_t requested, size_t alignment,
1336 : base::VirtualMemory* controller);
1337 : Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1338 : size_t alignment, Executability executable,
1339 : base::VirtualMemory* controller);
1340 :
1341 : bool CommitMemory(Address addr, size_t size, Executability executable);
1342 :
1343 : void FreeMemory(base::VirtualMemory* reservation, Executability executable);
1344 : void PartialFreeMemory(MemoryChunk* chunk, Address start_free);
1345 : void FreeMemory(Address addr, size_t size, Executability executable);
1346 :
1347 : // Commit a contiguous block of memory from the initial chunk. Assumes that
1348 : // the address is not NULL, the size is greater than zero, and that the
1349 : // block is contained in the initial chunk. Returns true if it succeeded
1350 : // and false otherwise.
1351 : bool CommitBlock(Address start, size_t size, Executability executable);
1352 :
1353 : // Uncommit a contiguous block of memory [start..(start+size)[.
1354 : // Assumes that start is not NULL, the size is greater than zero, and the
1355 : // block is contained in the initial chunk. Returns true if it succeeded
1356 : // and false otherwise.
1357 : bool UncommitBlock(Address start, size_t size);
1358 :
1359 : // Zaps a contiguous block of memory [start..(start+size)[ thus
1360 : // filling it up with a recognizable non-NULL bit pattern.
1361 : void ZapBlock(Address start, size_t size);
1362 :
1363 : MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1364 : Address start, size_t commit_size,
1365 : size_t reserved_size);
1366 :
1367 : CodeRange* code_range() { return code_range_; }
1368 : Unmapper* unmapper() { return &unmapper_; }
1369 :
1370 : #ifdef DEBUG
1371 : // Reports statistic info of the space.
1372 : void ReportStatistics();
1373 : #endif
1374 :
1375 : private:
1376 : // PreFree logically frees the object, i.e., it takes care of the size
1377 : // bookkeeping and calls the allocation callback.
1378 : void PreFreeMemory(MemoryChunk* chunk);
1379 :
1380 : // FreeMemory can be called concurrently when PreFree was executed before.
1381 : void PerformFreeMemory(MemoryChunk* chunk);
1382 :
1383 : // See AllocatePage for public interface. Note that currently we only support
1384 : // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1385 : template <typename SpaceType>
1386 : MemoryChunk* AllocatePagePooled(SpaceType* owner);
1387 :
1388 : // Initializes pages in a chunk. Returns the first page address.
1389 : // This function and GetChunkId() are provided for the mark-compact
1390 : // collector to rebuild page headers in the from space, which is
1391 : // used as a marking stack and its page headers are destroyed.
1392 : Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1393 : PagedSpace* owner);
1394 :
1395 904859 : void UpdateAllocatedSpaceLimits(void* low, void* high) {
1396 : // The use of atomic primitives does not guarantee correctness (wrt.
1397 : // desired semantics) by default. The loop here ensures that we update the
1398 : // values only if they did not change in between.
1399 : void* ptr = nullptr;
1400 904859 : do {
1401 : ptr = lowest_ever_allocated_.Value();
1402 1199282 : } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1403 904859 : do {
1404 : ptr = highest_ever_allocated_.Value();
1405 977700 : } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
1406 904859 : }
1407 :
1408 : Isolate* isolate_;
1409 : CodeRange* code_range_;
1410 :
1411 : // Maximum space size in bytes.
1412 : size_t capacity_;
1413 : // Maximum subset of capacity_ that can be executable
1414 : size_t capacity_executable_;
1415 :
1416 : // Allocated space size in bytes.
1417 : base::AtomicNumber<size_t> size_;
1418 : // Allocated executable space size in bytes.
1419 : base::AtomicNumber<size_t> size_executable_;
1420 :
1421 : // We keep the lowest and highest addresses allocated as a quick way
1422 : // of determining that pointers are outside the heap. The estimate is
1423 : // conservative, i.e. not all addresses in 'allocated' space are allocated
1424 : // to our heap. The range is [lowest, highest[, inclusive on the low end
1425 : // and exclusive on the high end.
1426 : base::AtomicValue<void*> lowest_ever_allocated_;
1427 : base::AtomicValue<void*> highest_ever_allocated_;
1428 :
1429 : base::VirtualMemory last_chunk_;
1430 : Unmapper unmapper_;
1431 :
1432 : friend class TestCodeRangeScope;
1433 :
1434 : DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1435 : };
1436 :
1437 : extern template Page*
1438 : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1439 : size_t size, PagedSpace* owner, Executability executable);
1440 : extern template Page*
1441 : MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1442 : size_t size, SemiSpace* owner, Executability executable);
1443 : extern template Page*
1444 : MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1445 : size_t size, SemiSpace* owner, Executability executable);
1446 :
1447 : // -----------------------------------------------------------------------------
1448 : // Interface for heap object iterator to be implemented by all object space
1449 : // object iterators.
1450 : //
1451 : // NOTE: The space-specific object iterators also implement their own next()
1452 : // method, which is used to avoid virtual function calls when iterating
1453 : // a specific space.
1454 :
1455 179064 : class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
1456 : public:
1457 122845 : virtual ~ObjectIterator() {}
1458 : virtual HeapObject* Next() = 0;
1459 : };
1460 :
1461 : template <class PAGE_TYPE>
1462 : class PageIteratorImpl
1463 : : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1464 : public:
1465 3358747 : explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
1466 : PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1467 0 : PAGE_TYPE* operator*() { return p_; }
1468 : bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1469 301979 : return rhs.p_ == p_;
1470 : }
1471 : bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1472 10737628 : return rhs.p_ != p_;
1473 : }
1474 : inline PageIteratorImpl<PAGE_TYPE>& operator++();
1475 : inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1476 :
1477 : private:
1478 : PAGE_TYPE* p_;
1479 : };
1480 :
1481 : typedef PageIteratorImpl<Page> PageIterator;
1482 : typedef PageIteratorImpl<LargePage> LargePageIterator;
1483 :
1484 : class PageRange {
1485 : public:
1486 : typedef PageIterator iterator;
1487 73710 : PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
1488 0 : explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
1489 : inline PageRange(Address start, Address limit);
1490 :
1491 : iterator begin() { return iterator(begin_); }
1492 : iterator end() { return iterator(end_); }
1493 :
1494 : private:
1495 : Page* begin_;
1496 : Page* end_;
1497 : };
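     :
     : // Usage sketch (assumed names): PageRange models the range-based for
     : // protocol via begin()/end(), so the pages of a single-page range can be
     : // visited as
     : //
     : //   for (Page* page : PageRange(some_page)) {
     : //     VisitPage(page);  // hypothetical callback
     : //   }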
1498 :
1499 : // -----------------------------------------------------------------------------
1500 : // Heap object iterator in new/old/map spaces.
1501 : //
1502 : // A HeapObjectIterator iterates objects from the bottom of the given space
1503 : // to its top or from the bottom of the given page to its top.
1504 : //
1505 : // If objects are allocated in the page during iteration the iterator may
1506 : // or may not iterate over those objects. The caller must create a new
1507 : // iterator in order to be sure to visit these new objects.
1508 147414 : class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
1509 : public:
1510 : // Creates a new object iterator in a given space.
1511 : explicit HeapObjectIterator(PagedSpace* space);
1512 : explicit HeapObjectIterator(Page* page);
1513 :
1514 : // Advance to the next object, skipping free spaces and other fillers and
1515 : // skipping the special garbage section of which there is one per space.
1516 : // Returns nullptr when the iteration has ended.
1517 : inline HeapObject* Next() override;
1518 :
1519 : private:
1520 : // Fast (inlined) path of next().
1521 : inline HeapObject* FromCurrentPage();
1522 :
1523 : // Slow path of next(), goes into the next page. Returns false if the
1524 : // iteration has ended.
1525 : bool AdvanceToNextPage();
1526 :
1527 : Address cur_addr_; // Current iteration point.
1528 : Address cur_end_; // End iteration point.
1529 : PagedSpace* space_;
1530 : PageRange page_range_;
1531 : PageRange::iterator current_page_;
1532 : };
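     :
     : // Iteration sketch (assumed names): Next() returns nullptr at the end,
     : // so a full walk over old space reads as
     : //
     : //   HeapObjectIterator it(heap->old_space());
     : //   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
     : //     ProcessObject(obj);  // hypothetical callback
     : //   }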
1533 :
1534 :
1535 : // -----------------------------------------------------------------------------
1536 : // A space has a circular list of pages. The next page can be accessed by
1537 : // calling Page::next_page().
1538 :
1539 : // An abstraction of allocation and relocation pointers in a page-structured
1540 : // space.
1541 : class AllocationInfo {
1542 : public:
1543 569502 : AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
1544 : AllocationInfo(Address top, Address limit)
1545 1091699 : : original_top_(top), top_(top), limit_(limit) {}
1546 :
1547 : void Reset(Address top, Address limit) {
1548 5478313 : original_top_ = top;
1549 : set_top(top);
1550 : set_limit(limit);
1551 : }
1552 :
1553 : Address original_top() {
1554 : SLOW_DCHECK(top_ == NULL ||
1555 : (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1556 : return original_top_;
1557 : }
1558 :
1559 : INLINE(void set_top(Address top)) {
1560 : SLOW_DCHECK(top == NULL ||
1561 : (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
1562 793320924 : top_ = top;
1563 : }
1564 :
1565 : INLINE(Address top()) const {
1566 : SLOW_DCHECK(top_ == NULL ||
1567 : (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1568 : return top_;
1569 : }
1570 :
1571 : Address* top_address() { return &top_; }
1572 :
1573 : INLINE(void set_limit(Address limit)) {
1574 6296817 : limit_ = limit;
1575 : }
1576 :
1577 : INLINE(Address limit()) const {
1578 : return limit_;
1579 : }
1580 :
1581 : Address* limit_address() { return &limit_; }
1582 :
1583 : #ifdef DEBUG
1584 : bool VerifyPagedAllocation() {
1585 : return (Page::FromAllocationAreaAddress(top_) ==
1586 : Page::FromAllocationAreaAddress(limit_)) &&
1587 : (top_ <= limit_);
1588 : }
1589 : #endif
1590 :
1591 : private:
1592 : // The original top address when the allocation info was initialized.
1593 : Address original_top_;
1594 : // Current allocation top.
1595 : Address top_;
1596 : // Current allocation limit.
1597 : Address limit_;
1598 : };
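     :
     : // The bump-pointer fast path over an AllocationInfo, as a minimal sketch
     : // (TryBump is illustrative; the real fast paths live in the spaces below):
     : //
     : //   Address TryBump(AllocationInfo* info, int size_in_bytes) {
     : //     Address top = info->top();
     : //     if (info->limit() - top < size_in_bytes) return nullptr;
     : //     info->set_top(top + size_in_bytes);
     : //     return top;  // caller converts the raw address into a HeapObject
     : //   }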
1599 :
1600 :
1601 : // An abstraction of the accounting statistics of a page-structured space.
1602 : //
1603 : // The stats are only set by functions that ensure they stay balanced. These
1604 : // functions increase or decrease one of the non-capacity stats in conjunction
1605 : // with capacity, or else they always balance increases and decreases to the
1606 : // non-capacity stats.
1607 : class AllocationStats BASE_EMBEDDED {
1608 : public:
1609 : AllocationStats() { Clear(); }
1610 :
1611 : // Zero out all the allocation statistics (i.e., no capacity).
1612 : void Clear() {
1613 1016673 : capacity_ = 0;
1614 1016673 : max_capacity_ = 0;
1615 1016673 : size_ = 0;
1616 : }
1617 :
1618 160038 : void ClearSize() { size_ = capacity_; }
1619 :
1620 : // Accessors for the allocation statistics.
1621 : size_t Capacity() { return capacity_; }
1622 : size_t MaxCapacity() { return max_capacity_; }
1623 : size_t Size() { return size_; }
1624 :
1625 : // Grow the space by adding available bytes. They are initially marked as
1626 : // being in use (part of the size), but will normally be immediately freed,
1627 : // putting them on the free list and removing them from size_.
1628 : void ExpandSpace(size_t bytes) {
1629 : DCHECK_GE(size_ + bytes, size_);
1630 : DCHECK_GE(capacity_ + bytes, capacity_);
1631 505976 : capacity_ += bytes;
1632 505976 : size_ += bytes;
1633 505976 : if (capacity_ > max_capacity_) {
1634 499828 : max_capacity_ = capacity_;
1635 : }
1636 : }
1637 :
1638 : // Shrink the space by removing available bytes. Since shrinking is done
1639 : // during sweeping, bytes have been marked as being in use (part of the size)
1640 : // and are hereby freed.
1641 : void ShrinkSpace(size_t bytes) {
1642 : DCHECK_GE(capacity_, bytes);
1643 : DCHECK_GE(size_, bytes);
1644 493300 : capacity_ -= bytes;
1645 493300 : size_ -= bytes;
1646 : }
1647 :
1648 : void AllocateBytes(size_t bytes) {
1649 : DCHECK_GE(size_ + bytes, size_);
1650 2227930 : size_ += bytes;
1651 : }
1652 :
1653 : void DeallocateBytes(size_t bytes) {
1654 : DCHECK_GE(size_, bytes);
1655 2691134 : size_ -= bytes;
1656 : }
1657 :
1658 : void DecreaseCapacity(size_t bytes) {
1659 : DCHECK_GE(capacity_, bytes);
1660 : DCHECK_GE(capacity_ - bytes, size_);
1661 303515 : capacity_ -= bytes;
1662 : }
1663 :
1664 : void IncreaseCapacity(size_t bytes) {
1665 : DCHECK_GE(capacity_ + bytes, capacity_);
1666 253462 : capacity_ += bytes;
1667 : }
1668 :
1669 : // Merge |other| into |this|.
1670 : void Merge(const AllocationStats& other) {
1671 : DCHECK_GE(capacity_ + other.capacity_, capacity_);
1672 : DCHECK_GE(size_ + other.size_, size_);
1673 118521 : capacity_ += other.capacity_;
1674 118521 : size_ += other.size_;
1675 118521 : if (other.max_capacity_ > max_capacity_) {
1676 40 : max_capacity_ = other.max_capacity_;
1677 : }
1678 : }
1679 :
1680 : private:
1681 : // |capacity_|: The number of object-area bytes (i.e., not including page
1682 : // bookkeeping structures) currently in the space.
1683 : size_t capacity_;
1684 :
1685 : // |max_capacity_|: The maximum capacity ever observed.
1686 : size_t max_capacity_;
1687 :
1688 : // |size_|: The number of allocated bytes.
1689 : size_t size_;
1690 : };
1691 :
1692 : // A free list maintaining free blocks of memory. The free list is organized in
1693 : // a way to encourage objects allocated around the same time to be near each
1694 : // other. The normal way to allocate is intended to be by bumping a 'top'
1695 : // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1696 : // find a new space to allocate from. This is done with the free list, which is
1697 : // divided up into rough categories to cut down on waste. Having finer
1698 : // categories would scatter allocation more.
1699 :
1700 : // The free list is organized in categories as follows:
1701 : // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1702 : // allocation when categories >= small have no entries anymore.
1703 : // 11-31 words (tiny): The tiny blocks are only used for allocation when
1704 : // categories >= small have no entries anymore.
1705 : // 32-255 words (small): Used for allocating free space between 1-31 words in
1706 : // size.
1707 : // 256-2047 words (medium): Used for allocating free space between 32-255 words
1708 : // in size.
1709 : // 2048-16383 words (large): Used for allocating free space between 256-2047
1710 : // words in size.
1711 : // At least 16384 words (huge): This list is for objects of 2048 words or
1712 : // larger. Empty pages are also added to this list.
1713 : class V8_EXPORT_PRIVATE FreeList {
1714 : public:
1715 : // This method returns how much memory can be allocated after freeing
1716 : // maximum_freed memory.
1717 : static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
1718 479734 : if (maximum_freed <= kTiniestListMax) {
1719 : // Since we are not iterating over all list entries, we cannot guarantee
1720 : // that we can find the maximum freed block in that free list.
1721 : return 0;
1722 473960 : } else if (maximum_freed <= kTinyListMax) {
1723 : return kTinyAllocationMax;
1724 463530 : } else if (maximum_freed <= kSmallListMax) {
1725 : return kSmallAllocationMax;
1726 319845 : } else if (maximum_freed <= kMediumListMax) {
1727 : return kMediumAllocationMax;
1728 192422 : } else if (maximum_freed <= kLargeListMax) {
1729 : return kLargeAllocationMax;
1730 : }
1731 : return maximum_freed;
1732 : }
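     :
     : // For example, with 8-byte pointers a maximum_freed of 100 words (800
     : // bytes) falls into the small list range, so only kSmallAllocationMax
     : // (= kTinyListMax, i.e. 31 words) is guaranteed to be allocatable.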
1733 :
1734 : explicit FreeList(PagedSpace* owner);
1735 :
1736 : // Adds a node on the free list. The block of size {size_in_bytes} starting
1737 : // at {start} is placed on the free list. The return value is the number of
1738 : // bytes that were not added to the free list because the freed memory block
1739 : // was too small. Bookkeeping information will be written to the block, i.e.,
1740 : // its contents will be destroyed. The start address should be word aligned,
1741 : // and the size should be a non-zero multiple of the word size.
1742 : size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
1743 :
1744 : // Allocate a block of size {size_in_bytes} from the free list. The block is
1745 : // uninitialized. A failure is returned if no block is available. The size
1746 : // should be a non-zero multiple of the word size.
1747 : MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
1748 :
1749 : // Clear the free list.
1750 : void Reset();
1751 :
1752 643395 : void ResetStats() {
1753 : wasted_bytes_.SetValue(0);
1754 : ForAllFreeListCategories(
1755 160038 : [](FreeListCategory* category) { category->ResetStats(); });
1756 643395 : }
1757 :
1758 : // Return the number of bytes available on the free list.
1759 : size_t Available() {
1760 : size_t available = 0;
1761 1867279 : ForAllFreeListCategories([&available](FreeListCategory* category) {
1762 1867279 : available += category->available();
1763 : });
1764 : return available;
1765 : }
1766 :
1767 : bool IsEmpty() {
1768 : bool empty = true;
1769 : ForAllFreeListCategories([&empty](FreeListCategory* category) {
1770 : if (!category->is_empty()) empty = false;
1771 : });
1772 : return empty;
1773 : }
1774 :
1775 : // Used after booting the VM.
1776 : void RepairLists(Heap* heap);
1777 :
1778 : size_t EvictFreeListItems(Page* page);
1779 : bool ContainsPageFreeListItems(Page* page);
1780 :
1781 : PagedSpace* owner() { return owner_; }
1782 : size_t wasted_bytes() { return wasted_bytes_.Value(); }
1783 :
1784 : template <typename Callback>
1785 : void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1786 14113146 : FreeListCategory* current = categories_[type];
1787 17519154 : while (current != nullptr) {
1788 : FreeListCategory* next = current->next();
1789 : callback(current);
1790 : current = next;
1791 : }
1792 : }
1793 :
1794 : template <typename Callback>
1795 342255 : void ForAllFreeListCategories(Callback callback) {
1796 14295363 : for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1797 14113146 : ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1798 : }
1799 342255 : }
1800 :
1801 : bool AddCategory(FreeListCategory* category);
1802 : void RemoveCategory(FreeListCategory* category);
1803 : void PrintCategories(FreeListCategoryType type);
1804 :
1805 : #ifdef DEBUG
1806 : size_t SumFreeLists();
1807 : bool IsVeryLong();
1808 : #endif
1809 :
1810 : private:
1811 : class FreeListCategoryIterator {
1812 : public:
1813 : FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1814 4696944 : : current_(free_list->categories_[type]) {}
1815 :
1816 : bool HasNext() { return current_ != nullptr; }
1817 :
1818 : FreeListCategory* Next() {
1819 : DCHECK(HasNext());
1820 : FreeListCategory* tmp = current_;
1821 2480086 : current_ = current_->next();
1822 : return tmp;
1823 : }
1824 :
1825 : private:
1826 : FreeListCategory* current_;
1827 : };
1828 :
1829 : // The size range of blocks, in bytes.
1830 : static const size_t kMinBlockSize = 3 * kPointerSize;
1831 : static const size_t kMaxBlockSize = Page::kAllocatableMemory;
1832 :
1833 : static const size_t kTiniestListMax = 0xa * kPointerSize;
1834 : static const size_t kTinyListMax = 0x1f * kPointerSize;
1835 : static const size_t kSmallListMax = 0xff * kPointerSize;
1836 : static const size_t kMediumListMax = 0x7ff * kPointerSize;
1837 : static const size_t kLargeListMax = 0x3fff * kPointerSize;
1838 : static const size_t kTinyAllocationMax = kTiniestListMax;
1839 : static const size_t kSmallAllocationMax = kTinyListMax;
1840 : static const size_t kMediumAllocationMax = kSmallListMax;
1841 : static const size_t kLargeAllocationMax = kMediumListMax;
1842 :
1843 : FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
1844 :
1845 : // Walks all available categories for a given |type| and tries to retrieve
1846 : // a node. Returns nullptr if the category is empty.
1847 : FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
1848 :
1849 : // Tries to retrieve a node from the first category in a given |type|.
1850 : // Returns nullptr if the category is empty.
1851 : FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
1852 : size_t minimum_size);
1853 :
1854 : // Searches a given |type| for a node of at least |minimum_size|.
1855 : FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
1856 : size_t minimum_size);
1857 :
1858 : FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1859 35402204 : if (size_in_bytes <= kTiniestListMax) {
1860 : return kTiniest;
1861 14435104 : } else if (size_in_bytes <= kTinyListMax) {
1862 : return kTiny;
1863 6923952 : } else if (size_in_bytes <= kSmallListMax) {
1864 : return kSmall;
1865 2175419 : } else if (size_in_bytes <= kMediumListMax) {
1866 : return kMedium;
1867 1282378 : } else if (size_in_bytes <= kLargeListMax) {
1868 : return kLarge;
1869 : }
1870 : return kHuge;
1871 : }
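     :
     : // Worked example, assuming 8-byte pointers: an 8-word block maps to
     : // kTiniest (<= 10 words), a 128-word block to kSmall (<= 255 words), and
     : // an 8192-word block to kLarge (<= 16383 words).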
1872 :
1873 : // The tiny categories are not used for fast allocation.
1874 : FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1875 : size_t size_in_bytes) {
1876 2825778 : if (size_in_bytes <= kSmallAllocationMax) {
1877 : return kSmall;
1878 794199 : } else if (size_in_bytes <= kMediumAllocationMax) {
1879 : return kMedium;
1880 640983 : } else if (size_in_bytes <= kLargeAllocationMax) {
1881 : return kLarge;
1882 : }
1883 : return kHuge;
1884 : }
1885 :
1886 0 : FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
1887 :
1888 : PagedSpace* owner_;
1889 : base::AtomicNumber<size_t> wasted_bytes_;
1890 : FreeListCategory* categories_[kNumberOfCategories];
1891 :
1892 : friend class FreeListCategory;
1893 :
1894 : DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1895 : };
1896 :
1897 : // LocalAllocationBuffer represents a linear allocation area that is created
1898 : // from a given {AllocationResult} and can be used to allocate memory without
1899 : // synchronization.
1900 : //
1901 : // The buffer is properly closed upon destruction and reassignment.
1902 : // Example:
1903 : // {
1904 : // AllocationResult result = ...;
1905 : // LocalAllocationBuffer a(heap, result, size);
1906 : // LocalAllocationBuffer b = a;
1907 : // CHECK(!a.IsValid());
1908 : // CHECK(b.IsValid());
1909 : // // {a} is invalid now and cannot be used for further allocations.
1910 : // }
1911 : // // Since {b} went out of scope, the LAB is closed, resulting in creating a
1912 : // // filler object for the remaining area.
1913 : class LocalAllocationBuffer {
1914 : public:
1915 : // Indicates that a buffer cannot be used for allocations anymore. Can result
1916 : // from either reassigning a buffer, or trying to construct it from an
1917 : // invalid {AllocationResult}.
1918 : static inline LocalAllocationBuffer InvalidBuffer();
1919 :
1920 : // Creates a new LAB from a given {AllocationResult}. Results in
1921 : // InvalidBuffer if the result indicates a retry.
1922 : static inline LocalAllocationBuffer FromResult(Heap* heap,
1923 : AllocationResult result,
1924 : intptr_t size);
1925 :
1926 474900 : ~LocalAllocationBuffer() { Close(); }
1927 :
1928 : // Convert to C++11 move-semantics once allowed by the style guide.
1929 : LocalAllocationBuffer(const LocalAllocationBuffer& other);
1930 : LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
1931 :
1932 : MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
1933 : int size_in_bytes, AllocationAlignment alignment);
1934 :
1935 16359854 : inline bool IsValid() { return allocation_info_.top() != nullptr; }
1936 :
1937 : // Try to merge LABs, which is only possible when they are adjacent in memory.
1938 : // Returns true if the merge was successful, false otherwise.
1939 : inline bool TryMerge(LocalAllocationBuffer* other);
1940 :
1941 : // Close a LAB, effectively invalidating it. Returns the unused area.
1942 : AllocationInfo Close();
1943 :
1944 : private:
1945 : LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
1946 :
1947 : Heap* heap_;
1948 : AllocationInfo allocation_info_;
1949 : };
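     :
     : // Usage sketch (assumed names; kLabSize is illustrative): carve a LAB out
     : // of new space once, then allocate from it without synchronization:
     : //
     : //   AllocationResult result = heap->new_space()->AllocateRawSynchronized(
     : //       kLabSize, kWordAligned);
     : //   LocalAllocationBuffer lab =
     : //       LocalAllocationBuffer::FromResult(heap, result, kLabSize);
     : //   if (lab.IsValid()) {
     : //     AllocationResult object =
     : //         lab.AllocateRawAligned(object_size, kWordAligned);
     : //   }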
1950 :
1951 : class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
1952 : public:
1953 : typedef PageIterator iterator;
1954 :
1955 : static const intptr_t kCompactionMemoryWanted = 500 * KB;
1956 :
1957 : // Creates a space with an id.
1958 : PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
1959 :
1960 483052 : ~PagedSpace() override { TearDown(); }
1961 :
1962 : // Set up the space using the given address range of virtual memory (from
1963 : // the memory allocator's initial chunk) if possible. If the block of
1964 : // addresses is not big enough to contain a single page-aligned page, a
1965 : // fresh chunk will be allocated.
1966 : bool SetUp();
1967 :
1968 : // Returns true if the space has been successfully set up and not
1969 : // subsequently torn down.
1970 : bool HasBeenSetUp();
1971 :
1972 : // Checks whether an object/address is in this space.
1973 : inline bool Contains(Address a);
1974 : inline bool Contains(Object* o);
1975 : bool ContainsSlow(Address addr);
1976 :
1977 : // During boot the free_space_map is created, and afterwards we may need
1978 : // to write it into the free list nodes that were already created.
1979 : void RepairFreeListsAfterDeserialization();
1980 :
1981 : // Prepares for a mark-compact GC.
1982 : void PrepareForMarkCompact();
1983 :
1984 : // Current capacity without growing (Size() + Available()).
1985 1698159 : size_t Capacity() { return accounting_stats_.Capacity(); }
1986 :
1987 : // Approximate amount of physical memory committed for this space.
1988 : size_t CommittedPhysicalMemory() override;
1989 :
1990 : void ResetFreeListStatistics();
1991 :
1992 : // Sets the capacity, the available space and the wasted space to zero.
1993 : // The stats are rebuilt during sweeping by adding each page to the
1994 : // capacity and the size when it is encountered. As free spaces are
1995 : // discovered during the sweeping they are subtracted from the size and added
1996 : // to the available and wasted totals.
1997 160038 : void ClearStats() {
1998 160038 : accounting_stats_.ClearSize();
1999 160038 : free_list_.ResetStats();
2000 160038 : ResetFreeListStatistics();
2001 160038 : }
2002 :
2003 : // Available bytes without growing. These are the bytes on the free list.
2004 : // The bytes in the linear allocation area are not included in this total
2005 : // because updating the stats would slow down allocation. New pages are
2006 : // immediately added to the free list so they show up here.
2007 1446292 : size_t Available() override { return free_list_.Available(); }
2008 :
2009 : // Allocated bytes in this space. Garbage bytes that were not found due to
2010 : // concurrent sweeping are counted as being allocated! The bytes in the
2011 : // current linear allocation area (between top and limit) are also counted
2012 : // here.
2013 10717995 : size_t Size() override { return accounting_stats_.Size(); }
2014 :
2015 : // As size, but the bytes in lazily swept pages are estimated and the bytes
2016 : // in the current linear allocation area are not included.
2017 : size_t SizeOfObjects() override;
2018 :
2019 : // Wasted bytes in this space. These are just the bytes that were thrown away
2020 : // due to being too small to use for allocation.
2021 826480 : virtual size_t Waste() { return free_list_.wasted_bytes(); }
2022 :
2023 : // Returns the allocation pointer in this space.
2024 409434300 : Address top() { return allocation_info_.top(); }
2025 34228103 : Address limit() { return allocation_info_.limit(); }
2026 :
2027 : // The allocation top address.
2028 : Address* allocation_top_address() { return allocation_info_.top_address(); }
2029 :
2030 : // The allocation limit address.
2031 : Address* allocation_limit_address() {
2032 : return allocation_info_.limit_address();
2033 : }
2034 :
2035 : enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2036 :
2037 : // Allocate the requested number of bytes in the space if possible, return a
2038 : // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2039 : // to be manually updated later.
2040 : MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
2041 : int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
2042 :
2043 : MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
2044 : int size_in_bytes);
2045 :
2046 : // Allocate the requested number of bytes in the space double aligned if
2047 : // possible, return a failure object if not.
2048 : MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2049 : int size_in_bytes, AllocationAlignment alignment);
2050 :
2051 : // Allocate the requested number of bytes in the space and consider allocation
2052 : // alignment if needed.
2053 : MUST_USE_RESULT inline AllocationResult AllocateRaw(
2054 : int size_in_bytes, AllocationAlignment alignment);
2055 :
2056 : // Give a block of memory to the space's free list. It might be added to
2057 : // the free list or accounted as waste.
2058 : // Use UnaccountedFree() below when the accounting stats should not be
2059 : // updated yet; such categories can be relinked later via
2060 : // RelinkFreeListCategories().
2060 : size_t Free(Address start, size_t size_in_bytes) {
2061 2691134 : size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
2062 : accounting_stats_.DeallocateBytes(size_in_bytes);
2063 : DCHECK_GE(size_in_bytes, wasted);
2064 : return size_in_bytes - wasted;
2065 : }
2066 :
2067 : size_t UnaccountedFree(Address start, size_t size_in_bytes) {
2068 34034526 : size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2069 : DCHECK_GE(size_in_bytes, wasted);
2070 34179371 : return size_in_bytes - wasted;
2071 : }
2072 :
2073 182091 : void ResetFreeList() { free_list_.Reset(); }
2074 :
2075 : // Set space allocation info.
2076 : void SetTopAndLimit(Address top, Address limit) {
2077 : DCHECK(top == limit ||
2078 : Page::FromAddress(top) == Page::FromAddress(limit - 1));
2079 4316454 : MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2080 : allocation_info_.Reset(top, limit);
2081 : }
2082 :
2083 : void SetAllocationInfo(Address top, Address limit);
2084 :
2085 : // Empty space allocation info, returning unused area to free list.
2086 : void EmptyAllocationInfo();
2087 :
2088 : void MarkAllocationInfoBlack();
2089 :
2090 : void AccountAllocatedBytes(size_t bytes) {
2091 : accounting_stats_.AllocateBytes(bytes);
2092 : }
2093 :
2094 : void IncreaseCapacity(size_t bytes);
2095 :
2096 : // Releases an unused page and shrinks the space.
2097 : void ReleasePage(Page* page);
2098 :
2099 : // The dummy page that anchors the linked list of pages.
2100 : Page* anchor() { return &anchor_; }
2101 :
2102 :
2103 : #ifdef VERIFY_HEAP
2104 : // Verify integrity of this space.
2105 : virtual void Verify(ObjectVisitor* visitor);
2106 :
2107 : // Overridden by subclasses to verify space-specific object
2108 : // properties (e.g., only maps or free-list nodes are in map space).
2109 : virtual void VerifyObject(HeapObject* obj) {}
2110 : #endif
2111 :
2112 : #ifdef DEBUG
2113 : // Print meta info and objects in this space.
2114 : void Print() override;
2115 :
2116 : // Reports statistics for the space
2117 : void ReportStatistics();
2118 :
2119 : // Report code object related statistics
2120 : static void ReportCodeStatistics(Isolate* isolate);
2121 : static void ResetCodeStatistics(Isolate* isolate);
2122 : #endif
2123 :
2124 : Page* FirstPage() { return anchor_.next_page(); }
2125 : Page* LastPage() { return anchor_.prev_page(); }
2126 :
2127 : bool CanExpand(size_t size);
2128 :
2129 : // Returns the number of total pages in this space.
2130 : int CountTotalPages();
2131 :
2132 : // Return size of allocatable area on a page in this space.
2133 3338403 : inline int AreaSize() { return static_cast<int>(area_size_); }
2134 :
2135 750116 : virtual bool is_local() { return false; }
2136 :
2137 : // Merges {other} into the current space. Note that this modifies {other},
2138 : // e.g., removes its bump pointer area and resets statistics.
2139 : void MergeCompactionSpace(CompactionSpace* other);
2140 :
2141 : // Refills the free list from the corresponding free list filled by the
2142 : // sweeper.
2143 : virtual void RefillFreeList();
2144 :
2145 : FreeList* free_list() { return &free_list_; }
2146 :
2147 : base::Mutex* mutex() { return &space_mutex_; }
2148 :
2149 : inline void UnlinkFreeListCategories(Page* page);
2150 : inline intptr_t RelinkFreeListCategories(Page* page);
2151 :
2152 4284996 : iterator begin() { return iterator(anchor_.next_page()); }
2153 5508761 : iterator end() { return iterator(&anchor_); }
2154 :
2155 : // Shrink immortal immovable pages of the space to be exactly the size needed
2156 : // using the high water mark.
2157 : void ShrinkImmortalImmovablePages();
2158 :
2159 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2160 :
2161 : protected:
2162 : // PagedSpaces that should be included in snapshots have different, i.e.,
2163 : // smaller, initial pages.
2164 0 : virtual bool snapshotable() { return true; }
2165 :
2166 : bool HasPages() { return anchor_.next_page() != &anchor_; }
2167 :
2168 : // Cleans up the space, frees all pages in this space except those belonging
2169 : // to the initial chunk, uncommits addresses in the initial chunk.
2170 : void TearDown();
2171 :
2172 : // Expands the space by allocating a fixed number of pages. Returns false if
2173 : // it cannot allocate the requested number of pages from the OS, or if the
2174 : // hard heap size limit has been hit.
2175 : bool Expand();
2176 :
2177 : // Generic fast case allocation function that tries linear allocation at the
2178 : // address denoted by top in allocation_info_.
2179 : inline HeapObject* AllocateLinearly(int size_in_bytes);
2180 :
2181 : // Generic fast case allocation function that tries aligned linear allocation
2182 : // at the address denoted by top in allocation_info_. Writes the aligned
2183 : // allocation size, which includes the filler size, to size_in_bytes.
2184 : inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
2185 : AllocationAlignment alignment);
2186 :
2187 : // If sweeping is still in progress try to sweep unswept pages. If that is
2188 : // not successful, wait for the sweeper threads and re-try free-list
2189 : // allocation.
2190 : MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
2191 : int size_in_bytes);
2192 :
2193 : // Slow path of AllocateRaw. This function is space-dependent.
2194 : MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2195 :
2196 : size_t area_size_;
2197 :
2198 : // Accounting information for this space.
2199 : AllocationStats accounting_stats_;
2200 :
2201 : // The dummy page that anchors the double linked list of pages.
2202 : Page anchor_;
2203 :
2204 : // The space's free list.
2205 : FreeList free_list_;
2206 :
2207 : // Normal allocation information.
2208 : AllocationInfo allocation_info_;
2209 :
2210 : // Mutex guarding any concurrent access to the space.
2211 : base::Mutex space_mutex_;
2212 :
2213 : friend class IncrementalMarking;
2214 : friend class MarkCompactCollector;
2215 :
2216 : // Used in cctest.
2217 : friend class HeapTester;
2218 : };
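     :
     : // Allocation sketch (assumed names; the retry policy is up to the caller):
     : //
     : //   HeapObject* obj = nullptr;
     : //   AllocationResult result = space->AllocateRawUnaligned(size_in_bytes);
     : //   if (result.To(&obj)) {
     : //     // Success: |obj| points into this space.
     : //   }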
2219 :
2220 : enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2221 :
2222 : // -----------------------------------------------------------------------------
2223 : // SemiSpace in young generation
2224 : //
2225 : // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2226 : // The mark-compact collector uses the memory of the first page in the from
2227 : // space as a marking stack when tracing live objects.
2228 237140 : class SemiSpace : public Space {
2229 : public:
2230 : typedef PageIterator iterator;
2231 :
2232 : static void Swap(SemiSpace* from, SemiSpace* to);
2233 :
2234 121564 : SemiSpace(Heap* heap, SemiSpaceId semispace)
2235 : : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2236 : current_capacity_(0),
2237 : maximum_capacity_(0),
2238 : minimum_capacity_(0),
2239 : age_mark_(nullptr),
2240 : committed_(false),
2241 : id_(semispace),
2242 : anchor_(this),
2243 : current_page_(nullptr),
2244 243128 : pages_used_(0) {}
2245 :
2246 : inline bool Contains(HeapObject* o);
2247 : inline bool Contains(Object* o);
2248 : inline bool ContainsSlow(Address a);
2249 :
2250 : void SetUp(size_t initial_capacity, size_t maximum_capacity);
2251 : void TearDown();
2252 : bool HasBeenSetUp() { return maximum_capacity_ != 0; }
2253 :
2254 : bool Commit();
2255 : bool Uncommit();
2256 : bool is_committed() { return committed_; }
2257 :
2258 : // Grow the semispace to the new capacity. The new capacity requested must
2259 : // be larger than the current capacity and less than the maximum capacity.
2260 : bool GrowTo(size_t new_capacity);
2261 :
2262 : // Shrinks the semispace to the new capacity. The new capacity requested
2263 : // must be more than the amount of used memory in the semispace and less
2264 : // than the current capacity.
2265 : bool ShrinkTo(size_t new_capacity);
2266 :
2267 : bool EnsureCurrentCapacity();
2268 :
2269 : // Returns the start address of the first page of the space.
2270 405918 : Address space_start() {
2271 : DCHECK_NE(anchor_.next_page(), anchor());
2272 405918 : return anchor_.next_page()->area_start();
2273 : }
2274 :
2275 : Page* first_page() { return anchor_.next_page(); }
2276 : Page* current_page() { return current_page_; }
2277 : int pages_used() { return pages_used_; }
2278 :
2279 : // Returns one past the end address of the space.
2280 276756 : Address space_end() { return anchor_.prev_page()->area_end(); }
2281 :
2282 : // Returns the start address of the current page of the space.
2283 1699627 : Address page_low() { return current_page_->area_start(); }
2284 :
2285 : // Returns one past the end address of the current page of the space.
2286 1548706 : Address page_high() { return current_page_->area_end(); }
2287 :
2288 375804 : bool AdvancePage() {
2289 187902 : Page* next_page = current_page_->next_page();
2290 : // We cannot expand if we reached the maximum number of pages already. Note
2291 : // that this check must already account for the next page, as we could
2292 : // potentially fill that whole page right after advancing.
2293 187902 : const bool reached_max_pages = (pages_used_ + 1) == max_pages();
2294 187902 : if (next_page == anchor() || reached_max_pages) {
2295 : return false;
2296 : }
2297 120063 : current_page_ = next_page;
2298 120063 : pages_used_++;
2299 120063 : return true;
2300 : }
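     :
     : // For example, with current_capacity_ == 4 * Page::kPageSize, max_pages()
     : // is 4, so a semispace that has already used 3 pages cannot advance any
     : // further, even if another page is linked in.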
2301 :
2302 : // Resets the space to using the first page.
2303 : void Reset();
2304 :
2305 : void RemovePage(Page* page);
2306 : void PrependPage(Page* page);
2307 :
2308 : // Age mark accessors.
2309 : Address age_mark() { return age_mark_; }
2310 : void set_age_mark(Address mark);
2311 :
2312 : // Returns the current capacity of the semispace.
2313 : size_t current_capacity() { return current_capacity_; }
2314 :
2315 : // Returns the maximum capacity of the semispace.
2316 : size_t maximum_capacity() { return maximum_capacity_; }
2317 :
2318 : // Returns the initial capacity of the semispace.
2319 : size_t minimum_capacity() { return minimum_capacity_; }
2320 :
2321 : SemiSpaceId id() { return id_; }
2322 :
2323 : // Approximate amount of physical memory committed for this space.
2324 : size_t CommittedPhysicalMemory() override;
2325 :
2326 : // If we don't define these here, SemiSpace will be abstract. However,
2327 : // they should never be called:
2328 :
2329 0 : size_t Size() override {
2330 0 : UNREACHABLE();
2331 : return 0;
2332 : }
2333 :
2334 0 : size_t SizeOfObjects() override { return Size(); }
2335 :
2336 0 : size_t Available() override {
2337 0 : UNREACHABLE();
2338 : return 0;
2339 : }
2340 :
2341 1165848 : iterator begin() { return iterator(anchor_.next_page()); }
2342 519702 : iterator end() { return iterator(anchor()); }
2343 :
2344 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2345 :
2346 : #ifdef DEBUG
2347 : void Print() override;
2348 : // Validate a range of addresses in a SemiSpace.
2349 : // The "from" address must be on a page prior to the "to" address,
2350 : // in the linked page order, or it must be earlier on the same page.
2351 : static void AssertValidRange(Address from, Address to);
2352 : #else
2353 : // Do nothing.
2354 : inline static void AssertValidRange(Address from, Address to) {}
2355 : #endif
2356 :
2357 : #ifdef VERIFY_HEAP
2358 : virtual void Verify();
2359 : #endif
2360 :
2361 : private:
2362 : void RewindPages(Page* start, int num_pages);
2363 :
2364 : inline Page* anchor() { return &anchor_; }
2365 : inline int max_pages() {
2366 187902 : return static_cast<int>(current_capacity_ / Page::kPageSize);
2367 : }
2368 :
2369 : // Copies the flags into the masked positions on all pages in the space.
2370 : void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2371 :
2372 : // The currently committed space capacity.
2373 : size_t current_capacity_;
2374 :
2375 : // The maximum capacity that can be used by this space. A space cannot grow
2376 : // beyond that size.
2377 : size_t maximum_capacity_;
2378 :
2379 : // The minimum capacity for the space. A space cannot shrink below this size.
2380 : size_t minimum_capacity_;
2381 :
2382 : // Used to govern object promotion during mark-compact collection.
2383 : Address age_mark_;
2384 :
2385 : bool committed_;
2386 : SemiSpaceId id_;
2387 :
2388 : Page anchor_;
2389 : Page* current_page_;
2390 : int pages_used_;
2391 :
2392 : friend class NewSpace;
2393 : friend class SemiSpaceIterator;
2394 : };
2395 :
2396 :
2397 : // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2398 : // semispace of the heap's new space. It iterates over the objects in the
2399 : // semispace from a given start address (defaulting to the bottom of the
2400 : // semispace) to the top of the semispace. New objects allocated after the
2401 : // iterator is created are not iterated.
2402 49138 : class SemiSpaceIterator : public ObjectIterator {
2403 : public:
2404 : // Create an iterator over the allocated objects in the given to-space.
2405 : explicit SemiSpaceIterator(NewSpace* space);
2406 :
2407 : inline HeapObject* Next() override;
2408 :
2409 : private:
2410 : void Initialize(Address start, Address end);
2411 :
2412 : // The current iteration point.
2413 : Address current_;
2414 : // The end of iteration.
2415 : Address limit_;
2416 : };
2417 :
2418 : // -----------------------------------------------------------------------------
2419 : // The young generation space.
2420 : //
2421 : // The new space consists of a contiguous pair of semispaces. It simply
2422 : // forwards most functions to the appropriate semispace.
2423 :
2424 237140 : class NewSpace : public Space {
2425 : public:
2426 : typedef PageIterator iterator;
2427 :
2428 60782 : explicit NewSpace(Heap* heap)
2429 : : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2430 : to_space_(heap, kToSpace),
2431 : from_space_(heap, kFromSpace),
2432 : reservation_(),
2433 : top_on_previous_step_(0),
2434 : allocated_histogram_(nullptr),
2435 121564 : promoted_histogram_(nullptr) {}
2436 :
2437 : inline bool Contains(HeapObject* o);
2438 : inline bool ContainsSlow(Address a);
2439 : inline bool Contains(Object* o);
2440 :
2441 : bool SetUp(size_t initial_semispace_capacity, size_t max_semispace_capacity);
2442 :
2443 : // Tears down the space. Heap memory was not allocated by the space, so it
2444 : // is not deallocated here.
2445 : void TearDown();
2446 :
2447 : // True if the space has been set up but not torn down.
2448 : bool HasBeenSetUp() {
2449 : return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2450 : }
2451 :
2452 : // Flip the pair of spaces.
2453 : void Flip();
2454 :
2455 : // Grow the capacity of the semispaces. Assumes that they are not at
2456 : // their maximum capacity.
2457 : void Grow();
2458 :
2459 : // Shrink the capacity of the semispaces.
2460 : void Shrink();
2461 :
2462 : // Return the allocated bytes in the active semispace.
2463 1396240 : size_t Size() override {
2464 : DCHECK_GE(top(), to_space_.page_low());
2465 4188720 : return to_space_.pages_used() * Page::kAllocatableMemory +
2466 4188720 : static_cast<size_t>(top() - to_space_.page_low());
2467 : }
2468 :
2469 856717 : size_t SizeOfObjects() override { return Size(); }
2470 :
2471 : // Return the allocatable capacity of a semispace.
2472 : size_t Capacity() {
2473 : SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2474 444289 : return (to_space_.current_capacity() / Page::kPageSize) *
2475 444289 : Page::kAllocatableMemory;
2476 : }
2477 :
2478 : // Return the current size of a semispace, allocatable and non-allocatable
2479 : // memory.
2480 : size_t TotalCapacity() {
2481 : DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2482 394160 : return to_space_.current_capacity();
2483 : }
2484 :
2485 : // Committed memory for NewSpace is the committed memory of both semi-spaces
2486 : // combined.
2487 1779707 : size_t CommittedMemory() override {
2488 1779707 : return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2489 : }
2490 :
2491 0 : size_t MaximumCommittedMemory() override {
2492 : return from_space_.MaximumCommittedMemory() +
2493 0 : to_space_.MaximumCommittedMemory();
2494 : }
2495 :
2496 : // Approximate amount of physical memory committed for this space.
2497 : size_t CommittedPhysicalMemory() override;
2498 :
2499 : // Return the available bytes without growing.
2500 122566 : size_t Available() override {
2501 : DCHECK_GE(Capacity(), Size());
2502 122566 : return Capacity() - Size();
2503 : }
2504 :
2505 250502 : size_t AllocatedSinceLastGC() {
2506 250502 : const Address age_mark = to_space_.age_mark();
2507 : DCHECK_NOT_NULL(age_mark);
2508 : DCHECK_NOT_NULL(top());
2509 : Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
2510 : Page* const last_page = Page::FromAllocationAreaAddress(top());
2511 : Page* current_page = age_mark_page;
2512 : size_t allocated = 0;
2513 250502 : if (current_page != last_page) {
2514 : DCHECK_EQ(current_page, age_mark_page);
2515 : DCHECK_GE(age_mark_page->area_end(), age_mark);
2516 141943 : allocated += age_mark_page->area_end() - age_mark;
2517 : current_page = current_page->next_page();
2518 : } else {
2519 : DCHECK_GE(top(), age_mark);
2520 108559 : return top() - age_mark;
2521 : }
2522 364883 : while (current_page != last_page) {
2523 : DCHECK_NE(current_page, age_mark_page);
2524 80997 : allocated += Page::kAllocatableMemory;
2525 : current_page = current_page->next_page();
2526 : }
2527 : DCHECK_GE(top(), current_page->area_start());
2528 141943 : allocated += top() - current_page->area_start();
2529 : DCHECK_LE(allocated, Size());
2530 141943 : return allocated;
2531 : }
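     :
     : // For example, if the age mark sits 1 KB before the end of its page and
     : // exactly one full page lies between it and the page containing top, the
     : // result is 1 KB + Page::kAllocatableMemory + (top - area_start() of
     : // top's page).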
2532 :
2533 : void MovePageFromSpaceToSpace(Page* page) {
2534 : DCHECK(page->InFromSpace());
2535 489 : from_space_.RemovePage(page);
2536 489 : to_space_.PrependPage(page);
2537 : }
2538 :
2539 : bool Rebalance();
2540 :
2541 : // Return the maximum capacity of a semispace.
2542 : size_t MaximumCapacity() {
2543 : DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2544 369731 : return to_space_.maximum_capacity();
2545 : }
2546 :
2547 : bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2548 :
2549 : // Returns the initial capacity of a semispace.
2550 : size_t InitialTotalCapacity() {
2551 : DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2552 24429 : return to_space_.minimum_capacity();
2553 : }
2554 :
2555 : // Return the address of the allocation pointer in the active semispace.
2556 : Address top() {
2557 : DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2558 144477560 : return allocation_info_.top();
2559 : }
2560 :
2561 : // Return the address of the allocation pointer limit in the active semispace.
2562 : Address limit() {
2563 : DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2564 : return allocation_info_.limit();
2565 : }
2566 :
2567 : // Return the address of the first object in the active semispace.
2568 145005 : Address bottom() { return to_space_.space_start(); }
2569 :
2570 : // Get the age mark of the inactive semispace.
2571 138006552 : Address age_mark() { return from_space_.age_mark(); }
2572 : // Set the age mark in the active semispace.
2573 122535 : void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2574 :
2575 : // The allocation top and limit address.
2576 : Address* allocation_top_address() { return allocation_info_.top_address(); }
2577 :
2578 : // The allocation limit address.
2579 : Address* allocation_limit_address() {
2580 : return allocation_info_.limit_address();
2581 : }
2582 :
2583 : MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
2584 : int size_in_bytes, AllocationAlignment alignment));
2585 :
2586 : MUST_USE_RESULT INLINE(
2587 : AllocationResult AllocateRawUnaligned(int size_in_bytes));
2588 :
2589 : MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
2590 : int size_in_bytes, AllocationAlignment alignment));
2591 :
2592 : MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
2593 : int size_in_bytes, AllocationAlignment alignment);
2594 :
2595 : // Reset the allocation pointer to the beginning of the active semispace.
2596 : void ResetAllocationInfo();
2597 :
2598 : // When inline allocation stepping is active, either because of incremental
2599 : // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2600 : // inline allocation every once in a while. This is done by setting
2601 : // allocation_info_.limit to be lower than the actual limit and increasing
2602 : // it in steps to guarantee that the observers are notified periodically.
2603 : void UpdateInlineAllocationLimit(int size_in_bytes);
2604 :
2605 : void DisableInlineAllocationSteps() {
2606 : top_on_previous_step_ = 0;
2607 : UpdateInlineAllocationLimit(0);
2608 : }
2609 :
2610 : // Allows observation of inline allocation. The observer->Step() method gets
2611 : // called after every step_size bytes have been allocated (approximately).
2612 : // This works by lowering the allocation limit and raising it again after
2613 : // each step.
2614 : void AddAllocationObserver(AllocationObserver* observer) override;
2615 :
2616 : void RemoveAllocationObserver(AllocationObserver* observer) override;
2617 :
2618 : // Get the extent of the inactive semispace (for use as a marking stack,
2619 : // or to zap it). Notice: space-addresses are not necessarily on the
2620 : // same page, so FromSpaceStart() might be above FromSpaceEnd().
2621 : Address FromSpacePageLow() { return from_space_.page_low(); }
2622 : Address FromSpacePageHigh() { return from_space_.page_high(); }
2623 69189 : Address FromSpaceStart() { return from_space_.space_start(); }
2624 69189 : Address FromSpaceEnd() { return from_space_.space_end(); }
2625 :
2626 : // Get the extent of the active semispace's pages' memory.
2627 69189 : Address ToSpaceStart() { return to_space_.space_start(); }
2628 69189 : Address ToSpaceEnd() { return to_space_.space_end(); }
2629 :
2630 : inline bool ToSpaceContainsSlow(Address a);
2631 : inline bool FromSpaceContainsSlow(Address a);
2632 : inline bool ToSpaceContains(Object* o);
2633 : inline bool FromSpaceContains(Object* o);
2634 :
2635 : // Try to switch the active semispace to a new, empty, page.
2636 : // Returns false if this isn't possible or reasonable (i.e., there
2637 : // are no pages, or the current page is already empty), or true
2638 : // if successful.
2639 : bool AddFreshPage();
2640 : bool AddFreshPageSynchronized();
2641 :
2642 : #ifdef VERIFY_HEAP
2643 : // Verify the active semispace.
2644 : virtual void Verify();
2645 : #endif
2646 :
2647 : #ifdef DEBUG
2648 : // Print the active semispace.
2649 : void Print() override { to_space_.Print(); }
2650 : #endif
2651 :
2652 : // Iterates the active semispace to collect statistics.
2653 : void CollectStatistics();
2654 : // Reports previously collected statistics of the active semispace.
2655 : void ReportStatistics();
2656 : // Clears previously collected statistics.
2657 : void ClearHistograms();
2658 :
2659 : // Record the allocation or promotion of a heap object. Note that we don't
2660 : // record every single allocation, but only those that happen in the
2661 : // to space during a scavenge GC.
2662 : void RecordAllocation(HeapObject* obj);
2663 : void RecordPromotion(HeapObject* obj);
2664 :
2665 : // Return whether the operation succeeded.
2666 : bool CommitFromSpaceIfNeeded() {
2667 122535 : if (from_space_.is_committed()) return true;
2668 24086 : return from_space_.Commit();
2669 : }
2670 :
2671 : bool UncommitFromSpace() {
2672 24393 : if (!from_space_.is_committed()) return true;
2673 17212 : return from_space_.Uncommit();
2674 : }
2675 :
2676 0 : bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2677 :
2678 : SemiSpace* active_space() { return &to_space_; }
2679 :
2680 : void PauseAllocationObservers() override;
2681 : void ResumeAllocationObservers() override;
2682 :
2683 5814 : iterator begin() { return to_space_.begin(); }
2684 : iterator end() { return to_space_.end(); }
2685 :
2686 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2687 :
2688 : SemiSpace& from_space() { return from_space_; }
2689 : SemiSpace& to_space() { return to_space_; }
2690 :
2691 : private:
2692 : // Update allocation info to match the current to-space page.
2693 : void UpdateAllocationInfo();
2694 :
2695 : base::Mutex mutex_;
2696 :
2697 : // The semispaces.
2698 : SemiSpace to_space_;
2699 : SemiSpace from_space_;
2700 : base::VirtualMemory reservation_;
2701 :
2702 : // Allocation pointer and limit for normal allocation and allocation during
2703 : // mark-compact collection.
2704 : AllocationInfo allocation_info_;
2705 :
2706 : Address top_on_previous_step_;
2707 :
2708 : HistogramInfo* allocated_histogram_;
2709 : HistogramInfo* promoted_histogram_;
2710 :
2711 : bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
2712 :
2713 : // If we are doing inline allocation in steps, this method performs the 'step'
2714 : // operation. top is the memory address of the bump pointer at the last
2715 : // inline allocation (i.e., it determines the number of bytes actually
2716 : // allocated since the last step). new_top is the address of the bump pointer
2717 : // where the next byte is going to be allocated from. top and new_top may be
2718 : // different when we cross a page boundary or reset the space.
2719 : void InlineAllocationStep(Address top, Address new_top, Address soon_object,
2720 : size_t size);
2721 : intptr_t GetNextInlineAllocationStepSize();
2722 : void StartNextInlineAllocationStep();
2723 :
2724 : friend class SemiSpaceIterator;
2725 : };
2726 :
2727 : class PauseAllocationObserversScope {
2728 : public:
2729 : explicit PauseAllocationObserversScope(Heap* heap);
2730 : ~PauseAllocationObserversScope();
2731 :
2732 : private:
2733 : Heap* heap_;
2734 : DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
2735 : };
2736 :
2737 : // -----------------------------------------------------------------------------
2738 : // Compaction space that is used temporarily during compaction.
2739 :
2740 59262 : class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
2741 : public:
2742 : CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2743 59260 : : PagedSpace(heap, id, executable) {}
2744 :
2745 92835 : bool is_local() override { return true; }
2746 :
2747 : protected:
2748 : // The space is temporary and not included in any snapshots.
2749 0 : bool snapshotable() override { return false; }
2750 :
2751 : MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
2752 : int size_in_bytes) override;
2753 : };
2754 :
2755 :
2756 : // A collection of |CompactionSpace|s used by a single compaction task.
2757 : class CompactionSpaceCollection : public Malloced {
2758 : public:
2759 59260 : explicit CompactionSpaceCollection(Heap* heap)
2760 : : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
2761 59260 : code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
2762 :
2763 12730966 : CompactionSpace* Get(AllocationSpace space) {
2764 12730966 : switch (space) {
2765 : case OLD_SPACE:
2766 12655077 : return &old_space_;
2767 : case CODE_SPACE:
2768 75889 : return &code_space_;
2769 : default:
2770 0 : UNREACHABLE();
2771 : }
2772 : UNREACHABLE();
2773 : return nullptr;
2774 : }
2775 :
2776 : private:
2777 : CompactionSpace old_space_;
2778 : CompactionSpace code_space_;
2779 : };
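     :
     : // Usage sketch (assumed names): each evacuation task owns a collection so
     : // it can allocate without locking, and merges the result back when done:
     : //
     : //   CompactionSpaceCollection spaces(heap);
     : //   AllocationResult result =
     : //       spaces.Get(OLD_SPACE)->AllocateRawUnaligned(size_in_bytes);
     : //   // ... evacuate objects into the compaction space ...
     : //   heap->old_space()->MergeCompactionSpace(spaces.Get(OLD_SPACE));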
2780 :
2781 :
2782 : // -----------------------------------------------------------------------------
2783 : // Old object space (includes the old space of objects and code space)
2784 :
2785 127440 : class OldSpace : public PagedSpace {
2786 : public:
2787 : // Creates an old space object. The constructor does not allocate pages
2788 : // from OS.
2789 : OldSpace(Heap* heap, AllocationSpace id, Executability executable)
2790 121564 : : PagedSpace(heap, id, executable) {}
2791 : };
2792 :
2793 :
2794 : // For contiguous spaces, top should be in the space (or at the end) and limit
2795 : // should be the end of the space.
2796 : #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2797 : SLOW_DCHECK((space).page_low() <= (info).top() && \
2798 : (info).top() <= (space).page_high() && \
2799 : (info).limit() <= (space).page_high())
2800 :
2801 :
2802 : // -----------------------------------------------------------------------------
2803 : // Old space for all map objects
2804 :
2805 118570 : class MapSpace : public PagedSpace {
2806 : public:
2807 : // Creates a map space object.
2808 : MapSpace(Heap* heap, AllocationSpace id)
2809 60782 : : PagedSpace(heap, id, NOT_EXECUTABLE) {}
2810 :
2811 521 : int RoundSizeDownToObjectAlignment(int size) override {
2812 : if (base::bits::IsPowerOfTwo32(Map::kSize)) {
2813 : return RoundDown(size, Map::kSize);
2814 : } else {
2815 521 : return (size / Map::kSize) * Map::kSize;
2816 : }
2817 : }
2818 :
2819 : #ifdef VERIFY_HEAP
2820 : void VerifyObject(HeapObject* obj) override;
2821 : #endif
2822 : };
2823 :
2824 :
2825 : // -----------------------------------------------------------------------------
2826 : // Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
2827 : // managed by the large object space. A large object is allocated from the
2828 : // OS heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2829 : // A large object always starts at offset Page::kObjectStartOffset within a page.
2830 : // Large objects do not move during garbage collections.
2831 :
2832 : class LargeObjectSpace : public Space {
2833 : public:
2834 : typedef LargePageIterator iterator;
2835 :
2836 : LargeObjectSpace(Heap* heap, AllocationSpace id);
2837 : virtual ~LargeObjectSpace();
2838 :
2839 : // Initializes internal data structures.
2840 : bool SetUp();
2841 :
2842 : // Releases internal resources, frees objects in this space.
2843 : void TearDown();
2844 :
2845 : static size_t ObjectSizeFor(size_t chunk_size) {
2846 142131 : if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2847 140927 : return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2848 : }
2849 :
2850 : // Allocates a large object of |object_size| bytes with the given
2851 : // executability; shared by all large object allocation paths.
2852 : MUST_USE_RESULT AllocationResult
2853 : AllocateRaw(int object_size, Executability executable);
2854 :
2855 : // Available bytes for objects in this space.
2856 : inline size_t Available() override;
2857 :
2858 1641349 : size_t Size() override { return size_; }
2859 :
2860 4637222 : size_t SizeOfObjects() override { return objects_size_; }
2861 :
2862 : // Approximate amount of physical memory committed for this space.
2863 : size_t CommittedPhysicalMemory() override;
2864 :
2865 : int PageCount() { return page_count_; }
2866 :
2867 : // Finds an object for a given address; returns a Smi if it is not found.
2868 : // The function iterates through all objects in this space and may be slow.
2869 : Object* FindObject(Address a);
2870 :
2871 : // Takes the chunk_map_mutex_ and calls FindPage after that.
2872 : LargePage* FindPageThreadSafe(Address a);
2873 :
2874 : // Finds a large object page containing the given address, returns NULL
2875 : // if such a page doesn't exist.
2876 : LargePage* FindPage(Address a);
2877 :
2878 : // Clears the marking state of live objects.
2879 : void ClearMarkingStateOfLiveObjects();
2880 :
2881 : // Frees unmarked objects.
2882 : void FreeUnmarkedObjects();
2883 :
2884 : void InsertChunkMapEntries(LargePage* page);
2885 : void RemoveChunkMapEntries(LargePage* page);
2886 : void RemoveChunkMapEntries(LargePage* page, Address free_start);
2887 :
2888 : // Checks whether a heap object is in this space; O(1).
2889 : bool Contains(HeapObject* obj);
2890 : // Checks whether an address is in the object area in this space. Iterates
2891 : // all objects in the space. May be slow.
2892 0 : bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
2893 :
2894 : // Checks whether the space is empty.
2895 : bool IsEmpty() { return first_page_ == NULL; }
2896 :
2897 121 : void AdjustLiveBytes(int by) { objects_size_ += by; }
2898 :
2899 : LargePage* first_page() { return first_page_; }
2900 :
2901 : // Collect code statistics.
2902 : void CollectCodeStatistics();
2903 :
2904 : iterator begin() { return iterator(first_page_); }
2905 : iterator end() { return iterator(nullptr); }
2906 :
2907 : std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2908 :
2909 : #ifdef VERIFY_HEAP
2910 : virtual void Verify();
2911 : #endif
2912 :
2913 : #ifdef DEBUG
2914 : void Print() override;
2915 : void ReportStatistics();
2916 : #endif
2917 :
2918 : private:
2919 : // The head of the linked list of large object chunks.
2920 : LargePage* first_page_;
2921 : size_t size_; // allocated bytes
2922 : int page_count_; // number of chunks
2923 : size_t objects_size_; // size of objects
2924 : // The chunk_map_mutex_ has to be used when the chunk map is accessed
2925 : // concurrently.
2926 : base::Mutex chunk_map_mutex_;
2927 : // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2928 : base::HashMap chunk_map_;
2929 :
2930 : friend class LargeObjectIterator;
2931 : };
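     :
     : // Lookup sketch (assumed names): FindObject() signals failure with a Smi,
     : // so callers test the result before casting:
     : //
     : //   Object* result = heap->lo_space()->FindObject(addr);
     : //   if (!result->IsSmi()) {
     : //     HeapObject* object = HeapObject::cast(result);
     : //     // |addr| lies within |object|.
     : //   }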
2932 :
2933 :
2934 49138 : class LargeObjectIterator : public ObjectIterator {
2935 : public:
2936 : explicit LargeObjectIterator(LargeObjectSpace* space);
2937 :
2938 : HeapObject* Next() override;
2939 :
2940 : private:
2941 : LargePage* current_;
2942 : };
2943 :
2944 : // Iterates over the chunks (pages and large object pages) that can contain
2945 : // pointers to new space or to evacuation candidates.
2946 : class MemoryChunkIterator BASE_EMBEDDED {
2947 : public:
2948 : inline explicit MemoryChunkIterator(Heap* heap);
2949 :
2950 : // Return NULL when the iterator is done.
2951 : inline MemoryChunk* next();
2952 :
2953 : private:
2954 : enum State {
2955 : kOldSpaceState,
2956 : kMapState,
2957 : kCodeState,
2958 : kLargeObjectState,
2959 : kFinishedState
2960 : };
2961 : Heap* heap_;
2962 : State state_;
2963 : PageIterator old_iterator_;
2964 : PageIterator code_iterator_;
2965 : PageIterator map_iterator_;
2966 : LargePageIterator lo_iterator_;
2967 : };
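     :
     : // Iteration sketch: next() returns NULL once the old, code, map, and
     : // large object pages have all been visited:
     : //
     : //   MemoryChunkIterator it(heap);
     : //   while (MemoryChunk* chunk = it.next()) {
     : //     ProcessChunk(chunk);  // hypothetical callback
     : //   }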
2968 :
2969 : } // namespace internal
2970 : } // namespace v8
2971 :
2972 : #endif // V8_HEAP_SPACES_H_
|