Line data Source code
1 : // Copyright 2016 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_SLOT_SET_H_
6 : #define V8_HEAP_SLOT_SET_H_
7 :
8 : #include <map>
9 : #include <stack>
10 :
11 : #include "src/allocation.h"
12 : #include "src/base/atomic-utils.h"
13 : #include "src/base/bits.h"
14 : #include "src/objects/slots.h"
15 : #include "src/utils.h"
16 :
17 : #ifdef V8_COMPRESS_POINTERS
18 : #include "src/ptr-compr.h"
19 : #endif
20 :
21 : namespace v8 {
22 : namespace internal {
23 :
// Result returned by slot-iteration callbacks (see SlotSet::Iterate and
// TypedSlotSet::Iterate): KEEP_SLOT leaves the slot in the set, REMOVE_SLOT
// removes it.
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
25 :
26 : // Data structure for maintaining a set of slots in a standard (non-large)
27 : // page. The base address of the page must be set with SetPageStart before any
28 : // operation.
29 : // The data structure assumes that the slots are pointer size aligned and
30 : // splits the valid slot offset range into kBuckets buckets.
31 : // Each bucket is a bitmap with a bit corresponding to a single slot offset.
32 : class SlotSet : public Malloced {
33 : public:
34 : enum EmptyBucketMode {
35 : FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
36 : PREFREE_EMPTY_BUCKETS, // An empty bucket will be unlinked from the slot
37 : // set, but deallocated on demand by a sweeper
38 : // thread.
39 : KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
40 : };
41 :
42 211299 : SlotSet() {
43 6866423 : for (int i = 0; i < kBuckets; i++) {
44 6760773 : StoreBucket(&buckets_[i], nullptr);
45 : }
46 105650 : }
47 :
48 211269 : ~SlotSet() {
49 6865277 : for (int i = 0; i < kBuckets; i++) {
50 6759639 : ReleaseBucket(i);
51 : }
52 105638 : FreeToBeFreedBuckets();
53 105647 : }
54 :
55 105648 : void SetPageStart(Address page_start) { page_start_ = page_start; }
56 :
57 : // The slot offset specifies a slot at address page_start_ + slot_offset.
58 : // This method should only be called on the main thread because concurrent
59 : // allocation of the bucket is not thread-safe.
60 : //
61 : // AccessMode defines whether there can be concurrent access on the buckets
62 : // or not.
63 : template <AccessMode access_mode = AccessMode::ATOMIC>
64 197367777 : void Insert(int slot_offset) {
65 : int bucket_index, cell_index, bit_index;
66 : SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
67 197367777 : Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
68 197367777 : if (bucket == nullptr) {
69 1352903 : bucket = AllocateBucket();
70 1201291 : if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
71 : DeleteArray<uint32_t>(bucket);
72 : bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
73 : }
74 : }
75 : // Check that monotonicity is preserved, i.e., once a bucket is set we do
76 : // not free it concurrently.
77 : DCHECK_NOT_NULL(bucket);
78 : DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
79 197367785 : uint32_t mask = 1u << bit_index;
80 378586850 : if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
81 : SetCellBits<access_mode>(&bucket[cell_index], mask);
82 : }
83 197416192 : }
84 :
85 : // The slot offset specifies a slot at address page_start_ + slot_offset.
86 : // Returns true if the set contains the slot.
87 : bool Contains(int slot_offset) {
88 : int bucket_index, cell_index, bit_index;
89 : SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
90 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
91 : if (bucket == nullptr) return false;
92 : return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
93 : }
94 :
95 : // The slot offset specifies a slot at address page_start_ + slot_offset.
96 44222 : void Remove(int slot_offset) {
97 : int bucket_index, cell_index, bit_index;
98 : SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
99 44222 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
100 44222 : if (bucket != nullptr) {
101 43770 : uint32_t cell = LoadCell(&bucket[cell_index]);
102 43770 : uint32_t bit_mask = 1u << bit_index;
103 43770 : if (cell & bit_mask) {
104 : ClearCellBits(&bucket[cell_index], bit_mask);
105 : }
106 : }
107 44222 : }
108 :
109 : // The slot offsets specify a range of slots at addresses:
110 : // [page_start_ + start_offset ... page_start_ + end_offset).
111 21573664 : void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
112 21573664 : CHECK_LE(end_offset, 1 << kPageSizeBits);
113 : DCHECK_LE(start_offset, end_offset);
114 : int start_bucket, start_cell, start_bit;
115 : SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
116 : int end_bucket, end_cell, end_bit;
117 : SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
118 21573664 : uint32_t start_mask = (1u << start_bit) - 1;
119 21573664 : uint32_t end_mask = ~((1u << end_bit) - 1);
120 : Bucket bucket;
121 21573664 : if (start_bucket == end_bucket && start_cell == end_cell) {
122 12685359 : bucket = LoadBucket(&buckets_[start_bucket]);
123 12685359 : if (bucket != nullptr) {
124 1033434 : ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
125 : }
126 : return;
127 : }
128 : int current_bucket = start_bucket;
129 : int current_cell = start_cell;
130 8888305 : bucket = LoadBucket(&buckets_[current_bucket]);
131 8888305 : if (bucket != nullptr) {
132 1109927 : ClearCellBits(&bucket[current_cell], ~start_mask);
133 : }
134 8963196 : current_cell++;
135 8963196 : if (current_bucket < end_bucket) {
136 879722 : if (bucket != nullptr) {
137 150843 : ClearBucket(bucket, current_cell, kCellsPerBucket);
138 : }
139 : // The rest of the current bucket is cleared.
140 : // Move on to the next bucket.
141 879734 : current_bucket++;
142 : current_cell = 0;
143 : }
144 : DCHECK(current_bucket == end_bucket ||
145 : (current_bucket < end_bucket && current_cell == 0));
146 19615642 : while (current_bucket < end_bucket) {
147 10652388 : if (mode == PREFREE_EMPTY_BUCKETS) {
148 777 : PreFreeEmptyBucket(current_bucket);
149 10651637 : } else if (mode == FREE_EMPTY_BUCKETS) {
150 1392 : ReleaseBucket(current_bucket);
151 : } else {
152 : DCHECK(mode == KEEP_EMPTY_BUCKETS);
153 10650245 : bucket = LoadBucket(&buckets_[current_bucket]);
154 10650245 : if (bucket != nullptr) {
155 22426 : ClearBucket(bucket, 0, kCellsPerBucket);
156 : }
157 : }
158 10652434 : current_bucket++;
159 : }
160 : // All buckets between start_bucket and end_bucket are cleared.
161 8963254 : bucket = LoadBucket(&buckets_[current_bucket]);
162 : DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
163 8963254 : if (current_bucket == kBuckets || bucket == nullptr) {
164 : return;
165 : }
166 2181656 : while (current_cell < end_cell) {
167 1150705 : StoreCell(&bucket[current_cell], 0);
168 1150705 : current_cell++;
169 : }
170 : // All cells between start_cell and end_cell are cleared.
171 : DCHECK(current_bucket == end_bucket && current_cell == end_cell);
172 1030951 : ClearCellBits(&bucket[end_cell], ~end_mask);
173 : }
174 :
175 : // The slot offset specifies a slot at address page_start_ + slot_offset.
176 2808888 : bool Lookup(int slot_offset) {
177 : int bucket_index, cell_index, bit_index;
178 : SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
179 2808888 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
180 2808888 : if (bucket == nullptr) return false;
181 4827248 : return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
182 : }
183 :
184 : // Iterate over all slots in the set and for each slot invoke the callback.
185 : // If the callback returns REMOVE_SLOT then the slot is removed from the set.
186 : // Returns the new number of slots.
187 : // This method should only be called on the main thread.
188 : //
189 : // Sample usage:
190 : // Iterate([](MaybeObjectSlot slot) {
191 : // if (good(slot)) return KEEP_SLOT;
192 : // else return REMOVE_SLOT;
193 : // });
194 : template <typename Callback>
195 460571 : int Iterate(Callback callback, EmptyBucketMode mode) {
196 : int new_count = 0;
197 30412826 : for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
198 29942689 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
199 29942689 : if (bucket != nullptr) {
200 : int in_bucket_count = 0;
201 1618311 : int cell_offset = bucket_index * kBitsPerBucket;
202 53380471 : for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
203 51752338 : uint32_t cell = LoadCell(&bucket[i]);
204 51752338 : if (cell) {
205 : uint32_t old_cell = cell;
206 : uint32_t mask = 0;
207 152242090 : while (cell) {
208 129902552 : int bit_offset = base::bits::CountTrailingZeros(cell);
209 129902552 : uint32_t bit_mask = 1u << bit_offset;
210 129902552 : uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
211 259565880 : if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) {
212 38928221 : ++in_bucket_count;
213 : } else {
214 90671516 : mask |= bit_mask;
215 : }
216 129599737 : cell ^= bit_mask;
217 : }
218 22339538 : uint32_t new_cell = old_cell & ~mask;
219 22339538 : if (old_cell != new_cell) {
220 : ClearCellBits(&bucket[i], mask);
221 : }
222 : }
223 : }
224 1628197 : if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
225 688430 : PreFreeEmptyBucket(bucket_index);
226 : }
227 1627878 : new_count += in_bucket_count;
228 : }
229 : }
230 470138 : return new_count;
231 : }
232 :
233 10 : int NumberOfPreFreedEmptyBuckets() {
234 10 : base::MutexGuard guard(&to_be_freed_buckets_mutex_);
235 20 : return static_cast<int>(to_be_freed_buckets_.size());
236 : }
237 :
238 482 : void PreFreeEmptyBuckets() {
239 31330 : for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
240 30848 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
241 30848 : if (bucket != nullptr) {
242 670 : if (IsEmptyBucket(bucket)) {
243 666 : PreFreeEmptyBucket(bucket_index);
244 : }
245 : }
246 : }
247 482 : }
248 :
249 113459 : void FreeEmptyBuckets() {
250 7374835 : for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
251 7261376 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
252 7261376 : if (bucket != nullptr) {
253 507302 : if (IsEmptyBucket(bucket)) {
254 201789 : ReleaseBucket(bucket_index);
255 : }
256 : }
257 : }
258 113459 : }
259 :
260 550145 : void FreeToBeFreedBuckets() {
261 550145 : base::MutexGuard guard(&to_be_freed_buckets_mutex_);
262 1239768 : while (!to_be_freed_buckets_.empty()) {
263 689595 : Bucket top = to_be_freed_buckets_.top();
264 : to_be_freed_buckets_.pop();
265 : DeleteArray<uint32_t>(top);
266 : }
267 : DCHECK_EQ(0u, to_be_freed_buckets_.size());
268 550173 : }
269 :
270 : private:
271 : typedef uint32_t* Bucket;
272 : static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
273 : static const int kCellsPerBucket = 32;
274 : static const int kCellsPerBucketLog2 = 5;
275 : static const int kBitsPerCell = 32;
276 : static const int kBitsPerCellLog2 = 5;
277 : static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
278 : static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
279 : static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
280 :
281 1352903 : Bucket AllocateBucket() {
282 1352903 : Bucket result = NewArray<uint32_t>(kCellsPerBucket);
283 43292583 : for (int i = 0; i < kCellsPerBucket; i++) {
284 43292583 : result[i] = 0;
285 : }
286 1352919 : return result;
287 : }
288 :
289 173271 : void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
290 : DCHECK_GE(start_cell, 0);
291 : DCHECK_LE(end_cell, kCellsPerBucket);
292 : int current_cell = start_cell;
293 2235677 : while (current_cell < kCellsPerBucket) {
294 2062406 : StoreCell(&bucket[current_cell], 0);
295 2062406 : current_cell++;
296 : }
297 173271 : }
298 :
299 689747 : void PreFreeEmptyBucket(int bucket_index) {
300 1379494 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
301 689747 : if (bucket != nullptr) {
302 689294 : base::MutexGuard guard(&to_be_freed_buckets_mutex_);
303 : to_be_freed_buckets_.push(bucket);
304 : StoreBucket(&buckets_[bucket_index], nullptr);
305 : }
306 689785 : }
307 :
308 6962844 : void ReleaseBucket(int bucket_index) {
309 6962844 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
310 : StoreBucket(&buckets_[bucket_index], nullptr);
311 : DeleteArray<uint32_t>(bucket);
312 6962844 : }
313 :
314 : template <AccessMode access_mode = AccessMode::ATOMIC>
315 : Bucket LoadBucket(Bucket* bucket) {
316 : if (access_mode == AccessMode::ATOMIC)
317 : return base::AsAtomicPointer::Acquire_Load(bucket);
318 : return *bucket;
319 : }
320 :
321 : template <AccessMode access_mode = AccessMode::ATOMIC>
322 : void StoreBucket(Bucket* bucket, Bucket value) {
323 : if (access_mode == AccessMode::ATOMIC) {
324 : base::AsAtomicPointer::Release_Store(bucket, value);
325 : } else {
326 : *bucket = value;
327 : }
328 : }
329 :
330 507972 : bool IsEmptyBucket(Bucket bucket) {
331 8094333 : for (int i = 0; i < kCellsPerBucket; i++) {
332 16799700 : if (LoadCell(&bucket[i])) {
333 : return false;
334 : }
335 : }
336 : return true;
337 : }
338 :
339 : template <AccessMode access_mode = AccessMode::ATOMIC>
340 1201290 : bool SwapInNewBucket(Bucket* bucket, Bucket value) {
341 : if (access_mode == AccessMode::ATOMIC) {
342 : return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr,
343 1201290 : value) == nullptr;
344 : } else {
345 : DCHECK_NULL(*bucket);
346 151618 : *bucket = value;
347 : return true;
348 : }
349 : }
350 :
351 : template <AccessMode access_mode = AccessMode::ATOMIC>
352 : uint32_t LoadCell(uint32_t* cell) {
353 : if (access_mode == AccessMode::ATOMIC)
354 : return base::AsAtomic32::Acquire_Load(cell);
355 : return *cell;
356 : }
357 :
358 : void StoreCell(uint32_t* cell, uint32_t value) {
359 : base::AsAtomic32::Release_Store(cell, value);
360 : }
361 :
362 : void ClearCellBits(uint32_t* cell, uint32_t mask) {
363 18011278 : base::AsAtomic32::SetBits(cell, 0u, mask);
364 : }
365 :
366 : template <AccessMode access_mode = AccessMode::ATOMIC>
367 : void SetCellBits(uint32_t* cell, uint32_t mask) {
368 : if (access_mode == AccessMode::ATOMIC) {
369 103805367 : base::AsAtomic32::SetBits(cell, mask, mask);
370 : } else {
371 16149799 : *cell = (*cell & ~mask) | mask;
372 : }
373 : }
374 :
375 : // Converts the slot offset into bucket/cell/bit index.
376 : void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
377 : int* bit_index) {
378 : DCHECK(IsAligned(slot_offset, kTaggedSize));
379 243368215 : int slot = slot_offset >> kTaggedSizeLog2;
380 : DCHECK(slot >= 0 && slot <= kMaxSlots);
381 243368215 : *bucket_index = slot >> kBitsPerBucketLog2;
382 243368215 : *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
383 243368215 : *bit_index = slot & (kBitsPerCell - 1);
384 : }
385 :
386 : Bucket buckets_[kBuckets];
387 : Address page_start_;
388 : base::Mutex to_be_freed_buckets_mutex_;
389 : std::stack<uint32_t*> to_be_freed_buckets_;
390 : };
391 :
// Kind of pointer recorded in a typed slot (see TypedSlots below). The
// TypeField bit-field packs these into 3 bits, so there must be at most 8
// enumerators; CLEARED_SLOT marks a slot that has been invalidated in place.
enum SlotType {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  CLEARED_SLOT
};
399 :
// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunks is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
class V8_EXPORT_PRIVATE TypedSlots {
 public:
  // Offsets are stored in 29 bits (see OffsetField below), bounding the
  // largest representable offset.
  static const int kMaxOffset = 1 << 29;
  TypedSlots() = default;
  virtual ~TypedSlots();
  // Appends a (type, offset) pair; offsets must be < kMaxOffset.
  void Insert(SlotType type, uint32_t offset);
  // Transfers all chunks from |other| into this list.
  void Merge(TypedSlots* other);

 protected:
  // A TypedSlot packs the 3-bit SlotType and the 29-bit offset into one
  // uint32_t via these bit fields.
  class OffsetField : public BitField<int, 0, 29> {};
  class TypeField : public BitField<SlotType, 29, 3> {};
  struct TypedSlot {
    uint32_t type_and_offset;
  };
  // Singly-linked chunk of slots; |count| of the |capacity| entries in
  // |buffer| are in use.
  struct Chunk {
    Chunk* next;
    TypedSlot* buffer;
    int32_t capacity;
    int32_t count;
  };
  static const int kInitialBufferSize = 100;
  static const int kMaxBufferSize = 16 * KB;
  // Doubles the capacity, saturating at kMaxBufferSize.
  static int NextCapacity(int capacity) {
    return Min(kMaxBufferSize, capacity * 2);
  }
  Chunk* EnsureChunk();
  Chunk* NewChunk(Chunk* next, int capacity);
  Chunk* head_ = nullptr;
  Chunk* tail_ = nullptr;
};
437 :
// A multiset of per-page typed slots that allows concurrent iteration and
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
 public:
  // The PREFREE_EMPTY_CHUNKS indicates that chunks detected as empty
  // during the iteration are queued in to_be_freed_chunks_, which are
  // then freed in FreeToBeFreedChunks.
  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}

  ~TypedSlotSet() override;

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address slot_address) {
  //    if (good(slot_type, slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  // This can run concurrently to ClearInvalidSlots().
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    // TypeField::encode (3 bits) must be able to represent every SlotType.
    STATIC_ASSERT(CLEARED_SLOT < 8);
    Chunk* chunk = head_;
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer;
      int count = chunk->count;
      bool empty = true;
      for (int i = 0; i < count; i++) {
        // Atomic load: a concurrent ClearInvalidSlots() may rewrite entries
        // to CLEARED_SLOT while we iterate.
        TypedSlot slot = LoadTypedSlot(buffer + i);
        SlotType type = TypeField::decode(slot.type_and_offset);
        if (type != CLEARED_SLOT) {
          uint32_t offset = OffsetField::decode(slot.type_and_offset);
          Address addr = page_start_ + offset;
          if (callback(type, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            ClearTypedSlot(buffer + i);
          }
        }
      }
      Chunk* next = chunk->next;
      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point its next
        // chunk to allow concurrent iteration.
        if (previous) {
          StoreNext(previous, next);
        } else {
          StoreHead(next);
        }
        // Defer actual deallocation to FreeToBeFreedChunks(): a concurrent
        // iterator may still be inside this chunk.
        base::MutexGuard guard(&to_be_freed_chunks_mutex_);
        to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  // Clears all slots that have the offset in the specified ranges.
  // This can run concurrently to Iterate().
  void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);

  // Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
  void FreeToBeFreedChunks();

 private:
  // Atomic operations used by Iterate and ClearInvalidSlots;
  Chunk* LoadNext(Chunk* chunk) {
    return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
  }
  void StoreNext(Chunk* chunk, Chunk* next) {
    return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
  }
  Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
  void StoreHead(Chunk* chunk) {
    base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
  }
  TypedSlot LoadTypedSlot(TypedSlot* slot) {
    return TypedSlot{base::AsAtomic32::Relaxed_Load(&slot->type_and_offset)};
  }
  void ClearTypedSlot(TypedSlot* slot) {
    // Order is important here and should match that of LoadTypedSlot.
    base::AsAtomic32::Relaxed_Store(
        &slot->type_and_offset,
        TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
  }

  Address page_start_;
  base::Mutex to_be_freed_chunks_mutex_;
  std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};
537 :
538 : } // namespace internal
539 : } // namespace v8
540 :
541 : #endif // V8_HEAP_SLOT_SET_H_
|