Line data Source code
1 : // Copyright 2016 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_HEAP_SLOT_SET_H_
6 : #define V8_HEAP_SLOT_SET_H_
7 :
8 : #include <map>
9 : #include <stack>
10 :
11 : #include "src/allocation.h"
12 : #include "src/base/atomic-utils.h"
13 : #include "src/base/bits.h"
14 : #include "src/objects/compressed-slots.h"
15 : #include "src/objects/slots.h"
16 : #include "src/utils.h"
17 :
18 : namespace v8 {
19 : namespace internal {
20 :
// Result of a per-slot iteration callback: KEEP_SLOT retains the slot in the
// set, REMOVE_SLOT drops it (see SlotSet::Iterate / TypedSlotSet::Iterate).
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
22 :
23 : // Data structure for maintaining a set of slots in a standard (non-large)
24 : // page. The base address of the page must be set with SetPageStart before any
25 : // operation.
26 : // The data structure assumes that the slots are pointer size aligned and
27 : // splits the valid slot offset range into kBuckets buckets.
28 : // Each bucket is a bitmap with a bit corresponding to a single slot offset.
29 : class SlotSet : public Malloced {
30 : public:
31 : enum EmptyBucketMode {
32 : FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
33 : PREFREE_EMPTY_BUCKETS, // An empty bucket will be unlinked from the slot
34 : // set, but deallocated on demand by a sweeper
35 : // thread.
36 : KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
37 : };
38 :
39 222849 : SlotSet() {
40 7242025 : for (int i = 0; i < kBuckets; i++) {
41 3565300 : StoreBucket(&buckets_[i], nullptr);
42 : }
43 111425 : }
44 :
45 222834 : ~SlotSet() {
46 7241093 : for (int i = 0; i < kBuckets; i++) {
47 3564835 : ReleaseBucket(i);
48 : }
49 111416 : FreeToBeFreedBuckets();
50 111424 : }
51 :
52 111426 : void SetPageStart(Address page_start) { page_start_ = page_start; }
53 :
54 : // The slot offset specifies a slot at address page_start_ + slot_offset.
55 : // This method should only be called on the main thread because concurrent
56 : // allocation of the bucket is not thread-safe.
57 : //
58 : // AccessMode defines whether there can be concurrent access on the buckets
59 : // or not.
60 : template <AccessMode access_mode = AccessMode::ATOMIC>
61 185745933 : void Insert(int slot_offset) {
62 : int bucket_index, cell_index, bit_index;
63 : SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
64 169535575 : Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
65 185745933 : if (bucket == nullptr) {
66 : bucket = AllocateBucket();
67 1309101 : if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
68 : DeleteArray<uint32_t>(bucket);
69 : bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
70 : }
71 : }
72 : // Check that monotonicity is preserved, i.e., once a bucket is set we do
73 : // not free it concurrently.
74 : DCHECK_NOT_NULL(bucket);
75 : DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
76 185745960 : uint32_t mask = 1u << bit_index;
77 355281552 : if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
78 : SetCellBits<access_mode>(&bucket[cell_index], mask);
79 : }
80 185816497 : }
81 :
82 : // The slot offset specifies a slot at address page_start_ + slot_offset.
83 : // Returns true if the set contains the slot.
84 : bool Contains(int slot_offset) {
85 : int bucket_index, cell_index, bit_index;
86 : SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
87 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
88 : if (bucket == nullptr) return false;
89 : return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
90 : }
91 :
92 : // The slot offset specifies a slot at address page_start_ + slot_offset.
93 22216 : void Remove(int slot_offset) {
94 : int bucket_index, cell_index, bit_index;
95 : SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
96 22216 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
97 22216 : if (bucket != nullptr) {
98 21899 : uint32_t cell = LoadCell(&bucket[cell_index]);
99 21899 : uint32_t bit_mask = 1u << bit_index;
100 21899 : if (cell & bit_mask) {
101 : ClearCellBits(&bucket[cell_index], bit_mask);
102 : }
103 : }
104 22216 : }
105 :
106 : // The slot offsets specify a range of slots at addresses:
107 : // [page_start_ + start_offset ... page_start_ + end_offset).
108 16585141 : void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
109 16585141 : CHECK_LE(end_offset, 1 << kPageSizeBits);
110 : DCHECK_LE(start_offset, end_offset);
111 : int start_bucket, start_cell, start_bit;
112 : SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
113 : int end_bucket, end_cell, end_bit;
114 : SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
115 16585141 : uint32_t start_mask = (1u << start_bit) - 1;
116 16585141 : uint32_t end_mask = ~((1u << end_bit) - 1);
117 : Bucket bucket;
118 16585141 : if (start_bucket == end_bucket && start_cell == end_cell) {
119 9254094 : bucket = LoadBucket(&buckets_[start_bucket]);
120 9254094 : if (bucket != nullptr) {
121 1104474 : ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
122 : }
123 : return;
124 : }
125 : int current_bucket = start_bucket;
126 : int current_cell = start_cell;
127 7331047 : bucket = LoadBucket(&buckets_[current_bucket]);
128 7331047 : if (bucket != nullptr) {
129 1231339 : ClearCellBits(&bucket[current_cell], ~start_mask);
130 : }
131 7431916 : current_cell++;
132 7431916 : if (current_bucket < end_bucket) {
133 873196 : if (bucket != nullptr) {
134 : ClearBucket(bucket, current_cell, kCellsPerBucket);
135 : }
136 : // The rest of the current bucket is cleared.
137 : // Move on to the next bucket.
138 873196 : current_bucket++;
139 : current_cell = 0;
140 : }
141 : DCHECK(current_bucket == end_bucket ||
142 : (current_bucket < end_bucket && current_cell == 0));
143 15374714 : while (current_bucket < end_bucket) {
144 3971389 : if (mode == PREFREE_EMPTY_BUCKETS) {
145 205 : PreFreeEmptyBucket(current_bucket);
146 3971184 : } else if (mode == FREE_EMPTY_BUCKETS) {
147 696 : ReleaseBucket(current_bucket);
148 : } else {
149 : DCHECK(mode == KEEP_EMPTY_BUCKETS);
150 3970488 : bucket = LoadBucket(&buckets_[current_bucket]);
151 3970488 : if (bucket != nullptr) {
152 : ClearBucket(bucket, 0, kCellsPerBucket);
153 : }
154 : }
155 3971399 : current_bucket++;
156 : }
157 : // All buckets between start_bucket and end_bucket are cleared.
158 7431926 : bucket = LoadBucket(&buckets_[current_bucket]);
159 : DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
160 7431926 : if (current_bucket == kBuckets || bucket == nullptr) {
161 : return;
162 : }
163 3839477 : while (current_cell < end_cell) {
164 1335585 : StoreCell(&bucket[current_cell], 0);
165 1335585 : current_cell++;
166 : }
167 : // All cells between start_cell and end_cell are cleared.
168 : DCHECK(current_bucket == end_bucket && current_cell == end_cell);
169 1168307 : ClearCellBits(&bucket[end_cell], ~end_mask);
170 : }
171 :
172 : // The slot offset specifies a slot at address page_start_ + slot_offset.
173 2513976 : bool Lookup(int slot_offset) {
174 : int bucket_index, cell_index, bit_index;
175 : SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
176 2513976 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
177 2513976 : if (bucket == nullptr) return false;
178 4499568 : return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
179 : }
180 :
181 : // Iterate over all slots in the set and for each slot invoke the callback.
182 : // If the callback returns REMOVE_SLOT then the slot is removed from the set.
183 : // Returns the new number of slots.
184 : // This method should only be called on the main thread.
185 : //
186 : // Sample usage:
187 : // Iterate([](MaybeObjectSlot slot) {
188 : // if (good(slot)) return KEEP_SLOT;
189 : // else return REMOVE_SLOT;
190 : // });
191 : template <typename Callback>
192 474504 : int Iterate(Callback callback, EmptyBucketMode mode) {
193 : int new_count = 0;
194 32956548 : for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
195 16205983 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
196 16205983 : if (bucket != nullptr) {
197 : int in_bucket_count = 0;
198 1610181 : int cell_offset = bucket_index * kBitsPerBucket;
199 104154249 : for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
200 51240388 : uint32_t cell = LoadCell(&bucket[i]);
201 51240388 : if (cell) {
202 : uint32_t old_cell = cell;
203 : uint32_t mask = 0;
204 232942977 : while (cell) {
205 106346404 : int bit_offset = base::bits::CountTrailingZeros(cell);
206 106346404 : uint32_t bit_mask = 1u << bit_offset;
207 106346404 : uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
208 212310353 : if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) {
209 30628087 : ++in_bucket_count;
210 : } else {
211 75273860 : mask |= bit_mask;
212 : }
213 105901947 : cell ^= bit_mask;
214 : }
215 20699308 : uint32_t new_cell = old_cell & ~mask;
216 20699308 : if (old_cell != new_cell) {
217 : ClearCellBits(&bucket[i], mask);
218 : }
219 : }
220 : }
221 1641827 : if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
222 571552 : PreFreeEmptyBucket(bucket_index);
223 : }
224 1645220 : new_count += in_bucket_count;
225 : }
226 : }
227 509543 : return new_count;
228 : }
229 :
230 10 : int NumberOfPreFreedEmptyBuckets() {
231 10 : base::MutexGuard guard(&to_be_freed_buckets_mutex_);
232 20 : return static_cast<int>(to_be_freed_buckets_.size());
233 : }
234 :
235 777 : void PreFreeEmptyBuckets() {
236 50505 : for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
237 24864 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
238 24864 : if (bucket != nullptr) {
239 181 : if (IsEmptyBucket(bucket)) {
240 178 : PreFreeEmptyBucket(bucket_index);
241 : }
242 : }
243 : }
244 777 : }
245 :
246 190547 : void FreeEmptyBuckets() {
247 12385555 : for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
248 6097504 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
249 6097504 : if (bucket != nullptr) {
250 621306 : if (IsEmptyBucket(bucket)) {
251 240422 : ReleaseBucket(bucket_index);
252 : }
253 : }
254 : }
255 190547 : }
256 :
257 601945 : void FreeToBeFreedBuckets() {
258 601945 : base::MutexGuard guard(&to_be_freed_buckets_mutex_);
259 1177036 : while (!to_be_freed_buckets_.empty()) {
260 575032 : Bucket top = to_be_freed_buckets_.top();
261 : to_be_freed_buckets_.pop();
262 : DeleteArray<uint32_t>(top);
263 : }
264 : DCHECK_EQ(0u, to_be_freed_buckets_.size());
265 601998 : }
266 :
267 : private:
268 : using Bucket = uint32_t*;
269 : static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
270 : static const int kCellsPerBucket = 32;
271 : static const int kCellsPerBucketLog2 = 5;
272 : static const int kBitsPerCell = 32;
273 : static const int kBitsPerCellLog2 = 5;
274 : static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
275 : static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
276 : static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
277 :
278 : Bucket AllocateBucket() {
279 1409911 : Bucket result = NewArray<uint32_t>(kCellsPerBucket);
280 91645458 : for (int i = 0; i < kCellsPerBucket; i++) {
281 45117760 : result[i] = 0;
282 : }
283 : return result;
284 : }
285 :
286 : void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
287 : DCHECK_GE(start_cell, 0);
288 : DCHECK_LE(end_cell, kCellsPerBucket);
289 : int current_cell = start_cell;
290 2109511 : while (current_cell < kCellsPerBucket) {
291 1935233 : StoreCell(&bucket[current_cell], 0);
292 1935233 : current_cell++;
293 : }
294 : }
295 :
296 571713 : void PreFreeEmptyBucket(int bucket_index) {
297 1143426 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
298 571713 : if (bucket != nullptr) {
299 571747 : base::MutexGuard guard(&to_be_freed_buckets_mutex_);
300 : to_be_freed_buckets_.push(bucket);
301 : StoreBucket(&buckets_[bucket_index], nullptr);
302 : }
303 574173 : }
304 :
305 3805899 : void ReleaseBucket(int bucket_index) {
306 3805899 : Bucket bucket = LoadBucket(&buckets_[bucket_index]);
307 : StoreBucket(&buckets_[bucket_index], nullptr);
308 : DeleteArray<uint32_t>(bucket);
309 3805901 : }
310 :
311 : template <AccessMode access_mode = AccessMode::ATOMIC>
312 : Bucket LoadBucket(Bucket* bucket) {
313 : if (access_mode == AccessMode::ATOMIC)
314 : return base::AsAtomicPointer::Acquire_Load(bucket);
315 : return *bucket;
316 : }
317 :
318 : template <AccessMode access_mode = AccessMode::ATOMIC>
319 : void StoreBucket(Bucket* bucket, Bucket value) {
320 : if (access_mode == AccessMode::ATOMIC) {
321 : base::AsAtomicPointer::Release_Store(bucket, value);
322 : } else {
323 : *bucket = value;
324 : }
325 : }
326 :
327 621487 : bool IsEmptyBucket(Bucket bucket) {
328 20219589 : for (int i = 0; i < kCellsPerBucket; i++) {
329 20359876 : if (LoadCell(&bucket[i])) {
330 : return false;
331 : }
332 : }
333 : return true;
334 : }
335 :
336 : template <AccessMode access_mode = AccessMode::ATOMIC>
337 : bool SwapInNewBucket(Bucket* bucket, Bucket value) {
338 : if (access_mode == AccessMode::ATOMIC) {
339 : return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr,
340 : value) == nullptr;
341 : } else {
342 : DCHECK_NULL(*bucket);
343 100837 : *bucket = value;
344 : return true;
345 : }
346 : }
347 :
348 : template <AccessMode access_mode = AccessMode::ATOMIC>
349 : uint32_t LoadCell(uint32_t* cell) {
350 : if (access_mode == AccessMode::ATOMIC)
351 : return base::AsAtomic32::Acquire_Load(cell);
352 : return *cell;
353 : }
354 :
355 : void StoreCell(uint32_t* cell, uint32_t value) {
356 : base::AsAtomic32::Release_Store(cell, value);
357 : }
358 :
359 : void ClearCellBits(uint32_t* cell, uint32_t mask) {
360 16898393 : base::AsAtomic32::SetBits(cell, 0u, mask);
361 : }
362 :
363 : template <AccessMode access_mode = AccessMode::ATOMIC>
364 : void SetCellBits(uint32_t* cell, uint32_t mask) {
365 : if (access_mode == AccessMode::ATOMIC) {
366 88412402 : base::AsAtomic32::SetBits(cell, mask, mask);
367 : } else {
368 16213470 : *cell = (*cell & ~mask) | mask;
369 : }
370 : }
371 :
372 : // Converts the slot offset into bucket/cell/bit index.
373 : void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
374 : int* bit_index) {
375 : DCHECK(IsAligned(slot_offset, kTaggedSize));
376 221452407 : int slot = slot_offset >> kTaggedSizeLog2;
377 : DCHECK(slot >= 0 && slot <= kMaxSlots);
378 221452407 : *bucket_index = slot >> kBitsPerBucketLog2;
379 221452407 : *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
380 221452407 : *bit_index = slot & (kBitsPerCell - 1);
381 : }
382 :
383 : Bucket buckets_[kBuckets];
384 : Address page_start_;
385 : base::Mutex to_be_freed_buckets_mutex_;
386 : std::stack<uint32_t*> to_be_freed_buckets_;
387 : };
388 :
// Kind of a typed slot recorded in TypedSlots/TypedSlotSet. The value is
// packed into a 3-bit field (TypedSlots::TypeField), so at most 8 enumerators
// are allowed — TypedSlotSet::Iterate STATIC_ASSERTs CLEARED_SLOT < 8.
// CLEARED_SLOT marks an entry that has been removed in place (see
// TypedSlotSet::ClearTypedSlot); iteration skips such entries.
enum SlotType {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  CLEARED_SLOT
};
396 :
397 : // Data structure for maintaining a list of typed slots in a page.
398 : // Typed slots can only appear in Code and JSFunction objects, so
399 : // the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
400 : // The implementation is a chain of chunks, where each chunks is an array of
401 : // encoded (slot type, slot offset) pairs.
402 : // There is no duplicate detection and we do not expect many duplicates because
403 : // typed slots contain V8 internal pointers that are not directly exposed to JS.
404 : class V8_EXPORT_PRIVATE TypedSlots {
405 : public:
406 : static const int kMaxOffset = 1 << 29;
407 10061 : TypedSlots() = default;
408 : virtual ~TypedSlots();
409 : void Insert(SlotType type, uint32_t offset);
410 : void Merge(TypedSlots* other);
411 :
412 : protected:
413 : class OffsetField : public BitField<int, 0, 29> {};
414 : class TypeField : public BitField<SlotType, 29, 3> {};
415 : struct TypedSlot {
416 : uint32_t type_and_offset;
417 : };
418 : struct Chunk {
419 : Chunk* next;
420 : TypedSlot* buffer;
421 : int32_t capacity;
422 : int32_t count;
423 : };
424 : static const int kInitialBufferSize = 100;
425 : static const int kMaxBufferSize = 16 * KB;
426 : static int NextCapacity(int capacity) {
427 886 : return Min(kMaxBufferSize, capacity * 2);
428 : }
429 : Chunk* EnsureChunk();
430 : Chunk* NewChunk(Chunk* next, int capacity);
431 : Chunk* head_ = nullptr;
432 : Chunk* tail_ = nullptr;
433 : };
434 :
// A multiset of per-page typed slots that allows concurrent iteration and
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
 public:
  // The PREFREE_EMPTY_CHUNKS indicates that chunks detected as empty
  // during the iteration are queued in to_be_freed_chunks_, which are
  // then freed in FreeToBeFreedChunks.
  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}

  ~TypedSlotSet() override;

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address slot_address) {
  //   if (good(slot_type, slot_address)) return KEEP_SLOT;
  //   else return REMOVE_SLOT;
  // });
  // This can run concurrently to ClearInvalidSlots().
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    // The slot type is packed into 3 bits (TypeField), so all enumerators
    // must fit below 8.
    STATIC_ASSERT(CLEARED_SLOT < 8);
    // NOTE(review): head_ and chunk->next are read non-atomically here while
    // LoadHead()/LoadNext() exist for the concurrent ClearInvalidSlots() path;
    // presumably only Iterate() mutates the links — confirm before relying on
    // this elsewhere.
    Chunk* chunk = head_;
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer;
      int count = chunk->count;
      bool empty = true;
      for (int i = 0; i < count; i++) {
        // Atomic (relaxed) load pairs with ClearTypedSlot's relaxed store, so
        // a concurrent clear is observed as a whole CLEARED_SLOT entry.
        TypedSlot slot = LoadTypedSlot(buffer + i);
        SlotType type = TypeField::decode(slot.type_and_offset);
        if (type != CLEARED_SLOT) {
          uint32_t offset = OffsetField::decode(slot.type_and_offset);
          Address addr = page_start_ + offset;
          if (callback(type, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            // Clear in place rather than compacting, so concurrent readers
            // never see a partially moved entry.
            ClearTypedSlot(buffer + i);
          }
        }
      }
      Chunk* next = chunk->next;
      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point its next
        // chunk to allow concurrent iteration.
        if (previous) {
          StoreNext(previous, next);
        } else {
          StoreHead(next);
        }
        // Defer actual deallocation to FreeToBeFreedChunks() so a concurrent
        // iterator holding a pointer to this chunk stays valid.
        base::MutexGuard guard(&to_be_freed_chunks_mutex_);
        to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  // Clears all slots that have the offset in the specified ranges.
  // This can run concurrently to Iterate().
  void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);

  // Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
  void FreeToBeFreedChunks();

 private:
  // Atomic operations used by Iterate and ClearInvalidSlots;
  // all use relaxed ordering — the list links and slot words are each
  // independently consistent, no cross-word ordering is required here.
  Chunk* LoadNext(Chunk* chunk) {
    return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
  }
  void StoreNext(Chunk* chunk, Chunk* next) {
    return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
  }
  Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
  void StoreHead(Chunk* chunk) {
    base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
  }
  TypedSlot LoadTypedSlot(TypedSlot* slot) {
    return TypedSlot{base::AsAtomic32::Relaxed_Load(&slot->type_and_offset)};
  }
  void ClearTypedSlot(TypedSlot* slot) {
    // Order is important here and should match that of LoadTypedSlot.
    base::AsAtomic32::Relaxed_Store(
        &slot->type_and_offset,
        TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
  }

  // Base address of the page; slot offsets are relative to it.
  Address page_start_;
  // Guards to_be_freed_chunks_ (filled by Iterate, drained by
  // FreeToBeFreedChunks).
  base::Mutex to_be_freed_chunks_mutex_;
  std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};
534 :
535 : } // namespace internal
536 : } // namespace v8
537 :
538 : #endif // V8_HEAP_SLOT_SET_H_
|