// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H

#include <map>
#include <stack>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
// The data structure assumes that the slots are pointer-size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
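//
// Sample usage (a sketch; page_start is assumed to be the base address of the
// page that owns the slots):
//   SlotSet slots;
//   slots.SetPageStart(page_start);
//   slots.Insert(2 * kPointerSize);  // Record the third slot of the page.
//   DCHECK(slots.Lookup(2 * kPointerSize));
//   slots.Remove(2 * kPointerSize);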
class SlotSet : public Malloced {
 public:
  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,     // An empty bucket will be deallocated immediately.
    PREFREE_EMPTY_BUCKETS,  // An empty bucket will be unlinked from the slot
                            // set, but deallocated on demand by a sweeper
                            // thread.
    KEEP_EMPTY_BUCKETS      // An empty bucket will be kept.
  };

  SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      bucket[i].SetValue(nullptr);
    }
  }

  ~SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      ReleaseBucket(i);
    }
    FreeToBeFreedBuckets();
  }

  void SetPageStart(Address page_start) { page_start_ = page_start; }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // This method should only be called on the main thread because concurrent
  // allocation of the bucket is not thread-safe.
  void Insert(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
    if (current_bucket == nullptr) {
      current_bucket = AllocateBucket();
      bucket[bucket_index].SetValue(current_bucket);
    }
    if (!(current_bucket[cell_index].Value() & (1u << bit_index))) {
      current_bucket[cell_index].SetBit(bit_index);
    }
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // Returns true if the set contains the slot.
  bool Contains(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
    if (current_bucket == nullptr) {
      return false;
    }
    return (current_bucket[cell_index].Value() & (1u << bit_index)) != 0;
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
    if (current_bucket != nullptr) {
      uint32_t cell = current_bucket[cell_index].Value();
      if (cell) {
        uint32_t bit_mask = 1u << bit_index;
        if (cell & bit_mask) {
          current_bucket[cell_index].ClearBit(bit_index);
        }
      }
    }
  }

  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
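  // For example, RemoveRange(0, end_offset, FREE_EMPTY_BUCKETS) removes every
  // recorded slot below end_offset and immediately deallocates the buckets
  // that the range covers completely.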
  void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
    CHECK_LE(end_offset, 1 << kPageSizeBits);
    DCHECK_LE(start_offset, end_offset);
    int start_bucket, start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    int end_bucket, end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    if (start_bucket == end_bucket && start_cell == end_cell) {
      ClearCell(start_bucket, start_cell, ~(start_mask | end_mask));
      return;
    }
    int current_bucket = start_bucket;
    int current_cell = start_cell;
    ClearCell(current_bucket, current_cell, ~start_mask);
    current_cell++;
    base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
    if (current_bucket < end_bucket) {
      if (bucket_ptr != nullptr) {
        ClearBucket(bucket_ptr, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared.
      // Move on to the next bucket.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == PREFREE_EMPTY_BUCKETS) {
        PreFreeEmptyBucket(current_bucket);
      } else if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket_ptr = bucket[current_bucket].Value();
        if (bucket_ptr) {
          ClearBucket(bucket_ptr, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are cleared.
    DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
    // If the range extends to the end of the page, current_bucket can equal
    // kBuckets; check this before indexing into the bucket array.
    if (current_bucket == kBuckets) {
      return;
    }
    bucket_ptr = bucket[current_bucket].Value();
    if (bucket_ptr == nullptr) {
      return;
    }
    while (current_cell < end_cell) {
      bucket_ptr[current_cell].SetValue(0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    ClearCell(end_bucket, end_cell, ~end_mask);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    if (bucket[bucket_index].Value() != nullptr) {
      uint32_t cell = bucket[bucket_index].Value()[cell_index].Value();
      return (cell & (1u << bit_index)) != 0;
    }
    return false;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  // This method should only be called on the main thread.
  //
  // Sample usage:
  // Iterate([](Address slot_address) {
  //   if (good(slot_address)) return KEEP_SLOT;
  //   else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, EmptyBucketMode mode) {
    int new_count = 0;
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      base::AtomicValue<uint32_t>* current_bucket =
          bucket[bucket_index].Value();
      if (current_bucket != nullptr) {
        int in_bucket_count = 0;
        int cell_offset = bucket_index * kBitsPerBucket;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          if (current_bucket[i].Value()) {
            uint32_t cell = current_bucket[i].Value();
            uint32_t old_cell = cell;
            uint32_t mask = 0;
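            // Visit the set bits from least to most significant: find the
            // lowest set bit, map it back to a slot address, and clear it
            // from the working copy of the cell.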
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros32(cell);
              uint32_t bit_mask = 1u << bit_offset;
              uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
              if (callback(page_start_ + slot) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
                // If TrySetValue fails, the cell must have changed
                // concurrently. Reread the cell, clear the removed bits
                // again, and retry. This is safe because this method is only
                // called on the main thread and concurrent threads only ever
                // remove slots, never add them.
                old_cell = current_bucket[i].Value();
                new_cell = old_cell & ~mask;
              }
            }
          }
        }
        if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
          PreFreeEmptyBucket(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }

  void FreeToBeFreedBuckets() {
    base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
    while (!to_be_freed_buckets_.empty()) {
      base::AtomicValue<uint32_t>* top = to_be_freed_buckets_.top();
      to_be_freed_buckets_.pop();
      DeleteArray<base::AtomicValue<uint32_t>>(top);
    }
  }

 private:
  static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
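  // For illustration (the concrete values depend on the build configuration,
  // so treat this as an assumption): with kPageSizeBits == 19 and 8-byte
  // pointers, kMaxSlots == 65536 and kBuckets == 64, so a fully populated
  // SlotSet costs 64 buckets * 32 cells * 4 bytes = 8 KB of bitmap per page.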

  base::AtomicValue<uint32_t>* AllocateBucket() {
    base::AtomicValue<uint32_t>* result =
        NewArray<base::AtomicValue<uint32_t>>(kCellsPerBucket);
    for (int i = 0; i < kCellsPerBucket; i++) {
      result[i].SetValue(0);
    }
    return result;
  }

  void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
                   int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < end_cell) {
      bucket[current_cell].SetValue(0);
      current_cell++;
    }
  }

  void PreFreeEmptyBucket(int bucket_index) {
    base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
    if (bucket_ptr != nullptr) {
      base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
      to_be_freed_buckets_.push(bucket_ptr);
      bucket[bucket_index].SetValue(nullptr);
    }
  }

  void ReleaseBucket(int bucket_index) {
    DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
    bucket[bucket_index].SetValue(nullptr);
  }

  void ClearCell(int bucket_index, int cell_index, uint32_t mask) {
    if (bucket_index < kBuckets) {
      base::AtomicValue<uint32_t>* cells = bucket[bucket_index].Value();
      if (cells != nullptr) {
        uint32_t cell = cells[cell_index].Value();
        if (cell) cells[cell_index].SetBits(0, mask);
      }
    } else {
      // GCC bug 59124: Emits wrong warnings
      // "array subscript is above array bounds"
      UNREACHABLE();
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
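  // For example, with kPointerSizeLog2 == 3 (a 64-bit build), slot_offset 8
  // yields slot 1, which lands in bucket 0, cell 0, bit 1; slot_offset 8200
  // yields slot 1025, i.e. bucket 1, cell 0, bit 1.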
  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
                     int* bit_index) {
    DCHECK_EQ(slot_offset % kPointerSize, 0);
    int slot = slot_offset >> kPointerSizeLog2;
    DCHECK(slot >= 0 && slot <= kMaxSlots);
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
    *bit_index = slot & (kBitsPerCell - 1);
  }

  base::AtomicValue<base::AtomicValue<uint32_t>*> bucket[kBuckets];
  Address page_start_;
  base::Mutex to_be_freed_buckets_mutex_;
  std::stack<base::AtomicValue<uint32_t>*> to_be_freed_buckets_;
};

enum SlotType {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  CELL_TARGET_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  DEBUG_TARGET_SLOT,
  CLEARED_SLOT
};

// Data structure for maintaining a multiset of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
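//
// Sample usage (a sketch; host_offset and slot_offset are offsets of the host
// object and the slot relative to page_start):
//   TypedSlotSet typed_slots(page_start);
//   typed_slots.Insert(CODE_TARGET_SLOT, host_offset, slot_offset);
//   typed_slots.Iterate(
//       [](SlotType type, Address host_addr, Address slot_addr) {
//         return KEEP_SLOT;  // or REMOVE_SLOT to drop the entry.
//       },
//       TypedSlotSet::KEEP_EMPTY_CHUNKS);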
class TypedSlotSet {
 public:
  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  typedef std::pair<SlotType, uint32_t> TypeAndOffset;

  struct TypedSlot {
    TypedSlot() {
      type_and_offset_.SetValue(0);
      host_offset_.SetValue(0);
    }

    TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset) {
      type_and_offset_.SetValue(TypeField::encode(type) |
                                OffsetField::encode(offset));
      host_offset_.SetValue(host_offset);
    }

    bool operator==(const TypedSlot other) {
      return type_and_offset_.Value() == other.type_and_offset_.Value() &&
             host_offset_.Value() == other.host_offset_.Value();
    }

    bool operator!=(const TypedSlot other) { return !(*this == other); }

    SlotType type() { return TypeField::decode(type_and_offset_.Value()); }

    uint32_t offset() { return OffsetField::decode(type_and_offset_.Value()); }

    TypeAndOffset GetTypeAndOffset() {
      uint32_t type_and_offset = type_and_offset_.Value();
      return std::make_pair(TypeField::decode(type_and_offset),
                            OffsetField::decode(type_and_offset));
    }

    uint32_t host_offset() { return host_offset_.Value(); }

    void Set(TypedSlot slot) {
      type_and_offset_.SetValue(slot.type_and_offset_.Value());
      host_offset_.SetValue(slot.host_offset_.Value());
    }

    void Clear() {
      type_and_offset_.SetValue(TypeField::encode(CLEARED_SLOT) |
                                OffsetField::encode(0));
      host_offset_.SetValue(0);
    }

    base::AtomicValue<uint32_t> type_and_offset_;
    base::AtomicValue<uint32_t> host_offset_;
  };
  static const int kMaxOffset = 1 << 29;

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
    chunk_.SetValue(new Chunk(nullptr, kInitialBufferSize));
  }

  ~TypedSlotSet() {
    Chunk* chunk = chunk_.Value();
    while (chunk != nullptr) {
      Chunk* next = chunk->next.Value();
      delete chunk;
      chunk = next;
    }
    FreeToBeFreedChunks();
  }

  // The slot offset specifies a slot at address page_start_ + offset.
  // This method can only be called on the main thread.
  void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
    TypedSlot slot(type, host_offset, offset);
    Chunk* top_chunk = chunk_.Value();
    if (!top_chunk) {
      top_chunk = new Chunk(nullptr, kInitialBufferSize);
      chunk_.SetValue(top_chunk);
    }
    if (!top_chunk->AddSlot(slot)) {
      Chunk* new_top_chunk =
          new Chunk(top_chunk, NextCapacity(top_chunk->capacity.Value()));
      bool added = new_top_chunk->AddSlot(slot);
      chunk_.SetValue(new_top_chunk);
      DCHECK(added);
      USE(added);
    }
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address host_addr, Address slot_address) {
  //   if (good(slot_type, slot_address)) return KEEP_SLOT;
  //   else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    STATIC_ASSERT(CLEARED_SLOT < 8);
    Chunk* chunk = chunk_.Value();
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer.Value();
      int count = chunk->count.Value();
      bool empty = true;
      for (int i = 0; i < count; i++) {
        // Order is important here. We have to read out the slot type last to
        // observe the concurrent removal case consistently.
        Address host_addr = page_start_ + buffer[i].host_offset();
        TypeAndOffset type_and_offset = buffer[i].GetTypeAndOffset();
        SlotType type = type_and_offset.first;
        if (type != CLEARED_SLOT) {
          Address addr = page_start_ + type_and_offset.second;
          if (callback(type, host_addr, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            buffer[i].Clear();
          }
        }
      }

      Chunk* next = chunk->next.Value();
      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point to its
        // next chunk to allow concurrent iteration.
        if (previous) {
          previous->next.SetValue(next);
        } else {
          chunk_.SetValue(next);
        }
        base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
        to_be_freed_chunks_.push(chunk);
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  void FreeToBeFreedChunks() {
    base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
    while (!to_be_freed_chunks_.empty()) {
      Chunk* top = to_be_freed_chunks_.top();
      to_be_freed_chunks_.pop();
      delete top;
    }
  }

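  // Clears all slots whose host object starts inside one of the given invalid
  // ranges. Each map entry (start, end) is interpreted as the half-open range
  // [start, end) of host offsets relative to page_start_.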
  void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
    Chunk* chunk = chunk_.Value();
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer.Value();
      int count = chunk->count.Value();
      for (int i = 0; i < count; i++) {
        uint32_t host_offset = buffer[i].host_offset();
        std::map<uint32_t, uint32_t>::iterator upper_bound =
            invalid_ranges.upper_bound(host_offset);
        if (upper_bound == invalid_ranges.begin()) continue;
        // upper_bound points to the invalid range after the given slot.
        // Hence, we have to go to the previous element.
        upper_bound--;
        DCHECK_LE(upper_bound->first, host_offset);
        if (upper_bound->second > host_offset) {
          buffer[i].Clear();
        }
      }
      chunk = chunk->next.Value();
    }
  }

 private:
  static const int kInitialBufferSize = 100;
  static const int kMaxBufferSize = 16 * KB;

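  // Chunk capacities grow geometrically (doubling) up to kMaxBufferSize, so
  // the number of chunks stays logarithmic in the slot count until the cap
  // is reached.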
  static int NextCapacity(int capacity) {
    return Min(kMaxBufferSize, capacity * 2);
  }

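  // A TypedSlot packs its payload into one 32-bit word: bits 0..28 hold the
  // slot offset (hence kMaxOffset == 1 << 29) and bits 29..31 hold the
  // SlotType (hence the STATIC_ASSERT(CLEARED_SLOT < 8) in Iterate).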
  class OffsetField : public BitField<int, 0, 29> {};
  class TypeField : public BitField<SlotType, 29, 3> {};

  struct Chunk : Malloced {
    explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
      count.SetValue(0);
      capacity.SetValue(chunk_capacity);
      buffer.SetValue(NewArray<TypedSlot>(chunk_capacity));
      next.SetValue(next_chunk);
    }
    bool AddSlot(TypedSlot slot) {
      int current_count = count.Value();
      if (current_count == capacity.Value()) return false;
      TypedSlot* current_buffer = buffer.Value();
      // Order is important here. We have to write the slot first before
      // increasing the counter to guarantee that a consistent state is
      // observed by concurrent threads.
      current_buffer[current_count].Set(slot);
      count.SetValue(current_count + 1);
      return true;
    }
    ~Chunk() { DeleteArray(buffer.Value()); }
    base::AtomicValue<Chunk*> next;
    base::AtomicValue<int> count;
    base::AtomicValue<int> capacity;
    base::AtomicValue<TypedSlot*> buffer;
  };

  Address page_start_;
  base::AtomicValue<Chunk*> chunk_;
  base::Mutex to_be_freed_chunks_mutex_;
  std::stack<Chunk*> to_be_freed_chunks_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SLOT_SET_H
|