LCOV - code coverage report
Current view: top level - src/heap - slot-set.h (source / functions)    Hit / Total / Coverage
Test: app.info    Lines: 204 / 213 / 95.8 %
Date: 2017-10-20    Functions: 58 / 71 / 81.7 %

          Line data    Source code
       1             : // Copyright 2016 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #ifndef V8_SLOT_SET_H
       6             : #define V8_SLOT_SET_H
       7             : 
       8             : #include <map>
       9             : #include <stack>
      10             : 
      11             : #include "src/allocation.h"
      12             : #include "src/base/atomic-utils.h"
      13             : #include "src/base/bits.h"
      14             : #include "src/utils.h"
      15             : 
      16             : namespace v8 {
      17             : namespace internal {
      18             : 
// Result returned by a slot-visiting callback: keep the visited slot in the
// set, or remove it.
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
      20             : 
      21             : // Data structure for maintaining a set of slots in a standard (non-large)
      22             : // page. The base address of the page must be set with SetPageStart before any
      23             : // operation.
      24             : // The data structure assumes that the slots are pointer size aligned and
      25             : // splits the valid slot offset range into kBuckets buckets.
      26             : // Each bucket is a bitmap with a bit corresponding to a single slot offset.
      27             : class SlotSet : public Malloced {
      28             :  public:
      29             :   enum EmptyBucketMode {
      30             :     FREE_EMPTY_BUCKETS,     // An empty bucket will be deallocated immediately.
      31             :     PREFREE_EMPTY_BUCKETS,  // An empty bucket will be unlinked from the slot
      32             :                             // set, but deallocated on demand by a sweeper
      33             :                             // thread.
      34             :     KEEP_EMPTY_BUCKETS      // An empty bucket will be kept.
      35             :   };
      36             : 
      37      176128 :   SlotSet() {
      38     5723659 :     for (int i = 0; i < kBuckets; i++) {
      39     5635595 :       StoreBucket(&buckets_[i], nullptr);
      40             :     }
      41       88064 :   }
      42             : 
      43      171398 :   ~SlotSet() {
      44     5568770 :     for (int i = 0; i < kBuckets; i++) {
      45     5483081 :       ReleaseBucket(i);
      46             :     }
      47       85689 :     FreeToBeFreedBuckets();
      48       85693 :   }
      49             : 
      50       88064 :   void SetPageStart(Address page_start) { page_start_ = page_start; }
      51             : 
      52             :   // The slot offset specifies a slot at address page_start_ + slot_offset.
      53             :   // This method should only be called on the main thread because concurrent
      54             :   // allocation of the bucket is not thread-safe.
      55             :   //
      56             :   // AccessMode defines whether there can be concurrent access on the buckets
      57             :   // or not.
      58             :   template <AccessMode access_mode = AccessMode::ATOMIC>
      59   167868265 :   void Insert(int slot_offset) {
      60             :     int bucket_index, cell_index, bit_index;
      61             :     SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
      62   167868265 :     Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
      63   167868265 :     if (bucket == nullptr) {
      64     1306903 :       bucket = AllocateBucket();
      65     1211857 :       if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
      66             :         DeleteArray<uint32_t>(bucket);
      67             :         bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
      68             :       }
      69             :     }
      70             :     // Check that monotonicity is preserved, i.e., once a bucket is set we do
      71             :     // not free it concurrently.
      72             :     DCHECK_NOT_NULL(bucket);
      73             :     DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
      74   167868269 :     uint32_t mask = 1u << bit_index;
      75   328492236 :     if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
      76             :       SetCellBits<access_mode>(&bucket[cell_index], mask);
      77             :     }
      78   167869586 :   }
      79             : 
      80             :   // The slot offset specifies a slot at address page_start_ + slot_offset.
      81             :   // Returns true if the set contains the slot.
      82           0 :   bool Contains(int slot_offset) {
      83             :     int bucket_index, cell_index, bit_index;
      84             :     SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
      85           0 :     Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      86           0 :     if (bucket == nullptr) return false;
      87           0 :     return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
      88             :   }
      89             : 
      90             :   // The slot offset specifies a slot at address page_start_ + slot_offset.
      91       43945 :   void Remove(int slot_offset) {
      92             :     int bucket_index, cell_index, bit_index;
      93             :     SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
      94       43945 :     Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      95       43945 :     if (bucket != nullptr) {
      96       43756 :       uint32_t cell = LoadCell(&bucket[cell_index]);
      97       43756 :       uint32_t bit_mask = 1u << bit_index;
      98       43756 :       if (cell & bit_mask) {
      99             :         ClearCellBits(&bucket[cell_index], bit_mask);
     100             :       }
     101             :     }
     102       43945 :   }
     103             : 
     104             :   // The slot offsets specify a range of slots at addresses:
     105             :   // [page_start_ + start_offset ... page_start_ + end_offset).
     106    28174786 :   void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
     107    28174786 :     CHECK_LE(end_offset, 1 << kPageSizeBits);
     108             :     DCHECK_LE(start_offset, end_offset);
     109             :     int start_bucket, start_cell, start_bit;
     110             :     SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
     111             :     int end_bucket, end_cell, end_bit;
     112             :     SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
     113    28174786 :     uint32_t start_mask = (1u << start_bit) - 1;
     114    28174786 :     uint32_t end_mask = ~((1u << end_bit) - 1);
     115             :     Bucket bucket;
     116    28174786 :     if (start_bucket == end_bucket && start_cell == end_cell) {
     117    17932155 :       bucket = LoadBucket(&buckets_[start_bucket]);
     118    17932155 :       if (bucket != nullptr) {
     119     4098565 :         ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
     120             :       }
     121             :       return;
     122             :     }
     123             :     int current_bucket = start_bucket;
     124             :     int current_cell = start_cell;
     125    10242631 :     bucket = LoadBucket(&buckets_[current_bucket]);
     126    10242631 :     if (bucket != nullptr) {
     127     1707296 :       ClearCellBits(&bucket[current_cell], ~start_mask);
     128             :     }
     129    10295337 :     current_cell++;
     130    10295337 :     if (current_bucket < end_bucket) {
     131      919230 :       if (bucket != nullptr) {
     132      173822 :         ClearBucket(bucket, current_cell, kCellsPerBucket);
     133             :       }
     134             :       // The rest of the current bucket is cleared.
     135             :       // Move on to the next bucket.
     136      919259 :       current_bucket++;
     137             :       current_cell = 0;
     138             :     }
     139             :     DCHECK(current_bucket == end_bucket ||
     140             :            (current_bucket < end_bucket && current_cell == 0));
     141    16761349 :     while (current_bucket < end_bucket) {
     142     6465972 :       if (mode == PREFREE_EMPTY_BUCKETS) {
     143         724 :         PreFreeEmptyBucket(current_bucket);
     144     6465248 :       } else if (mode == FREE_EMPTY_BUCKETS) {
     145        1607 :         ReleaseBucket(current_bucket);
     146             :       } else {
     147             :         DCHECK(mode == KEEP_EMPTY_BUCKETS);
     148     6463641 :         bucket = LoadBucket(&buckets_[current_bucket]);
     149     6463641 :         if (bucket != nullptr) {
     150       11390 :           ClearBucket(bucket, 0, kCellsPerBucket);
     151             :         }
     152             :       }
     153     6465983 :       current_bucket++;
     154             :     }
     155             :     // All buckets between start_bucket and end_bucket are cleared.
     156    10295377 :     bucket = LoadBucket(&buckets_[current_bucket]);
     157             :     DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
     158    10295377 :     if (current_bucket == kBuckets || bucket == nullptr) {
     159             :       return;
     160             :     }
     161     3102778 :     while (current_cell < end_cell) {
     162     1457069 :       StoreCell(&bucket[current_cell], 0);
     163     1457069 :       current_cell++;
     164             :     }
     165             :     // All cells between start_cell and end_cell are cleared.
     166             :     DCHECK(current_bucket == end_bucket && current_cell == end_cell);
     167     1645709 :     ClearCellBits(&bucket[end_cell], ~end_mask);
     168             :   }
     169             : 
     170             :   // The slot offset specifies a slot at address page_start_ + slot_offset.
     171     2808888 :   bool Lookup(int slot_offset) {
     172             :     int bucket_index, cell_index, bit_index;
     173             :     SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
     174     2808888 :     Bucket bucket = LoadBucket(&buckets_[bucket_index]);
     175     2808888 :     if (bucket == nullptr) return false;
     176     4827248 :     return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
     177             :   }
     178             : 
     179             :   // Iterate over all slots in the set and for each slot invoke the callback.
     180             :   // If the callback returns REMOVE_SLOT then the slot is removed from the set.
     181             :   // Returns the new number of slots.
     182             :   // This method should only be called on the main thread.
     183             :   //
     184             :   // Sample usage:
     185             :   // Iterate([](Address slot_address) {
     186             :   //    if (good(slot_address)) return KEEP_SLOT;
     187             :   //    else return REMOVE_SLOT;
     188             :   // });
     189             :   template <typename Callback>
     190      370411 :   int Iterate(Callback callback, EmptyBucketMode mode) {
     191             :     int new_count = 0;
     192    24374652 :     for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
     193    23998266 :       Bucket bucket = LoadBucket(&buckets_[bucket_index]);
     194    23998266 :       if (bucket != nullptr) {
     195             :         int in_bucket_count = 0;
     196     1826078 :         int cell_offset = bucket_index * kBitsPerBucket;
     197    59887657 :         for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
     198    58055382 :           uint32_t cell = LoadCell(&bucket[i]);
     199    58055382 :           if (cell) {
     200             :             uint32_t old_cell = cell;
     201             :             uint32_t mask = 0;
     202   132384607 :             while (cell) {
     203   110758436 :               int bit_offset = base::bits::CountTrailingZeros32(cell);
     204   110758436 :               uint32_t bit_mask = 1u << bit_offset;
     205   110758436 :               uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
     206   213092341 :               if (callback(page_start_ + slot) == KEEP_SLOT) {
     207    43351848 :                 ++in_bucket_count;
     208             :               } else {
     209    67207828 :                 mask |= bit_mask;
     210             :               }
     211   110559676 :               cell ^= bit_mask;
     212             :             }
     213    21626171 :             uint32_t new_cell = old_cell & ~mask;
     214    21626171 :             if (old_cell != new_cell) {
     215             :               ClearCellBits(&bucket[i], mask);
     216             :             }
     217             :           }
     218             :         }
     219     1832339 :         if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
     220      603005 :           PreFreeEmptyBucket(bucket_index);
     221             :         }
     222     1832054 :         new_count += in_bucket_count;
     223             :       }
     224             :     }
     225      376387 :     return new_count;
     226             :   }
     227             : 
     228          10 :   int NumberOfPreFreedEmptyBuckets() {
     229          10 :     base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
     230          20 :     return static_cast<int>(to_be_freed_buckets_.size());
     231             :   }
     232             : 
     233         252 :   void PreFreeEmptyBuckets() {
     234       16380 :     for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
     235       16128 :       Bucket bucket = LoadBucket(&buckets_[bucket_index]);
     236       16128 :       if (bucket != nullptr) {
     237         506 :         if (IsEmptyBucket(bucket)) {
     238         506 :           PreFreeEmptyBucket(bucket_index);
     239             :         }
     240             :       }
     241             :     }
     242         252 :   }
     243             : 
     244      147626 :   void FreeEmptyBuckets() {
     245     9595690 :     for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
     246     9448064 :       Bucket bucket = LoadBucket(&buckets_[bucket_index]);
     247     9448064 :       if (bucket != nullptr) {
     248      717030 :         if (IsEmptyBucket(bucket)) {
     249      291272 :           ReleaseBucket(bucket_index);
     250             :         }
     251             :       }
     252             :     }
     253      147626 :   }
     254             : 
     255      453883 :   void FreeToBeFreedBuckets() {
     256      453883 :     base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
     257     1057246 :     while (!to_be_freed_buckets_.empty()) {
     258      603353 :       Bucket top = to_be_freed_buckets_.top();
     259             :       to_be_freed_buckets_.pop();
     260             :       DeleteArray<uint32_t>(top);
     261             :     }
     262             :     DCHECK_EQ(0u, to_be_freed_buckets_.size());
     263      453900 :   }
     264             : 
     265             :  private:
     266             :   typedef uint32_t* Bucket;
     267             :   static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
     268             :   static const int kCellsPerBucket = 32;
     269             :   static const int kCellsPerBucketLog2 = 5;
     270             :   static const int kBitsPerCell = 32;
     271             :   static const int kBitsPerCellLog2 = 5;
     272             :   static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
     273             :   static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
     274             :   static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
     275             : 
     276     1306901 :   Bucket AllocateBucket() {
     277     1306901 :     Bucket result = NewArray<uint32_t>(kCellsPerBucket);
     278    41820617 :     for (int i = 0; i < kCellsPerBucket; i++) {
     279    41820617 :       result[i] = 0;
     280             :     }
     281     1306907 :     return result;
     282             :   }
     283             : 
     284      185203 :   void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
     285             :     DCHECK_GE(start_cell, 0);
     286             :     DCHECK_LE(end_cell, kCellsPerBucket);
     287             :     int current_cell = start_cell;
     288     1848860 :     while (current_cell < kCellsPerBucket) {
     289     1663657 :       StoreCell(&bucket[current_cell], 0);
     290     1663657 :       current_cell++;
     291             :     }
     292      185203 :   }
     293             : 
     294      604271 :   void PreFreeEmptyBucket(int bucket_index) {
     295     1208542 :     Bucket bucket = LoadBucket(&buckets_[bucket_index]);
     296      604271 :     if (bucket != nullptr) {
     297      603814 :       base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
     298             :       to_be_freed_buckets_.push(bucket);
     299             :       StoreBucket(&buckets_[bucket_index], nullptr);
     300             :     }
     301      604420 :   }
     302             : 
     303     5775957 :   void ReleaseBucket(int bucket_index) {
     304     5775957 :     Bucket bucket = LoadBucket(&buckets_[bucket_index]);
     305             :     StoreBucket(&buckets_[bucket_index], nullptr);
     306             :     DeleteArray<uint32_t>(bucket);
     307     5775958 :   }
     308             : 
     309             :   template <AccessMode access_mode = AccessMode::ATOMIC>
     310             :   Bucket LoadBucket(Bucket* bucket) {
     311             :     if (access_mode == AccessMode::ATOMIC)
     312             :       return base::AsAtomicPointer::Acquire_Load(bucket);
     313             :     return *bucket;
     314             :   }
     315             : 
     316             :   template <AccessMode access_mode = AccessMode::ATOMIC>
     317             :   void StoreBucket(Bucket* bucket, Bucket value) {
     318             :     if (access_mode == AccessMode::ATOMIC) {
     319             :       base::AsAtomicPointer::Release_Store(bucket, value);
     320             :     } else {
     321             :       *bucket = value;
     322             :     }
     323             :   }
     324             : 
     325      717536 :   bool IsEmptyBucket(Bucket bucket) {
     326    11806579 :     for (int i = 0; i < kCellsPerBucket; i++) {
     327    24464674 :       if (LoadCell(&bucket[i])) {
     328             :         return false;
     329             :       }
     330             :     }
     331             :     return true;
     332             :   }
     333             : 
     334             :   template <AccessMode access_mode = AccessMode::ATOMIC>
     335     1211857 :   bool SwapInNewBucket(Bucket* bucket, Bucket value) {
     336             :     if (access_mode == AccessMode::ATOMIC) {
     337             :       return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr,
     338     1211857 :                                                            value) == nullptr;
     339             :     } else {
     340             :       DCHECK_NULL(*bucket);
     341       95049 :       *bucket = value;
     342             :       return true;
     343             :     }
     344             :   }
     345             : 
     346             :   template <AccessMode access_mode = AccessMode::ATOMIC>
     347             :   uint32_t LoadCell(uint32_t* cell) {
     348             :     if (access_mode == AccessMode::ATOMIC)
     349             :       return base::AsAtomic32::Acquire_Load(cell);
     350             :     return *cell;
     351             :   }
     352             : 
     353             :   void StoreCell(uint32_t* cell, uint32_t value) {
     354             :     base::AsAtomic32::Release_Store(cell, value);
     355             :   }
     356             : 
     357             :   void ClearCellBits(uint32_t* cell, uint32_t mask) {
     358    20484096 :     base::AsAtomic32::SetBits(cell, 0u, mask);
     359             :   }
     360             : 
     361             :   template <AccessMode access_mode = AccessMode::ATOMIC>
     362             :   void SetCellBits(uint32_t* cell, uint32_t mask) {
     363             :     if (access_mode == AccessMode::ATOMIC) {
     364    89928973 :       base::AsAtomic32::SetBits(cell, mask, mask);
     365             :     } else {
     366     7245087 :       *cell = (*cell & ~mask) | mask;
     367             :     }
     368             :   }
     369             : 
     370             :   // Converts the slot offset into bucket/cell/bit index.
     371             :   void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
     372             :                      int* bit_index) {
     373             :     DCHECK_EQ(slot_offset % kPointerSize, 0);
     374   227070670 :     int slot = slot_offset >> kPointerSizeLog2;
     375             :     DCHECK(slot >= 0 && slot <= kMaxSlots);
     376   227070670 :     *bucket_index = slot >> kBitsPerBucketLog2;
     377   227070670 :     *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
     378   227070670 :     *bit_index = slot & (kBitsPerCell - 1);
     379             :   }
     380             : 
     381             :   Bucket buckets_[kBuckets];
     382             :   Address page_start_;
     383             :   base::Mutex to_be_freed_buckets_mutex_;
     384             :   std::stack<uint32_t*> to_be_freed_buckets_;
     385             : };
     386             : 
// Kind of pointer recorded in a TypedSlotSet entry. CLEARED_SLOT marks a
// deleted entry (see TypedSlot::Clear); the other kinds distinguish what the
// recorded address points at — presumably embedded objects, plain object
// slots, code targets, and code entries respectively (TODO confirm against
// the users of this enum).
enum SlotType {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  CLEARED_SLOT
};
     394             : 
     395             : // Data structure for maintaining a multiset of typed slots in a page.
     396             : // Typed slots can only appear in Code and JSFunction objects, so
     397             : // the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
     398             : // The implementation is a chain of chunks, where each chunks is an array of
     399             : // encoded (slot type, slot offset) pairs.
     400             : // There is no duplicate detection and we do not expect many duplicates because
     401             : // typed slots contain V8 internal pointers that are not directly exposed to JS.
     402             : class TypedSlotSet {
     403             :  public:
  // PREFREE_EMPTY_CHUNKS: chunks found empty during iteration are unlinked
  // and queued for deferred deletion; KEEP_EMPTY_CHUNKS: they stay linked.
  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  // Decoded form of TypedSlot::type_and_offset_.
  typedef std::pair<SlotType, uint32_t> TypeAndOffset;
     407             : 
  // One recorded typed slot: the SlotType and the slot offset are packed
  // into type_and_offset_ (see OffsetField/TypeField), and the offset of the
  // host object is stored in host_offset_. All field accesses use
  // acquire/release atomics so entries can be cleared concurrently with
  // iteration.
  struct TypedSlot {
    TypedSlot() : type_and_offset_(0), host_offset_(0) {}

    TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
        : type_and_offset_(TypeField::encode(type) |
                           OffsetField::encode(offset)),
          host_offset_(host_offset) {}

    bool operator==(const TypedSlot other) {
      return type_and_offset() == other.type_and_offset() &&
             host_offset() == other.host_offset();
    }

    bool operator!=(const TypedSlot other) { return !(*this == other); }

    SlotType type() const { return TypeField::decode(type_and_offset()); }

    uint32_t offset() const { return OffsetField::decode(type_and_offset()); }

    // Decodes type and offset from a single atomic load so the pair is
    // consistent even under concurrent Clear().
    TypeAndOffset GetTypeAndOffset() const {
      uint32_t t_and_o = type_and_offset();
      return std::make_pair(TypeField::decode(t_and_o),
                            OffsetField::decode(t_and_o));
    }

    uint32_t type_and_offset() const {
      return base::AsAtomic32::Acquire_Load(&type_and_offset_);
    }

    uint32_t host_offset() const {
      return base::AsAtomic32::Acquire_Load(&host_offset_);
    }

    void Set(TypedSlot slot) {
      base::AsAtomic32::Release_Store(&type_and_offset_,
                                      slot.type_and_offset());
      base::AsAtomic32::Release_Store(&host_offset_, slot.host_offset());
    }

    // Marks the entry as deleted; cleared entries are skipped by Iterate().
    void Clear() {
      base::AsAtomic32::Release_Store(
          &type_and_offset_,
          TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
      base::AsAtomic32::Release_Store(&host_offset_, 0);
    }

    uint32_t type_and_offset_;
    uint32_t host_offset_;
  };
  // Offsets are encoded in 29 bits (see OffsetField), hence this limit.
  static const int kMaxOffset = 1 << 29;
     458             : 
  // Creates a set for the page starting at |page_start| with one initial
  // chunk of kInitialBufferSize entries.
  explicit TypedSlotSet(Address page_start)
      : page_start_(page_start), top_(new Chunk(nullptr, kInitialBufferSize)) {}
     461             : 
     462       17576 :   ~TypedSlotSet() {
     463             :     Chunk* chunk = load_top();
     464       27360 :     while (chunk != nullptr) {
     465             :       Chunk* n = chunk->next();
     466        9784 :       delete chunk;
     467             :       chunk = n;
     468             :     }
     469        8788 :     FreeToBeFreedChunks();
     470        8788 :   }
     471             : 
     472             :   // The slot offset specifies a slot at address page_start_ + offset.
     473             :   // This method can only be called on the main thread.
     474      396161 :   void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
     475             :     TypedSlot slot(type, host_offset, offset);
     476        1097 :     Chunk* top_chunk = load_top();
     477      396161 :     if (!top_chunk) {
     478             :       top_chunk = new Chunk(nullptr, kInitialBufferSize);
     479             :       set_top(top_chunk);
     480             :     }
     481      396161 :     if (!top_chunk->AddSlot(slot)) {
     482             :       Chunk* new_top_chunk =
     483             :           new Chunk(top_chunk, NextCapacity(top_chunk->capacity()));
     484        1097 :       bool added = new_top_chunk->AddSlot(slot);
     485             :       set_top(new_top_chunk);
     486             :       DCHECK(added);
     487             :       USE(added);
     488             :     }
     489      396161 :   }
     490             : 
  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address host_addr, Address slot_addr) {
  //    if (good(slot_type, slot_addr)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    STATIC_ASSERT(CLEARED_SLOT < 8);
    Chunk* chunk = load_top();
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      TypedSlot* buf = chunk->buffer();
      bool empty = true;
      for (int i = 0; i < chunk->count(); i++) {
        // Order is important here. We have to read out the slot type last to
        // observe the concurrent removal case consistently.
        Address host_addr = page_start_ + buf[i].host_offset();
        TypeAndOffset type_and_offset = buf[i].GetTypeAndOffset();
        SlotType type = type_and_offset.first;
        if (type != CLEARED_SLOT) {
          Address addr = page_start_ + type_and_offset.second;
          if (callback(type, host_addr, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            // Removal is a release-store of CLEARED_SLOT, not an unlink.
            buf[i].Clear();
          }
        }
      }

      Chunk* n = chunk->next();
      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point its next
        // chunk to allow concurrent iteration.
        if (previous) {
          previous->set_next(n);
        } else {
          set_top(n);
        }
        // Deferred deletion: the chunk is freed later by
        // FreeToBeFreedChunks().
        base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
        to_be_freed_chunks_.push(chunk);
      } else {
        previous = chunk;
      }
      chunk = n;
    }
    return new_count;
  }
     544             : 
     545       10067 :   void FreeToBeFreedChunks() {
     546       10067 :     base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
     547       10067 :     while (!to_be_freed_chunks_.empty()) {
     548           0 :       Chunk* top = to_be_freed_chunks_.top();
     549             :       to_be_freed_chunks_.pop();
     550           0 :       delete top;
     551             :     }
     552       10067 :   }
     553             : 
     554        1285 :   void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
     555        1463 :     Chunk* chunk = load_top();
     556        4033 :     while (chunk != nullptr) {
     557             :       TypedSlot* buf = chunk->buffer();
     558      135526 :       for (int i = 0; i < chunk->count(); i++) {
     559       66300 :         uint32_t host_offset = buf[i].host_offset();
     560             :         std::map<uint32_t, uint32_t>::iterator upper_bound =
     561             :             invalid_ranges.upper_bound(host_offset);
     562       66300 :         if (upper_bound == invalid_ranges.begin()) continue;
     563             :         // upper_bounds points to the invalid range after the given slot. Hence,
     564             :         // we have to go to the previous element.
     565             :         upper_bound--;
     566             :         DCHECK_LE(upper_bound->first, host_offset);
     567       29912 :         if (upper_bound->second > host_offset) {
     568        2212 :           buf[i].Clear();
     569             :         }
     570             :       }
     571             :       chunk = chunk->next();
     572             :     }
     573        1285 :   }
     574             : 
     575             :  private:
  // Chunk buffers start at kInitialBufferSize slots and grow by doubling
  // (see NextCapacity) until they saturate at kMaxBufferSize slots.
  static const int kInitialBufferSize = 100;
  static const int kMaxBufferSize = 16 * KB;
     578             : 
     579             :   static int NextCapacity(int capacity) {
     580        1097 :     return Min(kMaxBufferSize, capacity * 2);
     581             :   }
     582             : 
  // Bit-field layout used to pack a slot's type and offset into a single
  // 32-bit word: the offset occupies the low 29 bits, the SlotType the
  // top 3 bits.
  class OffsetField : public BitField<int, 0, 29> {};
  class TypeField : public BitField<SlotType, 29, 3> {};
     585             : 
     586             :   struct Chunk : Malloced {
     587             :     explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
     588       10155 :       next_ = next_chunk;
     589       10155 :       buffer_ = NewArray<TypedSlot>(chunk_capacity);
     590       10155 :       capacity_ = chunk_capacity;
     591       10155 :       count_ = 0;
     592             :     }
     593             : 
     594        9784 :     ~Chunk() { DeleteArray(buffer_); }
     595             : 
     596     1190677 :     bool AddSlot(TypedSlot slot) {
     597             :       int current_count = count();
     598      397258 :       if (current_count == capacity()) return false;
     599             :       TypedSlot* current_buffer = buffer();
     600             :       // Order is important here. We have to write the slot first before
     601             :       // increasing the counter to guarantee that a consistent state is
     602             :       // observed by concurrent threads.
     603      396161 :       current_buffer[current_count].Set(slot);
     604      396161 :       set_count(current_count + 1);
     605      396161 :       return true;
     606             :     }
     607             : 
     608       17007 :     Chunk* next() const { return base::AsAtomicPointer::Acquire_Load(&next_); }
     609             : 
     610             :     void set_next(Chunk* n) {
     611           0 :       return base::AsAtomicPointer::Release_Store(&next_, n);
     612             :     }
     613             : 
     614             :     TypedSlot* buffer() const { return buffer_; }
     615             : 
     616             :     int32_t capacity() const { return capacity_; }
     617             : 
     618      738066 :     int32_t count() const { return base::AsAtomic32::Acquire_Load(&count_); }
     619             : 
     620             :     void set_count(int32_t new_value) {
     621             :       base::AsAtomic32::Release_Store(&count_, new_value);
     622             :     }
     623             : 
     624             :    private:
     625             :     Chunk* next_;
     626             :     TypedSlot* buffer_;
     627             :     int32_t capacity_;
     628             :     int32_t count_;
     629             :   };
     630             : 
     631      411216 :   Chunk* load_top() { return base::AsAtomicPointer::Acquire_Load(&top_); }
     632             : 
     633             :   void set_top(Chunk* c) { base::AsAtomicPointer::Release_Store(&top_, c); }
     634             : 
     635             :   Address page_start_;
     636             :   Chunk* top_;
     637             :   base::Mutex to_be_freed_chunks_mutex_;
     638             :   std::stack<Chunk*> to_be_freed_chunks_;
     639             : };
     640             : 
     641             : }  // namespace internal
     642             : }  // namespace v8
     643             : 
     644             : #endif  // V8_SLOT_SET_H

Generated by: LCOV version 1.10