/src/abseil-cpp/absl/container/internal/raw_hash_set.cc
Line  | Count  | Source  | 
1  |  | // Copyright 2018 The Abseil Authors.  | 
2  |  | //  | 
3  |  | // Licensed under the Apache License, Version 2.0 (the "License");  | 
4  |  | // you may not use this file except in compliance with the License.  | 
5  |  | // You may obtain a copy of the License at  | 
6  |  | //  | 
7  |  | //      https://www.apache.org/licenses/LICENSE-2.0  | 
8  |  | //  | 
9  |  | // Unless required by applicable law or agreed to in writing, software  | 
10  |  | // distributed under the License is distributed on an "AS IS" BASIS,  | 
11  |  | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  | 
12  |  | // See the License for the specific language governing permissions and  | 
13  |  | // limitations under the License.  | 
14  |  |  | 
15  |  | #include "absl/container/internal/raw_hash_set.h"  | 
16  |  |  | 
17  |  | #include <atomic>  | 
18  |  | #include <cassert>  | 
19  |  | #include <cstddef>  | 
20  |  | #include <cstdint>  | 
21  |  | #include <cstring>  | 
22  |  | #include <tuple>  | 
23  |  | #include <utility>  | 
24  |  |  | 
25  |  | #include "absl/base/attributes.h"  | 
26  |  | #include "absl/base/config.h"  | 
27  |  | #include "absl/base/dynamic_annotations.h"  | 
28  |  | #include "absl/base/internal/endian.h"  | 
29  |  | #include "absl/base/internal/raw_logging.h"  | 
30  |  | #include "absl/base/optimization.h"  | 
31  |  | #include "absl/container/internal/container_memory.h"  | 
32  |  | #include "absl/container/internal/hashtable_control_bytes.h"  | 
33  |  | #include "absl/container/internal/hashtablez_sampler.h"  | 
34  |  | #include "absl/container/internal/raw_hash_set_resize_impl.h"  | 
35  |  | #include "absl/functional/function_ref.h"  | 
36  |  | #include "absl/hash/hash.h"  | 
37  |  |  | 
38  |  | namespace absl { | 
39  |  | ABSL_NAMESPACE_BEGIN  | 
40  |  | namespace container_internal { | 
41  |  |  | 
42  |  | // Represents a control byte corresponding to a full slot with arbitrary hash.  | 
43  | 0  | constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); } | 
44  |  |  | 
45  |  | // A single control byte for default-constructed iterators. We leave it  | 
46  |  | // uninitialized because reading this memory is a bug.  | 
47  |  | ABSL_DLL ctrl_t kDefaultIterControl;  | 
48  |  |  | 
49  |  | // We need one full byte followed by a sentinel byte for iterator::operator++.  | 
50  |  | ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[2] = {ZeroCtrlT(), | 
51  |  |                                                         ctrl_t::kSentinel};  | 
52  |  |  | 
53  |  | namespace { | 
54  |  |  | 
55  |  | #ifdef ABSL_SWISSTABLE_ASSERT  | 
56  |  | #error ABSL_SWISSTABLE_ASSERT cannot be directly set  | 
57  |  | #else  | 
58  |  | // We use this macro for assertions that users may see when the table is in an  | 
59  |  | // invalid state that sanitizers may help diagnose.  | 
60  |  | #define ABSL_SWISSTABLE_ASSERT(CONDITION) \  | 
61  | 40.4M  |   assert((CONDITION) && "Try enabling sanitizers.")  | 
62  |  | #endif  | 
63  |  |  | 
64  | 0  | [[noreturn]] ABSL_ATTRIBUTE_NOINLINE void HashTableSizeOverflow() { | 
65  | 0  |   ABSL_RAW_LOG(FATAL, "Hash table size overflow");  | 
66  | 0  | }  | 
67  |  |  | 
68  | 0  | void ValidateMaxSize(size_t size, size_t slot_size) { | 
69  | 0  |   if (IsAboveValidSize(size, slot_size)) { | 
70  | 0  |     HashTableSizeOverflow();  | 
71  | 0  |   }  | 
72  | 0  | }  | 
73  |  |  | 
74  |  | // Returns "random" seed.  | 
75  | 0  | inline size_t RandomSeed() { | 
76  | 0  | #ifdef ABSL_HAVE_THREAD_LOCAL  | 
77  | 0  |   static thread_local size_t counter = 0;  | 
78  | 0  |   size_t value = ++counter;  | 
79  |  | #else   // ABSL_HAVE_THREAD_LOCAL  | 
80  |  |   static std::atomic<size_t> counter(0);  | 
81  |  |   size_t value = counter.fetch_add(1, std::memory_order_relaxed);  | 
82  |  | #endif  // ABSL_HAVE_THREAD_LOCAL  | 
83  | 0  |   return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));  | 
84  | 0  | }  | 
85  |  |  | 
86  | 0  | bool ShouldRehashForBugDetection(size_t capacity) { | 
87  |  |   // Note: we can't use the abseil-random library because abseil-random  | 
88  |  |   // depends on swisstable. We want to return true with probability  | 
89  |  |   // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,  | 
90  |  |   // we probe based on a random hash and see if the offset is less than  | 
91  |  |   // RehashProbabilityConstant().  | 
92  | 0  |   return probe(capacity, absl::HashOf(RandomSeed())).offset() <  | 
93  | 0  |          RehashProbabilityConstant();  | 
94  | 0  | }  | 
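The comment above compresses a small probability argument; the following standalone sketch (not library code; `K` is a stand-in for RehashProbabilityConstant(), and a roughly uniform first probe offset is assumed) spells out the arithmetic.

#include <cstddef>

// Illustrative only: if the first probe offset is ~uniform over the
// capacity + 1 positions, then `offset < K` holds with probability about
// K / (capacity + 1), i.e. roughly min(1, K / capacity) as intended.
constexpr double ApproxRehashProbability(std::size_t K, std::size_t capacity) {
  return K >= capacity
             ? 1.0
             : static_cast<double>(K) / static_cast<double>(capacity + 1);
}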
95  |  |  | 
96  |  | // Finds a non-deterministic hash for a single-group table.  | 
97  |  | // The last two bits are used to find a position for a newly inserted element  | 
98  |  | // after resize.  | 
99  |  | // This function basically uses the last (H2) bits to save on a shift operation.  | 
100  | 123k  | size_t SingleGroupTableH1(size_t hash, PerTableSeed seed) { | 
101  | 123k  |   return hash ^ seed.seed();  | 
102  | 123k  | }  | 
103  |  |  | 
104  |  | // Returns the offset of the new element after resize from capacity 1 to 3.  | 
105  | 56.7k  | size_t Resize1To3NewOffset(size_t hash, PerTableSeed seed) { | 
106  |  |   // After resize from capacity 1 to 3, we always have exactly the slot with  | 
107  |  |   // index 1 occupied, so we need to insert either at index 0 or index 2.  | 
108  | 56.7k  |   static_assert(SooSlotIndex() == 1);  | 
109  | 56.7k  |   return SingleGroupTableH1(hash, seed) & 2;  | 
110  | 56.7k  | }  | 
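A tiny illustration of the offset selection above, under the assumptions stated in the function (capacity 3, the existing element at SooSlotIndex() == 1); the hypothetical helper below only demonstrates what `& 2` produces.

#include <cstddef>

// Illustrative only: with the old element fixed at index 1, the new element
// must land at index 0 or index 2. Masking with 2 keeps only bit 1 of the
// seeded hash, so the result is always exactly 0 or 2.
constexpr std::size_t ToyResize1To3NewOffset(std::size_t seeded_hash) {
  return seeded_hash & 2;  // e.g. 0b...00 -> 0, 0b...10 -> 2, 0b...11 -> 2
}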
111  |  |  | 
112  |  | // Returns the address of the ith slot in slots where each slot occupies  | 
113  |  | // slot_size.  | 
114  | 295k  | inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) { | 
115  | 295k  |   return static_cast<void*>(static_cast<char*>(slot_array) +  | 
116  | 295k  |                             (slot * slot_size));  | 
117  | 295k  | }  | 
118  |  |  | 
119  |  | // Returns the address of the slot `i` iterations after `slot` assuming each  | 
120  |  | // slot has the specified size.  | 
121  | 67.1k  | inline void* NextSlot(void* slot, size_t slot_size, size_t i = 1) { | 
122  | 67.1k  |   return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) +  | 
123  | 67.1k  |                                  slot_size * i);  | 
124  | 67.1k  | }  | 
125  |  |  | 
126  |  | // Returns the address of the slot just before `slot` assuming each slot has the  | 
127  |  | // specified size.  | 
128  | 0  | inline void* PrevSlot(void* slot, size_t slot_size) { | 
129  | 0  |   return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);  | 
130  | 0  | }  | 
131  |  |  | 
132  |  | }  // namespace  | 
133  |  |  | 
134  | 0  | GenerationType* EmptyGeneration() { | 
135  | 0  |   if (SwisstableGenerationsEnabled()) { | 
136  | 0  |     constexpr size_t kNumEmptyGenerations = 1024;  | 
137  | 0  |     static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{}; | 
138  | 0  |     return const_cast<GenerationType*>(  | 
139  | 0  |         &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);  | 
140  | 0  |   }  | 
141  | 0  |   return nullptr;  | 
142  | 0  | }  | 
143  |  |  | 
144  |  | bool CommonFieldsGenerationInfoEnabled::  | 
145  | 0  |     should_rehash_for_bug_detection_on_insert(size_t capacity) const { | 
146  | 0  |   if (reserved_growth_ == kReservedGrowthJustRanOut) return true;  | 
147  | 0  |   if (reserved_growth_ > 0) return false;  | 
148  | 0  |   return ShouldRehashForBugDetection(capacity);  | 
149  | 0  | }  | 
150  |  |  | 
151  |  | bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(  | 
152  | 0  |     size_t capacity) const { | 
153  | 0  |   return ShouldRehashForBugDetection(capacity);  | 
154  | 0  | }  | 
155  |  |  | 
156  |  | namespace { | 
157  |  |  | 
158  |  | // Probes an array of control bits using a probe sequence,  | 
159  |  | // and returns the mask corresponding to the first group with a deleted or empty  | 
160  |  | // slot.  | 
161  |  | inline Group::NonIterableBitMaskType probe_till_first_non_full_group(  | 
162  |  |     const ctrl_t* ctrl, probe_seq<Group::kWidth>& seq,  | 
163  | 79.9k  |     [[maybe_unused]] size_t capacity) { | 
164  | 82.6k  |   while (true) { | 
165  | 82.6k  |     GroupFullEmptyOrDeleted g{ctrl + seq.offset()}; | 
166  | 82.6k  |     auto mask = g.MaskEmptyOrDeleted();  | 
167  | 82.6k  |     if (mask) { | 
168  | 79.9k  |       return mask;  | 
169  | 79.9k  |     }  | 
170  | 2.62k  |     seq.next();  | 
171  | 2.62k  |     ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity && "full table!");  | 
172  | 2.62k  |   }  | 
173  | 79.9k  | }  | 
174  |  |  | 
175  |  | FindInfo find_first_non_full_from_h1(const ctrl_t* ctrl, size_t h1,  | 
176  | 166k  |                                      size_t capacity) { | 
177  | 166k  |   auto seq = probe_h1(capacity, h1);  | 
178  | 166k  |   if (IsEmptyOrDeleted(ctrl[seq.offset()])) { | 
179  | 86.8k  |     return {seq.offset(), /*probe_length=*/0}; | 
180  | 86.8k  |   }  | 
181  | 79.9k  |   auto mask = probe_till_first_non_full_group(ctrl, seq, capacity);  | 
182  | 79.9k  |   return {seq.offset(mask.LowestBitSet()), seq.index()}; | 
183  | 166k  | }  | 
184  |  |  | 
185  |  | // Probes an array of control bits using a probe sequence derived from `hash`,  | 
186  |  | // and returns the offset corresponding to the first deleted or empty slot.  | 
187  |  | //  | 
188  |  | // Behavior when the entire table is full is undefined.  | 
189  |  | //  | 
190  |  | // NOTE: this function must work with tables having both empty and deleted  | 
191  |  | // slots in the same group. Such tables appear during `erase()`.  | 
192  | 104k  | FindInfo find_first_non_full(const CommonFields& common, size_t hash) { | 
193  | 104k  |   return find_first_non_full_from_h1(common.control(), H1(hash),  | 
194  | 104k  |                                      common.capacity());  | 
195  | 104k  | }  | 
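For readers unfamiliar with the probing scheme used by `probe`/`probe_h1`, here is a self-contained sketch of group-wise triangular probing; it assumes a 16-wide group and a capacity of the form 2^k - 1, and is an illustration rather than the exact probe_seq implementation.

#include <cstddef>

// Illustrative only: visit group-sized strides h1, h1 + 16, h1 + 16 + 32, ...
// modulo (capacity + 1). With power-of-two table sizes this triangular
// sequence eventually covers every group, so a group containing an empty or
// deleted slot is always found unless the table is completely full.
struct ToyProbeSeq {
  std::size_t mask;    // capacity, e.g. 2^k - 1
  std::size_t offset;  // current probe position
  std::size_t index = 0;

  ToyProbeSeq(std::size_t h1, std::size_t capacity)
      : mask(capacity), offset(h1 & capacity) {}

  void next() {
    index += 16;  // assumed Group::kWidth
    offset = (offset + index) & mask;
  }
};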
196  |  |  | 
197  |  | // Same as `find_first_non_full`, but returns the mask corresponding to the  | 
198  |  | // first group with a deleted or empty slot.  | 
199  |  | std::pair<FindInfo, Group::NonIterableBitMaskType> find_first_non_full_group(  | 
200  | 0  |     const CommonFields& common, size_t hash) { | 
201  | 0  |   auto seq = probe(common, hash);  | 
202  | 0  |   auto mask =  | 
203  | 0  |       probe_till_first_non_full_group(common.control(), seq, common.capacity());  | 
204  | 0  |   return {{seq.offset(), seq.index()}, mask}; | 
205  | 0  | }  | 
206  |  |  | 
207  |  | // Whether a table fits in half a group. A half-group table fits entirely into a  | 
208  |  | // probing group, i.e., has a capacity < `Group::kWidth`.  | 
209  |  | //  | 
210  |  | // In half-group mode we are able to use the whole capacity. The extra control  | 
211  |  | // bytes give us at least one "empty" control byte to stop the iteration.  | 
212  |  | // This is important to make 1 a valid capacity.  | 
213  |  | //  | 
214  |  | // In half-group mode only the first `capacity` control bytes after the sentinel  | 
215  |  | // are valid. The rest contain dummy ctrl_t::kEmpty values that do not  | 
216  |  | // represent a real slot.  | 
217  | 0  | constexpr bool is_half_group(size_t capacity) { | 
218  | 0  |   return capacity < Group::kWidth - 1;  | 
219  | 0  | }  | 
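As a concrete illustration of the definition above (assuming Group::kWidth == 16, so the threshold is 15): the valid power-of-two-minus-one capacities that are half-group are 1, 3 and 7.

#include <cstddef>

// Illustrative only, assuming Group::kWidth == 16.
constexpr bool ToyIsHalfGroup(std::size_t capacity) { return capacity < 15; }
static_assert(ToyIsHalfGroup(1) && ToyIsHalfGroup(3) && ToyIsHalfGroup(7), "");
static_assert(!ToyIsHalfGroup(15), "a 15-slot table spans a full group");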
220  |  |  | 
221  |  | template <class Fn>  | 
222  | 0  | void IterateOverFullSlotsImpl(const CommonFields& c, size_t slot_size, Fn cb) { | 
223  | 0  |   const size_t cap = c.capacity();  | 
224  | 0  |   ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(cap));  | 
225  | 0  |   const ctrl_t* ctrl = c.control();  | 
226  | 0  |   void* slot = c.slot_array();  | 
227  | 0  |   if (is_half_group(cap)) { | 
228  |  |     // Mirrored/cloned control bytes in a half-group table are also located in  | 
229  |  |     // the first group (starting from position 0). We take the group from  | 
230  |  |     // position `capacity` in order to avoid duplicates.  | 
231  |  |  | 
232  |  |     // A half-group table's capacity fits into a portable group, where  | 
233  |  |     // GroupPortableImpl::MaskFull is more efficient for  | 
234  |  |     // capacity <= GroupPortableImpl::kWidth.  | 
235  | 0  |     ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&  | 
236  | 0  |                            "unexpectedly large half-group capacity");  | 
237  | 0  |     static_assert(Group::kWidth >= GroupPortableImpl::kWidth,  | 
238  | 0  |                   "unexpected group width");  | 
239  |  |     // The group starts from the kSentinel slot, so indices in the mask are  | 
240  |  |     // increased by 1.  | 
241  | 0  |     const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();  | 
242  | 0  |     --ctrl;  | 
243  | 0  |     slot = PrevSlot(slot, slot_size);  | 
244  | 0  |     for (uint32_t i : mask) { | 
245  | 0  |       cb(ctrl + i, SlotAddress(slot, i, slot_size));  | 
246  | 0  |     }  | 
247  | 0  |     return;  | 
248  | 0  |   }  | 
249  | 0  |   size_t remaining = c.size();  | 
250  | 0  |   ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;  | 
251  | 0  |   while (remaining != 0) { | 
252  | 0  |     for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) { | 
253  | 0  |       ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&  | 
254  | 0  |                              "hash table was modified unexpectedly");  | 
255  | 0  |       cb(ctrl + i, SlotAddress(slot, i, slot_size));  | 
256  | 0  |       --remaining;  | 
257  | 0  |     }  | 
258  | 0  |     ctrl += Group::kWidth;  | 
259  | 0  |     slot = NextSlot(slot, slot_size, Group::kWidth);  | 
260  | 0  |     ABSL_SWISSTABLE_ASSERT(  | 
261  | 0  |         (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&  | 
262  | 0  |         "hash table was modified unexpectedly");  | 
263  | 0  |   }  | 
264  |  |   // NOTE: the callback is allowed to erase the current element; this is used  | 
265  |  |   // by the absl::erase_if specialization. So we use `>=`.  | 
266  | 0  |   ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&  | 
267  | 0  |                          "hash table was modified unexpectedly");  | 
268  | 0  | }  | 
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)> >(absl::container_internal::CommonFields const&, unsigned long, absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)>)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0>(absl::container_internal::CommonFields const&, unsigned long, absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0)
269  |  |  | 
270  |  | }  // namespace  | 
271  |  |  | 
272  | 0  | void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) { | 
273  | 0  |   ABSL_SWISSTABLE_ASSERT(ctrl[capacity] == ctrl_t::kSentinel);  | 
274  | 0  |   ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));  | 
275  | 0  |   for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) { | 
276  | 0  |     Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); | 
277  | 0  |   }  | 
278  |  |   // Copy the cloned ctrl bytes.  | 
279  | 0  |   std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());  | 
280  | 0  |   ctrl[capacity] = ctrl_t::kSentinel;  | 
281  | 0  | }  | 
282  |  |  | 
283  |  | void IterateOverFullSlots(const CommonFields& c, size_t slot_size,  | 
284  | 0  |                           absl::FunctionRef<void(const ctrl_t*, void*)> cb) { | 
285  | 0  |   IterateOverFullSlotsImpl(c, slot_size, cb);  | 
286  | 0  | }  | 
287  |  |  | 
288  |  | namespace { | 
289  |  |  | 
290  | 406k  | void ResetGrowthLeft(GrowthInfo& growth_info, size_t capacity, size_t size) { | 
291  | 406k  |   growth_info.InitGrowthLeftNoDeleted(CapacityToGrowth(capacity) - size);  | 
292  | 406k  | }  | 
293  |  |  | 
294  | 234k  | void ResetGrowthLeft(CommonFields& common) { | 
295  | 234k  |   ResetGrowthLeft(common.growth_info(), common.capacity(), common.size());  | 
296  | 234k  | }  | 
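As a hedged aside on the arithmetic behind CapacityToGrowth (the library's exact small-capacity handling differs): the table targets a maximum load factor of about 7/8, so the growth budget is roughly capacity minus capacity / 8.

#include <cstddef>

// Illustrative approximation only, not the library's exact CapacityToGrowth:
// with a ~7/8 maximum load factor, a table of capacity 2^k - 1 can hold about
// capacity - capacity / 8 elements before it must grow (63 -> 56, 127 -> 112),
// and growth_left starts at that budget minus the current size.
constexpr std::size_t ApproxCapacityToGrowth(std::size_t capacity) {
  return capacity - capacity / 8;
}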
297  |  |  | 
298  |  | // Finds an empty slot that is guaranteed to exist, from the given position.  | 
299  |  | // NOTE: this function is almost never triggered inside of  | 
300  |  | // DropDeletesWithoutResize, so we keep it simple.  | 
301  |  | // The table is rather sparse, so an empty slot will be found very quickly.  | 
302  | 0  | size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) { | 
303  | 0  |   for (size_t i = start; i < end; ++i) { | 
304  | 0  |     if (IsEmpty(ctrl[i])) { | 
305  | 0  |       return i;  | 
306  | 0  |     }  | 
307  | 0  |   }  | 
308  | 0  |   ABSL_UNREACHABLE();  | 
309  | 0  | }  | 
310  |  |  | 
311  |  | // Finds a full slot that is guaranteed to exist, starting from the given position.  | 
312  |  | // NOTE: this function is only triggered for rehash(0), when we need to  | 
313  |  | // go back to SOO state, so we keep it simple.  | 
314  | 0  | size_t FindFirstFullSlot(size_t start, size_t end, const ctrl_t* ctrl) { | 
315  | 0  |   for (size_t i = start; i < end; ++i) { | 
316  | 0  |     if (IsFull(ctrl[i])) { | 
317  | 0  |       return i;  | 
318  | 0  |     }  | 
319  | 0  |   }  | 
320  | 0  |   ABSL_UNREACHABLE();  | 
321  | 0  | }  | 
322  |  |  | 
323  | 9.50M  | void PrepareInsertCommon(CommonFields& common) { | 
324  | 9.50M  |   common.increment_size();  | 
325  | 9.50M  |   common.maybe_increment_generation_on_insert();  | 
326  | 9.50M  | }  | 
327  |  |  | 
328  |  | // Sets sanitizer poisoning for slot corresponding to control byte being set.  | 
329  |  | inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,  | 
330  | 9.50M  |                                 size_t slot_size) { | 
331  | 9.50M  |   ABSL_SWISSTABLE_ASSERT(i < c.capacity());  | 
332  | 9.50M  |   auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;  | 
333  | 9.50M  |   if (IsFull(h)) { | 
334  | 9.50M  |     SanitizerUnpoisonMemoryRegion(slot_i, slot_size);  | 
335  | 9.50M  |   } else { | 
336  | 0  |     SanitizerPoisonMemoryRegion(slot_i, slot_size);  | 
337  | 0  |   }  | 
338  | 9.50M  | }  | 
339  |  |  | 
340  |  | // Sets `ctrl[i]` to `h`.  | 
341  |  | //  | 
342  |  | // Unlike setting it directly, this function will perform bounds checks and  | 
343  |  | // mirror the value to the cloned tail if necessary.  | 
344  |  | inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,  | 
345  | 9.27M  |                     size_t slot_size) { | 
346  | 9.27M  |   ABSL_SWISSTABLE_ASSERT(!c.is_small());  | 
347  | 9.27M  |   DoSanitizeOnSetCtrl(c, i, h, slot_size);  | 
348  | 9.27M  |   ctrl_t* ctrl = c.control();  | 
349  | 9.27M  |   ctrl[i] = h;  | 
350  | 9.27M  |   ctrl[((i - NumClonedBytes()) & c.capacity()) +  | 
351  | 9.27M  |        (NumClonedBytes() & c.capacity())] = h;  | 
352  | 9.27M  | }  | 
353  |  | // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.  | 
354  | 9.27M  | inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) { | 
355  | 9.27M  |   SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);  | 
356  | 9.27M  | }  | 
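The mirroring expression in SetCtrl is compact; the hypothetical helper below works through what it computes, under the assumptions NumClonedBytes() == 15 (16-wide groups) and capacity >= 15.

#include <cstddef>

// Illustrative only: the cloned tail ctrl[capacity + 1 .. capacity + 15]
// mirrors ctrl[0 .. 14] so that group loads that wrap past the sentinel still
// see the right bytes. This computes the second index SetCtrl writes to.
constexpr std::size_t ToyMirroredIndex(std::size_t i, std::size_t capacity) {
  return ((i - 15) & capacity) + (15 & capacity);
}
// ToyMirroredIndex(3, 31)  == 35 == 31 + 1 + 3   (cloned copy of slot 3)
// ToyMirroredIndex(20, 31) == 20                 (slots >= 15 write to themselves)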
357  |  |  | 
358  |  | // Like SetCtrl, but in a single group table, we can save some operations when  | 
359  |  | // setting the cloned control byte.  | 
360  |  | inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,  | 
361  | 67.1k  |                                       size_t slot_size) { | 
362  | 67.1k  |   ABSL_SWISSTABLE_ASSERT(!c.is_small());  | 
363  | 67.1k  |   ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity()));  | 
364  | 67.1k  |   DoSanitizeOnSetCtrl(c, i, h, slot_size);  | 
365  | 67.1k  |   ctrl_t* ctrl = c.control();  | 
366  | 67.1k  |   ctrl[i] = h;  | 
367  | 67.1k  |   ctrl[i + c.capacity() + 1] = h;  | 
368  | 67.1k  | }  | 
369  |  | // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.  | 
370  |  | inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,  | 
371  | 67.1k  |                                       size_t slot_size) { | 
372  | 67.1k  |   SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);  | 
373  | 67.1k  | }  | 
374  |  |  | 
375  |  | // Like SetCtrl, but in a table with capacity >= Group::kWidth - 1,  | 
376  |  | // we can save some operations when setting the cloned control byte.  | 
377  |  | inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, ctrl_t h,  | 
378  | 166k  |                                 size_t slot_size) { | 
379  | 166k  |   ABSL_SWISSTABLE_ASSERT(c.capacity() >= Group::kWidth - 1);  | 
380  | 166k  |   DoSanitizeOnSetCtrl(c, i, h, slot_size);  | 
381  | 166k  |   ctrl_t* ctrl = c.control();  | 
382  | 166k  |   ctrl[i] = h;  | 
383  | 166k  |   ctrl[((i - NumClonedBytes()) & c.capacity()) + NumClonedBytes()] = h;  | 
384  | 166k  | }  | 
385  |  | // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.  | 
386  |  | inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, h2_t h,  | 
387  | 166k  |                                 size_t slot_size) { | 
388  | 166k  |   SetCtrlInLargeTable(c, i, static_cast<ctrl_t>(h), slot_size);  | 
389  | 166k  | }  | 
390  |  |  | 
391  |  | size_t DropDeletesWithoutResizeAndPrepareInsert(  | 
392  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
393  | 0  |     size_t new_hash) { | 
394  | 0  |   void* set = &common;  | 
395  | 0  |   void* slot_array = common.slot_array();  | 
396  | 0  |   const size_t capacity = common.capacity();  | 
397  | 0  |   ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));  | 
398  | 0  |   ABSL_SWISSTABLE_ASSERT(!is_single_group(capacity));  | 
399  |  |   // Algorithm:  | 
400  |  |   // - mark all DELETED slots as EMPTY  | 
401  |  |   // - mark all FULL slots as DELETED  | 
402  |  |   // - for each slot marked as DELETED  | 
403  |  |   //     hash = Hash(element)  | 
404  |  |   //     target = find_first_non_full(hash)  | 
405  |  |   //     if target is in the same group  | 
406  |  |   //       mark slot as FULL  | 
407  |  |   //     else if target is EMPTY  | 
408  |  |   //       transfer element to target  | 
409  |  |   //       mark slot as EMPTY  | 
410  |  |   //       mark target as FULL  | 
411  |  |   //     else if target is DELETED  | 
412  |  |   //       swap current element with target element  | 
413  |  |   //       mark target as FULL  | 
414  |  |   //       repeat procedure for current slot with moved from element (target)  | 
415  | 0  |   ctrl_t* ctrl = common.control();  | 
416  | 0  |   ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);  | 
417  | 0  |   const void* hash_fn = policy.hash_fn(common);  | 
418  | 0  |   auto hasher = policy.hash_slot;  | 
419  | 0  |   auto transfer_n = policy.transfer_n;  | 
420  | 0  |   const size_t slot_size = policy.slot_size;  | 
421  |  |  | 
422  | 0  |   size_t total_probe_length = 0;  | 
423  | 0  |   void* slot_ptr = SlotAddress(slot_array, 0, slot_size);  | 
424  |  |  | 
425  |  |   // The index of an empty slot that can be used as temporary memory for  | 
426  |  |   // the swap operation.  | 
427  | 0  |   constexpr size_t kUnknownId = ~size_t{}; | 
428  | 0  |   size_t tmp_space_id = kUnknownId;  | 
429  |  |  | 
430  | 0  |   for (size_t i = 0; i != capacity;  | 
431  | 0  |        ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) { | 
432  | 0  |     ABSL_SWISSTABLE_ASSERT(slot_ptr == SlotAddress(slot_array, i, slot_size));  | 
433  | 0  |     if (IsEmpty(ctrl[i])) { | 
434  | 0  |       tmp_space_id = i;  | 
435  | 0  |       continue;  | 
436  | 0  |     }  | 
437  | 0  |     if (!IsDeleted(ctrl[i])) continue;  | 
438  | 0  |     const size_t hash = (*hasher)(hash_fn, slot_ptr, common.seed().seed());  | 
439  | 0  |     const FindInfo target = find_first_non_full(common, hash);  | 
440  | 0  |     const size_t new_i = target.offset;  | 
441  | 0  |     total_probe_length += target.probe_length;  | 
442  |  |  | 
443  |  |     // Check whether the old and new i fall within the same group wrt the hash.  | 
444  |  |     // If they do, we don't need to move the object, as it already falls in the  | 
445  |  |     // best probe group we can achieve.  | 
446  | 0  |     const size_t probe_offset = probe(common, hash).offset();  | 
447  | 0  |     const h2_t h2 = H2(hash);  | 
448  | 0  |     const auto probe_index = [probe_offset, capacity](size_t pos) { | 
449  | 0  |       return ((pos - probe_offset) & capacity) / Group::kWidth;  | 
450  | 0  |     };  | 
451  |  |  | 
452  |  |     // Element doesn't move.  | 
453  | 0  |     if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { | 
454  | 0  |       SetCtrlInLargeTable(common, i, h2, slot_size);  | 
455  | 0  |       continue;  | 
456  | 0  |     }  | 
457  |  |  | 
458  | 0  |     void* new_slot_ptr = SlotAddress(slot_array, new_i, slot_size);  | 
459  | 0  |     if (IsEmpty(ctrl[new_i])) { | 
460  |  |       // Transfer element to the empty spot.  | 
461  |  |       // SetCtrl poisons/unpoisons the slots so we have to call it at the  | 
462  |  |       // right time.  | 
463  | 0  |       SetCtrlInLargeTable(common, new_i, h2, slot_size);  | 
464  | 0  |       (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);  | 
465  | 0  |       SetCtrlInLargeTable(common, i, ctrl_t::kEmpty, slot_size);  | 
466  |  |       // Initialize or change empty space id.  | 
467  | 0  |       tmp_space_id = i;  | 
468  | 0  |     } else { | 
469  | 0  |       ABSL_SWISSTABLE_ASSERT(IsDeleted(ctrl[new_i]));  | 
470  | 0  |       SetCtrlInLargeTable(common, new_i, h2, slot_size);  | 
471  |  |       // Until we are done rehashing, DELETED marks previously FULL slots.  | 
472  |  |  | 
473  | 0  |       if (tmp_space_id == kUnknownId) { | 
474  | 0  |         tmp_space_id = FindEmptySlot(i + 1, capacity, ctrl);  | 
475  | 0  |       }  | 
476  | 0  |       void* tmp_space = SlotAddress(slot_array, tmp_space_id, slot_size);  | 
477  | 0  |       SanitizerUnpoisonMemoryRegion(tmp_space, slot_size);  | 
478  |  |  | 
479  |  |       // Swap i and new_i elements.  | 
480  | 0  |       (*transfer_n)(set, tmp_space, new_slot_ptr, 1);  | 
481  | 0  |       (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);  | 
482  | 0  |       (*transfer_n)(set, slot_ptr, tmp_space, 1);  | 
483  |  |  | 
484  | 0  |       SanitizerPoisonMemoryRegion(tmp_space, slot_size);  | 
485  |  |  | 
486  |  |       // repeat the processing of the ith slot  | 
487  | 0  |       --i;  | 
488  | 0  |       slot_ptr = PrevSlot(slot_ptr, slot_size);  | 
489  | 0  |     }  | 
490  | 0  |   }  | 
491  |  |   // Prepare insert for the new element.  | 
492  | 0  |   PrepareInsertCommon(common);  | 
493  | 0  |   ResetGrowthLeft(common);  | 
494  | 0  |   FindInfo find_info = find_first_non_full(common, new_hash);  | 
495  | 0  |   SetCtrlInLargeTable(common, find_info.offset, H2(new_hash), slot_size);  | 
496  | 0  |   common.infoz().RecordInsert(new_hash, find_info.probe_length);  | 
497  | 0  |   common.infoz().RecordRehash(total_probe_length);  | 
498  | 0  |   return find_info.offset;  | 
499  | 0  | }  | 
500  |  |  | 
501  | 0  | bool WasNeverFull(CommonFields& c, size_t index) { | 
502  | 0  |   if (is_single_group(c.capacity())) { | 
503  | 0  |     return true;  | 
504  | 0  |   }  | 
505  | 0  |   const size_t index_before = (index - Group::kWidth) & c.capacity();  | 
506  | 0  |   const auto empty_after = Group(c.control() + index).MaskEmpty();  | 
507  | 0  |   const auto empty_before = Group(c.control() + index_before).MaskEmpty();  | 
508  |  |  | 
509  |  |   // We count how many consecutive non-empty slots we have to the right and to  | 
510  |  |   // the left of `index`. If the sum is >= kWidth then there is at least one  | 
511  |  |   // probe window that might have seen a full group.  | 
512  | 0  |   return empty_before && empty_after &&  | 
513  | 0  |          static_cast<size_t>(empty_after.TrailingZeros()) +  | 
514  | 0  |                  empty_before.LeadingZeros() <  | 
515  | 0  |              Group::kWidth;  | 
516  | 0  | }  | 
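The same tombstone decision, restated as a self-contained toy (assumptions: a plain boolean emptiness array, group width 16, capacity of the form 2^k - 1); it illustrates the rule without the group/bit-mask machinery.

#include <cstddef>

// Illustrative only: if fewer than kWidth consecutive non-empty slots surround
// `index`, no probe window over this position could ever have been completely
// full, so the erased slot may be marked kEmpty instead of kDeleted.
bool ToyWasNeverFull(const bool* is_empty, std::size_t index,
                     std::size_t capacity /* 2^k - 1 */) {
  constexpr std::size_t kWidth = 16;
  std::size_t after = 0;   // non-empty run starting at `index`
  while (after < kWidth && !is_empty[(index + after) & capacity]) ++after;
  std::size_t before = 0;  // non-empty run ending just before `index`
  while (before < kWidth && !is_empty[(index - 1 - before) & capacity]) ++before;
  return after + before < kWidth;
}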
517  |  |  | 
518  |  | // Updates the control bytes to indicate a completely empty table such that all  | 
519  |  | // control bytes are kEmpty except for the kSentinel byte.  | 
520  | 234k  | void ResetCtrl(CommonFields& common, size_t slot_size) { | 
521  | 234k  |   const size_t capacity = common.capacity();  | 
522  | 234k  |   ctrl_t* ctrl = common.control();  | 
523  | 234k  |   static constexpr size_t kTwoGroupCapacity = 2 * Group::kWidth - 1;  | 
524  | 234k  |   if (ABSL_PREDICT_TRUE(capacity <= kTwoGroupCapacity)) { | 
525  | 210k  |     if (IsSmallCapacity(capacity)) return;  | 
526  | 210k  |     std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);  | 
527  | 210k  |     std::memset(ctrl + capacity, static_cast<int8_t>(ctrl_t::kEmpty),  | 
528  | 210k  |                 Group::kWidth);  | 
529  | 210k  |     if (capacity == kTwoGroupCapacity) { | 
530  | 86.4k  |       std::memset(ctrl + Group::kWidth, static_cast<int8_t>(ctrl_t::kEmpty),  | 
531  | 86.4k  |                   Group::kWidth);  | 
532  | 86.4k  |     }  | 
533  | 210k  |   } else { | 
534  | 23.9k  |     std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),  | 
535  | 23.9k  |                 capacity + 1 + NumClonedBytes());  | 
536  | 23.9k  |   }  | 
537  | 234k  |   ctrl[capacity] = ctrl_t::kSentinel;  | 
538  | 234k  |   SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);  | 
539  | 234k  | }  | 
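A worked illustration of the memset pattern above, assuming Group::kWidth == 16 and therefore NumClonedBytes() == 15.

// Illustrative only (Group::kWidth == 16, NumClonedBytes() == 15):
//   capacity 15: memset [0, 16) plus memset [15, 31) cover all
//                capacity + 1 + NumClonedBytes() == 31 control bytes.
//   capacity 31: the two memsets cover [0, 16) and [31, 47); the third memset
//                at ctrl + kWidth fills the remaining [16, 32).
// In both cases ctrl[capacity] is then overwritten with kSentinel.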
540  |  |  | 
541  |  | // Initializes control bytes for growing from capacity 1 to 3.  | 
542  |  | // `orig_h2` is placed in the position `SooSlotIndex()`.  | 
543  |  | // `new_h2` is placed in the position `new_offset`.  | 
544  |  | ABSL_ATTRIBUTE_ALWAYS_INLINE inline void InitializeThreeElementsControlBytes(  | 
545  | 56.7k  |     h2_t orig_h2, h2_t new_h2, size_t new_offset, ctrl_t* new_ctrl) { | 
546  | 56.7k  |   static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());  | 
547  | 56.7k  |   static_assert(kNewCapacity == 3);  | 
548  | 56.7k  |   static_assert(is_single_group(kNewCapacity));  | 
549  | 56.7k  |   static_assert(SooSlotIndex() == 1);  | 
550  | 56.7k  |   ABSL_SWISSTABLE_ASSERT(new_offset == 0 || new_offset == 2);  | 
551  |  |  | 
552  | 56.7k  |   static constexpr uint64_t kEmptyXorSentinel =  | 
553  | 56.7k  |       static_cast<uint8_t>(ctrl_t::kEmpty) ^  | 
554  | 56.7k  |       static_cast<uint8_t>(ctrl_t::kSentinel);  | 
555  | 56.7k  |   static constexpr uint64_t kEmpty64 = static_cast<uint8_t>(ctrl_t::kEmpty);  | 
556  | 56.7k  |   static constexpr size_t kMirroredSooSlotIndex =  | 
557  | 56.7k  |       SooSlotIndex() + kNewCapacity + 1;  | 
558  |  |   // The first 8 bytes, where SOO slot original and mirrored positions are  | 
559  |  |   // replaced with 0.  | 
560  |  |   // Result will look like: E0ESE0EE  | 
561  | 56.7k  |   static constexpr uint64_t kFirstCtrlBytesWithZeroes =  | 
562  | 56.7k  |       k8EmptyBytes ^ (kEmpty64 << (8 * SooSlotIndex())) ^  | 
563  | 56.7k  |       (kEmptyXorSentinel << (8 * kNewCapacity)) ^  | 
564  | 56.7k  |       (kEmpty64 << (8 * kMirroredSooSlotIndex));  | 
565  |  |  | 
566  | 56.7k  |   const uint64_t soo_h2 = static_cast<uint64_t>(orig_h2);  | 
567  | 56.7k  |   const uint64_t new_h2_xor_empty =  | 
568  | 56.7k  |       static_cast<uint64_t>(new_h2 ^ static_cast<uint8_t>(ctrl_t::kEmpty));  | 
569  |  |   // Fill the original and mirrored bytes for SOO slot.  | 
570  |  |   // Result will look like:  | 
571  |  |   // EHESEHEE  | 
572  |  |   // Where H = soo_h2, E = kEmpty, S = kSentinel.  | 
573  | 56.7k  |   uint64_t first_ctrl_bytes =  | 
574  | 56.7k  |       ((soo_h2 << (8 * SooSlotIndex())) | kFirstCtrlBytesWithZeroes) |  | 
575  | 56.7k  |       (soo_h2 << (8 * kMirroredSooSlotIndex));  | 
576  |  |   // Replace original and mirrored empty bytes for the new position.  | 
577  |  |   // Result for new_offset 0 will look like:  | 
578  |  |   // NHESNHEE  | 
579  |  |   // Where H = soo_h2, N = H2(new_hash), E = kEmpty, S = kSentinel.  | 
580  |  |   // Result for new_offset 2 will look like:  | 
581  |  |   // EHNSEHNE  | 
582  | 56.7k  |   first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_offset));  | 
583  | 56.7k  |   size_t new_mirrored_offset = new_offset + kNewCapacity + 1;  | 
584  | 56.7k  |   first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_mirrored_offset));  | 
585  |  |  | 
586  |  |   // Fill last bytes with kEmpty.  | 
587  | 56.7k  |   std::memset(new_ctrl + kNewCapacity, static_cast<int8_t>(ctrl_t::kEmpty),  | 
588  | 56.7k  |               Group::kWidth);  | 
589  |  |   // Overwrite the first 8 bytes with first_ctrl_bytes.  | 
590  | 56.7k  |   absl::little_endian::Store64(new_ctrl, first_ctrl_bytes);  | 
591  |  |  | 
592  |  |   // Example for group size 16:  | 
593  |  |   // new_ctrl after 1st memset =      ???EEEEEEEEEEEEEEEE  | 
594  |  |   // new_offset 0:  | 
595  |  |   // new_ctrl after 2nd store  =      NHESNHEEEEEEEEEEEEE  | 
596  |  |   // new_offset 2:  | 
597  |  |   // new_ctrl after 2nd store  =      EHNSEHNEEEEEEEEEEEE  | 
598  |  |  | 
599  |  |   // Example for group size 8:  | 
600  |  |   // new_ctrl after 1st memset =      ???EEEEEEEE  | 
601  |  |   // new_offset 0:  | 
602  |  |   // new_ctrl after 2nd store  =      NHESNHEEEEE  | 
603  |  |   // new_offset 2:  | 
604  |  |   // new_ctrl after 2nd store  =      EHNSEHNEEEE  | 
605  | 56.7k  | }  | 
606  |  |  | 
607  |  | }  // namespace  | 
608  |  |  | 
609  | 0  | void EraseMetaOnlySmall(CommonFields& c, bool soo_enabled, size_t slot_size) { | 
610  | 0  |   ABSL_SWISSTABLE_ASSERT(c.is_small());  | 
611  | 0  |   if (soo_enabled) { | 
612  | 0  |     c.set_empty_soo();  | 
613  | 0  |     return;  | 
614  | 0  |   }  | 
615  | 0  |   c.decrement_size();  | 
616  | 0  |   c.infoz().RecordErase();  | 
617  | 0  |   SanitizerPoisonMemoryRegion(c.slot_array(), slot_size);  | 
618  | 0  | }  | 
619  |  |  | 
620  | 0  | void EraseMetaOnlyLarge(CommonFields& c, const ctrl_t* ctrl, size_t slot_size) { | 
621  | 0  |   ABSL_SWISSTABLE_ASSERT(!c.is_small());  | 
622  | 0  |   ABSL_SWISSTABLE_ASSERT(IsFull(*ctrl) && "erasing a dangling iterator");  | 
623  | 0  |   c.decrement_size();  | 
624  | 0  |   c.infoz().RecordErase();  | 
625  |  |  | 
626  | 0  |   size_t index = static_cast<size_t>(ctrl - c.control());  | 
627  |  |  | 
628  | 0  |   if (WasNeverFull(c, index)) { | 
629  | 0  |     SetCtrl(c, index, ctrl_t::kEmpty, slot_size);  | 
630  | 0  |     c.growth_info().OverwriteFullAsEmpty();  | 
631  | 0  |     return;  | 
632  | 0  |   }  | 
633  |  |  | 
634  | 0  |   c.growth_info().OverwriteFullAsDeleted();  | 
635  | 0  |   SetCtrlInLargeTable(c, index, ctrl_t::kDeleted, slot_size);  | 
636  | 0  | }  | 
637  |  |  | 
638  |  | void ClearBackingArray(CommonFields& c,  | 
639  |  |                        const PolicyFunctions& __restrict policy, void* alloc,  | 
640  | 251k  |                        bool reuse, bool soo_enabled) { | 
641  | 251k  |   if (reuse) { | 
642  | 234k  |     c.set_size_to_zero();  | 
643  | 234k  |     ABSL_SWISSTABLE_ASSERT(!soo_enabled || c.capacity() > SooCapacity());  | 
644  | 234k  |     ResetCtrl(c, policy.slot_size);  | 
645  | 234k  |     ResetGrowthLeft(c);  | 
646  | 234k  |     c.infoz().RecordStorageChanged(0, c.capacity());  | 
647  | 234k  |   } else { | 
648  |  |     // We need to record infoz before calling dealloc, which will unregister  | 
649  |  |     // infoz.  | 
650  | 16.7k  |     c.infoz().RecordClearedReservation();  | 
651  | 16.7k  |     c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);  | 
652  | 16.7k  |     c.infoz().Unregister();  | 
653  | 16.7k  |     (*policy.dealloc)(alloc, c.capacity(), c.control(), policy.slot_size,  | 
654  | 16.7k  |                       policy.slot_align, c.has_infoz());  | 
655  | 16.7k  |     c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{non_soo_tag_t{}}; | 
656  | 16.7k  |   }  | 
657  | 251k  | }  | 
658  |  |  | 
659  |  | namespace { | 
660  |  |  | 
661  |  | enum class ResizeNonSooMode { | 
662  |  |   kGuaranteedEmpty,  | 
663  |  |   kGuaranteedAllocated,  | 
664  |  | };  | 
665  |  |  | 
666  |  | // Iterates over full slots in the old table, finds new positions for them, and  | 
667  |  | // transfers the slots.  | 
668  |  | // This function is used for reserving or rehashing non-empty tables.  | 
669  |  | // This use case is rare, so the function is type-erased.  | 
670  |  | // Returns the total probe length.  | 
671  |  | size_t FindNewPositionsAndTransferSlots(  | 
672  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
673  | 0  |     ctrl_t* old_ctrl, void* old_slots, size_t old_capacity) { | 
674  | 0  |   void* new_slots = common.slot_array();  | 
675  | 0  |   const void* hash_fn = policy.hash_fn(common);  | 
676  | 0  |   const size_t slot_size = policy.slot_size;  | 
677  | 0  |   const size_t seed = common.seed().seed();  | 
678  |  |  | 
679  | 0  |   const auto insert_slot = [&](void* slot) { | 
680  | 0  |     size_t hash = policy.hash_slot(hash_fn, slot, seed);  | 
681  | 0  |     FindInfo target;  | 
682  | 0  |     if (common.is_small()) { | 
683  | 0  |       target = FindInfo{0, 0}; | 
684  | 0  |     } else { | 
685  | 0  |       target = find_first_non_full(common, hash);  | 
686  | 0  |       SetCtrl(common, target.offset, H2(hash), slot_size);  | 
687  | 0  |     }  | 
688  | 0  |     policy.transfer_n(&common, SlotAddress(new_slots, target.offset, slot_size),  | 
689  | 0  |                       slot, 1);  | 
690  | 0  |     return target.probe_length;  | 
691  | 0  |   };  | 
692  | 0  |   if (IsSmallCapacity(old_capacity)) { | 
693  | 0  |     if (common.size() == 1) insert_slot(old_slots);  | 
694  | 0  |     return 0;  | 
695  | 0  |   }  | 
696  | 0  |   size_t total_probe_length = 0;  | 
697  | 0  |   for (size_t i = 0; i < old_capacity; ++i) { | 
698  | 0  |     if (IsFull(old_ctrl[i])) { | 
699  | 0  |       total_probe_length += insert_slot(old_slots);  | 
700  | 0  |     }  | 
701  | 0  |     old_slots = NextSlot(old_slots, slot_size);  | 
702  | 0  |   }  | 
703  | 0  |   return total_probe_length;  | 
704  | 0  | }  | 
705  |  |  | 
706  |  | void ReportGrowthToInfozImpl(CommonFields& common, HashtablezInfoHandle infoz,  | 
707  |  |                              size_t hash, size_t total_probe_length,  | 
708  | 0  |                              size_t distance_from_desired) { | 
709  | 0  |   ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());  | 
710  | 0  |   infoz.RecordStorageChanged(common.size() - 1, common.capacity());  | 
711  | 0  |   infoz.RecordRehash(total_probe_length);  | 
712  | 0  |   infoz.RecordInsert(hash, distance_from_desired);  | 
713  | 0  |   common.set_has_infoz();  | 
714  |  |   // TODO(b/413062340): we could potentially store infoz in place of the  | 
715  |  |   // control pointer for the capacity 1 case.  | 
716  | 0  |   common.set_infoz(infoz);  | 
717  | 0  | }  | 
718  |  |  | 
719  |  | // Specialization to avoid passing two 0s from hot function.  | 
720  |  | ABSL_ATTRIBUTE_NOINLINE void ReportSingleGroupTableGrowthToInfoz(  | 
721  | 0  |     CommonFields& common, HashtablezInfoHandle infoz, size_t hash) { | 
722  | 0  |   ReportGrowthToInfozImpl(common, infoz, hash, /*total_probe_length=*/0,  | 
723  | 0  |                           /*distance_from_desired=*/0);  | 
724  | 0  | }  | 
725  |  |  | 
726  |  | ABSL_ATTRIBUTE_NOINLINE void ReportGrowthToInfoz(CommonFields& common,  | 
727  |  |                                                  HashtablezInfoHandle infoz,  | 
728  |  |                                                  size_t hash,  | 
729  |  |                                                  size_t total_probe_length,  | 
730  | 0  |                                                  size_t distance_from_desired) { | 
731  | 0  |   ReportGrowthToInfozImpl(common, infoz, hash, total_probe_length,  | 
732  | 0  |                           distance_from_desired);  | 
733  | 0  | }  | 
734  |  |  | 
735  |  | ABSL_ATTRIBUTE_NOINLINE void ReportResizeToInfoz(CommonFields& common,  | 
736  |  |                                                  HashtablezInfoHandle infoz,  | 
737  | 0  |                                                  size_t total_probe_length) { | 
738  | 0  |   ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());  | 
739  | 0  |   infoz.RecordStorageChanged(common.size(), common.capacity());  | 
740  | 0  |   infoz.RecordRehash(total_probe_length);  | 
741  | 0  |   common.set_has_infoz();  | 
742  | 0  |   common.set_infoz(infoz);  | 
743  | 0  | }  | 
744  |  |  | 
745  |  | struct BackingArrayPtrs { | 
746  |  |   ctrl_t* ctrl;  | 
747  |  |   void* slots;  | 
748  |  | };  | 
749  |  |  | 
750  |  | BackingArrayPtrs AllocBackingArray(CommonFields& common,  | 
751  |  |                                    const PolicyFunctions& __restrict policy,  | 
752  |  |                                    size_t new_capacity, bool has_infoz,  | 
753  | 228k  |                                    void* alloc) { | 
754  | 228k  |   RawHashSetLayout layout(new_capacity, policy.slot_size, policy.slot_align,  | 
755  | 228k  |                           has_infoz);  | 
756  | 228k  |   char* mem = static_cast<char*>(policy.alloc(alloc, layout.alloc_size()));  | 
757  | 228k  |   const GenerationType old_generation = common.generation();  | 
758  | 228k  |   common.set_generation_ptr(  | 
759  | 228k  |       reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));  | 
760  | 228k  |   common.set_generation(NextGeneration(old_generation));  | 
761  |  |  | 
762  | 228k  |   return {reinterpret_cast<ctrl_t*>(mem + layout.control_offset()), | 
763  | 228k  |           mem + layout.slot_offset()};  | 
764  | 228k  | }  | 
765  |  |  | 
766  |  | template <ResizeNonSooMode kMode>  | 
767  |  | void ResizeNonSooImpl(CommonFields& common,  | 
768  |  |                       const PolicyFunctions& __restrict policy,  | 
769  | 0  |                       size_t new_capacity, HashtablezInfoHandle infoz) { | 
770  | 0  |   ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));  | 
771  | 0  |   ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());  | 
772  |  |  | 
773  | 0  |   [[maybe_unused]] const size_t old_capacity = common.capacity();  | 
774  | 0  |   [[maybe_unused]] ctrl_t* old_ctrl;  | 
775  | 0  |   [[maybe_unused]] void* old_slots;  | 
776  | 0  |   if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) { | 
777  | 0  |     old_ctrl = common.control();  | 
778  | 0  |     old_slots = common.slot_array();  | 
779  | 0  |   }  | 
780  |  |  | 
781  | 0  |   const size_t slot_size = policy.slot_size;  | 
782  | 0  |   [[maybe_unused]] const size_t slot_align = policy.slot_align;  | 
783  | 0  |   const bool has_infoz = infoz.IsSampled();  | 
784  | 0  |   void* alloc = policy.get_char_alloc(common);  | 
785  |  |  | 
786  | 0  |   common.set_capacity(new_capacity);  | 
787  | 0  |   const auto [new_ctrl, new_slots] =  | 
788  | 0  |       AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);  | 
789  | 0  |   common.set_control(new_ctrl);  | 
790  | 0  |   common.set_slots(new_slots);  | 
791  | 0  |   common.generate_new_seed(has_infoz);  | 
792  |  |  | 
793  | 0  |   size_t total_probe_length = 0;  | 
794  | 0  |   ResetCtrl(common, slot_size);  | 
795  | 0  |   ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedEmpty ||  | 
796  | 0  |                          old_capacity == policy.soo_capacity());  | 
797  | 0  |   ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedAllocated ||  | 
798  | 0  |                          old_capacity > 0);  | 
799  | 0  |   if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) { | 
800  | 0  |     total_probe_length = FindNewPositionsAndTransferSlots(  | 
801  | 0  |         common, policy, old_ctrl, old_slots, old_capacity);  | 
802  | 0  |     (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,  | 
803  | 0  |                       has_infoz);  | 
804  | 0  |     ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,  | 
805  | 0  |                     common.size());  | 
806  | 0  |   } else { | 
807  | 0  |     GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(  | 
808  | 0  |         CapacityToGrowth(new_capacity));  | 
809  | 0  |   }  | 
810  |  | 
  | 
811  | 0  |   if (ABSL_PREDICT_FALSE(has_infoz)) { | 
812  | 0  |     ReportResizeToInfoz(common, infoz, total_probe_length);  | 
813  | 0  |   }  | 
814  | 0  | }  | 
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)0>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)1>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
815  |  |  | 
816  |  | void ResizeEmptyNonAllocatedTableImpl(CommonFields& common,  | 
817  |  |                                       const PolicyFunctions& __restrict policy,  | 
818  | 0  |                                       size_t new_capacity, bool force_infoz) { | 
819  | 0  |   ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));  | 
820  | 0  |   ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());  | 
821  | 0  |   ABSL_SWISSTABLE_ASSERT(!force_infoz || policy.soo_enabled);  | 
822  | 0  |   ABSL_SWISSTABLE_ASSERT(common.capacity() <= policy.soo_capacity());  | 
823  | 0  |   ABSL_SWISSTABLE_ASSERT(common.empty());  | 
824  | 0  |   const size_t slot_size = policy.slot_size;  | 
825  | 0  |   HashtablezInfoHandle infoz;  | 
826  | 0  |   const bool should_sample =  | 
827  | 0  |       policy.is_hashtablez_eligible && (force_infoz || ShouldSampleNextTable());  | 
828  | 0  |   if (ABSL_PREDICT_FALSE(should_sample)) { | 
829  | 0  |     infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,  | 
830  | 0  |                             policy.soo_capacity());  | 
831  | 0  |   }  | 
832  | 0  |   ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedEmpty>(common, policy,  | 
833  | 0  |                                                        new_capacity, infoz);  | 
834  | 0  | }  | 
835  |  |  | 
836  |  | // If the table was SOO, initializes new control bytes and transfers slot.  | 
837  |  | // After transferring the slot, sets control and slots in CommonFields.  | 
838  |  | // It is rare to resize an SOO table with one element to a large size.  | 
839  |  | // Requires: `c` contains SOO data.  | 
840  |  | void InsertOldSooSlotAndInitializeControlBytes(  | 
841  |  |     CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* new_ctrl,  | 
842  | 0  |     void* new_slots, bool has_infoz) { | 
843  | 0  |   ABSL_SWISSTABLE_ASSERT(c.size() == policy.soo_capacity());  | 
844  | 0  |   ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);  | 
845  | 0  |   size_t new_capacity = c.capacity();  | 
846  |  |  | 
847  | 0  |   c.generate_new_seed(has_infoz);  | 
848  |  |  | 
849  | 0  |   const size_t soo_slot_hash =  | 
850  | 0  |       policy.hash_slot(policy.hash_fn(c), c.soo_data(), c.seed().seed());  | 
851  | 0  |   size_t offset = probe(new_capacity, soo_slot_hash).offset();  | 
852  | 0  |   offset = offset == new_capacity ? 0 : offset;  | 
853  | 0  |   SanitizerPoisonMemoryRegion(new_slots, policy.slot_size * new_capacity);  | 
854  | 0  |   void* target_slot = SlotAddress(new_slots, offset, policy.slot_size);  | 
855  | 0  |   SanitizerUnpoisonMemoryRegion(target_slot, policy.slot_size);  | 
856  | 0  |   policy.transfer_n(&c, target_slot, c.soo_data(), 1);  | 
857  | 0  |   c.set_control(new_ctrl);  | 
858  | 0  |   c.set_slots(new_slots);  | 
859  | 0  |   ResetCtrl(c, policy.slot_size);  | 
860  | 0  |   SetCtrl(c, offset, H2(soo_slot_hash), policy.slot_size);  | 
861  | 0  | }  | 
862  |  |  | 
863  |  | enum class ResizeFullSooTableSamplingMode { | 
864  |  |   kNoSampling,  | 
865  |  |   // Force sampling. If the table was still not sampled, do not resize.  | 
866  |  |   kForceSampleNoResizeIfUnsampled,  | 
867  |  | };  | 
868  |  |  | 
869  |  | void AssertSoo([[maybe_unused]] CommonFields& common,  | 
870  | 56.7k  |                [[maybe_unused]] const PolicyFunctions& policy) { | 
871  | 56.7k  |   ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);  | 
872  | 56.7k  |   ABSL_SWISSTABLE_ASSERT(common.capacity() == policy.soo_capacity());  | 
873  | 56.7k  | }  | 
874  |  | void AssertFullSoo([[maybe_unused]] CommonFields& common,  | 
875  | 0  |                    [[maybe_unused]] const PolicyFunctions& policy) { | 
876  | 0  |   AssertSoo(common, policy);  | 
877  | 0  |   ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());  | 
878  | 0  | }  | 
879  |  |  | 
880  |  | void ResizeFullSooTable(CommonFields& common,  | 
881  |  |                         const PolicyFunctions& __restrict policy,  | 
882  |  |                         size_t new_capacity,  | 
883  | 0  |                         ResizeFullSooTableSamplingMode sampling_mode) { | 
884  | 0  |   AssertFullSoo(common, policy);  | 
885  | 0  |   const size_t slot_size = policy.slot_size;  | 
886  | 0  |   void* alloc = policy.get_char_alloc(common);  | 
887  |  |  | 
888  | 0  |   HashtablezInfoHandle infoz;  | 
889  | 0  |   bool has_infoz = false;  | 
890  | 0  |   if (sampling_mode ==  | 
891  | 0  |       ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled) { | 
892  | 0  |     if (ABSL_PREDICT_FALSE(policy.is_hashtablez_eligible)) { | 
893  | 0  |       infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,  | 
894  | 0  |                               policy.soo_capacity());  | 
895  | 0  |     }  | 
896  |  |  | 
897  | 0  |     if (!infoz.IsSampled()) return;  | 
898  | 0  |     has_infoz = true;  | 
899  | 0  |   }  | 
900  |  |  | 
901  | 0  |   common.set_capacity(new_capacity);  | 
902  |  |  | 
903  |  |   // We do not set control and slots in CommonFields yet to avoid overriding  | 
904  |  |   // SOO data.  | 
905  | 0  |   const auto [new_ctrl, new_slots] =  | 
906  | 0  |       AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);  | 
907  |  | 
  | 
908  | 0  |   InsertOldSooSlotAndInitializeControlBytes(common, policy, new_ctrl, new_slots,  | 
909  | 0  |                                             has_infoz);  | 
910  | 0  |   ResetGrowthLeft(common);  | 
911  | 0  |   if (has_infoz) { | 
912  | 0  |     common.set_has_infoz();  | 
913  | 0  |     common.set_infoz(infoz);  | 
914  | 0  |     infoz.RecordStorageChanged(common.size(), new_capacity);  | 
915  | 0  |   }  | 
916  | 0  | }  | 
917  |  |  | 
918  |  | void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* __restrict old_ctrl,  | 
919  |  |                                             size_t old_capacity,  | 
920  |  |                                             ctrl_t* __restrict new_ctrl,  | 
921  | 67.1k  |                                             size_t new_capacity) { | 
922  | 67.1k  |   ABSL_SWISSTABLE_ASSERT(is_single_group(new_capacity));  | 
923  | 67.1k  |   constexpr size_t kHalfWidth = Group::kWidth / 2;  | 
924  | 67.1k  |   ABSL_ASSUME(old_capacity < kHalfWidth);  | 
925  | 67.1k  |   ABSL_ASSUME(old_capacity > 0);  | 
926  | 67.1k  |   static_assert(Group::kWidth == 8 || Group::kWidth == 16,  | 
927  | 67.1k  |                 "Group size is not supported.");  | 
928  |  |  | 
929  |  |   // NOTE: operations are done with a compile-time-known size of 8.  | 
930  |  |   // The compiler optimizes that into a single ASM operation.  | 
931  |  |  | 
932  |  |   // Load the bytes from old_capacity. This contains  | 
933  |  |   // - the sentinel byte  | 
934  |  |   // - all the old control bytes  | 
935  |  |   // - the rest is filled with kEmpty bytes  | 
936  |  |   // Example:  | 
937  |  |   // old_ctrl =     012S012EEEEEEEEE...  | 
938  |  |   // copied_bytes = S012EEEE  | 
939  | 67.1k  |   uint64_t copied_bytes = absl::little_endian::Load64(old_ctrl + old_capacity);  | 
940  |  |  | 
941  |  |   // We change the sentinel byte to kEmpty before storing to both the start of  | 
942  |  |   // the new_ctrl, and past the end of the new_ctrl later for the new cloned  | 
943  |  |   // bytes. Note that this is faster than setting the sentinel byte to kEmpty  | 
944  |  |   // after the copy directly in new_ctrl because we are limited on store  | 
945  |  |   // bandwidth.  | 
946  | 67.1k  |   static constexpr uint64_t kEmptyXorSentinel =  | 
947  | 67.1k  |       static_cast<uint8_t>(ctrl_t::kEmpty) ^  | 
948  | 67.1k  |       static_cast<uint8_t>(ctrl_t::kSentinel);  | 
949  |  |  | 
950  |  |   // Replace the first byte, kSentinel, with kEmpty.  | 
951  |  |   // The resulting bytes are the old control bytes shifted by one byte.  | 
952  |  |   // Example:  | 
953  |  |   // old_ctrl = 012S012EEEEEEEEE...  | 
954  |  |   // before =   S012EEEE  | 
955  |  |   // after  =   E012EEEE  | 
956  | 67.1k  |   copied_bytes ^= kEmptyXorSentinel;  | 
957  |  |  | 
958  | 67.1k  |   if (Group::kWidth == 8) { | 
959  |  |     // With group size 8, we can grow with two write operations.  | 
960  | 0  |     ABSL_SWISSTABLE_ASSERT(old_capacity < 8 &&  | 
961  | 0  |                            "old_capacity is too large for group size 8");  | 
962  | 0  |     absl::little_endian::Store64(new_ctrl, copied_bytes);  | 
963  |  |  | 
964  | 0  |     static constexpr uint64_t kSentinal64 =  | 
965  | 0  |         static_cast<uint8_t>(ctrl_t::kSentinel);  | 
966  |  |  | 
967  |  |     // Prepend a kSentinel byte to the beginning of copied_bytes.  | 
968  |  |     // We have at most 3 non-empty bytes at the beginning of copied_bytes for  | 
969  |  |     // group size 8.  | 
970  |  |     // Example:  | 
971  |  |     // old_ctrl = 012S012EEEE  | 
972  |  |     // before =   E012EEEE  | 
973  |  |     // after  =   SE012EEE  | 
974  | 0  |     copied_bytes = (copied_bytes << 8) ^ kSentinal64;  | 
975  | 0  |     absl::little_endian::Store64(new_ctrl + new_capacity, copied_bytes);  | 
976  |  |     // Example for capacity 3:  | 
977  |  |     // old_ctrl = 012S012EEEE  | 
978  |  |     // After the first store:  | 
979  |  |     //           >!  | 
980  |  |     // new_ctrl = E012EEEE???????  | 
981  |  |     // After the second store:  | 
982  |  |     //                  >!  | 
983  |  |     // new_ctrl = E012EEESE012EEE  | 
984  | 0  |     return;  | 
985  | 0  |   }  | 
986  |  |  | 
987  | 67.1k  |   ABSL_SWISSTABLE_ASSERT(Group::kWidth == 16);  // NOLINT(misc-static-assert)  | 
988  |  |  | 
989  |  |   // Fill the second half of the main control bytes with kEmpty.  | 
990  |  |   // For small capacities this may write into the mirrored control bytes.  | 
991  |  |   // That is fine, as we will overwrite all of those bytes later.  | 
992  | 67.1k  |   std::memset(new_ctrl + kHalfWidth, static_cast<int8_t>(ctrl_t::kEmpty),  | 
993  | 67.1k  |               kHalfWidth);  | 
994  |  |   // Fill the second half of the mirrored control bytes with kEmpty.  | 
995  | 67.1k  |   std::memset(new_ctrl + new_capacity + kHalfWidth,  | 
996  | 67.1k  |               static_cast<int8_t>(ctrl_t::kEmpty), kHalfWidth);  | 
997  |  |   // Copy the first half of the non-mirrored control bytes.  | 
998  | 67.1k  |   absl::little_endian::Store64(new_ctrl, copied_bytes);  | 
999  | 67.1k  |   new_ctrl[new_capacity] = ctrl_t::kSentinel;  | 
1000  |  |   // Copy the first half of the mirrored control bytes.  | 
1001  | 67.1k  |   absl::little_endian::Store64(new_ctrl + new_capacity + 1, copied_bytes);  | 
1002  |  |  | 
1003  |  |   // Example for growth capacity 1->3:  | 
1004  |  |   // old_ctrl =                  0S0EEEEEEEEEEEEEE  | 
1005  |  |   // new_ctrl at the end =       E0ESE0EEEEEEEEEEEEE  | 
1006  |  |   //                                    >!  | 
1007  |  |   // new_ctrl after 1st memset = ????????EEEEEEEE???  | 
1008  |  |   //                                       >!  | 
1009  |  |   // new_ctrl after 2nd memset = ????????EEEEEEEEEEE  | 
1010  |  |   //                            >!  | 
1011  |  |   // new_ctrl after 1st store =  E0EEEEEEEEEEEEEEEEE  | 
1012  |  |   // new_ctrl after kSentinel =  E0ESEEEEEEEEEEEEEEE  | 
1013  |  |   //                                >!  | 
1014  |  |   // new_ctrl after 2nd store =  E0ESE0EEEEEEEEEEEEE  | 
1015  |  |  | 
1016  |  |   // Example for growth capacity 3->7:  | 
1017  |  |   // old_ctrl =                  012S012EEEEEEEEEEEE  | 
1018  |  |   // new_ctrl at the end =       E012EEESE012EEEEEEEEEEE  | 
1019  |  |   //                                    >!  | 
1020  |  |   // new_ctrl after 1st memset = ????????EEEEEEEE???????  | 
1021  |  |   //                                           >!  | 
1022  |  |   // new_ctrl after 2nd memset = ????????EEEEEEEEEEEEEEE  | 
1023  |  |   //                            >!  | 
1024  |  |   // new_ctrl after 1st store =  E012EEEEEEEEEEEEEEEEEEE  | 
1025  |  |   // new_ctrl after kSentinel =  E012EEESEEEEEEEEEEEEEEE  | 
1026  |  |   //                                >!  | 
1027  |  |   // new_ctrl after 2nd store =  E012EEESE012EEEEEEEEEEE  | 
1028  |  |  | 
1029  |  |   // Example for growth capacity 7->15:  | 
1030  |  |   // old_ctrl =                  0123456S0123456EEEEEEEE  | 
1031  |  |   // new_ctrl at the end =       E0123456EEEEEEESE0123456EEEEEEE  | 
1032  |  |   //                                    >!  | 
1033  |  |   // new_ctrl after 1st memset = ????????EEEEEEEE???????????????  | 
1034  |  |   //                                                   >!  | 
1035  |  |   // new_ctrl after 2nd memset = ????????EEEEEEEE???????EEEEEEEE  | 
1036  |  |   //                            >!  | 
1037  |  |   // new_ctrl after 1st store =  E0123456EEEEEEEE???????EEEEEEEE  | 
1038  |  |   // new_ctrl after kSentinel =  E0123456EEEEEEES???????EEEEEEEE  | 
1039  |  |   //                                            >!  | 
1040  |  |   // new_ctrl after 2nd store =  E0123456EEEEEEESE0123456EEEEEEE  | 
1041  | 67.1k  | }  | 
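As a minimal, self-contained sketch of the XOR trick used above (with placeholder control-byte values rather than the real ctrl_t constants): kEmpty ^ kSentinel is non-zero only in the lowest byte, and because the word is loaded starting at the sentinel, XOR-ing the whole word rewrites only that byte from kSentinel to kEmpty.

    #include <cstdint>
    #include <cstdio>

    // Toy stand-ins for the control byte values; not the real ctrl_t constants.
    constexpr uint8_t kToyEmpty = 0x80;
    constexpr uint8_t kToySentinel = 0xFF;

    int main() {
      // Packed little-endian bytes modeling "before = S012EEEE":
      // byte 0 = sentinel, bytes 1..3 = H2 values 0,1,2, bytes 4..7 = empty.
      uint64_t copied_bytes = 0x8080808002010000ULL | kToySentinel;
      // The XOR constant only touches byte 0: sentinel -> empty ("E012EEEE").
      constexpr uint64_t kEmptyXorSentinel = kToyEmpty ^ kToySentinel;
      copied_bytes ^= kEmptyXorSentinel;
      std::printf("lowest byte: 0x%02x\n",
                  static_cast<unsigned>(copied_bytes & 0xFF));  // prints 0x80
    }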
1042  |  |  | 
1043  |  | // Size of the buffer we allocate on stack for storing probed elements in  | 
1044  |  | // GrowToNextCapacity algorithm.  | 
1045  |  | constexpr size_t kProbedElementsBufferSize = 512;  | 
1046  |  |  | 
1047  |  | // Decodes information about probed elements from contiguous memory.  | 
1048  |  | // Finds new position for each element and transfers it to the new slots.  | 
1049  |  | // Returns the total probe length.  | 
1050  |  | template <typename ProbedItem>  | 
1051  |  | ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertImpl(  | 
1052  |  |     CommonFields& c, const PolicyFunctions& __restrict policy,  | 
1053  | 27.2k  |     const ProbedItem* start, const ProbedItem* end, void* old_slots) { | 
1054  | 27.2k  |   const size_t new_capacity = c.capacity();  | 
1055  |  |  | 
1056  | 27.2k  |   void* new_slots = c.slot_array();  | 
1057  | 27.2k  |   ctrl_t* new_ctrl = c.control();  | 
1058  | 27.2k  |   size_t total_probe_length = 0;  | 
1059  |  |  | 
1060  | 27.2k  |   const size_t slot_size = policy.slot_size;  | 
1061  | 27.2k  |   auto transfer_n = policy.transfer_n;  | 
1062  |  |  | 
1063  | 89.6k  |   for (; start < end; ++start) { | 
1064  | 62.3k  |     const FindInfo target = find_first_non_full_from_h1(  | 
1065  | 62.3k  |         new_ctrl, static_cast<size_t>(start->h1), new_capacity);  | 
1066  | 62.3k  |     total_probe_length += target.probe_length;  | 
1067  | 62.3k  |     const size_t old_index = static_cast<size_t>(start->source_offset);  | 
1068  | 62.3k  |     const size_t new_i = target.offset;  | 
1069  | 62.3k  |     ABSL_SWISSTABLE_ASSERT(old_index < new_capacity / 2);  | 
1070  | 62.3k  |     ABSL_SWISSTABLE_ASSERT(new_i < new_capacity);  | 
1071  | 62.3k  |     ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_i]));  | 
1072  | 62.3k  |     void* src_slot = SlotAddress(old_slots, old_index, slot_size);  | 
1073  | 62.3k  |     void* dst_slot = SlotAddress(new_slots, new_i, slot_size);  | 
1074  | 62.3k  |     SanitizerUnpoisonMemoryRegion(dst_slot, slot_size);  | 
1075  | 62.3k  |     transfer_n(&c, dst_slot, src_slot, 1);  | 
1076  | 62.3k  |     SetCtrlInLargeTable(c, new_i, static_cast<h2_t>(start->h2), slot_size);  | 
1077  | 62.3k  |   }  | 
1078  | 27.2k  |   return total_probe_length;  | 
1079  | 27.2k  | }  | 
  [Instantiations: DecodeAndInsertImpl<ProbedItemImpl<unsigned int, 32ul>> executed 27.2k times; the ProbedItemImpl<unsigned long, 64ul> and ProbedItemImpl<unsigned long, 122ul> instantiations were never executed.]
1080  |  |  | 
1081  |  | // Sentinel value for the start of marked elements.  | 
1082  |  | // Signals that there are no marked elements.  | 
1083  |  | constexpr size_t kNoMarkedElementsSentinel = ~size_t{}; | 
1084  |  |  | 
1085  |  | // Process probed elements that did not fit into available buffers.  | 
1086  |  | // We marked them in control bytes as kSentinel.  | 
1087  |  | // Hash recomputation and full probing are done here.  | 
1088  |  | // This use case should be extremely rare.  | 
1089  |  | ABSL_ATTRIBUTE_NOINLINE size_t ProcessProbedMarkedElements(  | 
1090  |  |     CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl,  | 
1091  | 0  |     void* old_slots, size_t start) { | 
1092  | 0  |   size_t old_capacity = PreviousCapacity(c.capacity());  | 
1093  | 0  |   const size_t slot_size = policy.slot_size;  | 
1094  | 0  |   void* new_slots = c.slot_array();  | 
1095  | 0  |   size_t total_probe_length = 0;  | 
1096  | 0  |   const void* hash_fn = policy.hash_fn(c);  | 
1097  | 0  |   auto hash_slot = policy.hash_slot;  | 
1098  | 0  |   auto transfer_n = policy.transfer_n;  | 
1099  | 0  |   const size_t seed = c.seed().seed();  | 
1100  | 0  |   for (size_t old_index = start; old_index < old_capacity; ++old_index) { | 
1101  | 0  |     if (old_ctrl[old_index] != ctrl_t::kSentinel) { | 
1102  | 0  |       continue;  | 
1103  | 0  |     }  | 
1104  | 0  |     void* src_slot = SlotAddress(old_slots, old_index, slot_size);  | 
1105  | 0  |     const size_t hash = hash_slot(hash_fn, src_slot, seed);  | 
1106  | 0  |     const FindInfo target = find_first_non_full(c, hash);  | 
1107  | 0  |     total_probe_length += target.probe_length;  | 
1108  | 0  |     const size_t new_i = target.offset;  | 
1109  | 0  |     void* dst_slot = SlotAddress(new_slots, new_i, slot_size);  | 
1110  | 0  |     SetCtrlInLargeTable(c, new_i, H2(hash), slot_size);  | 
1111  | 0  |     transfer_n(&c, dst_slot, src_slot, 1);  | 
1112  | 0  |   }  | 
1113  | 0  |   return total_probe_length;  | 
1114  | 0  | }  | 
1115  |  |  | 
1116  |  | // The largest old capacity for which it is guaranteed that all probed elements  | 
1117  |  | // fit in ProbedItemEncoder's local buffer.  | 
1118  |  | // For such tables, `encode_probed_element` is trivial.  | 
1119  |  | constexpr size_t kMaxLocalBufferOldCapacity =  | 
1120  |  |     kProbedElementsBufferSize / sizeof(ProbedItem4Bytes) - 1;  | 
1121  |  | static_assert(IsValidCapacity(kMaxLocalBufferOldCapacity));  | 
1122  |  | constexpr size_t kMaxLocalBufferNewCapacity =  | 
1123  |  |     NextCapacity(kMaxLocalBufferOldCapacity);  | 
1124  |  | static_assert(kMaxLocalBufferNewCapacity <= ProbedItem4Bytes::kMaxNewCapacity);  | 
1125  |  | static_assert(NextCapacity(kMaxLocalBufferNewCapacity) <=  | 
1126  |  |               ProbedItem4Bytes::kMaxNewCapacity);  | 
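For a rough sanity check of these constants, assuming sizeof(ProbedItem4Bytes) == 4 (as the name suggests) and the usual 2^k - 1 capacity sequence where NextCapacity(n) == 2 * n + 1; both are assumptions for illustration, not guarantees:

    #include <cstddef>

    // 512-byte stack buffer of assumed 4-byte items -> 128 items, so the largest
    // old capacity whose elements are guaranteed to fit is 128 - 1 = 127.
    constexpr std::size_t kAssumedItemSize = 4;
    constexpr std::size_t kOldCap = 512 / kAssumedItemSize - 1;   // 127
    static_assert(kOldCap == 127, "");
    static_assert(((kOldCap + 1) & kOldCap) == 0, "has the 2^k - 1 capacity form");
    constexpr std::size_t kNewCap = 2 * kOldCap + 1;              // 255
    static_assert(kNewCap == 255, "");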
1127  |  |  | 
1128  |  | // Initializes mirrored control bytes after  | 
1129  |  | // transfer_unprobed_elements_to_next_capacity.  | 
1130  | 104k  | void InitializeMirroredControlBytes(ctrl_t* new_ctrl, size_t new_capacity) { | 
1131  | 104k  |   std::memcpy(new_ctrl + new_capacity,  | 
1132  |  |               // We own GrowthInfo just before control bytes. So it is ok  | 
1133  |  |               // to read one byte from it.  | 
1134  | 104k  |               new_ctrl - 1, Group::kWidth);  | 
1135  | 104k  |   new_ctrl[new_capacity] = ctrl_t::kSentinel;  | 
1136  | 104k  | }  | 
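A small illustrative sketch of the cloned ("mirrored") control-byte layout this function produces: a sentinel at index capacity, followed by a copy of the first width - 1 control bytes, so group-sized loads near the end can wrap around. Sizes and byte values below are made up for the example; the real function additionally reads one byte of GrowthInfo so it can do the whole copy in a single memcpy.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      constexpr size_t kWidth = 8;     // toy group width
      constexpr size_t kCapacity = 7;  // toy table capacity
      uint8_t ctrl[kCapacity + 1 + (kWidth - 1)];
      for (size_t i = 0; i < kCapacity; ++i) ctrl[i] = static_cast<uint8_t>(i);
      ctrl[kCapacity] = 0xFF;  // stand-in for ctrl_t::kSentinel
      // Mirror the first width - 1 control bytes after the sentinel.
      std::memcpy(ctrl + kCapacity + 1, ctrl, kWidth - 1);
      for (uint8_t b : ctrl) std::printf("%02x ", static_cast<unsigned>(b));
      std::printf("\n");  // 00..06, ff, then 00..06 again
    }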
1137  |  |  | 
1138  |  | // Encodes probed elements into available memory.  | 
1139  |  | // At first, a local (on stack) buffer is used. The size of the buffer is  | 
1140  |  | // kProbedElementsBufferSize bytes.  | 
1141  |  | // When the local buffer is full, we switch to the `control_` buffer. We are  | 
1142  |  | // allowed to overwrite the `control_` buffer up to the `source_offset` byte. If  | 
1143  |  | // we run out of space in the `control_` buffer, we fall back to a naive  | 
1144  |  | // algorithm for all the remaining probed elements: we mark them as kSentinel in  | 
1145  |  | // the control bytes and later process them fully. See  | 
1146  |  | // ProcessProbedMarkedElements for details. This should be extremely rare.  | 
1147  |  | template <typename ProbedItemType,  | 
1148  |  |           // If true, we only use the local buffer and never switch to the  | 
1149  |  |           // control buffer.  | 
1150  |  |           bool kGuaranteedFitToBuffer = false>  | 
1151  |  | class ProbedItemEncoder { | 
1152  |  |  public:  | 
1153  |  |   using ProbedItem = ProbedItemType;  | 
1154  | 104k  |   explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}  | 
  [Instantiations: <ProbedItemImpl<unsigned int, 32ul>, true> 88.0k, <ProbedItemImpl<unsigned int, 32ul>, false> 16.3k; the <unsigned long, 64ul> and <unsigned long, 122ul> instantiations were never executed.]
1155  |  |  | 
1156  |  |   // Encode item into the best available location.  | 
1157  | 62.3k  |   void EncodeItem(ProbedItem item) { | 
1158  | 62.3k  |     if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) { | 
1159  | 0  |       return ProcessEncodeWithOverflow(item);  | 
1160  | 0  |     }  | 
1161  | 62.3k  |     ABSL_SWISSTABLE_ASSERT(pos_ < end_);  | 
1162  | 62.3k  |     *pos_ = item;  | 
1163  | 62.3k  |     ++pos_;  | 
1164  | 62.3k  |   }  | 
  [Instantiations: EncodeItem for <ProbedItemImpl<unsigned int, 32ul>, true> 40.4k, for <ProbedItemImpl<unsigned int, 32ul>, false> 21.9k; the <unsigned long, 64ul> and <unsigned long, 122ul> instantiations were never executed.]
1165  |  |  | 
1166  |  |   // Decodes information about probed elements from all available sources.  | 
1167  |  |   // Finds new position for each element and transfers it to the new slots.  | 
1168  |  |   // Returns the total probe length.  | 
1169  |  |   size_t DecodeAndInsertToTable(CommonFields& common,  | 
1170  |  |                                 const PolicyFunctions& __restrict policy,  | 
1171  | 104k  |                                 void* old_slots) const { | 
1172  | 104k  |     if (pos_ == buffer_) { | 
1173  | 77.1k  |       return 0;  | 
1174  | 77.1k  |     }  | 
1175  | 27.2k  |     if constexpr (kGuaranteedFitToBuffer) { | 
1176  | 20.6k  |       return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);  | 
1177  | 20.6k  |     }  | 
1178  | 0  |     size_t total_probe_length = DecodeAndInsertImpl(  | 
1179  | 27.2k  |         common, policy, buffer_,  | 
1180  | 27.2k  |         local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);  | 
1181  | 27.2k  |     if (!local_buffer_full_) { | 
1182  | 6.63k  |       return total_probe_length;  | 
1183  | 6.63k  |     }  | 
1184  | 20.6k  |     total_probe_length +=  | 
1185  | 20.6k  |         DecodeAndInsertToTableOverflow(common, policy, old_slots);  | 
1186  | 20.6k  |     return total_probe_length;  | 
1187  | 27.2k  |   }  | 
  [Instantiations: DecodeAndInsertToTable for <ProbedItemImpl<unsigned int, 32ul>, true> 88.0k, for <ProbedItemImpl<unsigned int, 32ul>, false> 16.3k; the <unsigned long, 64ul> and <unsigned long, 122ul> instantiations were never executed.]
1188  |  |  | 
1189  |  |  private:  | 
1190  | 0  |   static ProbedItem* AlignToNextItem(void* ptr) { | 
1191  | 0  |     return reinterpret_cast<ProbedItem*>(AlignUpTo(  | 
1192  | 0  |         reinterpret_cast<uintptr_t>(ptr), alignof(ProbedItem)));  | 
1193  | 0  |   }  | 
1194  |  |  | 
1195  | 0  |   ProbedItem* OverflowBufferStart() const { | 
1196  |  |     // We reuse GrowthInfo memory as well.  | 
1197  | 0  |     return AlignToNextItem(control_ - ControlOffset(/*has_infoz=*/false));  | 
1198  | 0  |   }  | 
1199  |  |  | 
1200  |  |   // Encodes the item when the previously allocated buffer is full.  | 
1201  |  |   // The first time that happens is when the local buffer is full; we then  | 
1202  |  |   // switch from the local buffer to the control buffer.  | 
1203  |  |   // Every time this function is called, the available buffer is extended up to  | 
1204  |  |   // the `item.source_offset` byte in the control buffer.  | 
1205  |  |   // After the buffer is extended, this function won't be called again until  | 
1206  |  |   // that buffer is exhausted.  | 
1207  |  |   //  | 
1208  |  |   // If there's no space left in the control buffer, we fall back to the naive  | 
1209  |  |   // algorithm and mark probed elements as kSentinel in the control buffer. In  | 
1210  |  |   // this case, we will call this function for every subsequent probed element.  | 
1211  | 0  |   ABSL_ATTRIBUTE_NOINLINE void ProcessEncodeWithOverflow(ProbedItem item) { | 
1212  | 0  |     if (!local_buffer_full_) { | 
1213  | 0  |       local_buffer_full_ = true;  | 
1214  | 0  |       pos_ = OverflowBufferStart();  | 
1215  | 0  |     }  | 
1216  | 0  |     const size_t source_offset = static_cast<size_t>(item.source_offset);  | 
1217  |  |     // We are in fallback mode so we can't reuse control buffer anymore.  | 
1218  |  |     // Probed elements are marked as kSentinel in the control buffer.  | 
1219  | 0  |     if (ABSL_PREDICT_FALSE(marked_elements_starting_position_ !=  | 
1220  | 0  |                            kNoMarkedElementsSentinel)) { | 
1221  | 0  |       control_[source_offset] = ctrl_t::kSentinel;  | 
1222  | 0  |       return;  | 
1223  | 0  |     }  | 
1224  |  |     // Refresh the end pointer to the new available position.  | 
1225  |  |     // Invariant: if pos < end, then we have at least sizeof(ProbedItem) bytes  | 
1226  |  |     // to write.  | 
1227  | 0  |     end_ = control_ + source_offset + 1 - sizeof(ProbedItem);  | 
1228  | 0  |     if (ABSL_PREDICT_TRUE(pos_ < end_)) { | 
1229  | 0  |       *pos_ = item;  | 
1230  | 0  |       ++pos_;  | 
1231  | 0  |       return;  | 
1232  | 0  |     }  | 
1233  | 0  |     control_[source_offset] = ctrl_t::kSentinel;  | 
1234  | 0  |     marked_elements_starting_position_ = source_offset;  | 
1235  |  |     // Now we will always fall through to `ProcessEncodeWithOverflow`.  | 
1236  | 0  |     ABSL_SWISSTABLE_ASSERT(pos_ >= end_);  | 
1237  | 0  |   }  | 
1238  |  |  | 
1239  |  |   // Decodes information about probed elements from control buffer and processes  | 
1240  |  |   // marked elements.  | 
1241  |  |   // Finds new position for each element and transfers it to the new slots.  | 
1242  |  |   // Returns the total probe length.  | 
1243  |  |   ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertToTableOverflow(  | 
1244  |  |       CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1245  | 0  |       void* old_slots) const { | 
1246  | 0  |     ABSL_SWISSTABLE_ASSERT(local_buffer_full_ &&  | 
1247  | 0  |                            "must not be called when local buffer is not full");  | 
1248  | 0  |     size_t total_probe_length = DecodeAndInsertImpl(  | 
1249  | 0  |         common, policy, OverflowBufferStart(), pos_, old_slots);  | 
1250  | 0  |     if (ABSL_PREDICT_TRUE(marked_elements_starting_position_ ==  | 
1251  | 0  |                           kNoMarkedElementsSentinel)) { | 
1252  | 0  |       return total_probe_length;  | 
1253  | 0  |     }  | 
1254  | 0  |     total_probe_length +=  | 
1255  | 0  |         ProcessProbedMarkedElements(common, policy, control_, old_slots,  | 
1256  | 0  |                                     marked_elements_starting_position_);  | 
1257  | 0  |     return total_probe_length;  | 
1258  | 0  |   }  | 
1259  |  |  | 
1260  |  |   static constexpr size_t kBufferSize =  | 
1261  |  |       kProbedElementsBufferSize / sizeof(ProbedItem);  | 
1262  |  |   ProbedItem buffer_[kBufferSize];  | 
1263  |  |   // If local_buffer_full_ is false, then pos_/end_ are in the local buffer,  | 
1264  |  |   // otherwise, they're in the overflow buffer.  | 
1265  |  |   ProbedItem* pos_ = buffer_;  | 
1266  |  |   const void* end_ = buffer_ + kBufferSize;  | 
1267  |  |   ctrl_t* const control_;  | 
1268  |  |   size_t marked_elements_starting_position_ = kNoMarkedElementsSentinel;  | 
1269  |  |   bool local_buffer_full_ = false;  | 
1270  |  | };  | 
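To summarize the buffering strategy, here is a deliberately simplified stand-in: items land in a fixed-size stack buffer first and spill into secondary storage once it is full. The real class reuses the old control-byte array as the spill space and has a third "mark as kSentinel" fallback; this sketch (with a std::vector as the overflow area and an invented ToyItem type) only shows the two-tier idea.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ToyItem {
      uint8_t h2;
      uint32_t source_offset;
      uint32_t h1;
    };

    class ToyProbedItemEncoder {
     public:
      void EncodeItem(const ToyItem& item) {
        if (pos_ < kBufferSize) {
          buffer_[pos_++] = item;      // fast path: local stack buffer
        } else {
          overflow_.push_back(item);   // spill path, expected to be rare
        }
      }
      template <typename Fn>
      void ForEachEncoded(Fn fn) const {
        for (std::size_t i = 0; i < pos_; ++i) fn(buffer_[i]);
        for (const ToyItem& item : overflow_) fn(item);
      }

     private:
      static constexpr std::size_t kBufferSize = 512 / sizeof(ToyItem);
      ToyItem buffer_[kBufferSize];
      std::size_t pos_ = 0;
      std::vector<ToyItem> overflow_;
    };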
1271  |  |  | 
1272  |  | // Grows to next capacity with specified encoder type.  | 
1273  |  | // The encoder is used to store probed elements that are processed later.  | 
1274  |  | // A different encoder is used depending on the capacity of the table.  | 
1275  |  | // Returns total probe length.  | 
1276  |  | template <typename Encoder>  | 
1277  |  | size_t GrowToNextCapacity(CommonFields& common,  | 
1278  |  |                           const PolicyFunctions& __restrict policy,  | 
1279  | 104k  |                           ctrl_t* old_ctrl, void* old_slots) { | 
1280  | 104k  |   using ProbedItem = typename Encoder::ProbedItem;  | 
1281  | 104k  |   ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);  | 
1282  | 104k  |   Encoder encoder(old_ctrl);  | 
1283  | 104k  |   policy.transfer_unprobed_elements_to_next_capacity(  | 
1284  | 104k  |       common, old_ctrl, old_slots, &encoder,  | 
1285  | 104k  |       [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) { | 
1286  | 62.3k  |         auto encoder_ptr = static_cast<Encoder*>(probed_storage);  | 
1287  | 62.3k  |         encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));  | 
1288  | 62.3k  |       });  | 
  [Instantiations: encode lambda for the <ProbedItemImpl<unsigned int, 32ul>, true> encoder 40.4k, for the <ProbedItemImpl<unsigned int, 32ul>, false> encoder 21.9k; the <unsigned long, 64ul> and <unsigned long, 122ul> instantiations were never executed.]
1289  | 104k  |   InitializeMirroredControlBytes(common.control(), common.capacity());  | 
1290  | 104k  |   return encoder.DecodeAndInsertToTable(common, policy, old_slots);  | 
1291  | 104k  | }  | 
  [Instantiations: GrowToNextCapacity with the <ProbedItemImpl<unsigned int, 32ul>, true> encoder 88.0k, with the <ProbedItemImpl<unsigned int, 32ul>, false> encoder 16.3k; the <unsigned long, 64ul> and <unsigned long, 122ul> encoder instantiations were never executed.]
1292  |  |  | 
1293  |  | // Grows to next capacity for relatively small tables so that even if all  | 
1294  |  | // elements are probed, we don't need to overflow the local buffer.  | 
1295  |  | // Returns total probe length.  | 
1296  |  | size_t GrowToNextCapacityThatFitsInLocalBuffer(  | 
1297  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1298  | 88.0k  |     ctrl_t* old_ctrl, void* old_slots) { | 
1299  | 88.0k  |   ABSL_SWISSTABLE_ASSERT(common.capacity() <= kMaxLocalBufferNewCapacity);  | 
1300  | 88.0k  |   return GrowToNextCapacity<  | 
1301  | 88.0k  |       ProbedItemEncoder<ProbedItem4Bytes, /*kGuaranteedFitToBuffer=*/true>>(  | 
1302  | 88.0k  |       common, policy, old_ctrl, old_slots);  | 
1303  | 88.0k  | }  | 
1304  |  |  | 
1305  |  | // Grows to next capacity with different encodings. Returns total probe length.  | 
1306  |  | // These functions are useful to simplify profile analysis.  | 
1307  |  | size_t GrowToNextCapacity4BytesEncoder(CommonFields& common,  | 
1308  |  |                                        const PolicyFunctions& __restrict policy,  | 
1309  | 16.3k  |                                        ctrl_t* old_ctrl, void* old_slots) { | 
1310  | 16.3k  |   return GrowToNextCapacity<ProbedItemEncoder<ProbedItem4Bytes>>(  | 
1311  | 16.3k  |       common, policy, old_ctrl, old_slots);  | 
1312  | 16.3k  | }  | 
1313  |  | size_t GrowToNextCapacity8BytesEncoder(CommonFields& common,  | 
1314  |  |                                        const PolicyFunctions& __restrict policy,  | 
1315  | 0  |                                        ctrl_t* old_ctrl, void* old_slots) { | 
1316  | 0  |   return GrowToNextCapacity<ProbedItemEncoder<ProbedItem8Bytes>>(  | 
1317  | 0  |       common, policy, old_ctrl, old_slots);  | 
1318  | 0  | }  | 
1319  |  | size_t GrowToNextCapacity16BytesEncoder(  | 
1320  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1321  | 0  |     ctrl_t* old_ctrl, void* old_slots) { | 
1322  | 0  |   return GrowToNextCapacity<ProbedItemEncoder<ProbedItem16Bytes>>(  | 
1323  | 0  |       common, policy, old_ctrl, old_slots);  | 
1324  | 0  | }  | 
1325  |  |  | 
1326  |  | // Grows to next capacity for tables with relatively large capacity so that we  | 
1327  |  | // can't guarantee that all probed elements fit in the local buffer. Returns  | 
1328  |  | // total probe length.  | 
1329  |  | size_t GrowToNextCapacityOverflowLocalBuffer(  | 
1330  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1331  | 16.3k  |     ctrl_t* old_ctrl, void* old_slots) { | 
1332  | 16.3k  |   const size_t new_capacity = common.capacity();  | 
1333  | 16.3k  |   if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem4Bytes::kMaxNewCapacity)) { | 
1334  | 16.3k  |     return GrowToNextCapacity4BytesEncoder(common, policy, old_ctrl, old_slots);  | 
1335  | 16.3k  |   }  | 
1336  | 0  |   if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem8Bytes::kMaxNewCapacity)) { | 
1337  | 0  |     return GrowToNextCapacity8BytesEncoder(common, policy, old_ctrl, old_slots);  | 
1338  | 0  |   }  | 
1339  |  |   // 16 bytes encoding supports the maximum swisstable capacity.  | 
1340  | 0  |   return GrowToNextCapacity16BytesEncoder(common, policy, old_ctrl, old_slots);  | 
1341  | 0  | }  | 
1342  |  |  | 
1343  |  | // Dispatches to the appropriate `GrowToNextCapacity*` function based on the  | 
1344  |  | // capacity of the table. Returns total probe length.  | 
1345  |  | ABSL_ATTRIBUTE_NOINLINE  | 
1346  |  | size_t GrowToNextCapacityDispatch(CommonFields& common,  | 
1347  |  |                                   const PolicyFunctions& __restrict policy,  | 
1348  | 104k  |                                   ctrl_t* old_ctrl, void* old_slots) { | 
1349  | 104k  |   const size_t new_capacity = common.capacity();  | 
1350  | 104k  |   if (ABSL_PREDICT_TRUE(new_capacity <= kMaxLocalBufferNewCapacity)) { | 
1351  | 88.0k  |     return GrowToNextCapacityThatFitsInLocalBuffer(common, policy, old_ctrl,  | 
1352  | 88.0k  |                                                    old_slots);  | 
1353  | 88.0k  |   } else { | 
1354  | 16.3k  |     return GrowToNextCapacityOverflowLocalBuffer(common, policy, old_ctrl,  | 
1355  | 16.3k  |                                                  old_slots);  | 
1356  | 16.3k  |   }  | 
1357  | 104k  | }  | 
1358  |  |  | 
1359  |  | void IncrementSmallSizeNonSoo(CommonFields& common,  | 
1360  | 0  |                               const PolicyFunctions& __restrict policy) { | 
1361  | 0  |   ABSL_SWISSTABLE_ASSERT(common.is_small());  | 
1362  | 0  |   common.increment_size();  | 
1363  | 0  |   SanitizerUnpoisonMemoryRegion(common.slot_array(), policy.slot_size);  | 
1364  | 0  | }  | 
1365  |  |  | 
1366  |  | void IncrementSmallSize(CommonFields& common,  | 
1367  | 0  |                         const PolicyFunctions& __restrict policy) { | 
1368  | 0  |   ABSL_SWISSTABLE_ASSERT(common.is_small());  | 
1369  | 0  |   if (policy.soo_enabled) { | 
1370  | 0  |     common.set_full_soo();  | 
1371  | 0  |   } else { | 
1372  | 0  |     IncrementSmallSizeNonSoo(common, policy);  | 
1373  | 0  |   }  | 
1374  | 0  | }  | 
1375  |  |  | 
1376  |  | std::pair<ctrl_t*, void*> Grow1To3AndPrepareInsert(  | 
1377  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1378  | 0  |     absl::FunctionRef<size_t(size_t)> get_hash) { | 
1379  |  |   // TODO(b/413062340): Refactor to reuse more code with  | 
1380  |  |   // GrowSooTableToNextCapacityAndPrepareInsert.  | 
1381  | 0  |   ABSL_SWISSTABLE_ASSERT(common.capacity() == 1);  | 
1382  | 0  |   ABSL_SWISSTABLE_ASSERT(!common.empty());  | 
1383  | 0  |   ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);  | 
1384  | 0  |   constexpr size_t kOldCapacity = 1;  | 
1385  | 0  |   constexpr size_t kNewCapacity = NextCapacity(kOldCapacity);  | 
1386  | 0  |   ctrl_t* old_ctrl = common.control();  | 
1387  | 0  |   void* old_slots = common.slot_array();  | 
1388  |  |  | 
1389  | 0  |   const size_t slot_size = policy.slot_size;  | 
1390  | 0  |   const size_t slot_align = policy.slot_align;  | 
1391  | 0  |   void* alloc = policy.get_char_alloc(common);  | 
1392  | 0  |   HashtablezInfoHandle infoz = common.infoz();  | 
1393  | 0  |   const bool has_infoz = infoz.IsSampled();  | 
1394  | 0  |   common.set_capacity(kNewCapacity);  | 
1395  |  |  | 
1396  | 0  |   const auto [new_ctrl, new_slots] =  | 
1397  | 0  |       AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);  | 
1398  | 0  |   common.set_control(new_ctrl);  | 
1399  | 0  |   common.set_slots(new_slots);  | 
1400  | 0  |   SanitizerPoisonMemoryRegion(new_slots, kNewCapacity * slot_size);  | 
1401  |  |  | 
1402  | 0  |   if (ABSL_PREDICT_TRUE(!has_infoz)) { | 
1403  |  |     // When we're sampled, we already have a seed.  | 
1404  | 0  |     common.generate_new_seed(/*has_infoz=*/false);  | 
1405  | 0  |   }  | 
1406  | 0  |   const size_t new_hash = get_hash(common.seed().seed());  | 
1407  | 0  |   h2_t new_h2 = H2(new_hash);  | 
1408  | 0  |   size_t orig_hash =  | 
1409  | 0  |       policy.hash_slot(policy.hash_fn(common), old_slots, common.seed().seed());  | 
1410  | 0  |   size_t offset = Resize1To3NewOffset(new_hash, common.seed());  | 
1411  | 0  |   InitializeThreeElementsControlBytes(H2(orig_hash), new_h2, offset, new_ctrl);  | 
1412  |  |  | 
1413  | 0  |   void* old_element_target = NextSlot(new_slots, slot_size);  | 
1414  | 0  |   SanitizerUnpoisonMemoryRegion(old_element_target, slot_size);  | 
1415  | 0  |   policy.transfer_n(&common, old_element_target, old_slots, 1);  | 
1416  |  |  | 
1417  | 0  |   void* new_element_target_slot = SlotAddress(new_slots, offset, slot_size);  | 
1418  | 0  |   SanitizerUnpoisonMemoryRegion(new_element_target_slot, slot_size);  | 
1419  |  |  | 
1420  | 0  |   policy.dealloc(alloc, kOldCapacity, old_ctrl, slot_size, slot_align,  | 
1421  | 0  |                  has_infoz);  | 
1422  | 0  |   PrepareInsertCommon(common);  | 
1423  | 0  |   ABSL_SWISSTABLE_ASSERT(common.size() == 2);  | 
1424  | 0  |   GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);  | 
1425  |  |  | 
1426  | 0  |   if (ABSL_PREDICT_FALSE(has_infoz)) { | 
1427  | 0  |     ReportSingleGroupTableGrowthToInfoz(common, infoz, new_hash);  | 
1428  | 0  |   }  | 
1429  | 0  |   return {new_ctrl + offset, new_element_target_slot}; | 
1430  | 0  | }  | 
1431  |  |  | 
1432  |  | // Grows to next capacity and prepares insert for the given new_hash.  | 
1433  |  | // Returns the offset of the new element.  | 
1434  |  | size_t GrowToNextCapacityAndPrepareInsert(  | 
1435  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1436  | 171k  |     size_t new_hash) { | 
1437  | 171k  |   ABSL_SWISSTABLE_ASSERT(common.growth_left() == 0);  | 
1438  | 171k  |   const size_t old_capacity = common.capacity();  | 
1439  | 171k  |   ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());  | 
1440  | 171k  |   ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(old_capacity));  | 
1441  |  |  | 
1442  | 171k  |   const size_t new_capacity = NextCapacity(old_capacity);  | 
1443  | 171k  |   ctrl_t* old_ctrl = common.control();  | 
1444  | 171k  |   void* old_slots = common.slot_array();  | 
1445  |  |  | 
1446  | 171k  |   common.set_capacity(new_capacity);  | 
1447  | 171k  |   const size_t slot_size = policy.slot_size;  | 
1448  | 171k  |   const size_t slot_align = policy.slot_align;  | 
1449  | 171k  |   void* alloc = policy.get_char_alloc(common);  | 
1450  | 171k  |   HashtablezInfoHandle infoz = common.infoz();  | 
1451  | 171k  |   const bool has_infoz = infoz.IsSampled();  | 
1452  |  |  | 
1453  | 171k  |   const auto [new_ctrl, new_slots] =  | 
1454  | 171k  |       AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);  | 
1455  | 171k  |   common.set_control(new_ctrl);  | 
1456  | 171k  |   common.set_slots(new_slots);  | 
1457  | 171k  |   SanitizerPoisonMemoryRegion(new_slots, new_capacity * slot_size);  | 
1458  |  |  | 
1459  | 171k  |   h2_t new_h2 = H2(new_hash);  | 
1460  | 171k  |   size_t total_probe_length = 0;  | 
1461  | 171k  |   FindInfo find_info;  | 
1462  | 171k  |   if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) { | 
1463  | 67.1k  |     size_t offset;  | 
1464  | 67.1k  |     GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl,  | 
1465  | 67.1k  |                                            new_capacity);  | 
1466  |  |     // We put the new element either at the beginning or at the end of the  | 
1467  |  |     // table with approximately equal probability.  | 
1468  | 67.1k  |     offset =  | 
1469  | 67.1k  |         SingleGroupTableH1(new_hash, common.seed()) & 1 ? 0 : new_capacity - 1;  | 
1470  |  |  | 
1471  | 67.1k  |     ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset]));  | 
1472  | 67.1k  |     SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);  | 
1473  | 67.1k  |     find_info = FindInfo{offset, 0}; | 
1474  |  |     // Single group tables have all slots full on resize. So we can transfer  | 
1475  |  |     // all slots without checking the control bytes.  | 
1476  | 67.1k  |     ABSL_SWISSTABLE_ASSERT(common.size() == old_capacity);  | 
1477  | 67.1k  |     void* target = NextSlot(new_slots, slot_size);  | 
1478  | 67.1k  |     SanitizerUnpoisonMemoryRegion(target, old_capacity * slot_size);  | 
1479  | 67.1k  |     policy.transfer_n(&common, target, old_slots, old_capacity);  | 
1480  | 104k  |   } else { | 
1481  | 104k  |     total_probe_length =  | 
1482  | 104k  |         GrowToNextCapacityDispatch(common, policy, old_ctrl, old_slots);  | 
1483  | 104k  |     find_info = find_first_non_full(common, new_hash);  | 
1484  | 104k  |     SetCtrlInLargeTable(common, find_info.offset, new_h2, policy.slot_size);  | 
1485  | 104k  |   }  | 
1486  | 171k  |   ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());  | 
1487  | 171k  |   (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,  | 
1488  | 171k  |                     has_infoz);  | 
1489  | 171k  |   PrepareInsertCommon(common);  | 
1490  | 171k  |   ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,  | 
1491  | 171k  |                   common.size());  | 
1492  |  |  | 
1493  | 171k  |   if (ABSL_PREDICT_FALSE(has_infoz)) { | 
1494  | 0  |     ReportGrowthToInfoz(common, infoz, new_hash, total_probe_length,  | 
1495  | 0  |                         find_info.probe_length);  | 
1496  | 0  |   }  | 
1497  | 171k  |   return find_info.offset;  | 
1498  | 171k  | }  | 
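One detail above worth a tiny illustration is the single-group placement: when the whole new table is one probe group, the incoming element goes to index 0 or to index new_capacity - 1 depending on a single hash bit, so both ends of the group are used with roughly equal probability. A minimal stand-alone version, with SingleGroupTableH1 replaced by a plain hash value for the example:

    #include <cstddef>
    #include <cstdio>

    std::size_t PickSingleGroupOffset(std::size_t h1, std::size_t new_capacity) {
      // The low hash bit decides between the two ends of the group.
      return (h1 & 1) ? 0 : new_capacity - 1;
    }

    int main() {
      std::printf("%zu %zu\n", PickSingleGroupOffset(0x5, 7),
                  PickSingleGroupOffset(0x4, 7));  // prints: 0 6
    }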
1499  |  |  | 
1500  |  | }  // namespace  | 
1501  |  |  | 
1502  |  | std::pair<ctrl_t*, void*> PrepareInsertSmallNonSoo(  | 
1503  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1504  | 0  |     absl::FunctionRef<size_t(size_t)> get_hash) { | 
1505  | 0  |   ABSL_SWISSTABLE_ASSERT(common.is_small());  | 
1506  | 0  |   ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);  | 
1507  | 0  |   if (common.capacity() == 1) { | 
1508  | 0  |     if (common.empty()) { | 
1509  | 0  |       IncrementSmallSizeNonSoo(common, policy);  | 
1510  | 0  |       return {SooControl(), common.slot_array()}; | 
1511  | 0  |     } else { | 
1512  | 0  |       return Grow1To3AndPrepareInsert(common, policy, get_hash);  | 
1513  | 0  |     }  | 
1514  | 0  |   }  | 
1515  |  |  | 
1516  |  |   // Growing from 0 to 1 capacity.  | 
1517  | 0  |   ABSL_SWISSTABLE_ASSERT(common.capacity() == 0);  | 
1518  | 0  |   constexpr size_t kNewCapacity = 1;  | 
1519  |  |  | 
1520  | 0  |   common.set_capacity(kNewCapacity);  | 
1521  | 0  |   HashtablezInfoHandle infoz;  | 
1522  | 0  |   const bool should_sample =  | 
1523  | 0  |       policy.is_hashtablez_eligible && ShouldSampleNextTable();  | 
1524  | 0  |   if (ABSL_PREDICT_FALSE(should_sample)) { | 
1525  | 0  |     infoz = ForcedTrySample(policy.slot_size, policy.key_size,  | 
1526  | 0  |                             policy.value_size, policy.soo_capacity());  | 
1527  | 0  |   }  | 
1528  | 0  |   const bool has_infoz = infoz.IsSampled();  | 
1529  | 0  |   void* alloc = policy.get_char_alloc(common);  | 
1530  |  |  | 
1531  | 0  |   const auto [new_ctrl, new_slots] =  | 
1532  | 0  |       AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);  | 
1533  | 0  |   common.set_control(new_ctrl);  | 
1534  | 0  |   common.set_slots(new_slots);  | 
1535  |  |  | 
1536  | 0  |   static_assert(NextCapacity(0) == 1);  | 
1537  | 0  |   PrepareInsertCommon(common);  | 
1538  |  |  | 
1539  | 0  |   if (ABSL_PREDICT_FALSE(has_infoz)) { | 
1540  | 0  |     common.generate_new_seed(/*has_infoz=*/true);  | 
1541  | 0  |     ReportSingleGroupTableGrowthToInfoz(common, infoz,  | 
1542  | 0  |                                         get_hash(common.seed().seed()));  | 
1543  | 0  |   }  | 
1544  | 0  |   return {SooControl(), new_slots}; | 
1545  | 0  | }  | 
1546  |  |  | 
1547  |  | namespace { | 
1548  |  |  | 
1549  |  | // Called whenever the table needs to make empty slots available, either by  | 
1550  |  | // removing tombstones via rehash or by growing to the next capacity.  | 
1551  |  | ABSL_ATTRIBUTE_NOINLINE  | 
1552  |  | size_t RehashOrGrowToNextCapacityAndPrepareInsert(  | 
1553  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1554  | 0  |     size_t new_hash) { | 
1555  | 0  |   const size_t cap = common.capacity();  | 
1556  | 0  |   ABSL_ASSUME(cap > 0);  | 
1557  | 0  |   if (cap > Group::kWidth &&  | 
1558  |  |       // Do these calculations in 64-bit to avoid overflow.  | 
1559  | 0  |       common.size() * uint64_t{32} <= cap * uint64_t{25}) { | 
1560  |  |     // Squash DELETED without growing if there is enough capacity.  | 
1561  |  |     //  | 
1562  |  |     // Rehash in place if the current size is <= 25/32 of capacity.  | 
1563  |  |     // Rationale for such a high factor: 1) DropDeletesWithoutResize() is  | 
1564  |  |     // faster than resize, and 2) it takes quite a bit of work to add  | 
1565  |  |     // tombstones.  In the worst case, seems to take approximately 4  | 
1566  |  |     // insert/erase pairs to create a single tombstone and so if we are  | 
1567  |  |     // rehashing because of tombstones, we can afford to rehash-in-place as  | 
1568  |  |     // long as we are reclaiming at least 1/8 the capacity without doing more  | 
1569  |  |     // than 2X the work.  (Where "work" is defined to be size() for rehashing  | 
1570  |  |     // or rehashing in place, and 1 for an insert or erase.)  But rehashing in  | 
1571  |  |     // place is faster per operation than inserting or even doubling the size  | 
1572  |  |     // of the table, so we can actually afford to reclaim even less space from a  | 
1573  |  |     // resize-in-place.  The decision is to rehash in place if we can reclaim  | 
1574  |  |     // about 1/8th of the usable capacity (specifically 3/28 of the  | 
1575  |  |     // capacity) which means that the total cost of rehashing will be a small  | 
1576  |  |     // fraction of the total work.  | 
1577  |  |     //  | 
1578  |  |     // Here is output of an experiment using the BM_CacheInSteadyState  | 
1579  |  |     // benchmark running the old case (where we rehash-in-place only if we can  | 
1580  |  |     // reclaim at least 7/16*capacity) vs. this code (which rehashes in place  | 
1581  |  |     // if we can recover 3/32*capacity).  | 
1582  |  |     //  | 
1583  |  |     // Note that although the worst-case number of rehashes jumped up from  | 
1584  |  |     // 15 to 190, the number of operations per second is almost the same.  | 
1585  |  |     //  | 
1586  |  |     // Abridged output of running BM_CacheInSteadyState benchmark from  | 
1587  |  |     // raw_hash_set_benchmark.   N is the number of insert/erase operations.  | 
1588  |  |     //  | 
1589  |  |     //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)  | 
1590  |  |     // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes  | 
1591  |  |     //  448 | 145284       0.44        18 | 140118       0.44        19  | 
1592  |  |     //  493 | 152546       0.24        11 | 151417       0.48        28  | 
1593  |  |     //  538 | 151439       0.26        11 | 151152       0.53        38  | 
1594  |  |     //  583 | 151765       0.28        11 | 150572       0.57        50  | 
1595  |  |     //  628 | 150241       0.31        11 | 150853       0.61        66  | 
1596  |  |     //  672 | 149602       0.33        12 | 150110       0.66        90  | 
1597  |  |     //  717 | 149998       0.35        12 | 149531       0.70       129  | 
1598  |  |     //  762 | 149836       0.37        13 | 148559       0.74       190  | 
1599  |  |     //  807 | 149736       0.39        14 | 151107       0.39        14  | 
1600  |  |     //  852 | 150204       0.42        15 | 151019       0.42        15  | 
1601  | 0  |     return DropDeletesWithoutResizeAndPrepareInsert(common, policy, new_hash);  | 
1602  | 0  |   } else { | 
1603  |  |     // Otherwise grow the container.  | 
1604  | 0  |     return GrowToNextCapacityAndPrepareInsert(common, policy, new_hash);  | 
1605  | 0  |   }  | 
1606  | 0  | }  | 
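The load-factor condition above is plain integer arithmetic done in 64 bits; here is a small sketch of the same check with made-up example numbers (not values taken from the table):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Rehash in place (drop tombstones without growing) only when the table is
    // larger than one group and at most 25/32 full, mirroring the condition above.
    bool ShouldRehashInPlace(std::size_t size, std::size_t cap,
                             std::size_t group_width) {
      return cap > group_width && size * uint64_t{32} <= cap * uint64_t{25};
    }

    int main() {
      // 700 * 32 = 22400 <= 1023 * 25 = 25575 -> rehash in place.
      std::printf("%d\n", ShouldRehashInPlace(700, 1023, 16));  // 1
      // 900 * 32 = 28800 >  25575 -> grow to the next capacity instead.
      std::printf("%d\n", ShouldRehashInPlace(900, 1023, 16));  // 0
    }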
1607  |  |  | 
1608  |  | // Slow path for PrepareInsertLarge that is called when the table has deleted  | 
1609  |  | // slots or needs to be resized or rehashed.  | 
1610  |  | size_t PrepareInsertLargeSlow(CommonFields& common,  | 
1611  |  |                               const PolicyFunctions& __restrict policy,  | 
1612  | 171k  |                               size_t hash) { | 
1613  | 171k  |   const GrowthInfo growth_info = common.growth_info();  | 
1614  | 171k  |   ABSL_SWISSTABLE_ASSERT(!growth_info.HasNoDeletedAndGrowthLeft());  | 
1615  | 171k  |   if (ABSL_PREDICT_TRUE(growth_info.HasNoGrowthLeftAndNoDeleted())) { | 
1616  |  |     // Table without deleted slots (>95% cases) that needs to be resized.  | 
1617  | 171k  |     ABSL_SWISSTABLE_ASSERT(growth_info.HasNoDeleted() &&  | 
1618  | 171k  |                            growth_info.GetGrowthLeft() == 0);  | 
1619  | 171k  |     return GrowToNextCapacityAndPrepareInsert(common, policy, hash);  | 
1620  | 171k  |   }  | 
1621  | 0  |   if (ABSL_PREDICT_FALSE(growth_info.HasNoGrowthLeftAssumingMayHaveDeleted())) { | 
1622  |  |     // Table with deleted slots that needs to be rehashed or resized.  | 
1623  | 0  |     return RehashOrGrowToNextCapacityAndPrepareInsert(common, policy, hash);  | 
1624  | 0  |   }  | 
1625  |  |   // Table with deleted slots that has space for the element being inserted.  | 
1626  | 0  |   FindInfo target = find_first_non_full(common, hash);  | 
1627  | 0  |   PrepareInsertCommon(common);  | 
1628  | 0  |   common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);  | 
1629  | 0  |   SetCtrlInLargeTable(common, target.offset, H2(hash), policy.slot_size);  | 
1630  | 0  |   common.infoz().RecordInsert(hash, target.probe_length);  | 
1631  | 0  |   return target.offset;  | 
1632  | 0  | }  | 
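A minimal sketch of the three-way dispatch in PrepareInsertLargeSlow, with hypothetical types (GrowthState, ChooseSlowPath) standing in for GrowthInfo, which packs the same information into a single word in the real code:

    #include <cstddef>

    struct GrowthState {
      std::size_t growth_left;  // empty slots that may still become full
      bool has_deleted;         // whether any kDeleted control bytes exist
    };

    enum class SlowPathAction { kGrowToNextCapacity, kRehashOrGrow, kReuseDeletedSlot };

    constexpr SlowPathAction ChooseSlowPath(GrowthState g) {
      if (!g.has_deleted && g.growth_left == 0) return SlowPathAction::kGrowToNextCapacity;
      if (g.growth_left == 0) return SlowPathAction::kRehashOrGrow;
      return SlowPathAction::kReuseDeletedSlot;  // deleted slots, but room left
    }

    static_assert(ChooseSlowPath({0, false}) == SlowPathAction::kGrowToNextCapacity);
    static_assert(ChooseSlowPath({0, true}) == SlowPathAction::kRehashOrGrow);
    static_assert(ChooseSlowPath({3, true}) == SlowPathAction::kReuseDeletedSlot);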
1633  |  |  | 
1634  |  | // Resizes empty non-allocated SOO table to NextCapacity(SooCapacity()),  | 
1635  |  | // forces the table to be sampled and prepares the insert.  | 
1636  |  | // SOO tables need to switch from SOO to heap in order to store the infoz.  | 
1637  |  | // Requires:  | 
1638  |  | //   1. `c.capacity() == SooCapacity()`.  | 
1639  |  | //   2. `c.empty()`.  | 
1640  |  | ABSL_ATTRIBUTE_NOINLINE size_t  | 
1641  |  | GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(  | 
1642  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1643  | 0  |     absl::FunctionRef<size_t(size_t)> get_hash) { | 
1644  | 0  |   ResizeEmptyNonAllocatedTableImpl(common, policy, NextCapacity(SooCapacity()),  | 
1645  | 0  |                                    /*force_infoz=*/true);  | 
1646  | 0  |   PrepareInsertCommon(common);  | 
1647  | 0  |   common.growth_info().OverwriteEmptyAsFull();  | 
1648  | 0  |   const size_t new_hash = get_hash(common.seed().seed());  | 
1649  | 0  |   SetCtrlInSingleGroupTable(common, SooSlotIndex(), H2(new_hash),  | 
1650  | 0  |                             policy.slot_size);  | 
1651  | 0  |   common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0);  | 
1652  | 0  |   return SooSlotIndex();  | 
1653  | 0  | }  | 
1654  |  |  | 
1655  |  | // Resizes an empty non-allocated table to a capacity that fits new_size elements.  | 
1656  |  | // Requires:  | 
1657  |  | //   1. `c.capacity() == policy.soo_capacity()`.  | 
1658  |  | //   2. `c.empty()`.  | 
1659  |  | //   3. `new_size > policy.soo_capacity()`.  | 
1660  |  | // An attempt will be made to sample the table.  | 
1661  |  | void ReserveEmptyNonAllocatedTableToFitNewSize(  | 
1662  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1663  | 0  |     size_t new_size) { | 
1664  | 0  |   ValidateMaxSize(new_size, policy.slot_size);  | 
1665  | 0  |   ABSL_ASSUME(new_size > 0);  | 
1666  | 0  |   ResizeEmptyNonAllocatedTableImpl(common, policy, SizeToCapacity(new_size),  | 
1667  | 0  |                                    /*force_infoz=*/false);  | 
1668  |  |   // This is after resize, to ensure that we have completed the allocation  | 
1669  |  |   // and have potentially sampled the hashtable.  | 
1670  | 0  |   common.infoz().RecordReservation(new_size);  | 
1671  | 0  | }  | 
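For orientation, here is a rough, self-contained approximation of the size-to-capacity mapping that the reserve path above relies on. Swisstable capacities have the form 2^k - 1, and large tables keep at most about 7/8 of the capacity usable; the real SizeToCapacity/CapacityToGrowth have additional small-table rules, so treat this only as a sketch.

    #include <cstddef>

    constexpr std::size_t NormalizeCapacityApprox(std::size_t n) {
      std::size_t cap = 1;
      while (cap - 1 < n) cap <<= 1;
      return cap - 1;  // smallest 2^k - 1 that is >= n
    }

    constexpr std::size_t GrowthApprox(std::size_t cap) {
      return cap - cap / 8;  // ~7/8 max load factor for large tables
    }

    constexpr std::size_t SizeToCapacityApprox(std::size_t size) {
      std::size_t cap = NormalizeCapacityApprox(size);
      while (GrowthApprox(cap) < size) cap = cap * 2 + 1;  // next 2^k - 1
      return cap;
    }

    static_assert(SizeToCapacityApprox(100) == 127);  // growth(127) == 112 >= 100
    static_assert(SizeToCapacityApprox(120) == 255);  // growth(127) == 112 < 120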
1672  |  |  | 
1673  |  | // Type erased version of raw_hash_set::reserve for tables that have an  | 
1674  |  | // allocated backing array.  | 
1675  |  | //  | 
1676  |  | // Requires:  | 
1677  |  | //   1. `c.capacity() > policy.soo_capacity()` OR `!c.empty()`.  | 
1678  |  | // Reserving already allocated tables is considered to be a rare case.  | 
1679  |  | ABSL_ATTRIBUTE_NOINLINE void ReserveAllocatedTable(  | 
1680  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1681  | 0  |     size_t new_size) { | 
1682  | 0  |   const size_t cap = common.capacity();  | 
1683  | 0  |   ValidateMaxSize(new_size, policy.slot_size);  | 
1684  | 0  |   ABSL_ASSUME(new_size > 0);  | 
1685  | 0  |   const size_t new_capacity = SizeToCapacity(new_size);  | 
1686  | 0  |   if (cap == policy.soo_capacity()) { | 
1687  | 0  |     ABSL_SWISSTABLE_ASSERT(!common.empty());  | 
1688  | 0  |     ResizeFullSooTable(common, policy, new_capacity,  | 
1689  | 0  |                        ResizeFullSooTableSamplingMode::kNoSampling);  | 
1690  | 0  |   } else { | 
1691  | 0  |     ABSL_SWISSTABLE_ASSERT(cap > policy.soo_capacity());  | 
1692  |  |     // TODO(b/382423690): consider using GrowToNextCapacity, when applicable.  | 
1693  | 0  |     ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);  | 
1694  | 0  |   }  | 
1695  | 0  |   common.infoz().RecordReservation(new_size);  | 
1696  | 0  | }  | 
1697  |  |  | 
1698  |  | // As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO  | 
1699  |  | // table to be sampled. SOO tables need to switch from SOO to heap in order to  | 
1700  |  | // store the infoz. No-op if sampling is disabled or not possible.  | 
1701  |  | void GrowFullSooTableToNextCapacityForceSampling(  | 
1702  | 0  |     CommonFields& common, const PolicyFunctions& __restrict policy) { | 
1703  | 0  |   AssertFullSoo(common, policy);  | 
1704  | 0  |   ResizeFullSooTable(  | 
1705  | 0  |       common, policy, NextCapacity(SooCapacity()),  | 
1706  | 0  |       ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled);  | 
1707  | 0  | }  | 
1708  |  |  | 
1709  |  | }  // namespace  | 
1710  |  |  | 
1711  | 285k  | void* GetRefForEmptyClass(CommonFields& common) { | 
1712  |  |   // The empty base optimization typically makes the address of an empty base  | 
1713  |  |   // class the same as the first address of the derived class object.  | 
1714  |  |   // But we generally assume that for empty classes we can return any valid  | 
1715  |  |   // pointer.  | 
1716  | 285k  |   return &common;  | 
1717  | 285k  | }  | 
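The assumption behind GetRefForEmptyClass can be demonstrated with a tiny standalone example. Illustrative only; EmptyHasher and Fields are hypothetical names, not types from this file.

    #include <type_traits>

    struct EmptyHasher {};  // stateless functor, like a typical hasher or key_equal

    struct Fields : EmptyHasher {  // empty base optimization applies
      int size = 0;
    };

    static_assert(std::is_empty_v<EmptyHasher>);
    // On mainstream ABIs the empty base adds no storage, so any valid address
    // (such as the address of the containing object) can serve as a reference
    // to the empty class.
    static_assert(sizeof(Fields) == sizeof(int));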
1718  |  |  | 
1719  |  | void ResizeAllocatedTableWithSeedChange(  | 
1720  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1721  | 0  |     size_t new_capacity) { | 
1722  | 0  |   ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedAllocated>(  | 
1723  | 0  |       common, policy, new_capacity, common.infoz());  | 
1724  | 0  | }  | 
1725  |  |  | 
1726  |  | void ReserveEmptyNonAllocatedTableToFitBucketCount(  | 
1727  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1728  | 0  |     size_t bucket_count) { | 
1729  | 0  |   size_t new_capacity = NormalizeCapacity(bucket_count);  | 
1730  | 0  |   ValidateMaxSize(CapacityToGrowth(new_capacity), policy.slot_size);  | 
1731  | 0  |   ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,  | 
1732  | 0  |                                    /*force_infoz=*/false);  | 
1733  | 0  | }  | 
1734  |  |  | 
1735  |  | // Resizes a full SOO table to NextCapacity(SooCapacity()).  | 
1736  |  | template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>  | 
1737  |  | size_t GrowSooTableToNextCapacityAndPrepareInsert(  | 
1738  |  |     CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1739  | 56.7k  |     absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling) { | 
1740  | 56.7k  |   AssertSoo(common, policy);  | 
1741  | 56.7k  |   if (ABSL_PREDICT_FALSE(force_sampling)) { | 
1742  |  |     // The table is empty; this path is only used for forced sampling of SOO tables.  | 
1743  | 0  |     return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(  | 
1744  | 0  |         common, policy, get_hash);  | 
1745  | 0  |   }  | 
1746  | 56.7k  |   ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());  | 
1747  | 56.7k  |   static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());  | 
1748  | 56.7k  |   const size_t slot_size = policy.slot_size;  | 
1749  | 56.7k  |   void* alloc = policy.get_char_alloc(common);  | 
1750  | 56.7k  |   common.set_capacity(kNewCapacity);  | 
1751  |  |  | 
1752  |  |   // Since the table is not empty, it will not be sampled.  | 
1753  |  |   // The decision to sample was already made during the first insertion.  | 
1754  |  |   //  | 
1755  |  |   // We do not set control and slots in CommonFields yet to avoid overriding  | 
1756  |  |   // SOO data.  | 
1757  | 56.7k  |   const auto [new_ctrl, new_slots] = AllocBackingArray(  | 
1758  | 56.7k  |       common, policy, kNewCapacity, /*has_infoz=*/false, alloc);  | 
1759  |  |  | 
1760  | 56.7k  |   PrepareInsertCommon(common);  | 
1761  | 56.7k  |   ABSL_SWISSTABLE_ASSERT(common.size() == 2);  | 
1762  | 56.7k  |   GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);  | 
1763  | 56.7k  |   common.generate_new_seed(/*has_infoz=*/false);  | 
1764  | 56.7k  |   const h2_t soo_slot_h2 = H2(policy.hash_slot(  | 
1765  | 56.7k  |       policy.hash_fn(common), common.soo_data(), common.seed().seed()));  | 
1766  | 56.7k  |   const size_t new_hash = get_hash(common.seed().seed());  | 
1767  |  |  | 
1768  | 56.7k  |   const size_t offset = Resize1To3NewOffset(new_hash, common.seed());  | 
1769  | 56.7k  |   InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset,  | 
1770  | 56.7k  |                                       new_ctrl);  | 
1771  |  |  | 
1772  | 56.7k  |   SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);  | 
1773  | 56.7k  |   void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);  | 
1774  | 56.7k  |   SanitizerUnpoisonMemoryRegion(target_slot, slot_size);  | 
1775  | 56.7k  |   if constexpr (TransferUsesMemcpy) { | 
1776  |  |     // The target slot is placed at index 1, and the capacity is at  | 
1777  |  |     // least 3, so we are allowed to copy up to twice the slot size  | 
1778  |  |     // without leaving the slot array.  | 
1779  | 56.7k  |     static_assert(SooSlotIndex() == 1);  | 
1780  | 56.7k  |     static_assert(SooSlotMemcpySize > 0);  | 
1781  | 56.7k  |     static_assert(SooSlotMemcpySize <= MaxSooSlotSize());  | 
1782  | 56.7k  |     ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);  | 
1783  | 56.7k  |     ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);  | 
1784  | 56.7k  |     void* next_slot = SlotAddress(target_slot, 1, slot_size);  | 
1785  | 56.7k  |     SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);  | 
1786  | 56.7k  |     std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);  | 
1787  | 56.7k  |     SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);  | 
1788  | 56.7k  |   } else { | 
1789  | 0  |     static_assert(SooSlotMemcpySize == 0);  | 
1790  | 0  |     policy.transfer_n(&common, target_slot, common.soo_data(), 1);  | 
1791  | 0  |   }  | 
1792  | 0  |   common.set_control(new_ctrl);  | 
1793  | 56.7k  |   common.set_slots(new_slots);  | 
1794  |  |  | 
1795  |  |   // Full SOO table couldn't be sampled. If SOO table is sampled, it would  | 
1796  |  |   // have been resized to the next capacity.  | 
1797  | 56.7k  |   ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());  | 
1798  | 56.7k  |   SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),  | 
1799  | 56.7k  |                                 slot_size);  | 
1800  | 56.7k  |   return offset;  | 
1801  | 56.7k  | }  | 
(Per-template-instantiation coverage for GrowSooTableToNextCapacityAndPrepareInsert: <0ul, false>, <1ul, true>, and <4ul, true> unexecuted; <8ul, true> executed 25.9k times; <16ul, true> executed 30.8k times.)
1802  |  |  | 
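The "copy a bit more than one slot" trick asserted in the function above can be captured in a small sketch. TransferSooSlotApprox is a hypothetical helper, not this file's API: because the SOO element lands in slot index 1 of a table whose capacity is at least 3, a memcpy of up to two slot sizes stays inside the freshly allocated slot array.

    #include <cstddef>
    #include <cstring>

    // Illustrative only. Preconditions mirror the asserts above:
    //   slot_size <= memcpy_size <= 2 * slot_size, `slots` points at an array
    //   of at least 3 slots (so slot index 2 absorbs any overshoot), and
    //   `soo_data` is readable for memcpy_size bytes (the SOO buffer is sized
    //   for the rounded-up transfer).
    inline void TransferSooSlotApprox(void* slots, const void* soo_data,
                                      std::size_t slot_size,
                                      std::size_t memcpy_size) {
      unsigned char* target = static_cast<unsigned char*>(slots) + slot_size;  // slot index 1
      std::memcpy(target, soo_data, memcpy_size);
    }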
1803  |  | void Rehash(CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1804  | 0  |             size_t n) { | 
1805  | 0  |   const size_t cap = common.capacity();  | 
1806  |  |  | 
1807  | 0  |   auto clear_backing_array = [&]() { | 
1808  | 0  |     ClearBackingArray(common, policy, policy.get_char_alloc(common),  | 
1809  | 0  |                       /*reuse=*/false, policy.soo_enabled);  | 
1810  | 0  |   };  | 
1811  |  |  | 
1812  | 0  |   const size_t slot_size = policy.slot_size;  | 
1813  |  |  | 
1814  | 0  |   if (n == 0) { | 
1815  | 0  |     if (cap <= policy.soo_capacity()) return;  | 
1816  | 0  |     if (common.empty()) { | 
1817  | 0  |       clear_backing_array();  | 
1818  | 0  |       return;  | 
1819  | 0  |     }  | 
1820  | 0  |     if (common.size() <= policy.soo_capacity()) { | 
1821  |  |       // When the table is already sampled, we keep it sampled.  | 
1822  | 0  |       if (common.infoz().IsSampled()) { | 
1823  | 0  |         static constexpr size_t kInitialSampledCapacity =  | 
1824  | 0  |             NextCapacity(SooCapacity());  | 
1825  | 0  |         if (cap > kInitialSampledCapacity) { | 
1826  | 0  |           ResizeAllocatedTableWithSeedChange(common, policy,  | 
1827  | 0  |                                              kInitialSampledCapacity);  | 
1828  | 0  |         }  | 
1829  |  |         // This asserts that we didn't lose sampling coverage in `resize`.  | 
1830  | 0  |         ABSL_SWISSTABLE_ASSERT(common.infoz().IsSampled());  | 
1831  | 0  |         return;  | 
1832  | 0  |       }  | 
1833  | 0  |       ABSL_SWISSTABLE_ASSERT(slot_size <= sizeof(HeapOrSoo));  | 
1834  | 0  |       ABSL_SWISSTABLE_ASSERT(policy.slot_align <= alignof(HeapOrSoo));  | 
1835  | 0  |       HeapOrSoo tmp_slot;  | 
1836  | 0  |       size_t begin_offset = FindFirstFullSlot(0, cap, common.control());  | 
1837  | 0  |       policy.transfer_n(  | 
1838  | 0  |           &common, &tmp_slot,  | 
1839  | 0  |           SlotAddress(common.slot_array(), begin_offset, slot_size), 1);  | 
1840  | 0  |       clear_backing_array();  | 
1841  | 0  |       policy.transfer_n(&common, common.soo_data(), &tmp_slot, 1);  | 
1842  | 0  |       common.set_full_soo();  | 
1843  | 0  |       return;  | 
1844  | 0  |     }  | 
1845  | 0  |   }  | 
1846  |  |  | 
1847  | 0  |   ValidateMaxSize(n, policy.slot_size);  | 
1848  |  |   // bitor is a faster way of doing `max` here. We will round up to the next  | 
1849  |  |   // power-of-2-minus-1, so bitor is good enough.  | 
1850  | 0  |   const size_t new_capacity =  | 
1851  | 0  |       NormalizeCapacity(n | SizeToCapacity(common.size()));  | 
1852  |  |   // n == 0 unconditionally rehashes as per the standard.  | 
1853  | 0  |   if (n == 0 || new_capacity > cap) { | 
1854  | 0  |     if (cap == policy.soo_capacity()) { | 
1855  | 0  |       if (common.empty()) { | 
1856  | 0  |         ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,  | 
1857  | 0  |                                          /*force_infoz=*/false);  | 
1858  | 0  |       } else { | 
1859  | 0  |         ResizeFullSooTable(common, policy, new_capacity,  | 
1860  | 0  |                            ResizeFullSooTableSamplingMode::kNoSampling);  | 
1861  | 0  |       }  | 
1862  | 0  |     } else { | 
1863  | 0  |       ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);  | 
1864  | 0  |     }  | 
1865  |  |     // This is after resize, to ensure that we have completed the allocation  | 
1866  |  |     // and have potentially sampled the hashtable.  | 
1867  | 0  |     common.infoz().RecordReservation(n);  | 
1868  | 0  |   }  | 
1869  | 0  | }  | 
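For reference, the user-visible behavior implemented by Rehash above, shown with absl::flat_hash_set. This is a sketch of the behavior described by the comments, not a test from this repository.

    #include <string>
    #include "absl/container/flat_hash_set.h"

    void RehashExample() {
      absl::flat_hash_set<int> s;
      for (int i = 0; i < 100; ++i) s.insert(i);
      for (int i = 1; i < 100; ++i) s.erase(i);  // leaves a single element
      // n == 0 rehashes unconditionally and shrinks as much as possible; per
      // the code above, a single remaining element may move back into SOO
      // storage (unless the table is being sampled).
      s.rehash(0);
      // A nonzero argument requests a backing array of at least that many
      // slots, rounded up to the next 2^k - 1 capacity.
      s.rehash(500);
    }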
1870  |  |  | 
1871  |  | void Copy(CommonFields& common, const PolicyFunctions& __restrict policy,  | 
1872  |  |           const CommonFields& other,  | 
1873  | 0  |           absl::FunctionRef<void(void*, const void*)> copy_fn) { | 
1874  | 0  |   const size_t size = other.size();  | 
1875  | 0  |   ABSL_SWISSTABLE_ASSERT(size > 0);  | 
1876  | 0  |   const size_t soo_capacity = policy.soo_capacity();  | 
1877  | 0  |   const size_t slot_size = policy.slot_size;  | 
1878  | 0  |   const bool soo_enabled = policy.soo_enabled;  | 
1879  | 0  |   if (size == 1) { | 
1880  | 0  |     if (!soo_enabled) ReserveTableToFitNewSize(common, policy, 1);  | 
1881  | 0  |     IncrementSmallSize(common, policy);  | 
1882  | 0  |     const size_t other_capacity = other.capacity();  | 
1883  | 0  |     const void* other_slot =  | 
1884  | 0  |         other_capacity <= soo_capacity ? other.soo_data()  | 
1885  | 0  |         : other.is_small()  | 
1886  | 0  |             ? other.slot_array()  | 
1887  | 0  |             : SlotAddress(other.slot_array(),  | 
1888  | 0  |                           FindFirstFullSlot(0, other_capacity, other.control()),  | 
1889  | 0  |                           slot_size);  | 
1890  | 0  |     copy_fn(soo_enabled ? common.soo_data() : common.slot_array(), other_slot);  | 
1891  |  |  | 
1892  | 0  |     if (soo_enabled && policy.is_hashtablez_eligible &&  | 
1893  | 0  |         ShouldSampleNextTable()) { | 
1894  | 0  |       GrowFullSooTableToNextCapacityForceSampling(common, policy);  | 
1895  | 0  |     }  | 
1896  | 0  |     return;  | 
1897  | 0  |   }  | 
1898  |  |  | 
1899  | 0  |   ReserveTableToFitNewSize(common, policy, size);  | 
1900  | 0  |   auto infoz = common.infoz();  | 
1901  | 0  |   ABSL_SWISSTABLE_ASSERT(other.capacity() > soo_capacity);  | 
1902  | 0  |   const size_t cap = common.capacity();  | 
1903  | 0  |   ABSL_SWISSTABLE_ASSERT(cap > soo_capacity);  | 
1904  | 0  |   size_t offset = cap;  | 
1905  | 0  |   const void* hash_fn = policy.hash_fn(common);  | 
1906  | 0  |   auto hasher = policy.hash_slot;  | 
1907  | 0  |   const size_t seed = common.seed().seed();  | 
1908  | 0  |   IterateOverFullSlotsImpl(  | 
1909  | 0  |       other, slot_size, [&](const ctrl_t*, void* that_slot) { | 
1910  |  |         // The table is guaranteed to be empty, so we can do this faster  | 
1911  |  |         // than a full `insert`.  | 
1912  | 0  |         const size_t hash = (*hasher)(hash_fn, that_slot, seed);  | 
1913  | 0  |         FindInfo target = find_first_non_full(common, hash);  | 
1914  | 0  |         infoz.RecordInsert(hash, target.probe_length);  | 
1915  | 0  |         offset = target.offset;  | 
1916  | 0  |         SetCtrl(common, offset, H2(hash), slot_size);  | 
1917  | 0  |         copy_fn(SlotAddress(common.slot_array(), offset, slot_size), that_slot);  | 
1918  | 0  |         common.maybe_increment_generation_on_insert();  | 
1919  | 0  |       });  | 
1920  | 0  |   common.increment_size(size);  | 
1921  | 0  |   common.growth_info().OverwriteManyEmptyAsFull(size);  | 
1922  | 0  | }  | 
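A usage-level sketch, assuming (as the code above suggests) that container copy construction funnels into this type-erased Copy: the table reserves capacity for the source size up front, so every element is placed into a table known to have room and no rehashing happens mid-copy.

    #include <string>
    #include "absl/container/flat_hash_map.h"

    void CopyExample() {
      absl::flat_hash_map<int, std::string> a = {{1, "one"}, {2, "two"}};
      // Copies element by element into a freshly reserved table; growth checks
      // are skipped inside the loop because capacity was established first.
      absl::flat_hash_map<int, std::string> b = a;
      (void)b;
    }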
1923  |  |  | 
1924  |  | void ReserveTableToFitNewSize(CommonFields& common,  | 
1925  |  |                               const PolicyFunctions& __restrict policy,  | 
1926  | 0  |                               size_t new_size) { | 
1927  | 0  |   common.reset_reserved_growth(new_size);  | 
1928  | 0  |   common.set_reservation_size(new_size);  | 
1929  | 0  |   ABSL_SWISSTABLE_ASSERT(new_size > policy.soo_capacity());  | 
1930  | 0  |   const size_t cap = common.capacity();  | 
1931  | 0  |   if (ABSL_PREDICT_TRUE(common.empty() && cap <= policy.soo_capacity())) { | 
1932  | 0  |     return ReserveEmptyNonAllocatedTableToFitNewSize(common, policy, new_size);  | 
1933  | 0  |   }  | 
1934  |  |  | 
1935  | 0  |   ABSL_SWISSTABLE_ASSERT(!common.empty() || cap > policy.soo_capacity());  | 
1936  | 0  |   ABSL_SWISSTABLE_ASSERT(cap > 0);  | 
1937  | 0  |   const size_t max_size_before_growth =  | 
1938  | 0  |       IsSmallCapacity(cap) ? cap : common.size() + common.growth_left();  | 
1939  | 0  |   if (new_size <= max_size_before_growth) { | 
1940  | 0  |     return;  | 
1941  | 0  |   }  | 
1942  | 0  |   ReserveAllocatedTable(common, policy, new_size);  | 
1943  | 0  | }  | 
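The fast path in ReserveTableToFitNewSize boils down to a simple budget check, sketched here with hypothetical names:

    #include <cstddef>

    // reserve() on an already-allocated table is a no-op when the current
    // growth budget already covers the requested size.
    constexpr bool ReserveIsNoOp(std::size_t size, std::size_t growth_left,
                                 std::size_t new_size) {
      return new_size <= size + growth_left;
    }

    static_assert(ReserveIsNoOp(/*size=*/10, /*growth_left=*/5, /*new_size=*/12));
    static_assert(!ReserveIsNoOp(/*size=*/10, /*growth_left=*/5, /*new_size=*/20));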
1944  |  |  | 
1945  |  | namespace { | 
1946  |  | size_t PrepareInsertLargeImpl(CommonFields& common,  | 
1947  |  |                               const PolicyFunctions& __restrict policy,  | 
1948  |  |                               size_t hash,  | 
1949  |  |                               Group::NonIterableBitMaskType mask_empty,  | 
1950  | 9.44M  |                               FindInfo target_group) { | 
1951  | 9.44M  |   ABSL_SWISSTABLE_ASSERT(!common.is_small());  | 
1952  | 9.44M  |   const GrowthInfo growth_info = common.growth_info();  | 
1953  |  |   // When there are no deleted slots in the table  | 
1954  |  |   // and growth_left is positive, we can insert at the first  | 
1955  |  |   // empty slot in the probe sequence (target).  | 
1956  | 9.44M  |   if (ABSL_PREDICT_FALSE(!growth_info.HasNoDeletedAndGrowthLeft())) { | 
1957  | 171k  |     return PrepareInsertLargeSlow(common, policy, hash);  | 
1958  | 171k  |   }  | 
1959  | 9.27M  |   PrepareInsertCommon(common);  | 
1960  | 9.27M  |   common.growth_info().OverwriteEmptyAsFull();  | 
1961  | 9.27M  |   target_group.offset += mask_empty.LowestBitSet();  | 
1962  | 9.27M  |   target_group.offset &= common.capacity();  | 
1963  | 9.27M  |   SetCtrl(common, target_group.offset, H2(hash), policy.slot_size);  | 
1964  | 9.27M  |   common.infoz().RecordInsert(hash, target_group.probe_length);  | 
1965  | 9.27M  |   return target_group.offset;  | 
1966  | 9.44M  | }  | 
1967  |  | }  // namespace  | 
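The wrap-around in PrepareInsertLargeImpl relies on capacities of the form 2^k - 1, so masking the offset with the capacity is the same as reducing it modulo the number of control positions. A small worked example:

    #include <cstddef>

    // A group loaded near the end of the control array sees cloned control
    // bytes; masking with the capacity maps the chosen position back to the
    // real slot index at the front of the table.
    static_assert([] {
      constexpr std::size_t capacity = 15;       // 2^4 - 1
      constexpr std::size_t group_offset = 12;   // where the probe landed
      constexpr std::size_t lowest_empty = 6;    // mask_empty.LowestBitSet()
      constexpr std::size_t index = (group_offset + lowest_empty) & capacity;
      return index == 2 &&
             index == (group_offset + lowest_empty) % (capacity + 1);
    }());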
1968  |  |  | 
1969  |  | size_t PrepareInsertLarge(CommonFields& common,  | 
1970  |  |                           const PolicyFunctions& __restrict policy, size_t hash,  | 
1971  |  |                           Group::NonIterableBitMaskType mask_empty,  | 
1972  | 9.44M  |                           FindInfo target_group) { | 
1973  |  |   // NOLINTNEXTLINE(misc-static-assert)  | 
1974  | 9.44M  |   ABSL_SWISSTABLE_ASSERT(!SwisstableGenerationsEnabled());  | 
1975  | 9.44M  |   return PrepareInsertLargeImpl(common, policy, hash, mask_empty, target_group);  | 
1976  | 9.44M  | }  | 
1977  |  |  | 
1978  |  | size_t PrepareInsertLargeGenerationsEnabled(  | 
1979  |  |     CommonFields& common, const PolicyFunctions& policy, size_t hash,  | 
1980  |  |     Group::NonIterableBitMaskType mask_empty, FindInfo target_group,  | 
1981  | 0  |     absl::FunctionRef<size_t(size_t)> recompute_hash) { | 
1982  |  |   // NOLINTNEXTLINE(misc-static-assert)  | 
1983  | 0  |   ABSL_SWISSTABLE_ASSERT(SwisstableGenerationsEnabled());  | 
1984  | 0  |   if (common.should_rehash_for_bug_detection_on_insert()) { | 
1985  |  |     // Move to a different heap allocation in order to detect bugs.  | 
1986  | 0  |     const size_t cap = common.capacity();  | 
1987  | 0  |     ResizeAllocatedTableWithSeedChange(  | 
1988  | 0  |         common, policy, common.growth_left() > 0 ? cap : NextCapacity(cap));  | 
1989  | 0  |     hash = recompute_hash(common.seed().seed());  | 
1990  | 0  |     std::tie(target_group, mask_empty) =  | 
1991  | 0  |         find_first_non_full_group(common, hash);  | 
1992  | 0  |   }  | 
1993  | 0  |   return PrepareInsertLargeImpl(common, policy, hash, mask_empty, target_group);  | 
1994  | 0  | }  | 
1995  |  |  | 
1996  |  | namespace { | 
1997  |  | // Returns true if both of the following hold:  | 
1998  |  | // 1. OptimalMemcpySizeForSooSlotTransfer(left) >  | 
1999  |  | //    OptimalMemcpySizeForSooSlotTransfer(left - 1)  | 
2000  |  | // 2. OptimalMemcpySizeForSooSlotTransfer(i) is the same for all i in [left,  | 
2001  |  | //    right].  | 
2002  |  | // This function is used to verify that we have all the possible template  | 
2003  |  | // instantiations for GrowFullSooTableToNextCapacity.  | 
2004  |  | // With this verification, a missing instantiation is detected at compile time  | 
2005  |  | // instead of at link time.  | 
2006  |  | constexpr bool VerifyOptimalMemcpySizeForSooSlotTransferRange(size_t left,  | 
2007  | 0  |                                                               size_t right) { | 
2008  | 0  |   size_t optimal_size_for_range = OptimalMemcpySizeForSooSlotTransfer(left);  | 
2009  | 0  |   if (optimal_size_for_range <= OptimalMemcpySizeForSooSlotTransfer(left - 1)) { | 
2010  | 0  |     return false;  | 
2011  | 0  |   }  | 
2012  | 0  |   for (size_t i = left + 1; i <= right; ++i) { | 
2013  | 0  |     if (OptimalMemcpySizeForSooSlotTransfer(i) != optimal_size_for_range) { | 
2014  | 0  |       return false;  | 
2015  | 0  |     }  | 
2016  | 0  |   }  | 
2017  | 0  |   return true;  | 
2018  | 0  | }  | 
2019  |  | }  // namespace  | 
2020  |  |  | 
2021  |  | // Extern template instantiation for inline function.  | 
2022  |  | template size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,  | 
2023  |  |                                               size_t old_capacity,  | 
2024  |  |                                               ctrl_t* new_ctrl,  | 
2025  |  |                                               size_t new_capacity);  | 
2026  |  |  | 
2027  |  | // We need to instantiate ALL possible template combinations because we define  | 
2028  |  | // the function in the cc file.  | 
2029  |  | template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(  | 
2030  |  |     CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,  | 
2031  |  |     bool);  | 
2032  |  | template size_t GrowSooTableToNextCapacityAndPrepareInsert<  | 
2033  |  |     OptimalMemcpySizeForSooSlotTransfer(1), true>(  | 
2034  |  |     CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,  | 
2035  |  |     bool);  | 
2036  |  |  | 
2037  |  | static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(2, 3));  | 
2038  |  | template size_t GrowSooTableToNextCapacityAndPrepareInsert<  | 
2039  |  |     OptimalMemcpySizeForSooSlotTransfer(3), true>(  | 
2040  |  |     CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,  | 
2041  |  |     bool);  | 
2042  |  |  | 
2043  |  | static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(4, 8));  | 
2044  |  | template size_t GrowSooTableToNextCapacityAndPrepareInsert<  | 
2045  |  |     OptimalMemcpySizeForSooSlotTransfer(8), true>(  | 
2046  |  |     CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,  | 
2047  |  |     bool);  | 
2048  |  |  | 
2049  |  | #if UINTPTR_MAX == UINT32_MAX  | 
2050  |  | static_assert(MaxSooSlotSize() == 8);  | 
2051  |  | #else  | 
2052  |  | static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(9, 16));  | 
2053  |  | template size_t GrowSooTableToNextCapacityAndPrepareInsert<  | 
2054  |  |     OptimalMemcpySizeForSooSlotTransfer(16), true>(  | 
2055  |  |     CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,  | 
2056  |  |     bool);  | 
2057  |  | static_assert(MaxSooSlotSize() == 16);  | 
2058  |  | #endif  | 
2059  |  |  | 
2060  |  | }  // namespace container_internal  | 
2061  |  | ABSL_NAMESPACE_END  | 
2062  |  | }  // namespace absl  |