/src/abseil-cpp/absl/container/internal/raw_hash_set.h
Line | Count | Source
1 | | // Copyright 2018 The Abseil Authors. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | // |
15 | | // An open-addressing |
16 | | // hashtable with quadratic probing. |
17 | | // |
18 | | // This is a low level hashtable on top of which different interfaces can be |
19 | | // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. |
20 | | // |
21 | | // The table interface is similar to that of std::unordered_set. Notable |
22 | | // differences are that most member functions support heterogeneous keys when |
23 | | // BOTH the hash and eq functions are marked as transparent. They do so by |
24 | | // providing a typedef called `is_transparent`. |
25 | | // |
26 | | // When heterogeneous lookup is enabled, functions that take key_type act as if |
27 | | // they have an overload set like: |
28 | | // |
29 | | // iterator find(const key_type& key); |
30 | | // template <class K> |
31 | | // iterator find(const K& key); |
32 | | // |
33 | | // size_type erase(const key_type& key); |
34 | | // template <class K> |
35 | | // size_type erase(const K& key); |
36 | | // |
37 | | // std::pair<iterator, iterator> equal_range(const key_type& key); |
38 | | // template <class K> |
39 | | // std::pair<iterator, iterator> equal_range(const K& key); |
40 | | // |
41 | | // When heterogeneous lookup is disabled, only the explicit `key_type` overloads |
42 | | // exist. |
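//
// As a hedged illustration (not part of this file), a transparent
// hash/equality functor pair that enables those heterogeneous overloads
// might look like:
//
//   struct StringHash {
//     using is_transparent = void;  // opts into heterogeneous lookup
//     size_t operator()(absl::string_view v) const {
//       return absl::Hash<absl::string_view>{}(v);
//     }
//   };
//   struct StringEq {
//     using is_transparent = void;
//     bool operator()(absl::string_view a, absl::string_view b) const {
//       return a == b;
//     }
//   };
//
// A `flat_hash_set<std::string, StringHash, StringEq>` can then be queried
// with `absl::string_view` or `const char*` keys without materializing a
// `std::string`.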
43 | | // |
44 | | // find() also supports passing the hash explicitly: |
45 | | // |
46 | | // iterator find(const key_type& key, size_t hash); |
47 | | // template <class U> |
48 | | // iterator find(const U& key, size_t hash); |
49 | | // |
50 | | // In addition, the pointer-to-element and iterator stability guarantees are
51 | | // weaker: all iterators and pointers are invalidated after a new element is |
52 | | // inserted. |
53 | | // |
54 | | // IMPLEMENTATION DETAILS |
55 | | // |
56 | | // # Table Layout |
57 | | // |
58 | | // A raw_hash_set's backing array consists of control bytes followed by slots |
59 | | // that may or may not contain objects. |
60 | | // |
61 | | // The layout of the backing array, for `capacity` slots, is thus, as a |
62 | | // pseudo-struct: |
63 | | // |
64 | | // struct BackingArray { |
65 | | // Sampling handler. This field isn't present when sampling is
66 | | // // disabled or this allocation hasn't been selected for sampling. |
67 | | // HashtablezInfoHandle infoz_; |
68 | | // // The number of elements we can insert before growing the capacity. |
69 | | // size_t growth_left; |
70 | | // // Control bytes for the "real" slots. |
71 | | // ctrl_t ctrl[capacity]; |
72 | | // // Always `ctrl_t::kSentinel`. This is used by iterators to find when to |
73 | | // // stop and serves no other purpose. |
74 | | // ctrl_t sentinel; |
75 | | // // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so |
76 | | // // that if a probe sequence picks a value near the end of `ctrl`, |
77 | | // // `Group` will have valid control bytes to look at. |
78 | | // ctrl_t clones[kWidth - 1]; |
79 | | // // The actual slot data. |
80 | | // slot_type slots[capacity]; |
81 | | // }; |
82 | | // |
83 | | // The length of this array is computed by `RawHashSetLayout::alloc_size` below. |
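//
// As a rough, hedged sketch (ignoring sampling, generations, and the
// alignment padding that the real `RawHashSetLayout` accounts for), that
// size is approximately:
//
//   alloc_size ~= sizeof(size_t)                 // growth_left
//               + capacity + 1 + (kWidth - 1)    // ctrl + sentinel + clones
//               + capacity * sizeof(slot_type)   // slots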
84 | | // |
85 | | // Control bytes (`ctrl_t`) are bytes (collected into groups of a |
86 | | // platform-specific size) that define the state of the corresponding slot in |
87 | | // the slot array. Group manipulation is tightly optimized to be as efficient |
88 | | // as possible: SSE and friends on x86, clever bit operations on other arches. |
89 | | // |
90 | | // Group 1 Group 2 Group 3 |
91 | | // +---------------+---------------+---------------+ |
92 | | // | | | | | | | | | | | | | | | | | | | | | | | | | |
93 | | // +---------------+---------------+---------------+ |
94 | | // |
95 | | // Each control byte is either a special value for empty slots, deleted slots |
96 | | // (sometimes called *tombstones*), or a special end-of-table marker used by
97 | | // iterators, or, if occupied, seven bits (H2) from the hash of the value in the |
98 | | // corresponding slot. |
99 | | // |
100 | | // Storing control bytes in a separate array also has beneficial cache effects, |
101 | | // since more logical slots will fit into a cache line. |
102 | | // |
103 | | // # Small Object Optimization (SOO) |
104 | | // |
105 | | // When the size/alignment of the value_type and the capacity of the table are |
106 | | // small, we enable small object optimization and store the values inline in |
107 | | // the raw_hash_set object. This optimization allows us to avoid |
108 | | // allocation/deallocation as well as cache/dTLB misses. |
109 | | // |
110 | | // # Hashing |
111 | | // |
112 | | // We compute two separate hashes, `H1` and `H2`, from the hash of an object. |
113 | | // `H1(hash(x))` is an index into `slots`, and essentially the starting point |
114 | | // for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out |
115 | | // objects that cannot possibly be the one we are looking for. |
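//
// For example, with hash(x) = 0x123456789ABCDEF0 (and ignoring the per-table
// salt that the real `H1()` below mixes in):
//
//   H2(hash) = hash & 0x7F = 0x70                // stored in the control byte
//   H1(hash) = hash >> 7   = 0x002468ACF13579BD  // probe starting point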
116 | | // |
117 | | // # Table operations. |
118 | | // |
119 | | // The key operations are `insert`, `find`, and `erase`. |
120 | | // |
121 | | // Since `insert` and `erase` are implemented in terms of `find`, we describe |
122 | | // `find` first. To `find` a value `x`, we compute `hash(x)`. From |
123 | | // `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every |
124 | | // group of slots in some interesting order. |
125 | | // |
126 | | // We now walk through these indices. At each index, we select the entire group |
127 | | // starting with that index and extract potential candidates: occupied slots |
128 | | // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the |
129 | | // group, we stop and report that the value is not present. Each candidate slot `y` is compared with
130 | | // `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the |
131 | | // next probe index. Tombstones effectively behave like full slots that never |
132 | | // match the value we're looking for. |
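//
// In hedged pseudo-C++ (the names match helpers defined later in this file,
// but the loop is only a sketch, not the real implementation):
//
//   for (probe_seq<Group::kWidth> seq(H1(hash, ctrl), capacity);; seq.next()) {
//     Group g(ctrl + seq.offset());
//     for (uint32_t i : g.Match(H2(hash))) {           // candidate slots
//       if (eq(x, slots[seq.offset(i)])) return found;
//     }
//     if (g.MaskEmpty()) return not_found;  // probing stops at the first empty
//   }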
133 | | // |
134 | | // The `H2` bits ensure when we compare a slot to an object with `==`, we are |
135 | | // likely to have actually found the object. That is, the chance is low that |
136 | | // `==` is called and returns `false`. Thus, when we search for an object, we |
137 | | // are unlikely to call `==` many times. This likelihood can be analyzed as
138 | | // follows (assuming that H2 is a random enough hash function). |
139 | | // |
140 | | // Let's assume that there are `k` "wrong" objects that must be examined in a |
141 | | // probe sequence. For example, when doing a `find` on an object that is in the |
142 | | // table, `k` is the number of objects between the start of the probe sequence |
143 | | // and the final found object (not including the final found object). The |
144 | | // expected number of objects with an H2 match is then `k/128`. Measurements |
145 | | // and analysis indicate that even at high load factors, `k` is less than 32, |
146 | | // meaning that the expected number of "false positive" comparisons we must
147 | | // perform is less than 32/128 = 1/4 per `find`.
148 | | //
149 | | // `insert` is implemented in terms of `unchecked_insert`, which inserts a |
150 | | // value presumed to not be in the table (violating this requirement will cause |
151 | | // the table to behave erratically). Given `x` and its hash `hash(x)`, to insert |
152 | | // it, we construct a `probe_seq` once again, and use it to find the first |
153 | | // group with an unoccupied (empty *or* deleted) slot. We place `x` into the |
154 | | // first such slot in the group and mark it as full with `x`'s H2. |
155 | | // |
156 | | // To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)` and
157 | | // perform a `find` to see if it's already present; if it is, we're done. If |
158 | | // it's not, we may decide the table is getting overcrowded (i.e. the load |
159 | | // factor is greater than 7/8 for big tables; `is_small()` tables use a max load |
160 | | // factor of 1); in this case, we allocate a bigger array, `unchecked_insert` |
161 | | // each element of the table into the new array (we know that no insertion here |
162 | | // will insert an already-present value), and discard the old backing array. At |
163 | | // this point, we may `unchecked_insert` the value `x`. |
164 | | // |
165 | | // Below, `unchecked_insert` is partly implemented by `prepare_insert`, which |
166 | | // presents a viable, initialized slot pointee to the caller. |
167 | | // |
168 | | // `erase` is implemented in terms of `erase_at`, which takes an index to a |
169 | | // slot. Given an offset, we destroy the slot's contents and mark its control
170 | | // byte as a tombstone. If we can prove that the slot would never appear in a
171 | | // probe sequence, we can mark the slot as empty instead. We can prove this by
172 | | // observing that if a group has any empty slots, it has never been full
173 | | // (assuming we never create an empty slot in a group with no empties, which
174 | | // this heuristic guarantees we never do), and `find` would stop at this group
175 | | // anyway (since it does not probe beyond groups with empties).
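//
// A hedged sketch of that decision (`WasNeverFull` and `SetCtrl` are
// illustrative names, not necessarily the real helpers):
//
//   void erase_at(size_t index) {
//     destroy(slots + index);
//     SetCtrl(index, WasNeverFull(index) ? ctrl_t::kEmpty : ctrl_t::kDeleted);
//   }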
176 | | // |
177 | | // `erase` is `erase_at` composed with `find`: if we |
178 | | // have a value `x`, we can perform a `find`, and then `erase_at` the resulting |
179 | | // slot. |
180 | | // |
181 | | // To iterate, we simply traverse the array, skipping empty and deleted slots |
182 | | // and stopping when we hit a `kSentinel`. |
183 | | |
184 | | #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ |
185 | | #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ |
186 | | |
187 | | #include <algorithm> |
188 | | #include <cassert> |
189 | | #include <cmath> |
190 | | #include <cstddef> |
191 | | #include <cstdint> |
192 | | #include <cstring> |
193 | | #include <initializer_list> |
194 | | #include <iterator> |
195 | | #include <limits> |
196 | | #include <memory> |
197 | | #include <tuple> |
198 | | #include <type_traits> |
199 | | #include <utility> |
200 | | |
201 | | #include "absl/base/attributes.h" |
202 | | #include "absl/base/config.h" |
203 | | #include "absl/base/internal/endian.h" |
204 | | #include "absl/base/internal/raw_logging.h" |
205 | | #include "absl/base/macros.h" |
206 | | #include "absl/base/optimization.h" |
207 | | #include "absl/base/options.h" |
208 | | #include "absl/base/port.h" |
209 | | #include "absl/base/prefetch.h" |
210 | | #include "absl/container/internal/common.h" // IWYU pragma: export // for node_handle |
211 | | #include "absl/container/internal/compressed_tuple.h" |
212 | | #include "absl/container/internal/container_memory.h" |
213 | | #include "absl/container/internal/hash_policy_traits.h" |
214 | | #include "absl/container/internal/hashtable_debug_hooks.h" |
215 | | #include "absl/container/internal/hashtablez_sampler.h" |
216 | | #include "absl/memory/memory.h" |
217 | | #include "absl/meta/type_traits.h" |
218 | | #include "absl/numeric/bits.h" |
219 | | #include "absl/utility/utility.h" |
220 | | |
221 | | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
222 | | #include <emmintrin.h> |
223 | | #endif |
224 | | |
225 | | #ifdef ABSL_INTERNAL_HAVE_SSSE3 |
226 | | #include <tmmintrin.h> |
227 | | #endif |
228 | | |
229 | | #ifdef _MSC_VER |
230 | | #include <intrin.h> |
231 | | #endif |
232 | | |
233 | | #ifdef ABSL_INTERNAL_HAVE_ARM_NEON |
234 | | #include <arm_neon.h> |
235 | | #endif |
236 | | |
237 | | namespace absl { |
238 | | ABSL_NAMESPACE_BEGIN |
239 | | namespace container_internal { |
240 | | |
241 | | #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS |
242 | | #error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set |
243 | | #elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ |
244 | | defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \ |
245 | | defined(ABSL_HAVE_MEMORY_SANITIZER)) && \ |
246 | | !defined(NDEBUG_SANITIZER) // If defined, performance is important. |
247 | | // When compiled in sanitizer mode, we add generation integers to the backing |
248 | | // array and iterators. In the backing array, we store the generation between |
249 | | // the control bytes and the slots. When iterators are dereferenced, we assert |
250 | | // that the container has not been mutated in a way that could cause iterator |
251 | | // invalidation since the iterator was initialized. |
252 | | #define ABSL_SWISSTABLE_ENABLE_GENERATIONS |
253 | | #endif |
254 | | |
255 | | // We use uint8_t so we don't need to worry about padding. |
256 | | using GenerationType = uint8_t; |
257 | | |
258 | | // A sentinel value for empty generations. Using 0 makes it easy to constexpr |
259 | | // initialize an array of this value. |
260 | 8 | constexpr GenerationType SentinelEmptyGeneration() { return 0; } |
261 | | |
262 | 8 | constexpr GenerationType NextGeneration(GenerationType generation) { |
263 | 8 | return ++generation == SentinelEmptyGeneration() ? ++generation : generation; |
264 | 8 | } |
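// E.g., NextGeneration(255) wraps the uint8_t to 0, which collides with
// SentinelEmptyGeneration(), so it returns 1 instead.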
265 | | |
266 | | #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS |
267 | | constexpr bool SwisstableGenerationsEnabled() { return true; } |
268 | | constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); } |
269 | | #else |
270 | 0 | constexpr bool SwisstableGenerationsEnabled() { return false; } |
271 | 14 | constexpr size_t NumGenerationBytes() { return 0; } |
272 | | #endif |
273 | | |
274 | | template <typename AllocType> |
275 | | void SwapAlloc(AllocType& lhs, AllocType& rhs, |
276 | | std::true_type /* propagate_on_container_swap */) { |
277 | | using std::swap; |
278 | | swap(lhs, rhs); |
279 | | } |
280 | | template <typename AllocType> |
281 | | void SwapAlloc(AllocType& lhs, AllocType& rhs, |
282 | | std::false_type /* propagate_on_container_swap */) { |
283 | | (void)lhs; |
284 | | (void)rhs; |
285 | | assert(lhs == rhs && |
286 | | "It's UB to call swap with unequal non-propagating allocators."); |
287 | | } |
288 | | |
289 | | template <typename AllocType> |
290 | | void CopyAlloc(AllocType& lhs, AllocType& rhs, |
291 | | std::true_type /* propagate_alloc */) { |
292 | | lhs = rhs; |
293 | | } |
294 | | template <typename AllocType> |
295 | | void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {} |
296 | | |
297 | | // The state for a probe sequence. |
298 | | // |
299 | | // Currently, the sequence is a triangular progression of the form |
300 | | // |
301 | | // p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1) |
302 | | // |
303 | | // The use of `Width` ensures that each probe step does not overlap groups; |
304 | | // the sequence effectively outputs the addresses of *groups* (although not |
305 | | // necessarily aligned to any boundary). The `Group` machinery allows us |
306 | | // to check an entire group with minimal branching. |
307 | | // |
308 | | // Wrapping around at `mask + 1` is important, but not for the obvious reason. |
309 | | // As described above, the first few entries of the control byte array |
310 | | // are mirrored at the end of the array, which `Group` will find and use |
311 | | // for selecting candidates. However, when those candidates' slots are |
312 | | // actually inspected, there are no corresponding slots for the cloned bytes, |
313 | | // so we need to make sure we've treated those offsets as "wrapping around". |
314 | | // |
315 | | // It turns out that this probe sequence visits every group exactly once if the |
316 | | // number of groups is a power of two, since (i^2+i)/2 is a bijection in |
317 | | // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing |
318 | | template <size_t Width> |
319 | | class probe_seq { |
320 | | public: |
321 | | // Creates a new probe sequence using `hash` as the initial value of the |
322 | | // sequence and `mask` (usually the capacity of the table) as the mask to |
323 | | // apply to each value in the progression. |
324 | 40 | probe_seq(size_t hash, size_t mask) { |
325 | 40 | assert(((mask + 1) & mask) == 0 && "not a mask"); |
326 | 40 | mask_ = mask; |
327 | 40 | offset_ = hash & mask_; |
328 | 40 | } |
329 | | |
330 | | // The offset within the table, i.e., the value `p(i)` above. |
331 | 40 | size_t offset() const { return offset_; } |
332 | 50 | size_t offset(size_t i) const { return (offset_ + i) & mask_; } |
333 | | |
334 | 0 | void next() { |
335 | 0 | index_ += Width; |
336 | 0 | offset_ += index_; |
337 | 0 | offset_ &= mask_; |
338 | 0 | } |
339 | | // 0-based probe index, a multiple of `Width`. |
340 | 16 | size_t index() const { return index_; } |
341 | | |
342 | | private: |
343 | | size_t mask_; |
344 | | size_t offset_; |
345 | | size_t index_ = 0; |
346 | | }; |
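// Hedged worked example of the progression above, with assumed values
// Width = 16, mask = 63 (64 control positions, i.e. four group-aligned
// starting offsets), and hash = 0:
//
//   probe_seq<16> seq(/*hash=*/0, /*mask=*/63);
//   seq.offset();              // i = 0: 0
//   seq.next(); seq.offset();  // i = 1: (0 + 16) & 63 == 16
//   seq.next(); seq.offset();  // i = 2: (16 + 32) & 63 == 48
//   seq.next(); seq.offset();  // i = 3: (48 + 48) & 63 == 32
//
// Each multiple of 16 is produced exactly once before the sequence repeats,
// as the bijection argument above promises.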
347 | | |
348 | | template <class ContainerKey, class Hash, class Eq> |
349 | | struct RequireUsableKey { |
350 | | template <class PassedKey, class... Args> |
351 | | std::pair< |
352 | | decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())), |
353 | | decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(), |
354 | | std::declval<const PassedKey&>()))>* |
355 | | operator()(const PassedKey&, const Args&...) const; |
356 | | }; |
357 | | |
358 | | template <class E, class Policy, class Hash, class Eq, class... Ts> |
359 | | struct IsDecomposable : std::false_type {}; |
360 | | |
361 | | template <class Policy, class Hash, class Eq, class... Ts> |
362 | | struct IsDecomposable< |
363 | | absl::void_t<decltype(Policy::apply( |
364 | | RequireUsableKey<typename Policy::key_type, Hash, Eq>(), |
365 | | std::declval<Ts>()...))>, |
366 | | Policy, Hash, Eq, Ts...> : std::true_type {}; |
367 | | |
368 | | // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. |
369 | | template <class T> |
370 | | constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) { |
371 | | using std::swap; |
372 | | return noexcept(swap(std::declval<T&>(), std::declval<T&>())); |
373 | | } |
374 | | template <class T> |
375 | | constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { |
376 | | return false; |
377 | | } |
378 | | |
379 | | template <typename T> |
380 | 162 | uint32_t TrailingZeros(T x) { |
381 | 162 | ABSL_ASSUME(x != 0); |
382 | 162 | return static_cast<uint32_t>(countr_zero(x)); |
383 | 162 | }
  Instantiations: TrailingZeros<unsigned long> (count 112); TrailingZeros<unsigned short> (count 50); TrailingZeros<unsigned int> (unexecuted)
384 | | |
385 | | // An 8-byte bitmask with the most significant bit set in every byte.
386 | | constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL; |
387 | | |
388 | | // An abstract bitmask, such as that emitted by a SIMD instruction. |
389 | | // |
390 | | // Specifically, this type implements a simple bitset whose representation is |
391 | | // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number |
392 | | // of abstract bits in the bitset, while `Shift` is the log-base-two of the |
393 | | // width of an abstract bit in the representation. |
394 | | // This mask provides operations for any number of real bits set in an abstract |
395 | | // bit. To support iteration on top of that, the implementation must guarantee
396 | | // that, within each set abstract bit, only the most significant real bit is set.
397 | | template <class T, int SignificantBits, int Shift = 0> |
398 | | class NonIterableBitMask { |
399 | | public: |
400 | 172 | explicit NonIterableBitMask(T mask) : mask_(mask) {}
  Instantiations: NonIterableBitMask<unsigned long, 8, 3> (count 56); NonIterableBitMask<unsigned short, 16, 0> (count 116)
401 | | |
402 | 16 | explicit operator bool() const { return this->mask_ != 0; } |
403 | | |
404 | | // Returns the index of the lowest *abstract* bit set in `self`. |
405 | 162 | uint32_t LowestBitSet() const { |
406 | 162 | return container_internal::TrailingZeros(mask_) >> Shift; |
407 | 162 | }
  Instantiations: NonIterableBitMask<unsigned long, 8, 3>::LowestBitSet (count 112); NonIterableBitMask<unsigned short, 16, 0>::LowestBitSet (count 50)
408 | | |
409 | | // Returns the index of the highest *abstract* bit set in `self`. |
410 | 0 | uint32_t HighestBitSet() const { |
411 | 0 | return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift); |
412 | 0 | } |
413 | | |
414 | | // Returns the number of trailing zero *abstract* bits. |
415 | 0 | uint32_t TrailingZeros() const { |
416 | 0 | return container_internal::TrailingZeros(mask_) >> Shift; |
417 | 0 | } |
418 | | |
419 | | // Returns the number of leading zero *abstract* bits. |
420 | 0 | uint32_t LeadingZeros() const { |
421 | 0 | constexpr int total_significant_bits = SignificantBits << Shift; |
422 | 0 | constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; |
423 | 0 | return static_cast<uint32_t>( |
424 | 0 | countl_zero(static_cast<T>(mask_ << extra_bits))) >> |
425 | 0 | Shift; |
426 | 0 | } |
427 | | |
428 | | T mask_; |
429 | | }; |
430 | | |
431 | | // A mask that can be iterated over.
432 | | // |
433 | | // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just |
434 | | // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When |
435 | | // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as |
436 | | // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. |
437 | | // If NullifyBitsOnIteration is true (only allowed for Shift == 3), |
438 | | // a non-zero abstract bit is allowed to have additional bits set
439 | | // (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not). |
440 | | // |
441 | | // For example: |
442 | | // for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2 |
443 | | // for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3 |
444 | | template <class T, int SignificantBits, int Shift = 0, |
445 | | bool NullifyBitsOnIteration = false> |
446 | | class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> { |
447 | | using Base = NonIterableBitMask<T, SignificantBits, Shift>; |
448 | | static_assert(std::is_unsigned<T>::value, ""); |
449 | | static_assert(Shift == 0 || Shift == 3, ""); |
450 | | static_assert(!NullifyBitsOnIteration || Shift == 3, ""); |
451 | | |
452 | | public: |
453 | 156 | explicit BitMask(T mask) : Base(mask) { |
454 | 156 | if (Shift == 3 && !NullifyBitsOnIteration) { |
455 | 56 | assert(this->mask_ == (this->mask_ & kMsbs8Bytes)); |
456 | 56 | } |
457 | 156 | }
  Instantiations: BitMask<unsigned long, 8, 3, false> (count 56); BitMask<unsigned short, 16, 0, false> (count 100)
458 | | // BitMask is an iterator over the indices of its abstract bits. |
459 | | using value_type = int; |
460 | | using iterator = BitMask; |
461 | | using const_iterator = BitMask; |
462 | | |
463 | 130 | BitMask& operator++() { |
464 | 130 | if (Shift == 3 && NullifyBitsOnIteration) { |
465 | 0 | this->mask_ &= kMsbs8Bytes; |
466 | 0 | } |
467 | 130 | this->mask_ &= (this->mask_ - 1); |
468 | 130 | return *this; |
469 | 130 | }
  Instantiations: BitMask<unsigned long, 8, 3, false>::operator++ (count 112); BitMask<unsigned short, 16, 0, false>::operator++ (count 18)
470 | | |
471 | 146 | uint32_t operator*() const { return Base::LowestBitSet(); }
  Instantiations: BitMask<unsigned long, 8, 3, false>::operator* (count 112); BitMask<unsigned short, 16, 0, false>::operator* (count 34)
472 | | |
473 | 62 | BitMask begin() const { return *this; }
  Instantiations: BitMask<unsigned long, 8, 3, false>::begin (count 28); BitMask<unsigned short, 16, 0, false>::begin (count 34)
474 | 62 | BitMask end() const { return BitMask(0); }
  Instantiations: BitMask<unsigned long, 8, 3, false>::end (count 28); BitMask<unsigned short, 16, 0, false>::end (count 34)
475 | | |
476 | | private: |
477 | | friend bool operator==(const BitMask& a, const BitMask& b) { |
478 | | return a.mask_ == b.mask_; |
479 | | } |
480 | 192 | friend bool operator!=(const BitMask& a, const BitMask& b) { |
481 | 192 | return a.mask_ != b.mask_; |
482 | 192 | }
  Instantiations: operator!= for BitMask<unsigned long, 8, 3, false> (count 140); for BitMask<unsigned short, 16, 0, false> (count 52)
483 | | }; |
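// Hedged usage sketch, mirroring the comment examples above:
//
//   BitMask<uint16_t, 16> m(uint16_t{0b101});
//   for (uint32_t i : m) {
//     // Visits i == 0, then i == 2.
//   }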
484 | | |
485 | | using h2_t = uint8_t; |
486 | | |
487 | | // The values here are selected for maximum performance. See the static asserts |
488 | | // below for details. |
489 | | |
490 | | // A `ctrl_t` is a single control byte, which can have one of four |
491 | | // states: empty, deleted, full (which has an associated seven-bit h2_t value) |
492 | | // and the sentinel. They have the following bit patterns: |
493 | | // |
494 | | // empty: 1 0 0 0 0 0 0 0 |
495 | | // deleted: 1 1 1 1 1 1 1 0 |
496 | | // full: 0 h h h h h h h // h represents the hash bits. |
497 | | // sentinel: 1 1 1 1 1 1 1 1 |
498 | | // |
499 | | // These values are specifically tuned for SSE-flavored SIMD. |
500 | | // The static_asserts below detail the source of these choices. |
501 | | // |
502 | | // We use an enum class so that when strict aliasing is enabled, the compiler |
503 | | // knows ctrl_t doesn't alias other types. |
504 | | enum class ctrl_t : int8_t { |
505 | | kEmpty = -128, // 0b10000000 |
506 | | kDeleted = -2, // 0b11111110 |
507 | | kSentinel = -1, // 0b11111111 |
508 | | }; |
509 | | static_assert( |
510 | | (static_cast<int8_t>(ctrl_t::kEmpty) & |
511 | | static_cast<int8_t>(ctrl_t::kDeleted) & |
512 | | static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0, |
513 | | "Special markers need to have the MSB to make checking for them efficient"); |
514 | | static_assert( |
515 | | ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, |
516 | | "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " |
517 | | "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); |
518 | | static_assert( |
519 | | ctrl_t::kSentinel == static_cast<ctrl_t>(-1), |
520 | | "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " |
521 | | "registers (pcmpeqd xmm, xmm)"); |
522 | | static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128), |
523 | | "ctrl_t::kEmpty must be -128 to make the SIMD check for its " |
524 | | "existence efficient (psignb xmm, xmm)"); |
525 | | static_assert( |
526 | | (~static_cast<int8_t>(ctrl_t::kEmpty) & |
527 | | ~static_cast<int8_t>(ctrl_t::kDeleted) & |
528 | | static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0, |
529 | | "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " |
530 | | "shared by ctrl_t::kSentinel to make the scalar test for " |
531 | | "MaskEmptyOrDeleted() efficient"); |
532 | | static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2), |
533 | | "ctrl_t::kDeleted must be -2 to make the implementation of " |
534 | | "ConvertSpecialToEmptyAndFullToDeleted efficient"); |
535 | | |
536 | | // See definition comment for why this is size 32. |
537 | | ABSL_DLL extern const ctrl_t kEmptyGroup[32]; |
538 | | |
539 | | // Returns a pointer to a control byte group that can be used by empty tables. |
540 | 82 | inline ctrl_t* EmptyGroup() { |
541 | | // Const must be cast away here; no uses of this function will actually write |
542 | | // to it because it is only used for empty tables. |
543 | 82 | return const_cast<ctrl_t*>(kEmptyGroup + 16); |
544 | 82 | } |
545 | | |
546 | | // For use in SOO iterators. |
547 | | // TODO(b/289225379): we could potentially get rid of this by adding an is_soo |
548 | | // bit in iterators. This would add branches but reduce cache misses. |
549 | | ABSL_DLL extern const ctrl_t kSooControl[17]; |
550 | | |
551 | | // Returns a pointer to a full byte followed by a sentinel byte. |
552 | 32 | inline ctrl_t* SooControl() { |
553 | | // Const must be cast away here; no uses of this function will actually write |
554 | | // to it because it is only used for SOO iterators. |
555 | 32 | return const_cast<ctrl_t*>(kSooControl); |
556 | 32 | } |
557 | | // Whether ctrl is from the SooControl array. |
558 | 32 | inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); } |
559 | | |
560 | | // Returns a pointer to a generation to use for an empty hashtable. |
561 | | GenerationType* EmptyGeneration(); |
562 | | |
563 | | // Returns whether `generation` is a generation for an empty hashtable that |
564 | | // could be returned by EmptyGeneration(). |
565 | 0 | inline bool IsEmptyGeneration(const GenerationType* generation) { |
566 | 0 | return *generation == SentinelEmptyGeneration(); |
567 | 0 | } |
568 | | |
569 | | // Mixes a randomly generated per-process seed with `hash` and `ctrl` to |
570 | | // randomize insertion order within groups. |
571 | | bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash, |
572 | | const ctrl_t* ctrl); |
573 | | |
574 | | ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards( |
575 | | ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash, |
576 | 0 | ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) { |
577 | | #if defined(NDEBUG) |
578 | | return false; |
579 | | #else |
580 | 0 | return ShouldInsertBackwardsForDebug(capacity, hash, ctrl); |
581 | 0 | #endif |
582 | 0 | } |
583 | | |
584 | | // Returns the insert position for the given mask.
585 | | // We want to add entropy even when ASLR is not enabled.
586 | | // In debug builds we will randomly insert at either the front or the back of
587 | | // the group.
588 | | // TODO(kfm,sbenza): revisit after we do unconditional mixing |
589 | | template <class Mask> |
590 | | ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset( |
591 | | Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity, |
592 | | ABSL_ATTRIBUTE_UNUSED size_t hash, |
593 | 16 | ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) { |
594 | | #if defined(NDEBUG) |
595 | | return mask.LowestBitSet(); |
596 | | #else |
597 | 16 | return ShouldInsertBackwardsForDebug(capacity, hash, ctrl) |
598 | 16 | ? mask.HighestBitSet() |
599 | 16 | : mask.LowestBitSet(); |
600 | 16 | #endif |
601 | 16 | } |
602 | | |
603 | | // Returns a per-table hash salt, which changes on resize. This gets mixed into
604 | | // H1 to randomize iteration order per-table. |
605 | | // |
606 | | // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure |
607 | | // non-determinism of iteration order in most cases. |
608 | 40 | inline size_t PerTableSalt(const ctrl_t* ctrl) { |
609 | | // The low bits of the pointer have little or no entropy because of |
610 | | // alignment. We shift the pointer to try to use higher entropy bits. A |
611 | | // good number seems to be 12 bits, because that aligns with page size. |
612 | 40 | return reinterpret_cast<uintptr_t>(ctrl) >> 12; |
613 | 40 | } |
614 | | // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. |
615 | 40 | inline size_t H1(size_t hash, const ctrl_t* ctrl) { |
616 | 40 | return (hash >> 7) ^ PerTableSalt(ctrl); |
617 | 40 | } |
618 | | |
619 | | // Extracts the H2 portion of a hash: the 7 bits not used for H1. |
620 | | // |
621 | | // These are used as an occupied control byte. |
622 | 48 | inline h2_t H2(size_t hash) { return hash & 0x7F; } |
623 | | |
624 | | // Helpers for checking the state of a control byte. |
625 | 40 | inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } |
626 | 130 | inline bool IsFull(ctrl_t c) { |
627 | | // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0` |
628 | | // is not a value in the enum. Both ways are equivalent, but this way makes |
629 | | // linters happier. |
630 | 130 | return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0; |
631 | 130 | } |
632 | 0 | inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } |
633 | 0 | inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } |
634 | | |
635 | | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
636 | | // Quick reference guide for intrinsics used below: |
637 | | // |
638 | | // * __m128i: An XMM (128-bit) word. |
639 | | // |
640 | | // * _mm_setzero_si128: Returns a zero vector. |
641 | | // * _mm_set1_epi8: Returns a vector with the same i8 in each lane. |
642 | | // |
643 | | // * _mm_subs_epi8: Saturating-subtracts two i8 vectors. |
644 | | // * _mm_and_si128: Ands two i128s together. |
645 | | // * _mm_or_si128: Ors two i128s together. |
646 | | // * _mm_andnot_si128: And-nots two i128s together. |
647 | | // |
648 | | // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, |
649 | | // filling each lane with 0x00 or 0xff. |
650 | | // * _mm_cmpgt_epi8: Same as above, but using > rather than ==. |
651 | | // |
652 | | // * _mm_loadu_si128: Performs an unaligned load of an i128. |
653 | | // * _mm_storeu_si128: Performs an unaligned store of an i128. |
654 | | // |
655 | | // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first |
656 | | // argument if the corresponding lane of the second |
657 | | // argument is positive, negative, or zero, respectively. |
658 | | // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a |
659 | | // bitmask consisting of those bits. |
660 | | // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low |
661 | | // four bits of each i8 lane in the second argument as |
662 | | // indices. |
663 | | |
664 | | // https://github.com/abseil/abseil-cpp/issues/209 |
665 | | // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 |
666 | | // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char |
667 | | // Work around this by using the portable implementation of Group |
668 | | // when using -funsigned-char under GCC. |
669 | 0 | inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { |
670 | | #if defined(__GNUC__) && !defined(__clang__) |
671 | | if (std::is_unsigned<char>::value) { |
672 | | const __m128i mask = _mm_set1_epi8(0x80); |
673 | | const __m128i diff = _mm_subs_epi8(b, a); |
674 | | return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); |
675 | | } |
676 | | #endif |
677 | 0 | return _mm_cmpgt_epi8(a, b); |
678 | 0 | } |
679 | | |
680 | | struct GroupSse2Impl { |
681 | | static constexpr size_t kWidth = 16; // the number of slots per group |
682 | | |
683 | 34 | explicit GroupSse2Impl(const ctrl_t* pos) { |
684 | 34 | ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos)); |
685 | 34 | } |
686 | | |
687 | | // Returns a bitmask representing the positions of slots that match hash. |
688 | 32 | BitMask<uint16_t, kWidth> Match(h2_t hash) const { |
689 | 32 | auto match = _mm_set1_epi8(static_cast<char>(hash)); |
690 | 32 | BitMask<uint16_t, kWidth> result = BitMask<uint16_t, kWidth>(0); |
691 | 32 | result = BitMask<uint16_t, kWidth>( |
692 | 32 | static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); |
693 | 32 | return result; |
694 | 32 | } |
695 | | |
696 | | // Returns a bitmask representing the positions of empty slots. |
697 | 16 | NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const { |
698 | | #ifdef ABSL_INTERNAL_HAVE_SSSE3 |
699 | | // This only works because ctrl_t::kEmpty is -128. |
700 | | return NonIterableBitMask<uint16_t, kWidth>( |
701 | | static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); |
702 | | #else |
703 | 16 | auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty)); |
704 | 16 | return NonIterableBitMask<uint16_t, kWidth>( |
705 | 16 | static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); |
706 | 16 | #endif |
707 | 16 | } |
708 | | |
709 | | // Returns a bitmask representing the positions of full slots. |
710 | | // Note: for `is_small()` tables, a group may contain the "same" slot twice:
711 | | // original and mirrored. |
712 | 2 | BitMask<uint16_t, kWidth> MaskFull() const { |
713 | 2 | return BitMask<uint16_t, kWidth>( |
714 | 2 | static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff)); |
715 | 2 | } |
716 | | |
717 | | // Returns a bitmask representing the positions of non-full slots.
718 | | // Note: this includes kEmpty, kDeleted, and kSentinel.
719 | | // It is useful in contexts where kSentinel is not present.
720 | 0 | auto MaskNonFull() const { |
721 | 0 | return BitMask<uint16_t, kWidth>( |
722 | 0 | static_cast<uint16_t>(_mm_movemask_epi8(ctrl))); |
723 | 0 | } |
724 | | |
725 | | // Returns a bitmask representing the positions of empty or deleted slots. |
726 | 0 | NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const { |
727 | 0 | auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel)); |
728 | 0 | return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>( |
729 | 0 | _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); |
730 | 0 | } |
731 | | |
732 | | // Returns the number of leading (lowest-index) empty or deleted elements in the group.
733 | 0 | uint32_t CountLeadingEmptyOrDeleted() const { |
734 | 0 | auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel)); |
735 | 0 | return TrailingZeros(static_cast<uint32_t>( |
736 | 0 | _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); |
737 | 0 | } |
738 | | |
739 | 0 | void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { |
740 | 0 | auto msbs = _mm_set1_epi8(static_cast<char>(-128)); |
741 | 0 | auto x126 = _mm_set1_epi8(126); |
742 | | #ifdef ABSL_INTERNAL_HAVE_SSSE3 |
743 | | auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); |
744 | | #else |
745 | 0 | auto zero = _mm_setzero_si128(); |
746 | 0 | auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); |
747 | 0 | auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); |
748 | 0 | #endif |
749 | 0 | _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); |
750 | 0 | } |
751 | | |
752 | | __m128i ctrl; |
753 | | }; |
754 | | #endif // ABSL_INTERNAL_HAVE_SSE2
755 | | |
756 | | #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) |
757 | | struct GroupAArch64Impl { |
758 | | static constexpr size_t kWidth = 8; |
759 | | |
760 | | explicit GroupAArch64Impl(const ctrl_t* pos) { |
761 | | ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos)); |
762 | | } |
763 | | |
764 | | auto Match(h2_t hash) const { |
765 | | uint8x8_t dup = vdup_n_u8(hash); |
766 | | auto mask = vceq_u8(ctrl, dup); |
767 | | return BitMask<uint64_t, kWidth, /*Shift=*/3, |
768 | | /*NullifyBitsOnIteration=*/true>( |
769 | | vget_lane_u64(vreinterpret_u64_u8(mask), 0)); |
770 | | } |
771 | | |
772 | | NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const { |
773 | | uint64_t mask = |
774 | | vget_lane_u64(vreinterpret_u64_u8(vceq_s8( |
775 | | vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)), |
776 | | vreinterpret_s8_u8(ctrl))), |
777 | | 0); |
778 | | return NonIterableBitMask<uint64_t, kWidth, 3>(mask); |
779 | | } |
780 | | |
781 | | // Returns a bitmask representing the positions of full slots. |
782 | | // Note: for `is_small()` tables, a group may contain the "same" slot twice:
783 | | // original and mirrored. |
784 | | auto MaskFull() const { |
785 | | uint64_t mask = vget_lane_u64( |
786 | | vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl), |
787 | | vdup_n_s8(static_cast<int8_t>(0)))), |
788 | | 0); |
789 | | return BitMask<uint64_t, kWidth, /*Shift=*/3, |
790 | | /*NullifyBitsOnIteration=*/true>(mask); |
791 | | } |
792 | | |
793 | | // Returns a bitmask representing the positions of non full slots. |
794 | | // Note: this includes: kEmpty, kDeleted, kSentinel. |
795 | | // It is useful in contexts when kSentinel is not present. |
796 | | auto MaskNonFull() const { |
797 | | uint64_t mask = vget_lane_u64( |
798 | | vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl), |
799 | | vdup_n_s8(static_cast<int8_t>(0)))), |
800 | | 0); |
801 | | return BitMask<uint64_t, kWidth, /*Shift=*/3, |
802 | | /*NullifyBitsOnIteration=*/true>(mask); |
803 | | } |
804 | | |
805 | | NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const { |
806 | | uint64_t mask = |
807 | | vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( |
808 | | vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)), |
809 | | vreinterpret_s8_u8(ctrl))), |
810 | | 0); |
811 | | return NonIterableBitMask<uint64_t, kWidth, 3>(mask); |
812 | | } |
813 | | |
814 | | uint32_t CountLeadingEmptyOrDeleted() const { |
815 | | uint64_t mask = |
816 | | vget_lane_u64(vreinterpret_u64_u8(vcle_s8( |
817 | | vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)), |
818 | | vreinterpret_s8_u8(ctrl))), |
819 | | 0); |
820 | | // Similar to MaskEmptyOrDeleted(), but with the comparison inverted so that
821 | | // the produced bitfield is inverted. We then count the number of trailing zeros.
822 | | // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, |
823 | | // so we should be fine. |
824 | | return static_cast<uint32_t>(countr_zero(mask)) >> 3; |
825 | | } |
826 | | |
827 | | void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { |
828 | | uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); |
829 | | constexpr uint64_t slsbs = 0x0202020202020202ULL; |
830 | | constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL; |
831 | | auto x = slsbs & (mask >> 6); |
832 | | auto res = (x + midbs) | kMsbs8Bytes; |
833 | | little_endian::Store64(dst, res); |
834 | | } |
835 | | |
836 | | uint8x8_t ctrl; |
837 | | }; |
838 | | #endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN |
839 | | |
840 | | struct GroupPortableImpl { |
841 | | static constexpr size_t kWidth = 8; |
842 | | |
843 | | explicit GroupPortableImpl(const ctrl_t* pos) |
844 | 28 | : ctrl(little_endian::Load64(pos)) {} |
845 | | |
846 | 0 | BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const { |
847 | 0 | // For the technique, see: |
848 | 0 | // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord |
849 | 0 | // (Determine if a word has a byte equal to n). |
850 | 0 | // |
851 | 0 | // Caveat: there are false positives but: |
852 | 0 | // - they only occur if there is a real match |
853 | 0 | // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel |
854 | 0 | // - they will be handled gracefully by subsequent checks in code |
855 | 0 | // |
856 | 0 | // Example: |
857 | 0 | // v = 0x1716151413121110 |
858 | 0 | // hash = 0x12 |
859 | 0 | // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 |
860 | 0 | constexpr uint64_t lsbs = 0x0101010101010101ULL; |
861 | 0 | auto x = ctrl ^ (lsbs * hash); |
862 | 0 | return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & kMsbs8Bytes); |
863 | 0 | } |
864 | | |
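  // Hedged explanation of the bit trick below: within a byte, kEmpty
  // (0b10000000) is the only control value whose MSB is set while bit 1 is
  // clear. Shifting `ctrl` left by 6 moves each byte's bit 1 into its MSB
  // position, so `ctrl & ~(ctrl << 6) & kMsbs8Bytes` keeps exactly the MSBs
  // of kEmpty bytes (kDeleted = 0b11111110 and kSentinel = 0b11111111 have
  // bit 1 set; full bytes have a clear MSB).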
865 | 0 | NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const { |
866 | 0 | return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) & |
867 | 0 | kMsbs8Bytes); |
868 | 0 | } |
869 | | |
870 | | // Returns a bitmask representing the positions of full slots. |
871 | | // Note: for `is_small()` tables, a group may contain the "same" slot twice:
872 | | // original and mirrored. |
873 | 28 | BitMask<uint64_t, kWidth, 3> MaskFull() const { |
874 | 28 | return BitMask<uint64_t, kWidth, 3>((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes); |
875 | 28 | } |
876 | | |
877 | | // Returns a bitmask representing the positions of non-full slots.
878 | | // Note: this includes kEmpty, kDeleted, and kSentinel.
879 | | // It is useful in contexts where kSentinel is not present.
880 | 0 | auto MaskNonFull() const { |
881 | 0 | return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes); |
882 | 0 | } |
883 | | |
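  // Hedged note: same trick as MaskEmpty above, but shifting by 7 tests
  // bit 0, which is clear in both kEmpty and kDeleted yet set in kSentinel,
  // so the expression below selects empty *or* deleted bytes.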
884 | 0 | NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const { |
885 | 0 | return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) & |
886 | 0 | kMsbs8Bytes); |
887 | 0 | } |
888 | | |
889 | 0 | uint32_t CountLeadingEmptyOrDeleted() const { |
890 | 0 | // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and |
891 | 0 | // kDeleted. We lower all other bits and count number of trailing zeros. |
892 | 0 | constexpr uint64_t bits = 0x0101010101010101ULL; |
893 | 0 | return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> |
894 | 0 | 3); |
895 | 0 | } |
896 | | |
897 | 0 | void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { |
898 | 0 | constexpr uint64_t lsbs = 0x0101010101010101ULL; |
899 | 0 | auto x = ctrl & kMsbs8Bytes; |
900 | 0 | auto res = (~x + (x >> 7)) & ~lsbs; |
901 | 0 | little_endian::Store64(dst, res); |
902 | 0 | } |
903 | | |
904 | | uint64_t ctrl; |
905 | | }; |
906 | | |
907 | | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
908 | | using Group = GroupSse2Impl; |
909 | | using GroupFullEmptyOrDeleted = GroupSse2Impl; |
910 | | #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) |
911 | | using Group = GroupAArch64Impl; |
912 | | // For Aarch64, we use the portable implementation for counting and masking |
913 | | // full, empty or deleted group elements. This is to avoid the latency of moving |
914 | | // between data GPRs and Neon registers when it does not provide a benefit. |
915 | | // Using Neon is profitable when we call Match(), but is not when we don't, |
916 | | // which is the case when we do *EmptyOrDeleted and MaskFull operations. |
917 | | // It is difficult to make a similar approach beneficial on other architectures |
918 | | // such as x86 since they have much lower GPR <-> vector register transfer |
919 | | // latency and 16-wide Groups. |
920 | | using GroupFullEmptyOrDeleted = GroupPortableImpl; |
921 | | #else |
922 | | using Group = GroupPortableImpl; |
923 | | using GroupFullEmptyOrDeleted = GroupPortableImpl; |
924 | | #endif |
925 | | |
926 | | // When there is an insertion with no reserved growth, we rehash with |
927 | | // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a |
928 | | // constant divided by capacity ensures that inserting N elements is still O(N) |
929 | | // in the average case. Using the constant 16 means that we expect to rehash ~8 |
930 | | // times more often than when generations are disabled. Per capacity growth, we
931 | | // add an expected rehash_probability * #insertions = (16/capacity) *
932 | | // ((7/8 - 7/16) * capacity) = 7 extra rehashes.
933 | 0 | inline size_t RehashProbabilityConstant() { return 16; } |
934 | | |
935 | | class CommonFieldsGenerationInfoEnabled { |
936 | | // A sentinel value for reserved_growth_ indicating that we just ran out of |
937 | | // reserved growth on the last insertion. When reserve is called and then |
938 | | // insertions take place, reserved_growth_'s state machine is N, ..., 1, |
939 | | // kReservedGrowthJustRanOut, 0. |
940 | | static constexpr size_t kReservedGrowthJustRanOut = |
941 | | (std::numeric_limits<size_t>::max)(); |
942 | | |
943 | | public: |
944 | | CommonFieldsGenerationInfoEnabled() = default; |
945 | | CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that) |
946 | | : reserved_growth_(that.reserved_growth_), |
947 | | reservation_size_(that.reservation_size_), |
948 | 0 | generation_(that.generation_) { |
949 | 0 | that.reserved_growth_ = 0; |
950 | 0 | that.reservation_size_ = 0; |
951 | 0 | that.generation_ = EmptyGeneration(); |
952 | 0 | } |
953 | | CommonFieldsGenerationInfoEnabled& operator=( |
954 | | CommonFieldsGenerationInfoEnabled&&) = default; |
955 | | |
956 | | // Whether we should rehash on insert in order to detect bugs from using
957 | | // invalidated references. We rehash on the first insertion after
958 | | // reserved_growth_ reaches 0 after a call to reserve. We also rehash with low
959 | | // probability whenever reserved_growth_ is zero.
960 | | bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl, |
961 | | size_t capacity) const; |
962 | | // Similar to above, except that we don't depend on reserved_growth_. |
963 | | bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl, |
964 | | size_t capacity) const; |
965 | 0 | void maybe_increment_generation_on_insert() { |
966 | 0 | if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0; |
967 | 0 |
968 | 0 | if (reserved_growth_ > 0) { |
969 | 0 | if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut; |
970 | 0 | } else { |
971 | 0 | increment_generation(); |
972 | 0 | } |
973 | 0 | } |
974 | 0 | void increment_generation() { *generation_ = NextGeneration(*generation_); } |
975 | 0 | void reset_reserved_growth(size_t reservation, size_t size) { |
976 | 0 | reserved_growth_ = reservation - size; |
977 | 0 | } |
978 | 0 | size_t reserved_growth() const { return reserved_growth_; } |
979 | 0 | void set_reserved_growth(size_t r) { reserved_growth_ = r; } |
980 | 0 | size_t reservation_size() const { return reservation_size_; } |
981 | 0 | void set_reservation_size(size_t r) { reservation_size_ = r; } |
982 | 0 | GenerationType generation() const { return *generation_; } |
983 | 0 | void set_generation(GenerationType g) { *generation_ = g; } |
984 | 0 | GenerationType* generation_ptr() const { return generation_; } |
985 | 0 | void set_generation_ptr(GenerationType* g) { generation_ = g; } |
986 | | |
987 | | private: |
988 | | // The number of insertions remaining that are guaranteed to not rehash due to |
989 | | // a prior call to reserve. Note: we store reserved growth in addition to |
990 | | // reservation size because calls to erase() decrease size_ but don't decrease |
991 | | // reserved growth. |
992 | | size_t reserved_growth_ = 0; |
993 | | // The maximum argument to reserve() since the container was cleared. We need |
994 | | // to keep track of this, in addition to reserved growth, because we reset |
995 | | // reserved growth to this when erase(begin(), end()) is called. |
996 | | size_t reservation_size_ = 0; |
997 | | // Pointer to the generation counter, which is used to validate iterators and |
998 | | // is stored in the backing array between the control bytes and the slots. |
999 | | // Note that we can't store the generation inside the container itself and |
1000 | | // keep a pointer to the container in the iterators because iterators must |
1001 | | // remain valid when the container is moved. |
1002 | | // Note: we could derive this pointer from the control pointer, but it makes |
1003 | | // the code more complicated, and there's a benefit in having the sizes of |
1004 | | // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different, |
1005 | | // which is that tests are less likely to rely on the size remaining the same. |
1006 | | GenerationType* generation_ = EmptyGeneration(); |
1007 | | }; |
1008 | | |
1009 | | class CommonFieldsGenerationInfoDisabled { |
1010 | | public: |
1011 | | CommonFieldsGenerationInfoDisabled() = default; |
1012 | | CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) = |
1013 | | default; |
1014 | | CommonFieldsGenerationInfoDisabled& operator=( |
1015 | | CommonFieldsGenerationInfoDisabled&&) = default; |
1016 | | |
1017 | 0 | bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const { |
1018 | 0 | return false; |
1019 | 0 | } |
1020 | 0 | bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const { |
1021 | 0 | return false; |
1022 | 0 | } |
1023 | 16 | void maybe_increment_generation_on_insert() {} |
1024 | 0 | void increment_generation() {} |
1025 | 0 | void reset_reserved_growth(size_t, size_t) {} |
1026 | 0 | size_t reserved_growth() const { return 0; } |
1027 | 0 | void set_reserved_growth(size_t) {} |
1028 | 0 | size_t reservation_size() const { return 0; } |
1029 | 0 | void set_reservation_size(size_t) {} |
1030 | 8 | GenerationType generation() const { return 0; } |
1031 | 8 | void set_generation(GenerationType) {} |
1032 | 32 | GenerationType* generation_ptr() const { return nullptr; } |
1033 | 8 | void set_generation_ptr(GenerationType*) {} |
1034 | | }; |
1035 | | |
1036 | | class HashSetIteratorGenerationInfoEnabled { |
1037 | | public: |
1038 | | HashSetIteratorGenerationInfoEnabled() = default; |
1039 | | explicit HashSetIteratorGenerationInfoEnabled( |
1040 | | const GenerationType* generation_ptr) |
1041 | 0 | : generation_ptr_(generation_ptr), generation_(*generation_ptr) {} |
1042 | | |
1043 | 0 | GenerationType generation() const { return generation_; } |
1044 | 0 | void reset_generation() { generation_ = *generation_ptr_; } |
1045 | 0 | const GenerationType* generation_ptr() const { return generation_ptr_; } |
1046 | 0 | void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; } |
1047 | | |
1048 | | private: |
1049 | | const GenerationType* generation_ptr_ = EmptyGeneration(); |
1050 | | GenerationType generation_ = *generation_ptr_; |
1051 | | }; |
1052 | | |
1053 | | class HashSetIteratorGenerationInfoDisabled { |
1054 | | public: |
1055 | | HashSetIteratorGenerationInfoDisabled() = default; |
1056 | 32 | explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {} |
1057 | | |
1058 | 48 | GenerationType generation() const { return 0; } |
1059 | 0 | void reset_generation() {} |
1060 | 80 | const GenerationType* generation_ptr() const { return nullptr; } |
1061 | 0 | void set_generation_ptr(const GenerationType*) {} |
1062 | | }; |
1063 | | |
1064 | | #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS |
1065 | | using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled; |
1066 | | using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled; |
1067 | | #else |
1068 | | using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled; |
1069 | | using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled; |
1070 | | #endif |
1071 | | |
1072 | | // Stores the information regarding the number of slots we can still fill
1073 | | // without needing to rehash. |
1074 | | // |
1075 | | // We want to ensure a sufficient number of empty slots in the table in order
1076 | | // to keep probe sequences relatively short. An empty slot in the probe group
1077 | | // is required to stop probing.
1078 | | // |
1079 | | // Tombstones (kDeleted slots) are not included in the growth capacity, |
1080 | | // because we'd like to rehash when the table is filled with tombstones and/or |
1081 | | // full slots. |
1082 | | // |
1083 | | // GrowthInfo also stores a bit that encodes whether the table may have any
1084 | | // deleted slots.
1085 | | // Most of the tables (>95%) have no deleted slots, so some functions can |
1086 | | // be more efficient with this information. |
1087 | | // |
1088 | | // Callers can also force a rehash via the standard `rehash(0)`, |
1089 | | // which will recompute this value as a side-effect. |
1090 | | // |
1091 | | // See also `CapacityToGrowth()`. |
1092 | | class GrowthInfo { |
1093 | | public: |
1094 | | // Leaves data member uninitialized. |
1095 | | GrowthInfo() = default; |
1096 | | |
1097 | | // Initializes the GrowthInfo assuming we can grow `growth_left` elements |
1098 | | // and there are no kDeleted slots in the table. |
1099 | 8 | void InitGrowthLeftNoDeleted(size_t growth_left) { |
1100 | 8 | growth_left_info_ = growth_left; |
1101 | 8 | } |
1102 | | |
1103 | | // Overwrites single full slot with an empty slot. |
1104 | 0 | void OverwriteFullAsEmpty() { ++growth_left_info_; } |
1105 | | |
1106 | | // Overwrites single empty slot with a full slot. |
1107 | 0 | void OverwriteEmptyAsFull() { |
1108 | 0 | assert(GetGrowthLeft() > 0); |
1109 | 0 | --growth_left_info_; |
1110 | 0 | } |
1111 | | |
1112 | | // Overwrites several empty slots with full slots. |
1113 | 0 | void OverwriteManyEmptyAsFull(size_t cnt) { |
1114 | 0 | assert(GetGrowthLeft() >= cnt); |
1115 | 0 | growth_left_info_ -= cnt; |
1116 | 0 | } |
1117 | | |
1118 | | // Overwrites the specified control element with a full slot.
1119 | 16 | void OverwriteControlAsFull(ctrl_t ctrl) { |
1120 | 16 | assert(GetGrowthLeft() >= static_cast<size_t>(IsEmpty(ctrl))); |
1121 | 16 | growth_left_info_ -= static_cast<size_t>(IsEmpty(ctrl)); |
1122 | 16 | } |
1123 | | |
1124 | | // Overwrites single full slot with a deleted slot. |
1125 | 0 | void OverwriteFullAsDeleted() { growth_left_info_ |= kDeletedBit; } |
1126 | | |
1127 | | // Returns true if the table satisfies two properties:
1128 | | // 1. Guaranteed to have no kDeleted slots. |
1129 | | // 2. There is a place for at least one element to grow. |
1130 | 16 | bool HasNoDeletedAndGrowthLeft() const { |
1131 | 16 | return static_cast<std::make_signed_t<size_t>>(growth_left_info_) > 0; |
1132 | 16 | } |
1133 | | |
1134 | | // Returns true if the table satisfies two properties: |
1135 | | // 1. Guaranteed to have no kDeleted slots. |
1136 | | // 2. There is no growth left. |
1137 | 8 | bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; } |
1138 | | |
1139 | | // Returns true if the table is guaranteed to have no kDeleted slots.
1140 | 0 | bool HasNoDeleted() const { |
1141 | 0 | return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0; |
1142 | 0 | } |
1143 | | |
1144 | | // Returns the number of elements left to grow. |
1145 | 16 | size_t GetGrowthLeft() const { return growth_left_info_ & kGrowthLeftMask; } |
1146 | | |
1147 | | private: |
1148 | | static constexpr size_t kGrowthLeftMask = ((~size_t{}) >> 1); |
1149 | | static constexpr size_t kDeletedBit = ~kGrowthLeftMask; |
1150 | | // The topmost bit signals whether the table may have deleted slots.
1151 | | size_t growth_left_info_; |
1152 | | }; |
1153 | | |
1154 | | static_assert(sizeof(GrowthInfo) == sizeof(size_t), ""); |
1155 | | static_assert(alignof(GrowthInfo) == alignof(size_t), ""); |
1156 | | |
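To make the encoding concrete, the following standalone sketch (not part of this header; the constants are re-derived here) mirrors the kDeletedBit/kGrowthLeftMask arithmetic and shows how a single size_t carries both the growth counter and the "may have deleted" flag:

    #include <cassert>
    #include <cstddef>
    #include <type_traits>

    int main() {
      constexpr size_t kGrowthLeftMask = (~size_t{}) >> 1;  // low bits: growth left
      constexpr size_t kDeletedBit = ~kGrowthLeftMask;      // top bit: may have kDeleted

      size_t info = 5;  // InitGrowthLeftNoDeleted(5)
      assert((info & kGrowthLeftMask) == 5);  // GetGrowthLeft()
      // HasNoDeletedAndGrowthLeft(): a positive signed value means the top bit
      // is clear and growth left is nonzero, checked in a single comparison.
      assert(static_cast<std::make_signed_t<size_t>>(info) > 0);

      info |= kDeletedBit;  // OverwriteFullAsDeleted()
      assert((info & kGrowthLeftMask) == 5);  // growth counter unchanged
      // HasNoDeleted() is now false: the signed value is negative.
      assert(static_cast<std::make_signed_t<size_t>>(info) < 0);
      return 0;
    }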
1157 | | // Returns whether `n` is a valid capacity (i.e., number of slots). |
1158 | | // |
1159 | | // A valid capacity is a non-zero integer `2^m - 1`. |
1160 | 46 | inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } |
1161 | | |
1162 | | // Returns the number of "cloned control bytes". |
1163 | | // |
1164 | | // This is the number of control bytes that are present both at the beginning |
1165 | | // of the control byte array and at the end, such that we can create a |
1166 | | // `Group::kWidth`-width probe window starting from any control byte. |
1167 | 54 | constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } |
1168 | | |
1169 | | // Returns the number of control bytes including cloned. |
1170 | 20 | constexpr size_t NumControlBytes(size_t capacity) { |
1171 | 20 | return capacity + 1 + NumClonedBytes(); |
1172 | 20 | } |
1173 | | |
1174 | | // Computes the offset of the control bytes from the start of the backing
1175 | | // allocation; infoz and growth_info are stored at the start of that allocation.
1176 | 0 | inline static size_t ControlOffset(bool has_infoz) { |
1177 | 0 | return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo); |
1178 | 0 | }
1179 | | |
1180 | | // Helper class for computing offsets and allocation size of hash set fields. |
1181 | | class RawHashSetLayout { |
1182 | | public: |
1183 | | explicit RawHashSetLayout(size_t capacity, size_t slot_align, bool has_infoz) |
1184 | | : capacity_(capacity), |
1185 | | control_offset_(ControlOffset(has_infoz)), |
1186 | | generation_offset_(control_offset_ + NumControlBytes(capacity)), |
1187 | | slot_offset_( |
1188 | | (generation_offset_ + NumGenerationBytes() + slot_align - 1) & |
1189 | 14 | (~slot_align + 1)) { |
1190 | 14 | assert(IsValidCapacity(capacity)); |
1191 | 14 | } |
1192 | | |
1193 | | // Returns the capacity of a table. |
1194 | 8 | size_t capacity() const { return capacity_; } |
1195 | | |
1196 | | // Returns precomputed offset from the start of the backing allocation of |
1197 | | // control. |
1198 | 14 | size_t control_offset() const { return control_offset_; } |
1199 | | |
1200 | | // Returns the precomputed offset (from the start of the backing allocation)
1201 | | // of the generation counter (if it exists).
1202 | 8 | size_t generation_offset() const { return generation_offset_; } |
1203 | | |
1204 | | // Returns the precomputed offset (from the start of the backing allocation)
1205 | | // at which the slots begin.
1206 | 8 | size_t slot_offset() const { return slot_offset_; } |
1207 | | |
1208 | | // Computes the total size of the backing array for the given slot size.
1210 | 14 | size_t alloc_size(size_t slot_size) const { |
1211 | 14 | return slot_offset_ + capacity_ * slot_size; |
1212 | 14 | } |
1213 | | |
1214 | | private: |
1215 | | size_t capacity_; |
1216 | | size_t control_offset_; |
1217 | | size_t generation_offset_; |
1218 | | size_t slot_offset_; |
1219 | | }; |
1220 | | |
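For intuition, the offsets can be re-derived by hand for one plausible configuration: capacity 15, 8-byte slot alignment, 16-wide groups, sampling disabled. The sketch below assumes a 64-bit size_t (so sizeof(GrowthInfo) == 8) and that NumGenerationBytes() is 0 when generations are disabled; both are assumptions of this illustration, not guarantees of the header:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t capacity = 15;
      const size_t slot_align = 8;
      const size_t num_cloned = 16 - 1;                   // NumClonedBytes()
      const size_t control_offset = 8;                    // sizeof(GrowthInfo), no infoz
      const size_t num_ctrl = capacity + 1 + num_cloned;  // 15 real + sentinel + 15 cloned
      const size_t generation_offset = control_offset + num_ctrl;  // 8 + 31 == 39
      // Align up to slot_align; `~slot_align + 1` is -slot_align, i.e. the mask.
      const size_t slot_offset =
          (generation_offset + /*NumGenerationBytes()=*/0 + slot_align - 1) &
          (~slot_align + 1);
      assert(generation_offset == 39);
      assert(slot_offset == 40);               // 39 rounded up to a multiple of 8
      assert(slot_offset + capacity * 8 == 160);  // alloc_size() for 8-byte slots
      return 0;
    }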
1221 | | struct HashtableFreeFunctionsAccess; |
1222 | | |
1223 | | // We only allow a maximum of 1 SOO element, which makes the implementation |
1224 | | // much simpler. Complications with multiple SOO elements include: |
1225 | | // - Satisfying the guarantee that erasing one element doesn't invalidate |
1226 | | // iterators to other elements means we would probably need actual SOO |
1227 | | // control bytes. |
1228 | | // - In order to prevent user code from depending on iteration order for small |
1229 | | // tables, we would need to randomize the iteration order somehow. |
1230 | 0 | constexpr size_t SooCapacity() { return 1; } |
1231 | | // Sentinel type to indicate SOO CommonFields construction. |
1232 | | struct soo_tag_t {}; |
1233 | | // Sentinel type to indicate SOO CommonFields construction with full size. |
1234 | | struct full_soo_tag_t {}; |
1235 | | |
1236 | | // Suppress erroneous uninitialized memory errors on GCC. For example, GCC |
1237 | | // thinks that the call to slot_array() in find_or_prepare_insert() is reading |
1238 | | // uninitialized memory, but slot_array is only called there when the table is |
1239 | | // non-empty and this memory is initialized when the table is non-empty. |
1240 | | #if !defined(__clang__) && defined(__GNUC__) |
1241 | | #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) \ |
1242 | | _Pragma("GCC diagnostic push") \ |
1243 | | _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") \ |
1244 | | _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \ |
1245 | | _Pragma("GCC diagnostic pop") |
1246 | | #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \ |
1247 | | ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x) |
1248 | | #else |
1249 | 0 | #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x |
1250 | 680 | #define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x |
1251 | | #endif |
1252 | | |
1253 | | // This allows us to work around an uninitialized memory warning when |
1254 | | // constructing begin() iterators in empty hashtables. |
1255 | | union MaybeInitializedPtr { |
1256 | 162 | void* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); } |
1257 | 8 | void set(void* ptr) { p = ptr; } |
1258 | | |
1259 | | void* p; |
1260 | | }; |
1261 | | |
1262 | | struct HeapPtrs { |
1263 | | HeapPtrs() = default; |
1264 | 2 | explicit HeapPtrs(ctrl_t* c) : control(c) {} |
1265 | | |
1266 | | // The control bytes (and, also, a pointer near to the base of the backing |
1267 | | // array). |
1268 | | // |
1269 | | // This contains `capacity + 1 + NumClonedBytes()` entries, even |
1270 | | // when the table is empty (hence EmptyGroup). |
1271 | | // |
1272 | | // Note that growth_info is stored immediately before this pointer. |
1273 | | // May be uninitialized for SOO tables. |
1274 | | ctrl_t* control; |
1275 | | |
1276 | | // The beginning of the slots, located at `SlotOffset()` bytes after |
1277 | | // `control`. May be uninitialized for empty tables. |
1278 | | // Note: we can't use `slots` because Qt defines "slots" as a macro. |
1279 | | MaybeInitializedPtr slot_array; |
1280 | | }; |
1281 | | |
1282 | | // Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo |
1283 | | // is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`. |
1284 | | union HeapOrSoo { |
1285 | | HeapOrSoo() = default; |
1286 | 2 | explicit HeapOrSoo(ctrl_t* c) : heap(c) {} |
1287 | | |
1288 | 8 | ctrl_t*& control() { |
1289 | 8 | ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control); |
1290 | 8 | } |
1291 | 340 | ctrl_t* control() const { |
1292 | 340 | ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control); |
1293 | 340 | } |
1294 | 8 | MaybeInitializedPtr& slot_array() { |
1295 | 8 | ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array); |
1296 | 8 | } |
1297 | 162 | MaybeInitializedPtr slot_array() const { |
1298 | 162 | ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array); |
1299 | 162 | } |
1300 | 0 | void* get_soo_data() { |
1301 | 0 | ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data); |
1302 | 0 | } |
1303 | 0 | const void* get_soo_data() const { |
1304 | 0 | ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data); |
1305 | 0 | } |
1306 | | |
1307 | | HeapPtrs heap; |
1308 | | unsigned char soo_data[sizeof(HeapPtrs)]; |
1309 | | }; |
1310 | | |
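The union above costs nothing in size: the SOO buffer is exactly the bytes of the two heap pointers, reinterpreted as inline element storage. A toy model of the same trick (our own names, standard C++ only):

    #include <cstring>

    struct HeapPtrsToy {
      void* control;
      void* slot_array;
    };

    union HeapOrSooToy {
      HeapPtrsToy heap;
      unsigned char soo_data[sizeof(HeapPtrsToy)];  // inline storage, same footprint
    };

    static_assert(sizeof(HeapOrSooToy) == sizeof(HeapPtrsToy),
                  "SOO adds no size overhead");

    int main() {
      HeapOrSooToy u;
      int element = 42;
      // Store a small element in the bytes that would otherwise hold pointers.
      std::memcpy(u.soo_data, &element, sizeof(element));
      int out;
      std::memcpy(&out, u.soo_data, sizeof(out));
      return out == 42 ? 0 : 1;
    }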
1311 | | // CommonFields holds the fields in raw_hash_set that do not depend
1312 | | // on template parameters. This allows us to conveniently pass all |
1313 | | // of this state to helper functions as a single argument. |
1314 | | class CommonFields : public CommonFieldsGenerationInfo { |
1315 | | public: |
1316 | 2 | CommonFields() : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {} |
1317 | 0 | explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {} |
1318 | | explicit CommonFields(full_soo_tag_t) |
1319 | 0 | : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {} |
1320 | | |
1321 | | // Not copyable |
1322 | | CommonFields(const CommonFields&) = delete; |
1323 | | CommonFields& operator=(const CommonFields&) = delete; |
1324 | | |
1325 | | // Movable |
1326 | | CommonFields(CommonFields&& that) = default; |
1327 | | CommonFields& operator=(CommonFields&&) = default; |
1328 | | |
1329 | | template <bool kSooEnabled> |
1330 | 2 | static CommonFields CreateDefault() { |
1331 | 2 | return kSooEnabled ? CommonFields{soo_tag_t{}} : CommonFields{}; |
1332 | 2 | } |
1333 | | |
1334 | | // The inline data for SOO is written on top of control_/slots_. |
1335 | 0 | const void* soo_data() const { return heap_or_soo_.get_soo_data(); } |
1336 | 0 | void* soo_data() { return heap_or_soo_.get_soo_data(); } |
1337 | | |
1338 | 8 | HeapOrSoo heap_or_soo() const { return heap_or_soo_; } |
1339 | 0 | const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; } |
1340 | | |
1341 | 328 | ctrl_t* control() const { return heap_or_soo_.control(); } |
1342 | 8 | void set_control(ctrl_t* c) { heap_or_soo_.control() = c; } |
1343 | 0 | void* backing_array_start() const { |
1344 | | // growth_info (and maybe infoz) is stored before control bytes. |
1345 | 0 | assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0); |
1346 | 0 | return control() - ControlOffset(has_infoz()); |
1347 | 0 | } |
1348 | | |
1349 | | // Note: we can't use slots() because Qt defines "slots" as a macro. |
1350 | 138 | void* slot_array() const { return heap_or_soo_.slot_array().get(); } |
1351 | 0 | MaybeInitializedPtr slots_union() const { return heap_or_soo_.slot_array(); } |
1352 | 8 | void set_slots(void* s) { heap_or_soo_.slot_array().set(s); } |
1353 | | |
1354 | | // The number of filled slots. |
1355 | 68 | size_t size() const { return size_ >> HasInfozShift(); } |
1356 | 0 | void set_size(size_t s) { |
1357 | 0 | size_ = (s << HasInfozShift()) | (size_ & HasInfozMask()); |
1358 | 0 | } |
1359 | 0 | void set_empty_soo() { |
1360 | 0 | AssertInSooMode(); |
1361 | 0 | size_ = 0; |
1362 | 0 | } |
1363 | 0 | void set_full_soo() { |
1364 | 0 | AssertInSooMode(); |
1365 | 0 | size_ = size_t{1} << HasInfozShift(); |
1366 | 0 | } |
1367 | 16 | void increment_size() { |
1368 | 16 | assert(size() < capacity()); |
1369 | 16 | size_ += size_t{1} << HasInfozShift(); |
1370 | 16 | } |
1371 | 0 | void decrement_size() { |
1372 | 0 | assert(size() > 0); |
1373 | 0 | size_ -= size_t{1} << HasInfozShift(); |
1374 | 0 | } |
1375 | | |
1376 | | // The total number of available slots. |
1377 | 636 | size_t capacity() const { return capacity_; } |
1378 | 8 | void set_capacity(size_t c) { |
1379 | 8 | assert(c == 0 || IsValidCapacity(c)); |
1380 | 8 | capacity_ = c; |
1381 | 8 | } |
1382 | | |
1383 | | // The number of slots we can still fill without needing to rehash. |
1384 | | // This is stored in the heap allocation before the control bytes. |
1385 | | // TODO(b/289225379): experiment with moving growth_info back inline to |
1386 | | // increase room for SOO. |
1387 | 0 | size_t growth_left() const { return growth_info().GetGrowthLeft(); } |
1388 | | |
1389 | 48 | GrowthInfo& growth_info() { |
1390 | 48 | auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1; |
1391 | 48 | assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0); |
1392 | 48 | return *gl_ptr; |
1393 | 48 | } |
1394 | 0 | GrowthInfo growth_info() const { |
1395 | 0 | return const_cast<CommonFields*>(this)->growth_info(); |
1396 | 0 | } |
1397 | | |
1398 | 30 | bool has_infoz() const { |
1399 | 30 | return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0); |
1400 | 30 | } |
1401 | 8 | void set_has_infoz(bool has_infoz) { |
1402 | 8 | size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz); |
1403 | 8 | } |
1404 | | |
1405 | 22 | HashtablezInfoHandle infoz() { |
1406 | 22 | return has_infoz() |
1407 | 22 | ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) |
1408 | 22 | : HashtablezInfoHandle(); |
1409 | 22 | } |
1410 | 0 | void set_infoz(HashtablezInfoHandle infoz) { |
1411 | 0 | assert(has_infoz()); |
1412 | 0 | *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz; |
1413 | 0 | } |
1414 | | |
1415 | 0 | bool should_rehash_for_bug_detection_on_insert() const { |
1416 | 0 | return CommonFieldsGenerationInfo:: |
1417 | 0 | should_rehash_for_bug_detection_on_insert(control(), capacity()); |
1418 | 0 | } |
1419 | 0 | bool should_rehash_for_bug_detection_on_move() const { |
1420 | 0 | return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move( |
1421 | 0 | control(), capacity()); |
1422 | 0 | } |
1423 | 0 | void reset_reserved_growth(size_t reservation) { |
1424 | 0 | CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size()); |
1425 | 0 | } |
1426 | | |
1427 | | // The size of the backing array allocation. |
1428 | 0 | size_t alloc_size(size_t slot_size, size_t slot_align) const { |
1429 | 0 | return RawHashSetLayout(capacity(), slot_align, has_infoz()) |
1430 | 0 | .alloc_size(slot_size); |
1431 | 0 | } |
1432 | | |
1433 | | // Move fields other than heap_or_soo_. |
1434 | 0 | void move_non_heap_or_soo_fields(CommonFields& that) { |
1435 | 0 | static_cast<CommonFieldsGenerationInfo&>(*this) = |
1436 | 0 | std::move(static_cast<CommonFieldsGenerationInfo&>(that)); |
1437 | 0 | capacity_ = that.capacity_; |
1438 | 0 | size_ = that.size_; |
1439 | 0 | } |
1440 | | |
1441 | | // Returns the number of control bytes set to kDeleted. For testing only. |
1442 | 0 | size_t TombstonesCount() const { |
1443 | 0 | return static_cast<size_t>( |
1444 | 0 | std::count(control(), control() + capacity(), ctrl_t::kDeleted)); |
1445 | 0 | } |
1446 | | |
1447 | | private: |
1448 | | // We store the has_infoz bit in the lowest bit of size_. |
1449 | 122 | static constexpr size_t HasInfozShift() { return 1; } |
1450 | 30 | static constexpr size_t HasInfozMask() { |
1451 | 30 | return (size_t{1} << HasInfozShift()) - 1; |
1452 | 30 | } |
1453 | | |
1454 | | // We can't assert that SOO is enabled because we don't have SooEnabled(), but |
1455 | | // we assert what we can. |
1456 | 0 | void AssertInSooMode() const { |
1457 | 0 | assert(capacity() == SooCapacity()); |
1458 | 0 | assert(!has_infoz()); |
1459 | 0 | } |
1460 | | |
1461 | | // The number of slots in the backing array. This is always 2^N-1 for an |
1462 | | // integer N. NOTE: we tried experimenting with compressing the capacity and |
1463 | | // storing it together with size_: (a) using 6 bits to store the corresponding |
1464 | | // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of |
1465 | | // size_ and storing size in the low bits. Both of these experiments were |
1466 | | // regressions, presumably because we need capacity to do find operations. |
1467 | | size_t capacity_; |
1468 | | |
1469 | | // The size, with one low bit that stores whether we have infoz.
1470 | | // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_ |
1471 | | // encode the size in SOO case. We would be making size()/capacity() more |
1472 | | // expensive in order to have more SOO space. |
1473 | | size_t size_; |
1474 | | |
1475 | | // Either the control/slots pointers or the SOO slot. |
1476 | | HeapOrSoo heap_or_soo_; |
1477 | | }; |
1478 | | |
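A small self-contained sketch of the size_/has_infoz packing used by CommonFields (the constants are re-derived here rather than taken from the header):

    #include <cassert>
    #include <cstddef>

    int main() {
      constexpr size_t kShift = 1;                         // HasInfozShift()
      constexpr size_t kMask = (size_t{1} << kShift) - 1;  // HasInfozMask()

      size_t size_ = 0;
      size_ = (size_t{7} << kShift) | (size_ & kMask);   // set_size(7)
      assert((size_ >> kShift) == 7);                    // size()
      assert((size_ & kMask) == 0);                      // has_infoz() == false

      size_ = ((size_ >> kShift) << kShift) | size_t{1}; // set_has_infoz(true)
      assert((size_ >> kShift) == 7);                    // count preserved
      assert((size_ & kMask) != 0);                      // has_infoz() == true

      size_ += size_t{1} << kShift;                      // increment_size()
      assert((size_ >> kShift) == 8);
      return 0;
    }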
1479 | | template <class Policy, class Hash, class Eq, class Alloc> |
1480 | | class raw_hash_set; |
1481 | | |
1482 | | // Returns the next valid capacity after `n`. |
1483 | 8 | inline size_t NextCapacity(size_t n) { |
1484 | 8 | assert(IsValidCapacity(n) || n == 0); |
1485 | 8 | return n * 2 + 1; |
1486 | 8 | } |
1487 | | |
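Both capacity invariants are easy to check mechanically: a valid capacity is 2^m - 1, and NextCapacity() preserves that shape because 2*(2^m - 1) + 1 == 2^(m+1) - 1. A quick self-check:

    #include <cassert>
    #include <cstddef>

    int main() {
      // Same predicate as IsValidCapacity(): n+1 must be a power of two.
      auto is_valid = [](size_t n) { return ((n + 1) & n) == 0 && n > 0; };

      size_t n = 1;  // smallest valid capacity (2^1 - 1)
      for (int i = 0; i < 10; ++i) {
        assert(is_valid(n));
        n = n * 2 + 1;  // NextCapacity(): 1, 3, 7, 15, 31, ...
      }
      assert(n == 2047);                      // 2^11 - 1 after ten growth steps
      assert(!is_valid(8) && !is_valid(0));   // powers of two and zero are invalid
      return 0;
    }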
1488 | | // Applies the following mapping to every byte in the control array: |
1489 | | // * kDeleted -> kEmpty |
1490 | | // * kEmpty -> kEmpty |
1491 | | // * _ -> kDeleted |
1492 | | // PRECONDITION: |
1493 | | // IsValidCapacity(capacity) |
1494 | | // ctrl[capacity] == ctrl_t::kSentinel |
1495 | | // ctrl[i] != ctrl_t::kSentinel for all i < capacity |
1496 | | void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); |
1497 | | |
1498 | | // Converts `n` into the next valid capacity, per `IsValidCapacity`. |
1499 | 0 | inline size_t NormalizeCapacity(size_t n) { |
1500 | 0 | return n ? ~size_t{} >> countl_zero(n) : 1; |
1501 | 0 | } |
1502 | | |
1503 | | // General notes on capacity/growth methods below: |
1504 | | // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an |
1505 | | // average of two empty slots per group. |
1506 | | // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. |
1507 | | // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we |
1508 | | // never need to probe (the whole table fits in one group) so we don't need a |
1509 | | // load factor less than 1. |
1510 | | |
1511 | | // Given `capacity`, applies the load factor; i.e., it returns the maximum |
1512 | | // number of values we should put into the table before a resizing rehash. |
1513 | 8 | inline size_t CapacityToGrowth(size_t capacity) { |
1514 | 8 | assert(IsValidCapacity(capacity)); |
1515 | | // `capacity*7/8` |
1516 | 8 | if (Group::kWidth == 8 && capacity == 7) { |
1517 | | // x-x/8 does not work when x==7. |
1518 | 0 | return 6; |
1519 | 0 | } |
1520 | 8 | return capacity - capacity / 8; |
1521 | 8 | } |
1522 | | |
1523 | | // Given `growth`, "unapplies" the load factor to find how large the capacity |
1524 | | // should be to stay within the load factor. |
1525 | | // |
1526 | | // This might not be a valid capacity and `NormalizeCapacity()` should be |
1527 | | // called on this. |
1528 | 0 | inline size_t GrowthToLowerboundCapacity(size_t growth) { |
1529 | 0 | // `growth*8/7` |
1530 | 0 | if (Group::kWidth == 8 && growth == 7) { |
1531 | 0 | // x+(x-1)/7 does not work when x==7. |
1532 | 0 | return 8; |
1533 | 0 | } |
1534 | 0 | return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7); |
1535 | 0 | } |
1536 | | |
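The two functions above are near-inverses; a short numeric check of the 7/8 load-factor arithmetic for the 16-wide-group branch (the kWidth == 8 special cases are skipped in this sketch):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      auto capacity_to_growth = [](size_t capacity) {
        return capacity - capacity / 8;  // capacity * 7/8, overflow-free
      };
      auto growth_to_lowerbound_capacity = [](size_t growth) {
        return growth +
               static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
      };
      assert(capacity_to_growth(15) == 14);             // a capacity-15 table holds 14
      assert(capacity_to_growth(63) == 56);             // 63 - 63/8 == 56
      assert(growth_to_lowerbound_capacity(14) == 15);  // round-trips
      // The result is only a lower bound; callers must still run it through
      // NormalizeCapacity() to land on a valid 2^m - 1 capacity.
      assert(growth_to_lowerbound_capacity(56) == 63);
      return 0;
    }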
1537 | | template <class InputIter> |
1538 | | size_t SelectBucketCountForIterRange(InputIter first, InputIter last, |
1539 | | size_t bucket_count) { |
1540 | | if (bucket_count != 0) { |
1541 | | return bucket_count; |
1542 | | } |
1543 | | using InputIterCategory = |
1544 | | typename std::iterator_traits<InputIter>::iterator_category; |
1545 | | if (std::is_base_of<std::random_access_iterator_tag, |
1546 | | InputIterCategory>::value) { |
1547 | | return GrowthToLowerboundCapacity( |
1548 | | static_cast<size_t>(std::distance(first, last))); |
1549 | | } |
1550 | | return 0; |
1551 | | } |
1552 | | |
1553 | 0 | constexpr bool SwisstableDebugEnabled() { |
1554 | 0 | #if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \ |
1555 | 0 | ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG) |
1556 | 0 | return true; |
1557 | 0 | #else |
1558 | 0 | return false; |
1559 | 0 | #endif |
1560 | 0 | } |
1561 | | |
1562 | | inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation, |
1563 | | const GenerationType* generation_ptr, |
1564 | 16 | const char* operation) { |
1565 | 16 | if (!SwisstableDebugEnabled()) return; |
1566 | | // `SwisstableDebugEnabled()` is also true for release builds with hardening |
1567 | | // enabled. To minimize their impact in those builds: |
1568 | | // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout |
1569 | | // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve |
1570 | | // the chances that the hot paths will be inlined. |
1571 | 16 | if (ABSL_PREDICT_FALSE(ctrl == nullptr)) { |
1572 | 0 | ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation); |
1573 | 0 | } |
1574 | 16 | if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) { |
1575 | 0 | ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.", |
1576 | 0 | operation); |
1577 | 0 | } |
1578 | 16 | if (SwisstableGenerationsEnabled()) { |
1579 | 0 | if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) { |
1580 | 0 | ABSL_RAW_LOG(FATAL, |
1581 | 0 | "%s called on invalid iterator. The table could have " |
1582 | 0 | "rehashed or moved since this iterator was initialized.", |
1583 | 0 | operation); |
1584 | 0 | } |
1585 | 0 | if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) { |
1586 | 0 | ABSL_RAW_LOG( |
1587 | 0 | FATAL, |
1588 | 0 | "%s called on invalid iterator. The element was likely erased.", |
1589 | 0 | operation); |
1590 | 0 | } |
1591 | 16 | } else { |
1592 | 16 | if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) { |
1593 | 0 | ABSL_RAW_LOG( |
1594 | 0 | FATAL, |
1595 | 0 | "%s called on invalid iterator. The element might have been erased " |
1596 | 0 | "or the table might have rehashed. Consider running with " |
1597 | 0 | "--config=asan to diagnose rehashing issues.", |
1598 | 0 | operation); |
1599 | 0 | } |
1600 | 16 | } |
1601 | 16 | } |
1602 | | |
1603 | | // Note that for comparisons, null/end iterators are valid. |
1604 | | inline void AssertIsValidForComparison(const ctrl_t* ctrl, |
1605 | | GenerationType generation, |
1606 | 32 | const GenerationType* generation_ptr) { |
1607 | 32 | if (!SwisstableDebugEnabled()) return; |
1608 | 32 | const bool ctrl_is_valid_for_comparison = |
1609 | 32 | ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl); |
1610 | 32 | if (SwisstableGenerationsEnabled()) { |
1611 | 0 | if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) { |
1612 | 0 | ABSL_RAW_LOG(FATAL, |
1613 | 0 | "Invalid iterator comparison. The table could have rehashed " |
1614 | 0 | "or moved since this iterator was initialized."); |
1615 | 0 | } |
1616 | 0 | if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) { |
1617 | 0 | ABSL_RAW_LOG( |
1618 | 0 | FATAL, "Invalid iterator comparison. The element was likely erased."); |
1619 | 0 | } |
1620 | 32 | } else { |
1621 | 32 | ABSL_HARDENING_ASSERT( |
1622 | 32 | ctrl_is_valid_for_comparison && |
1623 | 32 | "Invalid iterator comparison. The element might have been erased or " |
1624 | 32 | "the table might have rehashed. Consider running with --config=asan to " |
1625 | 32 | "diagnose rehashing issues."); |
1626 | 32 | } |
1627 | 32 | } |
1628 | | |
1629 | | // If the two iterators come from the same container, then their pointers will |
1630 | | // interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
1631 | | // Note: we take slots by reference so that it's not UB if they're uninitialized |
1632 | | // as long as we don't read them (when ctrl is null). |
1633 | | inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a, |
1634 | | const ctrl_t* ctrl_b, |
1635 | | const void* const& slot_a, |
1636 | 16 | const void* const& slot_b) { |
1637 | | // If either control byte is null, then we can't tell. |
1638 | 16 | if (ctrl_a == nullptr || ctrl_b == nullptr) return true; |
1639 | 16 | const bool a_is_soo = IsSooControl(ctrl_a); |
1640 | 16 | if (a_is_soo != IsSooControl(ctrl_b)) return false; |
1641 | 16 | if (a_is_soo) return slot_a == slot_b; |
1642 | | |
1643 | 16 | const void* low_slot = slot_a; |
1644 | 16 | const void* hi_slot = slot_b; |
1645 | 16 | if (ctrl_a > ctrl_b) { |
1646 | 0 | std::swap(ctrl_a, ctrl_b); |
1647 | 0 | std::swap(low_slot, hi_slot); |
1648 | 0 | } |
1649 | 16 | return ctrl_b < low_slot && low_slot <= hi_slot; |
1650 | 16 | } |
1651 | | |
1652 | | // Asserts that two iterators come from the same container. |
1653 | | // Note: we take slots by reference so that it's not UB if they're uninitialized |
1654 | | // as long as we don't read them (when ctrl is null). |
1655 | | inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, |
1656 | | const void* const& slot_a, |
1657 | | const void* const& slot_b, |
1658 | | const GenerationType* generation_ptr_a, |
1659 | 16 | const GenerationType* generation_ptr_b) { |
1660 | 16 | if (!SwisstableDebugEnabled()) return; |
1661 | | // `SwisstableDebugEnabled()` is also true for release builds with hardening |
1662 | | // enabled. To minimize their impact in those builds: |
1663 | | // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout |
1664 | | // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve |
1665 | | // the chances that the hot paths will be inlined. |
1666 | | |
1667 | | // fail_if(is_invalid, message) crashes when is_invalid is true and provides |
1668 | | // an error message based on `message`. |
1669 | 16 | const auto fail_if = [](bool is_invalid, const char* message) { |
1670 | 16 | if (ABSL_PREDICT_FALSE(is_invalid)) { |
1671 | 0 | ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message); |
1672 | 0 | } |
1673 | 16 | }; |
1674 | | |
1675 | 16 | const bool a_is_default = ctrl_a == EmptyGroup(); |
1676 | 16 | const bool b_is_default = ctrl_b == EmptyGroup(); |
1677 | 16 | if (a_is_default && b_is_default) return; |
1678 | 16 | fail_if(a_is_default != b_is_default, |
1679 | 16 | "Comparing default-constructed hashtable iterator with a " |
1680 | 16 | "non-default-constructed hashtable iterator."); |
1681 | | |
1682 | 16 | if (SwisstableGenerationsEnabled()) { |
1683 | 0 | if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return; |
1684 | | // Users don't need to know whether the tables are SOO so don't mention SOO |
1685 | | // in the debug message. |
1686 | 0 | const bool a_is_soo = IsSooControl(ctrl_a); |
1687 | 0 | const bool b_is_soo = IsSooControl(ctrl_b); |
1688 | 0 | fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo), |
1689 | 0 | "Comparing iterators from different hashtables."); |
1690 | 0 |
1691 | 0 | const bool a_is_empty = IsEmptyGeneration(generation_ptr_a); |
1692 | 0 | const bool b_is_empty = IsEmptyGeneration(generation_ptr_b); |
1693 | 0 | fail_if(a_is_empty != b_is_empty, |
1694 | 0 | "Comparing an iterator from an empty hashtable with an iterator " |
1695 | 0 | "from a non-empty hashtable."); |
1696 | 0 | fail_if(a_is_empty && b_is_empty, |
1697 | 0 | "Comparing iterators from different empty hashtables."); |
1698 | 0 |
1699 | 0 | const bool a_is_end = ctrl_a == nullptr; |
1700 | 0 | const bool b_is_end = ctrl_b == nullptr; |
1701 | 0 | fail_if(a_is_end || b_is_end, |
1702 | 0 | "Comparing iterator with an end() iterator from a different " |
1703 | 0 | "hashtable."); |
1704 | 0 | fail_if(true, "Comparing non-end() iterators from different hashtables."); |
1705 | 16 | } else { |
1706 | 16 | ABSL_HARDENING_ASSERT( |
1707 | 16 | AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) && |
1708 | 16 | "Invalid iterator comparison. The iterators may be from different " |
1709 | 16 | "containers or the container might have rehashed or moved. Consider " |
1710 | 16 | "running with --config=asan to diagnose issues."); |
1711 | 16 | } |
1712 | 16 | } |
1713 | | |
1714 | | struct FindInfo { |
1715 | | size_t offset; |
1716 | | size_t probe_length; |
1717 | | }; |
1718 | | |
1719 | | // Whether a table is "small". A small table fits entirely into a probing |
1720 | | // group, i.e., has a capacity < `Group::kWidth`. |
1721 | | // |
1722 | | // In small mode we are able to use the whole capacity. The extra control |
1723 | | // bytes give us at least one "empty" control byte to stop the iteration. |
1724 | | // This is important to make 1 a valid capacity. |
1725 | | // |
1726 | | // In small mode only the first `capacity` control bytes after the sentinel |
1727 | | // are valid. The rest contain dummy ctrl_t::kEmpty values that do not |
1728 | | // represent a real slot. This is important to take into account in
1729 | | // `find_first_non_full()`, where we never try |
1730 | | // `ShouldInsertBackwards()` for small tables. |
1731 | 46 | inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } |
1732 | | |
1733 | | // Whether a table fits entirely into a probing group. |
1734 | | // Arbitrary order of elements in such tables is correct. |
1735 | 34 | inline bool is_single_group(size_t capacity) { |
1736 | 34 | return capacity <= Group::kWidth; |
1737 | 34 | } |
1738 | | |
1739 | | // Begins a probing operation on `common.control`, using `hash`. |
1740 | | inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity, |
1741 | 40 | size_t hash) { |
1742 | 40 | return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity); |
1743 | 40 | } |
1744 | 40 | inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) { |
1745 | 40 | return probe(common.control(), common.capacity(), hash); |
1746 | 40 | } |
1747 | | |
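For intuition, probe_seq (defined earlier in this header) advances in strides that grow by Group::kWidth each step, visiting offsets h, h+w, h+3w, h+6w, ... (mod capacity+1), i.e. triangular-number multiples of the group width; with a 2^m - 1 capacity this touches every group exactly once before repeating. The following is a hedged re-creation of that visiting order, assuming Group::kWidth == 16 (probe_seq's internals are paraphrased here, not quoted):

    #include <cassert>
    #include <cstddef>
    #include <set>

    int main() {
      constexpr size_t kWidth = 16;
      const size_t capacity = 63;     // 2^6 - 1, used directly as the bit mask
      size_t offset = 25 & capacity;  // H1(hash) & mask; 25 is an arbitrary H1
      size_t index = 0;               // stride, grows by kWidth every step
      std::set<size_t> groups_seen;
      const size_t num_groups = (capacity + 1) / kWidth;  // 4 groups
      for (size_t i = 0; i < num_groups; ++i) {
        groups_seen.insert(offset / kWidth);   // which group this probe lands in
        index += kWidth;
        offset = (offset + index) & capacity;  // probe_seq::next(), sketched
      }
      // Triangular strides modulo a power of two permute the groups, so all
      // four groups are probed before any repeats.
      assert(groups_seen.size() == num_groups);
      return 0;
    }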
1748 | | // Probes an array of control bits using a probe sequence derived from `hash`, |
1749 | | // and returns the offset corresponding to the first deleted or empty slot. |
1750 | | // |
1751 | | // Behavior when the entire table is full is undefined. |
1752 | | // |
1753 | | // NOTE: this function must work with tables having both empty and deleted |
1754 | | // slots in the same group. Such tables appear during `erase()`. |
1755 | | template <typename = void> |
1756 | 0 | inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) { |
1757 | 0 | auto seq = probe(common, hash); |
1758 | 0 | const ctrl_t* ctrl = common.control(); |
1759 | 0 | if (IsEmptyOrDeleted(ctrl[seq.offset()]) && |
1760 | 0 | !ShouldInsertBackwards(common.capacity(), hash, ctrl)) { |
1761 | 0 | return {seq.offset(), /*probe_length=*/0}; |
1762 | 0 | } |
1763 | 0 | while (true) { |
1764 | 0 | GroupFullEmptyOrDeleted g{ctrl + seq.offset()}; |
1765 | 0 | auto mask = g.MaskEmptyOrDeleted(); |
1766 | 0 | if (mask) { |
1767 | 0 | return { |
1768 | 0 | seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)), |
1769 | 0 | seq.index()}; |
1770 | 0 | } |
1771 | 0 | seq.next(); |
1772 | 0 | assert(seq.index() <= common.capacity() && "full table!"); |
1773 | 0 | } |
1774 | 0 | } |
1775 | | |
1776 | | // The extern template for this inline function keeps the possibility of
1777 | | // inlining. When the compiler decides not to inline, no symbols will be
1778 | | // added to the corresponding translation unit.
1779 | | extern template FindInfo find_first_non_full(const CommonFields&, size_t); |
1780 | | |
1781 | | // Non-inlined version of find_first_non_full for use in less |
1782 | | // performance critical routines. |
1783 | | FindInfo find_first_non_full_outofline(const CommonFields&, size_t); |
1784 | | |
1785 | 8 | inline void ResetGrowthLeft(CommonFields& common) { |
1786 | 8 | common.growth_info().InitGrowthLeftNoDeleted( |
1787 | 8 | CapacityToGrowth(common.capacity()) - common.size()); |
1788 | 8 | } |
1789 | | |
1790 | | // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
1791 | | // array as empty.
1792 | 2 | inline void ResetCtrl(CommonFields& common, size_t slot_size) { |
1793 | 2 | const size_t capacity = common.capacity(); |
1794 | 2 | ctrl_t* ctrl = common.control(); |
1795 | 2 | std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty), |
1796 | 2 | capacity + 1 + NumClonedBytes()); |
1797 | 2 | ctrl[capacity] = ctrl_t::kSentinel; |
1798 | 2 | SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity); |
1799 | 2 | } |
1800 | | |
1801 | | // Sets sanitizer poisoning for the slot corresponding to the control byte being set.
1802 | | inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h, |
1803 | 16 | size_t slot_size) { |
1804 | 16 | assert(i < c.capacity()); |
1805 | 16 | auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size; |
1806 | 16 | if (IsFull(h)) { |
1807 | 16 | SanitizerUnpoisonMemoryRegion(slot_i, slot_size); |
1808 | 16 | } else { |
1809 | 0 | SanitizerPoisonMemoryRegion(slot_i, slot_size); |
1810 | 0 | } |
1811 | 16 | } |
1812 | | |
1813 | | // Sets `ctrl[i]` to `h`. |
1814 | | // |
1815 | | // Unlike setting it directly, this function will perform bounds checks and |
1816 | | // mirror the value to the cloned tail if necessary. |
1817 | | inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h, |
1818 | 16 | size_t slot_size) { |
1819 | 16 | DoSanitizeOnSetCtrl(c, i, h, slot_size); |
1820 | 16 | ctrl_t* ctrl = c.control(); |
1821 | 16 | ctrl[i] = h; |
1822 | 16 | ctrl[((i - NumClonedBytes()) & c.capacity()) + |
1823 | 16 | (NumClonedBytes() & c.capacity())] = h; |
1824 | 16 | } |
1825 | | // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. |
1826 | 16 | inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) { |
1827 | 16 | SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size); |
1828 | 16 | } |
1829 | | |
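The mirror-index expression in SetCtrl is branchless on purpose: for slots whose control byte has a clone it targets the cloned tail after the sentinel, and for every other slot it collapses to rewriting ctrl[i] itself. A quick check of that property for NumClonedBytes() == 15, i.e. Group::kWidth == 16, which is an assumption of this sketch:

    #include <cassert>
    #include <cstddef>

    int main() {
      constexpr size_t kCloned = 15;  // NumClonedBytes() for 16-wide groups
      auto mirror = [](size_t i, size_t capacity) {
        return ((i - kCloned) & capacity) + (kCloned & capacity);
      };
      // capacity 15: every slot's control byte is cloned after the sentinel.
      for (size_t i = 0; i < 15; ++i) assert(mirror(i, 15) == 15 + 1 + i);
      // capacity 31: only the first kWidth-1 slots have clones; the rest
      // "mirror" onto themselves, making the second store a harmless no-op.
      for (size_t i = 0; i < kCloned; ++i) assert(mirror(i, 31) == 31 + 1 + i);
      for (size_t i = kCloned; i < 31; ++i) assert(mirror(i, 31) == i);
      // small capacity 3: the `& capacity` terms shrink both operands.
      for (size_t i = 0; i < 3; ++i) assert(mirror(i, 3) == 3 + 1 + i);
      return 0;
    }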
1830 | | // Like SetCtrl, but in a single group table, we can save some operations when |
1831 | | // setting the cloned control byte. |
1832 | | inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h, |
1833 | 0 | size_t slot_size) { |
1834 | 0 | assert(is_single_group(c.capacity())); |
1835 | 0 | DoSanitizeOnSetCtrl(c, i, h, slot_size); |
1836 | 0 | ctrl_t* ctrl = c.control(); |
1837 | 0 | ctrl[i] = h; |
1838 | 0 | ctrl[i + c.capacity() + 1] = h; |
1839 | 0 | } |
1840 | | // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. |
1841 | | inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h, |
1842 | 0 | size_t slot_size) { |
1843 | 0 | SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size); |
1844 | 0 | } |
1845 | | |
1846 | | // growth_info (which is a size_t) is stored with the backing array. |
1847 | 0 | constexpr size_t BackingArrayAlignment(size_t align_of_slot) { |
1848 | 0 | return (std::max)(align_of_slot, alignof(GrowthInfo)); |
1849 | 0 | } |
1850 | | |
1851 | | // Returns the address of the ith slot in slots where each slot occupies |
1852 | | // slot_size. |
1853 | 40 | inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) { |
1854 | 40 | return static_cast<void*>(static_cast<char*>(slot_array) + |
1855 | 40 | (slot * slot_size)); |
1856 | 40 | } |
1857 | | |
1858 | | // Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`. |
1859 | | // No insertion into the table is allowed during the Callback call.
1860 | | // Erasure is allowed only for the element passed to the callback. |
1861 | | template <class SlotType, class Callback> |
1862 | | ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots( |
1863 | 30 | const CommonFields& c, SlotType* slot, Callback cb) { |
1864 | 30 | const size_t cap = c.capacity(); |
1865 | 30 | const ctrl_t* ctrl = c.control(); |
1866 | 30 | if (is_small(cap)) { |
1867 | | // Mirrored/cloned control bytes in a small table are also located in the
1868 | | // first group (starting from position 0). We take the group from position
1869 | | // `capacity` in order to avoid duplicates.
1870 | |
1871 | | // A small table's capacity fits into the portable group, whose
1872 | | // GroupPortableImpl::MaskFull is more efficient when
1873 | | // capacity <= GroupPortableImpl::kWidth.
1874 | 28 | assert(cap <= GroupPortableImpl::kWidth && |
1875 | 28 | "unexpectedly large small capacity"); |
1876 | 28 | static_assert(Group::kWidth >= GroupPortableImpl::kWidth, |
1877 | 28 | "unexpected group width"); |
1878 | | // The group starts from the kSentinel slot, so indices in the mask are
1879 | | // increased by 1.
1880 | 28 | const auto mask = GroupPortableImpl(ctrl + cap).MaskFull(); |
1881 | 28 | --ctrl; |
1882 | 28 | --slot; |
1883 | 112 | for (uint32_t i : mask) { |
1884 | 112 | cb(ctrl + i, slot + i); |
1885 | 112 | } |
1886 | 28 | return; |
1887 | 28 | } |
1888 | 2 | size_t remaining = c.size(); |
1889 | 2 | ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining; |
1890 | 4 | while (remaining != 0) { |
1891 | 16 | for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) { |
1892 | 16 | assert(IsFull(ctrl[i]) && "hash table was modified unexpectedly"); |
1893 | 16 | cb(ctrl + i, slot + i); |
1894 | 16 | --remaining; |
1895 | 16 | } |
1896 | 2 | ctrl += Group::kWidth; |
1897 | 2 | slot += Group::kWidth; |
1898 | 2 | assert((remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) && |
1899 | 2 | "hash table was modified unexpectedly"); |
1900 | 2 | } |
1901 | | // NOTE: erasure of the current element is allowed in callback for |
1902 | | // absl::erase_if specialization. So we use `>=`. |
1903 | 2 | assert(original_size_for_assert >= c.size() && |
1904 | 2 | "hash table was modified unexpectedly"); |
1905 | 2 | }
1906 | | |
1907 | | template <typename CharAlloc> |
1908 | 8 | constexpr bool ShouldSampleHashtablezInfo() { |
1909 | | // Folks with custom allocators often make unwarranted assumptions about the |
1910 | | // behavior of their classes vis-a-vis trivial destructibility and what
1911 | | // calls they will or won't make. Avoid sampling for people with custom |
1912 | | // allocators to get us out of this mess. This is not a hard guarantee but |
1913 | | // a workaround while we plan the exact guarantee we want to provide. |
1914 | 8 | return std::is_same<CharAlloc, std::allocator<char>>::value; |
1915 | 8 | } |
1916 | | |
1917 | | template <bool kSooEnabled> |
1918 | | HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key, |
1919 | | size_t sizeof_value, |
1920 | | size_t old_capacity, bool was_soo, |
1921 | | HashtablezInfoHandle forced_infoz, |
1922 | 8 | CommonFields& c) { |
1923 | 8 | if (forced_infoz.IsSampled()) return forced_infoz; |
1924 | | // In SOO, we sample on the first insertion so if this is an empty SOO case |
1925 | | // (e.g. when reserve is called), then we still need to sample. |
1926 | 8 | if (kSooEnabled && was_soo && c.size() == 0) { |
1927 | 0 | return Sample(sizeof_slot, sizeof_key, sizeof_value, SooCapacity()); |
1928 | 0 | } |
1929 | | // For non-SOO cases, we sample whenever the capacity is increasing from zero |
1930 | | // to non-zero. |
1931 | 8 | if (!kSooEnabled && old_capacity == 0) { |
1932 | 2 | return Sample(sizeof_slot, sizeof_key, sizeof_value, 0); |
1933 | 2 | } |
1934 | 6 | return c.infoz(); |
1935 | 8 | } |
1936 | | |
1937 | | // Helper class to perform resize of the hash set. |
1938 | | // |
1939 | | // It contains special optimizations for small group resizes. |
1940 | | // See GrowIntoSingleGroupShuffleControlBytes for details. |
1941 | | class HashSetResizeHelper { |
1942 | | public: |
1943 | | explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot, |
1944 | | HashtablezInfoHandle forced_infoz) |
1945 | | : old_capacity_(c.capacity()), |
1946 | | had_infoz_(c.has_infoz()), |
1947 | | was_soo_(was_soo), |
1948 | | had_soo_slot_(had_soo_slot), |
1949 | 8 | forced_infoz_(forced_infoz) {} |
1950 | | |
1951 | | // Optimized for small groups version of `find_first_non_full`. |
1952 | | // Beneficial only right after calling `raw_hash_set::resize`. |
1953 | | // It is safe to call in case capacity is big or was not changed, but there |
1954 | | // will be no performance benefit. |
1955 | | // It has implicit assumption that `resize` will call |
1956 | | // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`. |
1957 | | // Falls back to `find_first_non_full` in case of big groups. |
1958 | | static FindInfo FindFirstNonFullAfterResize(const CommonFields& c, |
1959 | | size_t old_capacity, |
1960 | 8 | size_t hash) { |
1961 | 8 | if (!IsGrowingIntoSingleGroupApplicable(old_capacity, c.capacity())) { |
1962 | 0 | return find_first_non_full(c, hash); |
1963 | 0 | } |
1964 | | // Find a location for the new element non-deterministically. |
1965 | | // Note that any position is correct. |
1966 | | // It will be located at `half_old_capacity` or at one of the other
1967 | | // empty slots, with approximately 50% probability each.
1968 | 8 | size_t offset = probe(c, hash).offset(); |
1969 | | |
1970 | | // Note that we intentionally use unsigned int underflow. |
1971 | 8 | if (offset - (old_capacity + 1) >= old_capacity) { |
1972 | | // The offset falls on kSentinel or into the mostly occupied first half.
1973 | 7 | offset = old_capacity / 2; |
1974 | 7 | } |
1975 | 8 | assert(IsEmpty(c.control()[offset])); |
1976 | 8 | return FindInfo{offset, 0}; |
1977 | 8 | } |
1978 | | |
1979 | 8 | HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; } |
1980 | 0 | void* old_soo_data() { return old_heap_or_soo_.get_soo_data(); } |
1981 | 12 | ctrl_t* old_ctrl() const { |
1982 | 12 | assert(!was_soo_); |
1983 | 12 | return old_heap_or_soo_.control(); |
1984 | 12 | } |
1985 | 24 | void* old_slots() const { |
1986 | 24 | assert(!was_soo_); |
1987 | 24 | return old_heap_or_soo_.slot_array().get(); |
1988 | 24 | } |
1989 | 14 | size_t old_capacity() const { return old_capacity_; } |
1990 | | |
1991 | | // Returns the index of the SOO slot when growing from SOO to non-SOO in a |
1992 | | // single group. See also InitControlBytesAfterSoo(). It's important to use |
1993 | | // index 1 so that when resizing from capacity 1 to 3, we can still have |
1994 | | // random iteration order between the first two inserted elements. |
1995 | | // I.e. it allows inserting the second element at either index 0 or 2. |
1996 | 0 | static size_t SooSlotIndex() { return 1; } |
1997 | | |
1998 | | // Allocates a backing array for the hashtable. |
1999 | | // Reads `capacity` and updates all other fields based on the result of |
2000 | | // the allocation. |
2001 | | // |
2002 | | // It may also do the following actions:
2003 | | // 1. initialize control bytes |
2004 | | // 2. initialize slots |
2005 | | // 3. deallocate old slots. |
2006 | | // |
2007 | | // We are bundling a lot of functionality |
2008 | | // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code |
2009 | | // duplication in raw_hash_set<>::resize. |
2010 | | // |
2011 | | // `c.capacity()` must be nonzero. |
2012 | | // POSTCONDITIONS: |
2013 | | // 1. CommonFields is initialized. |
2014 | | // |
2015 | | // if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy |
2016 | | // Both control bytes and slots are fully initialized. |
2017 | | // old_slots are deallocated. |
2018 | | // infoz.RecordRehash is called. |
2019 | | // |
2020 | | // if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy |
2021 | | // Control bytes are fully initialized. |
2022 | | // infoz.RecordRehash is called. |
2023 | | // GrowSizeIntoSingleGroup must be called to finish slots initialization. |
2024 | | // |
2025 | | // if !IsGrowingIntoSingleGroupApplicable |
2026 | | // Control bytes are initialized to empty table via ResetCtrl. |
2027 | | // raw_hash_set<>::resize must insert elements regularly. |
2028 | | // infoz.RecordRehash is called if old_capacity == 0. |
2029 | | // |
2030 | | // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation. |
2031 | | template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy, |
2032 | | bool SooEnabled, size_t AlignOfSlot> |
2033 | | ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc, |
2034 | | ctrl_t soo_slot_h2, |
2035 | | size_t key_size, |
2036 | 8 | size_t value_size) { |
2037 | 8 | assert(c.capacity()); |
2038 | 8 | HashtablezInfoHandle infoz = |
2039 | 8 | ShouldSampleHashtablezInfo<Alloc>() |
2040 | 8 | ? SampleHashtablezInfo<SooEnabled>(SizeOfSlot, key_size, value_size, |
2041 | 8 | old_capacity_, was_soo_, |
2042 | 8 | forced_infoz_, c) |
2043 | 8 | : HashtablezInfoHandle{}; |
2044 | | |
2045 | 8 | const bool has_infoz = infoz.IsSampled(); |
2046 | 8 | RawHashSetLayout layout(c.capacity(), AlignOfSlot, has_infoz); |
2047 | 8 | char* mem = static_cast<char*>(Allocate<BackingArrayAlignment(AlignOfSlot)>( |
2048 | 8 | &alloc, layout.alloc_size(SizeOfSlot))); |
2049 | 8 | const GenerationType old_generation = c.generation(); |
2050 | 8 | c.set_generation_ptr( |
2051 | 8 | reinterpret_cast<GenerationType*>(mem + layout.generation_offset())); |
2052 | 8 | c.set_generation(NextGeneration(old_generation)); |
2053 | 8 | c.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset())); |
2054 | 8 | c.set_slots(mem + layout.slot_offset()); |
2055 | 8 | ResetGrowthLeft(c); |
2056 | | |
2057 | 8 | const bool grow_single_group = |
2058 | 8 | IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity()); |
2059 | 8 | if (SooEnabled && was_soo_ && grow_single_group) { |
2060 | 0 | InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity()); |
2061 | 0 | if (TransferUsesMemcpy && had_soo_slot_) { |
2062 | 0 | TransferSlotAfterSoo(c, SizeOfSlot); |
2063 | 0 | } |
2064 | | // SooEnabled implies that old_capacity_ != 0. |
2065 | 8 | } else if ((SooEnabled || old_capacity_ != 0) && grow_single_group) { |
2066 | 6 | if (TransferUsesMemcpy) { |
2067 | 6 | GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot); |
2068 | 6 | DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot); |
2069 | 6 | } else { |
2070 | 0 | GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity()); |
2071 | 0 | } |
2072 | 6 | } else { |
2073 | 2 | ResetCtrl(c, SizeOfSlot); |
2074 | 2 | } |
2075 | | |
2076 | 8 | c.set_has_infoz(has_infoz); |
2077 | 8 | if (has_infoz) { |
2078 | 0 | infoz.RecordStorageChanged(c.size(), layout.capacity()); |
2079 | 0 | if ((SooEnabled && was_soo_) || grow_single_group || old_capacity_ == 0) { |
2080 | 0 | infoz.RecordRehash(0); |
2081 | 0 | } |
2082 | 0 | c.set_infoz(infoz); |
2083 | 0 | } |
2084 | 8 | return grow_single_group; |
2085 | 8 | } |
2086 | | |
2087 | | // Relocates slots into a new single group consistent with
2088 | | // GrowIntoSingleGroupShuffleControlBytes. |
2089 | | // |
2090 | | // PRECONDITIONS: |
2091 | | // 1. GrowIntoSingleGroupShuffleControlBytes was already called. |
2092 | | template <class PolicyTraits, class Alloc> |
2093 | 0 | void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) { |
2094 | 0 | assert(old_capacity_ < Group::kWidth / 2); |
2095 | 0 | assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity())); |
2096 | 0 | using slot_type = typename PolicyTraits::slot_type; |
2097 | 0 | assert(is_single_group(c.capacity())); |
2098 | 0 |
2099 | 0 | auto* new_slots = static_cast<slot_type*>(c.slot_array()); |
2100 | 0 | auto* old_slots_ptr = static_cast<slot_type*>(old_slots()); |
2101 | 0 |
2102 | 0 | size_t shuffle_bit = old_capacity_ / 2 + 1; |
2103 | 0 | for (size_t i = 0; i < old_capacity_; ++i) { |
2104 | 0 | if (IsFull(old_ctrl()[i])) { |
2105 | 0 | size_t new_i = i ^ shuffle_bit; |
2106 | 0 | SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type)); |
2107 | 0 | PolicyTraits::transfer(&alloc_ref, new_slots + new_i, |
2108 | 0 | old_slots_ptr + i); |
2109 | 0 | } |
2110 | 0 | } |
2111 | 0 | PoisonSingleGroupEmptySlots(c, sizeof(slot_type)); |
2112 | 0 | } |
2113 | | |
2114 | | // Deallocates old backing array. |
2115 | | template <size_t AlignOfSlot, class CharAlloc> |
2116 | 6 | void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) { |
2117 | 6 | SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_); |
2118 | 6 | auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_); |
2119 | 6 | Deallocate<BackingArrayAlignment(AlignOfSlot)>( |
2120 | 6 | &alloc_ref, old_ctrl() - layout.control_offset(), |
2121 | 6 | layout.alloc_size(slot_size)); |
2122 | 6 | } |
2123 | | |
2124 | | private: |
2125 | | // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing. |
2126 | | static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity, |
2127 | 22 | size_t new_capacity) { |
2128 | | // NOTE: `old_capacity < new_capacity` is required so that
2129 | | // `old_capacity < Group::kWidth / 2`, which enables faster 8-byte copies.
2130 | 22 | return is_single_group(new_capacity) && old_capacity < new_capacity; |
2131 | 22 | } |
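For example, with `Group::kWidth == 16` (so the valid capacities 1, 3, 7, and 15 are all single-group), growing from capacity 7 to capacity 15 qualifies, and `old_capacity == 7 < 8 == Group::kWidth / 2` as the note above requires; growing from 15 to 31 does not qualify, because a capacity of 31 spans more than one group.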
2132 | | |
2133 | | // Relocates control bytes and slots into the new single group for
2134 | | // transferable objects.
2135 | | // Must be called only if IsGrowingIntoSingleGroupApplicable returned true. |
2136 | | void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size); |
2137 | | |
2138 | | // If there was an SOO slot and slots are transferable, transfers the SOO slot |
2139 | | // into the new heap allocation. Must be called only if |
2140 | | // IsGrowingIntoSingleGroupApplicable returned true. |
2141 | | void TransferSlotAfterSoo(CommonFields& c, size_t slot_size); |
2142 | | |
2143 | | // Shuffles the control bytes deterministically into the next capacity.
2144 | | //
2145 | | // PRECONDITIONS:
2146 | | // 1. new_ctrl is allocated for new_capacity,
2147 | | //    but not initialized.
2148 | | // 2. new_capacity is a single group.
2149 | | //
2150 | | // All elements are transferred into the first `old_capacity + 1` positions
2151 | | // of new_ctrl. Elements are rotated by `old_capacity_ / 2 + 1` positions so
2152 | | // that their order changes and remains non-deterministic. Although the
2153 | | // rotation itself is deterministic, the position of a newly added element
2154 | | // is based on `H1` and is therefore not deterministic.
2155 | | //
2156 | | // |
2157 | | // Examples: |
2158 | | // S = kSentinel, E = kEmpty |
2159 | | // |
2160 | | // old_ctrl = SEEEEEEEE... |
2161 | | // new_ctrl = ESEEEEEEE... |
2162 | | // |
2163 | | // old_ctrl = 0SEEEEEEE... |
2164 | | // new_ctrl = E0ESE0EEE... |
2165 | | // |
2166 | | // old_ctrl = 012S012EEEEEEEEE... |
2167 | | // new_ctrl = 2E01EEES2E01EEE... |
2168 | | // |
2169 | | // old_ctrl = 0123456S0123456EEEEEEEEEEE... |
2170 | | // new_ctrl = 456E0123EEEEEES456E0123EEE... |
2171 | | void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl, |
2172 | | size_t new_capacity) const; |
2173 | | |
2174 | | // If the table was SOO, initializes new control bytes. `h2` is the control |
2175 | | // byte corresponding to the full slot. Must be called only if |
2176 | | // IsGrowingIntoSingleGroupApplicable returned true. |
2177 | | // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`. |
2178 | | void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2, |
2179 | | size_t new_capacity); |
2180 | | |
2181 | | // Shuffles trivially transferable slots in a way consistent with
2182 | | // GrowIntoSingleGroupShuffleControlBytes.
2183 | | //
2184 | | // PRECONDITIONS:
2185 | | // 1. old_capacity must be non-zero. |
2186 | | // 2. new_ctrl is fully initialized using |
2187 | | // GrowIntoSingleGroupShuffleControlBytes. |
2188 | | // 3. new_slots is allocated and *not* poisoned. |
2189 | | // |
2190 | | // POSTCONDITIONS: |
2191 | | // 1. new_slots are transferred from old_slots_ consistent with |
2192 | | // GrowIntoSingleGroupShuffleControlBytes. |
2193 | | // 2. Empty new_slots are *not* poisoned. |
2194 | | void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots, |
2195 | | size_t slot_size) const; |
2196 | | |
2197 | | // Poisons empty slots that were transferred using the deterministic
2198 | | // algorithm described above.
2199 | | // PRECONDITIONS:
2200 | | // 1. new_ctrl is fully initialized using |
2201 | | // GrowIntoSingleGroupShuffleControlBytes. |
2202 | | // 2. new_slots is fully initialized consistent with |
2203 | | // GrowIntoSingleGroupShuffleControlBytes. |
2204 | 6 | void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const { |
2205 | | // Poison all non-full slots.
2206 | 56 | for (size_t i = 0; i < c.capacity(); ++i) { |
2207 | 50 | if (!IsFull(c.control()[i])) { |
2208 | 28 | SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size), |
2209 | 28 | slot_size); |
2210 | 28 | } |
2211 | 50 | } |
2212 | 6 | } |
2213 | | |
2214 | | HeapOrSoo old_heap_or_soo_; |
2215 | | size_t old_capacity_; |
2216 | | bool had_infoz_; |
2217 | | bool was_soo_; |
2218 | | bool had_soo_slot_; |
2219 | | // Either null infoz or a pre-sampled forced infoz for SOO tables. |
2220 | | HashtablezInfoHandle forced_infoz_; |
2221 | | }; |
2222 | | |
2223 | 16 | inline void PrepareInsertCommon(CommonFields& common) { |
2224 | 16 | common.increment_size(); |
2225 | 16 | common.maybe_increment_generation_on_insert(); |
2226 | 16 | } |
2227 | | |
2228 | | // Like prepare_insert, but for the case of inserting into a full SOO table. |
2229 | | size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size, |
2230 | | CommonFields& common); |
2231 | | |
2232 | | // PolicyFunctions bundles together some information for a particular |
2233 | | // raw_hash_set<T, ...> instantiation. This information is passed to |
2234 | | // type-erased functions that want to do small amounts of type-specific |
2235 | | // work. |
2236 | | struct PolicyFunctions { |
2237 | | size_t slot_size; |
2238 | | |
2239 | | // Returns the pointer to the hash function stored in the set. |
2240 | | const void* (*hash_fn)(const CommonFields& common); |
2241 | | |
2242 | | // Returns the hash of the pointed-to slot. |
2243 | | size_t (*hash_slot)(const void* hash_fn, void* slot); |
2244 | | |
2245 | | // Transfers the contents of src_slot to dst_slot. |
2246 | | void (*transfer)(void* set, void* dst_slot, void* src_slot); |
2247 | | |
2248 | | // Deallocates the backing store from common. |
2249 | | void (*dealloc)(CommonFields& common, const PolicyFunctions& policy); |
2250 | | |
2251 | | // Resizes set to the new capacity. |
2252 | | // Arguments are used as in raw_hash_set::resize_impl. |
2253 | | void (*resize)(CommonFields& common, size_t new_capacity, |
2254 | | HashtablezInfoHandle forced_infoz); |
2255 | | }; |
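As an illustration of the callback shapes, a hypothetical instantiation could populate the struct as sketched below; `MySlot` and the `My*` helpers are made-up names, not part of the library.

    struct MySlot { int value; };

    // Declarations only; each raw_hash_set instantiation supplies real bodies.
    const void* MyHashFn(const CommonFields& common);    // returns the stored hasher
    size_t MyHashSlot(const void* hash_fn, void* slot);  // hashes the slot's key
    void MyTransfer(void* set, void* dst_slot, void* src_slot);
    void MyDealloc(CommonFields& common, const PolicyFunctions& policy);
    void MyResize(CommonFields& common, size_t new_capacity,
                  HashtablezInfoHandle forced_infoz);

    const PolicyFunctions kMyPolicy = {
        sizeof(MySlot),  // slot_size
        &MyHashFn,       // hash_fn
        &MyHashSlot,     // hash_slot
        &MyTransfer,     // transfer
        &MyDealloc,      // dealloc
        &MyResize,       // resize
    };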
2256 | | |
2257 | | // ClearBackingArray clears the backing array, either modifying it in place
2258 | | // or creating a new one, depending on the value of "reuse".
2259 | | // REQUIRES: c.capacity > 0 |
2260 | | void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, |
2261 | | bool reuse, bool soo_enabled); |
2262 | | |
2263 | | // Type-erased version of raw_hash_set::erase_meta_only. |
2264 | | void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size); |
2265 | | |
2266 | | // Function to place in PolicyFunctions::dealloc for raw_hash_sets |
2267 | | // that are using std::allocator. This allows us to share the same |
2268 | | // function body for raw_hash_set instantiations that have the |
2269 | | // same slot alignment. |
2270 | | template <size_t AlignOfSlot> |
2271 | | ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common, |
2272 | 0 | const PolicyFunctions& policy) { |
2273 | | // Unpoison before returning the memory to the allocator. |
2274 | 0 | SanitizerUnpoisonMemoryRegion(common.slot_array(), |
2275 | 0 | policy.slot_size * common.capacity()); |
2276 | |
|
2277 | 0 | std::allocator<char> alloc; |
2278 | 0 | common.infoz().Unregister(); |
2279 | 0 | Deallocate<BackingArrayAlignment(AlignOfSlot)>( |
2280 | 0 | &alloc, common.backing_array_start(), |
2281 | 0 | common.alloc_size(policy.slot_size, AlignOfSlot)); |
2282 | 0 | } |
2283 | | |
2284 | | // For trivially relocatable types we use memcpy directly. This allows us to |
2285 | | // share the same function body for raw_hash_set instantiations that have the |
2286 | | // same slot size as long as they are relocatable. |
2287 | | template <size_t SizeOfSlot> |
2288 | 0 | ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) { |
2289 | 0 | memcpy(dst, src, SizeOfSlot); |
2290 | 0 | } |
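Because the body depends only on `SizeOfSlot`, any two slot types of equal size name the same instantiation. A small check (an observation assuming a typical 64-bit target where both types below are 8 bytes; not library code):

    #include <cassert>
    #include <cstdint>

    void CheckSharedTransferBody() {
      // Both expressions name TransferRelocatable<8>, i.e. the same function.
      assert((&TransferRelocatable<sizeof(std::int64_t)> ==
              &TransferRelocatable<sizeof(double)>));
    }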
2291 | | |
2292 | | // Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case. |
2293 | | const void* GetHashRefForEmptyHasher(const CommonFields& common); |
2294 | | |
2295 | | // Given the hash of a value not currently in the table and the first empty |
2296 | | // slot in the probe sequence, finds a viable slot index to insert it at. |
2297 | | // |
2298 | | // If there is no space left, the table can be resized or rehashed
2299 | | // (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash).
2300 | | //
2301 | | // If there are no deleted slots and growth_left is positive, the element
2302 | | // can be inserted at the provided `target` position.
2303 | | // |
2304 | | // When the table has deleted slots (according to GrowthInfo), the target |
2305 | | // position will be searched one more time using `find_first_non_full`. |
2306 | | // |
2307 | | // REQUIRES: Table is not SOO. |
2308 | | // REQUIRES: At least one non-full slot available. |
2309 | | // REQUIRES: `target` is a valid empty position to insert. |
2310 | | size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target, |
2311 | | const PolicyFunctions& policy); |
2312 | | |
2313 | | // A SwissTable. |
2314 | | // |
2315 | | // Policy: a policy defines how to perform different operations on |
2316 | | // the slots of the hashtable (see hash_policy_traits.h for the full interface |
2317 | | // of policy). |
2318 | | // |
2319 | | // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The |
2320 | | // functor should accept a key and return a size_t hash. For best performance
2321 | | // it is important that the hash function provides high entropy across all bits |
2322 | | // of the hash. |
2323 | | // |
2324 | | // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
2325 | | // should accept two keys (possibly of different types) and return a bool: true
2326 | | // if they are equal, false if they are not. If two keys compare equal, then |
2327 | | // their hash values as defined by Hash MUST be equal. |
2328 | | // |
2329 | | // Allocator: an Allocator |
2330 | | // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which |
2331 | | // the storage of the hashtable will be allocated and the elements will be |
2332 | | // constructed and destroyed. |
2333 | | template <class Policy, class Hash, class Eq, class Alloc> |
2334 | | class raw_hash_set { |
2335 | | using PolicyTraits = hash_policy_traits<Policy>; |
2336 | | using KeyArgImpl = |
2337 | | KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>; |
2338 | | |
2339 | | public: |
2340 | | using init_type = typename PolicyTraits::init_type; |
2341 | | using key_type = typename PolicyTraits::key_type; |
2342 | | // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user |
2343 | | // code fixes! |
2344 | | using slot_type = typename PolicyTraits::slot_type; |
2345 | | using allocator_type = Alloc; |
2346 | | using size_type = size_t; |
2347 | | using difference_type = ptrdiff_t; |
2348 | | using hasher = Hash; |
2349 | | using key_equal = Eq; |
2350 | | using policy_type = Policy; |
2351 | | using value_type = typename PolicyTraits::value_type; |
2352 | | using reference = value_type&; |
2353 | | using const_reference = const value_type&; |
2354 | | using pointer = typename absl::allocator_traits< |
2355 | | allocator_type>::template rebind_traits<value_type>::pointer; |
2356 | | using const_pointer = typename absl::allocator_traits< |
2357 | | allocator_type>::template rebind_traits<value_type>::const_pointer; |
2358 | | |
2359 | | // Alias used for heterogeneous lookup functions. |
2360 | | // `key_arg<K>` evaluates to `K` when the functors are transparent and to |
2361 | | // `key_type` otherwise. It permits template argument deduction on `K` for the |
2362 | | // transparent case. |
2363 | | template <class K> |
2364 | | using key_arg = typename KeyArgImpl::template type<K, key_type>; |
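For instance, through the public `absl::flat_hash_set` wrapper, whose default hash and equality functors are transparent for string types, `key_arg<K>` deduces to `K`, so a lookup can take a `std::string_view` without materializing a temporary `std::string`. A minimal usage sketch (assumes C++17):

    #include <cassert>
    #include <string>
    #include <string_view>

    #include "absl/container/flat_hash_set.h"

    void HeterogeneousLookupDemo() {
      absl::flat_hash_set<std::string> s = {"abc"};
      std::string_view sv = "abc";
      assert(s.contains(sv));  // no std::string temporary is constructed
    }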
2365 | | |
2366 | | private: |
2367 | | // TODO(b/289225379): we could add extra SOO space inside raw_hash_set |
2368 | | // after CommonFields to allow inlining larger slot_types (e.g. std::string), |
2369 | | // but it's a bit complicated if we want to support incomplete mapped_type in |
2370 | | // flat_hash_map. We could potentially do this for flat_hash_set and for an |
2371 | | // allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic |
2372 | | // types, strings, cords, and pairs/tuples of allowlisted types. |
2373 | 0 | constexpr static bool SooEnabled() { |
2374 | 0 | return PolicyTraits::soo_enabled() && |
2375 | 0 | sizeof(slot_type) <= sizeof(HeapOrSoo) && |
2376 | 0 | alignof(slot_type) <= alignof(HeapOrSoo); |
2377 | 0 | } |
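A rough size intuition, under the assumption (not a documented guarantee) that `HeapOrSoo` occupies two pointer-sized words, i.e. 16 bytes on a 64-bit target:

    #include <cstdint>

    struct SmallSlot { std::int32_t v; };  // 4 bytes: could be stored inline (SOO)
    struct BigSlot { char bytes[24]; };    // 24 bytes: exceeds the assumed buffer
    static_assert(sizeof(SmallSlot) <= 16, "may be SOO-eligible");
    static_assert(sizeof(BigSlot) > 16, "always heap-backed");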
2378 | | |
2379 | | // Whether `size` fits in the SOO capacity of this table. |
2380 | 340 | bool fits_in_soo(size_t size) const { |
2381 | 340 | return SooEnabled() && size <= SooCapacity(); |
2382 | 340 | } |
2383 | | // Whether this table is in SOO mode or non-SOO mode. |
2384 | 332 | bool is_soo() const { return fits_in_soo(capacity()); } |
2385 | 0 | bool is_full_soo() const { return is_soo() && !empty(); } |
2386 | | |
2387 | | // Gives an early error when key_type is not hashable or equality-comparable.
2388 | | auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); |
2389 | | auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); |
2390 | | |
2391 | | using AllocTraits = absl::allocator_traits<allocator_type>; |
2392 | | using SlotAlloc = typename absl::allocator_traits< |
2393 | | allocator_type>::template rebind_alloc<slot_type>; |
2394 | | // People are often sloppy with the exact type of their allocator (sometimes |
2395 | | // it has an extra const or is missing the pair, but rebinding makes it
2396 | | // work anyway).
2397 | | using CharAlloc = |
2398 | | typename absl::allocator_traits<Alloc>::template rebind_alloc<char>; |
2399 | | using SlotAllocTraits = typename absl::allocator_traits< |
2400 | | allocator_type>::template rebind_traits<slot_type>; |
2401 | | |
2402 | | static_assert(std::is_lvalue_reference<reference>::value, |
2403 | | "Policy::element() must return a reference"); |
2404 | | |
2405 | | template <typename T> |
2406 | | struct SameAsElementReference |
2407 | | : std::is_same<typename std::remove_cv< |
2408 | | typename std::remove_reference<reference>::type>::type, |
2409 | | typename std::remove_cv< |
2410 | | typename std::remove_reference<T>::type>::type> {}; |
2411 | | |
2412 | | // An enabler for insert(T&&): T must be convertible to init_type or be the |
2413 | | // same as [cv] value_type [ref]. |
2414 | | // Note: we separate SameAsElementReference into its own type to avoid using |
2415 | | // reference unless we need to. MSVC doesn't seem to like it in some |
2416 | | // cases. |
2417 | | template <class T> |
2418 | | using RequiresInsertable = typename std::enable_if< |
2419 | | absl::disjunction<std::is_convertible<T, init_type>, |
2420 | | SameAsElementReference<T>>::value, |
2421 | | int>::type; |
2422 | | |
2423 | | // RequiresNotInit is a workaround for gcc prior to 7.1. |
2424 | | // See https://godbolt.org/g/Y4xsUh. |
2425 | | template <class T> |
2426 | | using RequiresNotInit = |
2427 | | typename std::enable_if<!std::is_same<T, init_type>::value, int>::type; |
2428 | | |
2429 | | template <class... Ts> |
2430 | | using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>; |
2431 | | |
2432 | | public: |
2433 | | static_assert(std::is_same<pointer, value_type*>::value, |
2434 | | "Allocators with custom pointer types are not supported"); |
2435 | | static_assert(std::is_same<const_pointer, const value_type*>::value, |
2436 | | "Allocators with custom pointer types are not supported"); |
2437 | | |
2438 | | class iterator : private HashSetIteratorGenerationInfo { |
2439 | | friend class raw_hash_set; |
2440 | | friend struct HashtableFreeFunctionsAccess; |
2441 | | |
2442 | | public: |
2443 | | using iterator_category = std::forward_iterator_tag; |
2444 | | using value_type = typename raw_hash_set::value_type; |
2445 | | using reference = |
2446 | | absl::conditional_t<PolicyTraits::constant_iterators::value, |
2447 | | const value_type&, value_type&>; |
2448 | | using pointer = absl::remove_reference_t<reference>*; |
2449 | | using difference_type = typename raw_hash_set::difference_type; |
2450 | | |
2451 | | iterator() {} |
2452 | | |
2453 | | // PRECONDITION: not an end() iterator. |
2454 | 16 | reference operator*() const { |
2455 | 16 | AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()"); |
2456 | 16 | return unchecked_deref(); |
2457 | 16 | } |
2458 | | |
2459 | | // PRECONDITION: not an end() iterator. |
2460 | 0 | pointer operator->() const { |
2461 | 0 | AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->"); |
2462 | 0 | return &operator*(); |
2463 | 0 | } |
2464 | | |
2465 | | // PRECONDITION: not an end() iterator. |
2466 | 0 | iterator& operator++() { |
2467 | 0 | AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++"); |
2468 | 0 | ++ctrl_; |
2469 | 0 | ++slot_; |
2470 | 0 | skip_empty_or_deleted(); |
2471 | 0 | if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr; |
2472 | 0 | return *this; |
2473 | 0 | } |
2474 | | // PRECONDITION: not an end() iterator. |
2475 | | iterator operator++(int) { |
2476 | | auto tmp = *this; |
2477 | | ++*this; |
2478 | | return tmp; |
2479 | | } |
2480 | | |
2481 | 16 | friend bool operator==(const iterator& a, const iterator& b) { |
2482 | 16 | AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr()); |
2483 | 16 | AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr()); |
2484 | 16 | AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_, |
2485 | 16 | a.generation_ptr(), b.generation_ptr()); |
2486 | 16 | return a.ctrl_ == b.ctrl_; |
2487 | 16 | } |
2488 | 0 | friend bool operator!=(const iterator& a, const iterator& b) { |
2489 | 0 | return !(a == b); |
2490 | 0 | } |
2491 | | |
2492 | | private: |
2493 | | iterator(ctrl_t* ctrl, slot_type* slot, |
2494 | | const GenerationType* generation_ptr) |
2495 | | : HashSetIteratorGenerationInfo(generation_ptr), |
2496 | | ctrl_(ctrl), |
2497 | 32 | slot_(slot) { |
2498 | | // This assumption helps the compiler know that any non-end iterator is |
2499 | | // not equal to any end iterator. |
2500 | 32 | ABSL_ASSUME(ctrl != nullptr); |
2501 | 32 | } |
2502 | | // This constructor is used in begin() to avoid an MSan |
2503 | | // use-of-uninitialized-value error. Delegating from this constructor to |
2504 | | // the previous one doesn't avoid the error. |
2505 | | iterator(ctrl_t* ctrl, MaybeInitializedPtr slot, |
2506 | | const GenerationType* generation_ptr) |
2507 | | : HashSetIteratorGenerationInfo(generation_ptr), |
2508 | | ctrl_(ctrl), |
2509 | 0 | slot_(to_slot(slot.get())) { |
2510 | | // This assumption helps the compiler know that any non-end iterator is |
2511 | | // not equal to any end iterator. |
2512 | 0 | ABSL_ASSUME(ctrl != nullptr); |
2513 | 0 | } |
2514 | | // For end() iterators. |
2515 | | explicit iterator(const GenerationType* generation_ptr) |
2516 | 0 | : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {} |
2517 | | |
2518 | | // Fixes up `ctrl_` to point to a full slot or the sentinel by advancing
2519 | | // `ctrl_` and `slot_` until they reach one.
2520 | 0 | void skip_empty_or_deleted() { |
2521 | 0 | while (IsEmptyOrDeleted(*ctrl_)) { |
2522 | 0 | uint32_t shift = |
2523 | 0 | GroupFullEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted(); |
2524 | 0 | ctrl_ += shift; |
2525 | 0 | slot_ += shift; |
2526 | 0 | } |
2527 | 0 | } |
2528 | | |
2529 | 0 | ctrl_t* control() const { return ctrl_; } |
2530 | 16 | slot_type* slot() const { return slot_; } |
2531 | | |
2532 | | // We use EmptyGroup() for default-constructed iterators so that they can |
2533 | | // be distinguished from end iterators, which have nullptr ctrl_. |
2534 | | ctrl_t* ctrl_ = EmptyGroup(); |
2535 | | // To avoid uninitialized member warnings, put slot_ in an anonymous union. |
2536 | | // The member is not initialized on singleton and end iterators. |
2537 | | union { |
2538 | | slot_type* slot_; |
2539 | | }; |
2540 | | |
2541 | | // An equality check which skips ABSL Hardening iterator invalidation |
2542 | | // checks. |
2543 | | // Should be used when the lifetimes of the iterators are well-enough |
2544 | | // understood to prove that they cannot be invalid. |
2545 | | bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); } |
2546 | | |
2547 | | // Dereferences the iterator without ABSL Hardening iterator invalidation |
2548 | | // checks. |
2549 | 16 | reference unchecked_deref() const { return PolicyTraits::element(slot_); } |
2550 | | }; |
2551 | | |
2552 | | class const_iterator { |
2553 | | friend class raw_hash_set; |
2554 | | template <class Container, typename Enabler> |
2555 | | friend struct absl::container_internal::hashtable_debug_internal:: |
2556 | | HashtableDebugAccess; |
2557 | | |
2558 | | public: |
2559 | | using iterator_category = typename iterator::iterator_category; |
2560 | | using value_type = typename raw_hash_set::value_type; |
2561 | | using reference = typename raw_hash_set::const_reference; |
2562 | | using pointer = typename raw_hash_set::const_pointer; |
2563 | | using difference_type = typename raw_hash_set::difference_type; |
2564 | | |
2565 | | const_iterator() = default; |
2566 | | // Implicit construction from iterator. |
2567 | 32 | const_iterator(iterator i) : inner_(std::move(i)) {} // NOLINT |
2568 | | |
2569 | | reference operator*() const { return *inner_; } |
2570 | | pointer operator->() const { return inner_.operator->(); } |
2571 | | |
2572 | | const_iterator& operator++() { |
2573 | | ++inner_; |
2574 | | return *this; |
2575 | | } |
2576 | | const_iterator operator++(int) { return inner_++; } |
2577 | | |
2578 | 16 | friend bool operator==(const const_iterator& a, const const_iterator& b) { |
2579 | 16 | return a.inner_ == b.inner_; |
2580 | 16 | } |
2581 | | friend bool operator!=(const const_iterator& a, const const_iterator& b) { |
2582 | | return !(a == b); |
2583 | | } |
2584 | | |
2585 | | private: |
2586 | | const_iterator(const ctrl_t* ctrl, const slot_type* slot, |
2587 | | const GenerationType* gen) |
2588 | | : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) { |
2589 | | } |
2590 | | ctrl_t* control() const { return inner_.control(); } |
2591 | | slot_type* slot() const { return inner_.slot(); } |
2592 | | |
2593 | | iterator inner_; |
2594 | | |
2595 | | bool unchecked_equals(const const_iterator& b) { |
2596 | | return inner_.unchecked_equals(b.inner_); |
2597 | | } |
2598 | | }; |
2599 | | |
2600 | | using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>; |
2601 | | using insert_return_type = InsertReturnType<iterator, node_type>; |
2602 | | |
2603 | | // Note: can't use `= default` due to non-default noexcept (causes |
2604 | | // problems for some compilers). NOLINTNEXTLINE |
2605 | | raw_hash_set() noexcept( |
2606 | | std::is_nothrow_default_constructible<hasher>::value && |
2607 | | std::is_nothrow_default_constructible<key_equal>::value && |
2608 | 2 | std::is_nothrow_default_constructible<allocator_type>::value) {} |
2609 | | |
2610 | | ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set( |
2611 | | size_t bucket_count, const hasher& hash = hasher(), |
2612 | | const key_equal& eq = key_equal(), |
2613 | | const allocator_type& alloc = allocator_type()) |
2614 | | : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq, |
2615 | | alloc) { |
2616 | | if (bucket_count > (SooEnabled() ? SooCapacity() : 0)) { |
2617 | | resize(NormalizeCapacity(bucket_count)); |
2618 | | } |
2619 | | } |
2620 | | |
2621 | | raw_hash_set(size_t bucket_count, const hasher& hash, |
2622 | | const allocator_type& alloc) |
2623 | | : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} |
2624 | | |
2625 | | raw_hash_set(size_t bucket_count, const allocator_type& alloc) |
2626 | | : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} |
2627 | | |
2628 | | explicit raw_hash_set(const allocator_type& alloc) |
2629 | | : raw_hash_set(0, hasher(), key_equal(), alloc) {} |
2630 | | |
2631 | | template <class InputIter> |
2632 | | raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, |
2633 | | const hasher& hash = hasher(), const key_equal& eq = key_equal(), |
2634 | | const allocator_type& alloc = allocator_type()) |
2635 | | : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), |
2636 | | hash, eq, alloc) { |
2637 | | insert(first, last); |
2638 | | } |
2639 | | |
2640 | | template <class InputIter> |
2641 | | raw_hash_set(InputIter first, InputIter last, size_t bucket_count, |
2642 | | const hasher& hash, const allocator_type& alloc) |
2643 | | : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {} |
2644 | | |
2645 | | template <class InputIter> |
2646 | | raw_hash_set(InputIter first, InputIter last, size_t bucket_count, |
2647 | | const allocator_type& alloc) |
2648 | | : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} |
2649 | | |
2650 | | template <class InputIter> |
2651 | | raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) |
2652 | | : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} |
2653 | | |
2654 | | // Instead of accepting std::initializer_list<value_type> as the first |
2655 | | // argument like std::unordered_set<value_type> does, we have two overloads |
2656 | | // that accept std::initializer_list<T> and std::initializer_list<init_type>. |
2657 | | // This is advantageous for performance. |
2658 | | // |
2659 | | // // Turns {"abc", "def"} into std::initializer_list<std::string>, then |
2660 | | // // copies the strings into the set. |
2661 | | // std::unordered_set<std::string> s = {"abc", "def"}; |
2662 | | // |
2663 | | // // Turns {"abc", "def"} into std::initializer_list<const char*>, then |
2664 | | // // copies the strings into the set. |
2665 | | // absl::flat_hash_set<std::string> s = {"abc", "def"}; |
2666 | | // |
2667 | | // The same trick is used in insert(). |
2668 | | // |
2669 | | // The enabler is necessary to prevent this constructor from triggering where |
2670 | | // the copy constructor is meant to be called. |
2671 | | // |
2672 | | // absl::flat_hash_set<int> a, b{a}; |
2673 | | // |
2674 | | // RequiresNotInit<T> is a workaround for gcc prior to 7.1. |
2675 | | template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0> |
2676 | | raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0, |
2677 | | const hasher& hash = hasher(), const key_equal& eq = key_equal(), |
2678 | | const allocator_type& alloc = allocator_type()) |
2679 | | : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} |
2680 | | |
2681 | | raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0, |
2682 | | const hasher& hash = hasher(), const key_equal& eq = key_equal(), |
2683 | | const allocator_type& alloc = allocator_type()) |
2684 | | : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} |
2685 | | |
2686 | | template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0> |
2687 | | raw_hash_set(std::initializer_list<T> init, size_t bucket_count, |
2688 | | const hasher& hash, const allocator_type& alloc) |
2689 | | : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} |
2690 | | |
2691 | | raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count, |
2692 | | const hasher& hash, const allocator_type& alloc) |
2693 | | : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} |
2694 | | |
2695 | | template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0> |
2696 | | raw_hash_set(std::initializer_list<T> init, size_t bucket_count, |
2697 | | const allocator_type& alloc) |
2698 | | : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} |
2699 | | |
2700 | | raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count, |
2701 | | const allocator_type& alloc) |
2702 | | : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} |
2703 | | |
2704 | | template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0> |
2705 | | raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc) |
2706 | | : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} |
2707 | | |
2708 | | raw_hash_set(std::initializer_list<init_type> init, |
2709 | | const allocator_type& alloc) |
2710 | | : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} |
2711 | | |
2712 | | raw_hash_set(const raw_hash_set& that) |
2713 | | : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( |
2714 | | that.alloc_ref())) {} |
2715 | | |
2716 | | raw_hash_set(const raw_hash_set& that, const allocator_type& a) |
2717 | | : raw_hash_set(GrowthToLowerboundCapacity(that.size()), that.hash_ref(), |
2718 | | that.eq_ref(), a) { |
2719 | | const size_t size = that.size(); |
2720 | | if (size == 0) { |
2721 | | return; |
2722 | | } |
2723 | | // We don't use `that.is_soo()` here because `that` can have non-SOO |
2724 | | // capacity but have a size that fits into SOO capacity. |
2725 | | if (fits_in_soo(size)) { |
2726 | | assert(size == 1); |
2727 | | common().set_full_soo(); |
2728 | | emplace_at(soo_iterator(), *that.begin()); |
2729 | | const HashtablezInfoHandle infoz = try_sample_soo(); |
2730 | | if (infoz.IsSampled()) resize_with_soo_infoz(infoz); |
2731 | | return; |
2732 | | } |
2733 | | assert(!that.is_soo()); |
2734 | | const size_t cap = capacity(); |
2735 | | // Note about single group tables: |
2736 | | // 1. It is correct to have any order of elements. |
2737 | | // 2. Order has to be non deterministic. |
2738 | | // 3. We are assigning elements with arbitrary `shift` starting from |
2739 | | // `capacity + shift` position. |
2740 | | // 4. `shift` must be coprime with `capacity + 1` in order to be able to use |
2741 | | // modular arithmetic to traverse all positions, instead of cycling
2742 | | // through a subset of positions. Odd numbers are coprime with any
2743 | | // `capacity + 1` (2^N); see the sketch after this constructor.
2744 | | size_t offset = cap; |
2745 | | const size_t shift = |
2746 | | is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0; |
2747 | | IterateOverFullSlots( |
2748 | | that.common(), that.slot_array(), |
2749 | | [&](const ctrl_t* that_ctrl, |
2750 | | slot_type* that_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE { |
2751 | | if (shift == 0) { |
2752 | | // Big tables case. Position must be searched via probing. |
2753 | | // The table is guaranteed to be empty, so we can be faster than
2754 | | // a full `insert`.
2755 | | const size_t hash = PolicyTraits::apply( |
2756 | | HashElement{hash_ref()}, PolicyTraits::element(that_slot)); |
2757 | | FindInfo target = find_first_non_full_outofline(common(), hash); |
2758 | | infoz().RecordInsert(hash, target.probe_length); |
2759 | | offset = target.offset; |
2760 | | } else { |
2761 | | // Small tables case. Next position is computed via shift. |
2762 | | offset = (offset + shift) & cap; |
2763 | | } |
2764 | | const h2_t h2 = static_cast<h2_t>(*that_ctrl); |
2765 | | assert( // We rely on the hash not changing for small tables.
2766 | | H2(PolicyTraits::apply(HashElement{hash_ref()}, |
2767 | | PolicyTraits::element(that_slot))) == h2 && |
2768 | | "hash function value changed unexpectedly during the copy"); |
2769 | | SetCtrl(common(), offset, h2, sizeof(slot_type)); |
2770 | | emplace_at(iterator_at(offset), PolicyTraits::element(that_slot)); |
2771 | | common().maybe_increment_generation_on_insert(); |
2772 | | }); |
2773 | | if (shift != 0) { |
2774 | | // On small-table copies we do not record individual inserts:
2775 | | // RecordInsert requires the hash, which is unknown for small tables.
2776 | | infoz().RecordStorageChanged(size, cap); |
2777 | | } |
2778 | | common().set_size(size); |
2779 | | growth_info().OverwriteManyEmptyAsFull(size); |
2780 | | } |
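The sketch referenced in note 4 above (standalone, with made-up constants): when `cap + 1` is a power of two, any odd `shift` is coprime with it, so repeatedly stepping by `shift` modulo `cap + 1` visits every position exactly once.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t cap = 7;    // capacity 2^3 - 1; positions 0..7
      const std::size_t shift = 3;  // any odd shift works
      std::size_t offset = cap;
      for (std::size_t n = 0; n <= cap; ++n) {
        offset = (offset + shift) & cap;  // the same modular step as above
        std::printf("%zu ", offset);      // prints each of 0..7 exactly once
      }
      std::printf("\n");
    }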
2781 | | |
2782 | | ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept( |
2783 | | std::is_nothrow_copy_constructible<hasher>::value && |
2784 | | std::is_nothrow_copy_constructible<key_equal>::value && |
2785 | | std::is_nothrow_copy_constructible<allocator_type>::value) |
2786 | | : // Hash, equality and allocator are copied instead of moved because |
2787 | | // `that` must be left valid. If Hash is std::function<Key>, moving it |
2788 | | // would create a nullptr functor that cannot be called. |
2789 | | // TODO(b/296061262): move instead of copying hash/eq/alloc. |
2790 | | // Note: we avoid using exchange for better generated code. |
2791 | | settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo() |
2792 | | ? std::move(that.common()) |
2793 | | : CommonFields{full_soo_tag_t{}}, |
2794 | | that.hash_ref(), that.eq_ref(), that.alloc_ref()) { |
2795 | | if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) { |
2796 | | transfer(soo_slot(), that.soo_slot()); |
2797 | | } |
2798 | | that.common() = CommonFields::CreateDefault<SooEnabled()>(); |
2799 | | maybe_increment_generation_or_rehash_on_move(); |
2800 | | } |
2801 | | |
2802 | | raw_hash_set(raw_hash_set&& that, const allocator_type& a) |
2803 | | : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(), |
2804 | | that.eq_ref(), a) { |
2805 | | if (a == that.alloc_ref()) { |
2806 | | swap_common(that); |
2807 | | maybe_increment_generation_or_rehash_on_move(); |
2808 | | } else { |
2809 | | move_elements_allocs_unequal(std::move(that)); |
2810 | | } |
2811 | | } |
2812 | | |
2813 | | raw_hash_set& operator=(const raw_hash_set& that) { |
2814 | | if (ABSL_PREDICT_FALSE(this == &that)) return *this; |
2815 | | constexpr bool propagate_alloc = |
2816 | | AllocTraits::propagate_on_container_copy_assignment::value; |
2817 | | // TODO(ezb): maybe avoid allocating a new backing array if this->capacity() |
2818 | | // is an exact match for that.size(). If this->capacity() is too big, then |
2819 | | // it would make iteration very slow to reuse the allocation. Maybe we can |
2820 | | // do the same heuristic as clear() and reuse if it's small enough. |
2821 | | raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref()); |
2822 | | // NOLINTNEXTLINE: not returning *this for performance. |
2823 | | return assign_impl<propagate_alloc>(std::move(tmp)); |
2824 | | } |
2825 | | |
2826 | | raw_hash_set& operator=(raw_hash_set&& that) noexcept( |
2827 | | absl::allocator_traits<allocator_type>::is_always_equal::value && |
2828 | | std::is_nothrow_move_assignable<hasher>::value && |
2829 | | std::is_nothrow_move_assignable<key_equal>::value) { |
2830 | | // TODO(sbenza): We should only use the operations from the noexcept clause |
2831 | | // to make sure we actually adhere to that contract. |
2832 | | // NOLINTNEXTLINE: not returning *this for performance. |
2833 | | return move_assign( |
2834 | | std::move(that), |
2835 | | typename AllocTraits::propagate_on_container_move_assignment()); |
2836 | | } |
2837 | | |
2838 | 0 | ~raw_hash_set() { destructor_impl(); } |
2839 | | |
2840 | 0 | iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2841 | 0 | if (ABSL_PREDICT_FALSE(empty())) return end(); |
2842 | 0 | if (is_soo()) return soo_iterator(); |
2843 | 0 | iterator it = {control(), common().slots_union(), |
2844 | 0 | common().generation_ptr()}; |
2845 | 0 | it.skip_empty_or_deleted(); |
2846 | 0 | assert(IsFull(*it.control())); |
2847 | 0 | return it; |
2848 | 0 | } |
2849 | 0 | iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2850 | 0 | return iterator(common().generation_ptr()); |
2851 | 0 | } |
2852 | | |
2853 | | const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2854 | | return const_cast<raw_hash_set*>(this)->begin(); |
2855 | | } |
2856 | | const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2857 | | return iterator(common().generation_ptr()); |
2858 | | } |
2859 | | const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2860 | | return begin(); |
2861 | | } |
2862 | | const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); } |
2863 | | |
2864 | 32 | bool empty() const { return !size(); } |
2865 | 32 | size_t size() const { return common().size(); } |
2866 | 378 | size_t capacity() const { |
2867 | 378 | const size_t cap = common().capacity(); |
2868 | | // The compiler complains when functions are used in ABSL_ASSUME, so use local variables.
2869 | 378 | ABSL_ATTRIBUTE_UNUSED static constexpr bool kEnabled = SooEnabled(); |
2870 | 378 | ABSL_ATTRIBUTE_UNUSED static constexpr size_t kCapacity = SooCapacity(); |
2871 | 378 | ABSL_ASSUME(!kEnabled || cap >= kCapacity); |
2872 | 378 | return cap; |
2873 | 378 | } |
2874 | | size_t max_size() const { return (std::numeric_limits<size_t>::max)(); } |
2875 | | |
2876 | 0 | ABSL_ATTRIBUTE_REINITIALIZES void clear() { |
2877 | | // Iterating over this container is O(bucket_count()). When bucket_count() |
2878 | | // is much greater than size(), iteration becomes prohibitively expensive. |
2879 | | // For clear() it is more important to reuse the allocated array when the |
2880 | | // container is small, because allocation takes a comparatively long time
2881 | | // relative to destroying the container's elements. So we pick the largest
2882 | | // bucket_count() threshold for which iteration is still fast, and past
2883 | | // that we simply deallocate the array.
2884 | 0 | const size_t cap = capacity(); |
2885 | 0 | if (cap == 0) { |
2886 | | // Already guaranteed to be empty; so nothing to do. |
2887 | 0 | } else if (is_soo()) { |
2888 | 0 | if (!empty()) destroy(soo_slot()); |
2889 | 0 | common().set_empty_soo(); |
2890 | 0 | } else { |
2891 | 0 | destroy_slots(); |
2892 | 0 | ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128, |
2893 | 0 | SooEnabled()); |
2894 | 0 | } |
2895 | 0 | common().set_reserved_growth(0); |
2896 | 0 | common().set_reservation_size(0); |
2897 | 0 | } |
2898 | | |
2899 | | // This overload kicks in when the argument is an rvalue of insertable and |
2900 | | // decomposable type other than init_type. |
2901 | | // |
2902 | | // flat_hash_map<std::string, int> m; |
2903 | | // m.insert(std::make_pair("abc", 42)); |
2904 | | // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc |
2905 | | // bug. |
2906 | | template <class T, RequiresInsertable<T> = 0, class T2 = T, |
2907 | | typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0, |
2908 | | T* = nullptr> |
2909 | 16 | std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2910 | 16 | return emplace(std::forward<T>(value)); |
2911 | 16 | } |
2912 | | |
2913 | | // This overload kicks in when the argument is a bitfield or an lvalue of |
2914 | | // insertable and decomposable type. |
2915 | | // |
2916 | | // union { int n : 1; }; |
2917 | | // flat_hash_set<int> s; |
2918 | | // s.insert(n); |
2919 | | // |
2920 | | // flat_hash_set<std::string> s; |
2921 | | // const char* p = "hello"; |
2922 | | // s.insert(p); |
2923 | | // |
2924 | | template < |
2925 | | class T, RequiresInsertable<const T&> = 0, |
2926 | | typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0> |
2927 | | std::pair<iterator, bool> insert(const T& value) |
2928 | | ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2929 | | return emplace(value); |
2930 | | } |
2931 | | |
2932 | | // This overload kicks in when the argument is an rvalue of init_type. Its |
2933 | | // purpose is to handle brace-init-list arguments. |
2934 | | // |
2935 | | // flat_hash_map<std::string, int> s; |
2936 | | // s.insert({"abc", 42}); |
2937 | | std::pair<iterator, bool> insert(init_type&& value) |
2938 | 0 | ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2939 | 0 | return emplace(std::move(value)); |
2940 | 0 | } |
2941 | | |
2942 | | // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc |
2943 | | // bug. |
2944 | | template <class T, RequiresInsertable<T> = 0, class T2 = T, |
2945 | | typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0, |
2946 | | T* = nullptr> |
2947 | | iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2948 | | return insert(std::forward<T>(value)).first; |
2949 | | } |
2950 | | |
2951 | | template < |
2952 | | class T, RequiresInsertable<const T&> = 0, |
2953 | | typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0> |
2954 | | iterator insert(const_iterator, |
2955 | | const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2956 | | return insert(value).first; |
2957 | | } |
2958 | | |
2959 | | iterator insert(const_iterator, |
2960 | | init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2961 | | return insert(std::move(value)).first; |
2962 | | } |
2963 | | |
2964 | | template <class InputIt> |
2965 | | void insert(InputIt first, InputIt last) { |
2966 | | for (; first != last; ++first) emplace(*first); |
2967 | | } |
2968 | | |
2969 | | template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0> |
2970 | | void insert(std::initializer_list<T> ilist) { |
2971 | | insert(ilist.begin(), ilist.end()); |
2972 | | } |
2973 | | |
2974 | | void insert(std::initializer_list<init_type> ilist) { |
2975 | | insert(ilist.begin(), ilist.end()); |
2976 | | } |
2977 | | |
2978 | | insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2979 | | if (!node) return {end(), false, node_type()}; |
2980 | | const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); |
2981 | | auto res = PolicyTraits::apply( |
2982 | | InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))}, |
2983 | | elem); |
2984 | | if (res.second) { |
2985 | | CommonAccess::Reset(&node); |
2986 | | return {res.first, true, node_type()}; |
2987 | | } else { |
2988 | | return {res.first, false, std::move(node)}; |
2989 | | } |
2990 | | } |
2991 | | |
2992 | | iterator insert(const_iterator, |
2993 | | node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
2994 | | auto res = insert(std::move(node)); |
2995 | | node = std::move(res.node); |
2996 | | return res.position; |
2997 | | } |
2998 | | |
2999 | | // This overload kicks in if we can deduce the key from args. This enables us |
3000 | | // to avoid constructing value_type if an entry with the same key already |
3001 | | // exists. |
3002 | | // |
3003 | | // For example: |
3004 | | // |
3005 | | // flat_hash_map<std::string, std::string> m = {{"abc", "def"}}; |
3006 | | // // Creates no std::string copies and makes no heap allocations. |
3007 | | // m.emplace("abc", "xyz"); |
3008 | | template <class... Args, typename std::enable_if< |
3009 | | IsDecomposable<Args...>::value, int>::type = 0> |
3010 | | std::pair<iterator, bool> emplace(Args&&... args) |
3011 | 16 | ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3012 | 16 | return PolicyTraits::apply(EmplaceDecomposable{*this}, |
3013 | 16 | std::forward<Args>(args)...); |
3014 | 16 | }
3015 | | |
3016 | | // This overload kicks in if we cannot deduce the key from args. It constructs |
3017 | | // value_type unconditionally and then either moves it into the table or |
3018 | | // destroys it.
3019 | | template <class... Args, typename std::enable_if< |
3020 | | !IsDecomposable<Args...>::value, int>::type = 0> |
3021 | | std::pair<iterator, bool> emplace(Args&&... args) |
3022 | | ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3023 | | alignas(slot_type) unsigned char raw[sizeof(slot_type)]; |
3024 | | slot_type* slot = to_slot(&raw); |
3025 | | |
3026 | | construct(slot, std::forward<Args>(args)...); |
3027 | | const auto& elem = PolicyTraits::element(slot); |
3028 | | return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem); |
3029 | | } |
3030 | | |
3031 | | template <class... Args> |
3032 | | iterator emplace_hint(const_iterator, |
3033 | | Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3034 | | return emplace(std::forward<Args>(args)...).first; |
3035 | | } |
3036 | | |
3037 | | // Extension API: support for lazy emplace. |
3038 | | // |
3039 | | // Looks up key in the table. If found, returns the iterator to the element. |
3040 | | // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`, |
3041 | | // and returns an iterator to the new element. |
3042 | | // |
3043 | | // `f` must abide by several restrictions: |
3044 | | // - it MUST call `raw_hash_set::constructor` with arguments as if a |
3045 | | // `raw_hash_set::value_type` is constructed, |
3046 | | // - it MUST NOT access the container before the call to |
3047 | | // `raw_hash_set::constructor`, and |
3048 | | // - it MUST NOT erase the lazily emplaced element. |
3049 | | // Doing any of these is undefined behavior. |
3050 | | // |
3051 | | // For example: |
3052 | | // |
3053 | | // std::unordered_set<ArenaString> s; |
3054 | | // // Makes an ArenaString even if "abc" is already in the set.
3055 | | // s.insert(ArenaString(&arena, "abc"));
3056 | | //
3057 | | // flat_hash_set<ArenaString> s;
3058 | | // // Makes an ArenaString only if "abc" is not in the set.
3059 | | // s.lazy_emplace("abc", [&](const constructor& ctor) { |
3060 | | // ctor(&arena, "abc"); |
3061 | | // }); |
3062 | | // |
3063 | | // WARNING: This API is currently experimental. If there is a way to implement |
3064 | | // the same thing with the rest of the API, prefer that. |
3065 | | class constructor { |
3066 | | friend class raw_hash_set; |
3067 | | |
3068 | | public: |
3069 | | template <class... Args> |
3070 | | void operator()(Args&&... args) const { |
3071 | | assert(*slot_); |
3072 | | PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...); |
3073 | | *slot_ = nullptr; |
3074 | | } |
3075 | | |
3076 | | private: |
3077 | | constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} |
3078 | | |
3079 | | allocator_type* alloc_; |
3080 | | slot_type** slot_; |
3081 | | }; |
3082 | | |
3083 | | template <class K = key_type, class F> |
3084 | | iterator lazy_emplace(const key_arg<K>& key, |
3085 | | F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3086 | | auto res = find_or_prepare_insert(key); |
3087 | | if (res.second) { |
3088 | | slot_type* slot = res.first.slot(); |
3089 | | std::forward<F>(f)(constructor(&alloc_ref(), &slot)); |
3090 | | assert(!slot); |
3091 | | } |
3092 | | return res.first; |
3093 | | } |
3094 | | |
3095 | | // Extension API: support for heterogeneous keys. |
3096 | | // |
3097 | | // std::unordered_set<std::string> s; |
3098 | | // // Turns "abc" into std::string. |
3099 | | // s.erase("abc"); |
3100 | | // |
3101 | | // flat_hash_set<std::string> s; |
3102 | | // // Uses "abc" directly without copying it into std::string. |
3103 | | // s.erase("abc"); |
3104 | | template <class K = key_type> |
3105 | | size_type erase(const key_arg<K>& key) { |
3106 | | auto it = find(key); |
3107 | | if (it == end()) return 0; |
3108 | | erase(it); |
3109 | | return 1; |
3110 | | } |
3111 | | |
3112 | | // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, |
3113 | | // this method returns void to reduce algorithmic complexity to O(1). The |
3114 | | // iterator is invalidated, so any increment should be done before calling |
3115 | | // erase. In order to erase while iterating across a map, use the following |
3116 | | // idiom (which also works for some standard containers): |
3117 | | // |
3118 | | // for (auto it = m.begin(), end = m.end(); it != end;) { |
3119 | | // // `erase()` will invalidate `it`, so advance `it` first. |
3120 | | // auto copy_it = it++; |
3121 | | // if (<pred>) { |
3122 | | // m.erase(copy_it); |
3123 | | // } |
3124 | | // } |
3125 | | void erase(const_iterator cit) { erase(cit.inner_); } |
3126 | | |
3127 | | // This overload is necessary because otherwise erase<K>(const K&) would be |
3128 | | // a better match if a non-const iterator is passed as an argument.
3129 | | void erase(iterator it) { |
3130 | | AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()"); |
3131 | | destroy(it.slot()); |
3132 | | if (is_soo()) { |
3133 | | common().set_empty_soo(); |
3134 | | } else { |
3135 | | erase_meta_only(it); |
3136 | | } |
3137 | | } |
3138 | | |
3139 | | iterator erase(const_iterator first, |
3140 | | const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3141 | | // We check for empty first because ClearBackingArray requires that |
3142 | | // capacity() > 0 as a precondition. |
3143 | | if (empty()) return end(); |
3144 | | if (first == last) return last.inner_; |
3145 | | if (is_soo()) { |
3146 | | destroy(soo_slot()); |
3147 | | common().set_empty_soo(); |
3148 | | return end(); |
3149 | | } |
3150 | | if (first == begin() && last == end()) { |
3151 | | // TODO(ezb): we access control bytes in destroy_slots so it could make |
3152 | | // sense to combine destroy_slots and ClearBackingArray to avoid cache |
3153 | | // misses when the table is large. Note that we also do this in clear(). |
3154 | | destroy_slots(); |
3155 | | ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true, |
3156 | | SooEnabled()); |
3157 | | common().set_reserved_growth(common().reservation_size()); |
3158 | | return end(); |
3159 | | } |
3160 | | while (first != last) { |
3161 | | erase(first++); |
3162 | | } |
3163 | | return last.inner_; |
3164 | | } |
3165 | | |
3166 | | // Moves elements from `src` into `this`. |
3167 | | // If the element already exists in `this`, it is left unmodified in `src`. |
3168 | | template <typename H, typename E> |
3169 | | void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT |
3170 | | assert(this != &src); |
3171 | | // Returns whether insertion took place. |
3172 | | const auto insert_slot = [this](slot_type* src_slot) { |
3173 | | return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)}, |
3174 | | PolicyTraits::element(src_slot)) |
3175 | | .second; |
3176 | | }; |
3177 | | |
3178 | | if (src.is_soo()) { |
3179 | | if (src.empty()) return; |
3180 | | if (insert_slot(src.soo_slot())) src.common().set_empty_soo(); |
3181 | | return; |
3182 | | } |
3183 | | for (auto it = src.begin(), e = src.end(); it != e;) { |
3184 | | auto next = std::next(it); |
3185 | | if (insert_slot(it.slot())) src.erase_meta_only(it); |
3186 | | it = next; |
3187 | | } |
3188 | | } |
3189 | | |
3190 | | template <typename H, typename E> |
3191 | | void merge(raw_hash_set<Policy, H, E, Alloc>&& src) { |
3192 | | merge(src); |
3193 | | } |
3194 | | |
3195 | | node_type extract(const_iterator position) { |
3196 | | AssertIsFull(position.control(), position.inner_.generation(), |
3197 | | position.inner_.generation_ptr(), "extract()"); |
3198 | | auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot()); |
3199 | | if (is_soo()) { |
3200 | | common().set_empty_soo(); |
3201 | | } else { |
3202 | | erase_meta_only(position); |
3203 | | } |
3204 | | return node; |
3205 | | } |
3206 | | |
3207 | | template < |
3208 | | class K = key_type, |
3209 | | typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0> |
3210 | | node_type extract(const key_arg<K>& key) { |
3211 | | auto it = find(key); |
3212 | | return it == end() ? node_type() : extract(const_iterator{it}); |
3213 | | } |
3214 | | |
3215 | | void swap(raw_hash_set& that) noexcept( |
3216 | | IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() && |
3217 | | IsNoThrowSwappable<allocator_type>( |
3218 | | typename AllocTraits::propagate_on_container_swap{})) { |
3219 | | using std::swap; |
3220 | | swap_common(that); |
3221 | | swap(hash_ref(), that.hash_ref()); |
3222 | | swap(eq_ref(), that.eq_ref()); |
3223 | | SwapAlloc(alloc_ref(), that.alloc_ref(), |
3224 | | typename AllocTraits::propagate_on_container_swap{}); |
3225 | | } |
3226 | | |
3227 | | void rehash(size_t n) { |
3228 | | const size_t cap = capacity(); |
3229 | | if (n == 0) { |
3230 | | if (cap == 0 || is_soo()) return; |
3231 | | if (empty()) { |
3232 | | ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false, |
3233 | | SooEnabled()); |
3234 | | return; |
3235 | | } |
3236 | | if (fits_in_soo(size())) { |
3237 | | // When the table is already sampled, we keep it sampled. |
3238 | | if (infoz().IsSampled()) { |
3239 | | const size_t kInitialSampledCapacity = NextCapacity(SooCapacity()); |
3240 | | if (capacity() > kInitialSampledCapacity) { |
3241 | | resize(kInitialSampledCapacity); |
3242 | | } |
3243 | | // This asserts that we didn't lose sampling coverage in `resize`. |
3244 | | assert(infoz().IsSampled()); |
3245 | | return; |
3246 | | } |
3247 | | alignas(slot_type) unsigned char slot_space[sizeof(slot_type)]; |
3248 | | slot_type* tmp_slot = to_slot(slot_space); |
3249 | | transfer(tmp_slot, begin().slot()); |
3250 | | ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false, |
3251 | | SooEnabled()); |
3252 | | transfer(soo_slot(), tmp_slot); |
3253 | | common().set_full_soo(); |
3254 | | return; |
3255 | | } |
3256 | | } |
3257 | | |
3258 | | // bitor is a faster way of doing `max` here. We will round up to the next |
3259 | | // power-of-2-minus-1, so bitor is good enough. |
3260 | | auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); |
3261 | | // n == 0 unconditionally rehashes as per the standard. |
3262 | | if (n == 0 || m > cap) { |
3263 | | resize(m); |
3264 | | |
3265 | | // This is after resize, to ensure that we have completed the allocation |
3266 | | // and have potentially sampled the hashtable. |
3267 | | infoz().RecordReservation(n); |
3268 | | } |
3269 | | } |
3270 | | |
3271 | | void reserve(size_t n) { |
3272 | | const size_t max_size_before_growth = |
3273 | | is_soo() ? SooCapacity() : size() + growth_left(); |
3274 | | if (n > max_size_before_growth) { |
3275 | | size_t m = GrowthToLowerboundCapacity(n); |
3276 | | resize(NormalizeCapacity(m)); |
3277 | | |
3278 | | // This is after resize, to ensure that we have completed the allocation |
3279 | | // and have potentially sampled the hashtable. |
3280 | | infoz().RecordReservation(n); |
3281 | | } |
3282 | | common().reset_reserved_growth(n); |
3283 | | common().set_reservation_size(n); |
3284 | | } |
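 | | // A minimal sketch (not from the original source): `reserve` takes an
 | | // element count, while `rehash` takes a slot count, so reserving before a
 | | // bulk insert performs at most one allocation:
 | | //
 | | //   absl::flat_hash_map<std::string, int> m;
 | | //   m.reserve(input.size());                    // grow once, up front
 | | //   for (const auto& kv : input) m.insert(kv);  // no rehash in the loop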
3285 | | |
3286 | | // Extension API: support for heterogeneous keys. |
3287 | | // |
3288 | | // std::unordered_set<std::string> s; |
3289 | | // // Turns "abc" into std::string. |
3290 | | // s.count("abc"); |
3291 | | // |
3292 | | // ch_set<std::string> s; |
3293 | | // // Uses "abc" directly without copying it into std::string. |
3294 | | // s.count("abc"); |
3295 | | template <class K = key_type> |
3296 | | size_t count(const key_arg<K>& key) const { |
3297 | | return find(key) == end() ? 0 : 1; |
3298 | | } |
3299 | | |
3300 | | // Issues CPU prefetch instructions for the memory needed to find or insert |
3301 | | // a key. Like all lookup functions, this supports heterogeneous keys.
3302 | | // |
3303 | | // NOTE: This is a very low level operation and should not be used without |
3304 | | // specific benchmarks indicating its importance. |
3305 | | template <class K = key_type> |
3306 | | void prefetch(const key_arg<K>& key) const { |
3307 | | if (SooEnabled() ? is_soo() : capacity() == 0) return; |
3308 | | (void)key; |
3309 | | // Avoid probing if we won't be able to prefetch the addresses received. |
3310 | | #ifdef ABSL_HAVE_PREFETCH |
3311 | | prefetch_heap_block(); |
3312 | | auto seq = probe(common(), hash_ref()(key)); |
3313 | | PrefetchToLocalCache(control() + seq.offset()); |
3314 | | PrefetchToLocalCache(slot_array() + seq.offset()); |
3315 | | #endif // ABSL_HAVE_PREFETCH |
3316 | | } |
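 | | // A minimal sketch of the intended batched-lookup pattern (`kLookahead`
 | | // and `Process` are illustrative names, not part of this header):
 | | //
 | | //   constexpr size_t kLookahead = 8;
 | | //   for (size_t i = 0; i < keys.size(); ++i) {
 | | //     if (i + kLookahead < keys.size()) s.prefetch(keys[i + kLookahead]);
 | | //     if (s.contains(keys[i])) Process(keys[i]);  // latency overlapped
 | | //   }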
3317 | | |
3318 | | // The API of find() has two extensions. |
3319 | | // |
3320 | | // 1. The hash can be passed by the user. It must be equal to the hash of the |
3321 | | // key. |
3322 | | // |
3323 | | // 2. The type of the key argument doesn't have to be key_type. This is
3324 | | // so-called heterogeneous key support.
3325 | | template <class K = key_type> |
3326 | | iterator find(const key_arg<K>& key, |
3327 | | size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3328 | | AssertHashEqConsistent(key); |
3329 | | if (is_soo()) return find_soo(key); |
3330 | | return find_non_soo(key, hash); |
3331 | | } |
3332 | | template <class K = key_type> |
3333 | 16 | iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3334 | 16 | AssertHashEqConsistent(key); |
3335 | 16 | if (is_soo()) return find_soo(key); |
3336 | 16 | prefetch_heap_block(); |
3337 | 16 | return find_non_soo(key, hash_ref()(key)); |
3338 | 16 | } |
3339 | | |
3340 | | template <class K = key_type> |
3341 | | const_iterator find(const key_arg<K>& key, |
3342 | | size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3343 | | return const_cast<raw_hash_set*>(this)->find(key, hash); |
3344 | | } |
3345 | | template <class K = key_type> |
3346 | | const_iterator find(const key_arg<K>& key) const |
3347 | 16 | ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3348 | 16 | return const_cast<raw_hash_set*>(this)->find(key); |
3349 | 16 | } |
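 | | // A minimal sketch (not from the original source): a caller can hash once
 | | // and probe several tables with the precomputed value, provided the tables
 | | // share a hasher so that the passed hash equals hash_ref()(key):
 | | //
 | | //   const size_t h = set_a.hash_function()(key);
 | | //   auto it_a = set_a.find(key, h);
 | | //   auto it_b = set_b.find(key, h);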
3350 | | |
3351 | | template <class K = key_type> |
3352 | | bool contains(const key_arg<K>& key) const { |
3353 | | // Barring thread-safety issues, neither the iterator returned by `find()`
3354 | | // nor `end()` can be invalid here:
3355 | | // `find()`'s return value is constructed, used, and then destroyed
3356 | | // entirely within this call.
3357 | | return !find(key).unchecked_equals(end()); |
3358 | | } |
3359 | | |
3360 | | template <class K = key_type> |
3361 | | std::pair<iterator, iterator> equal_range(const key_arg<K>& key) |
3362 | | ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3363 | | auto it = find(key); |
3364 | | if (it != end()) return {it, std::next(it)}; |
3365 | | return {it, it}; |
3366 | | } |
3367 | | template <class K = key_type> |
3368 | | std::pair<const_iterator, const_iterator> equal_range( |
3369 | | const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3370 | | auto it = find(key); |
3371 | | if (it != end()) return {it, std::next(it)}; |
3372 | | return {it, it}; |
3373 | | } |
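 | | // Note: keys are unique in this table, so both `equal_range` overloads
 | | // return a range of length zero or one.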
3374 | | |
3375 | | size_t bucket_count() const { return capacity(); } |
3376 | | float load_factor() const { |
3377 | | return capacity() ? static_cast<double>(size()) / capacity() : 0.0; |
3378 | | } |
3379 | | float max_load_factor() const { return 1.0f; } |
3380 | | void max_load_factor(float) { |
3381 | | // Does nothing. |
3382 | | } |
3383 | | |
3384 | | hasher hash_function() const { return hash_ref(); } |
3385 | | key_equal key_eq() const { return eq_ref(); } |
3386 | | allocator_type get_allocator() const { return alloc_ref(); } |
3387 | | |
3388 | | friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { |
3389 | | if (a.size() != b.size()) return false; |
3390 | | const raw_hash_set* outer = &a; |
3391 | | const raw_hash_set* inner = &b; |
3392 | | if (outer->capacity() > inner->capacity()) std::swap(outer, inner); |
3393 | | for (const value_type& elem : *outer) { |
3394 | | auto it = PolicyTraits::apply(FindElement{*inner}, elem); |
3395 | | if (it == inner->end() || !(*it == elem)) return false; |
3396 | | } |
3397 | | return true; |
3398 | | } |
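 | | // Note on the loop above: it iterates the table with the smaller capacity
 | | // because iteration cost scales with capacity() while each lookup is
 | | // expected O(1), so scanning the denser table minimizes total work.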
3399 | | |
3400 | | friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { |
3401 | | return !(a == b); |
3402 | | } |
3403 | | |
3404 | | template <typename H> |
3405 | | friend typename std::enable_if<H::template is_hashable<value_type>::value, |
3406 | | H>::type |
3407 | | AbslHashValue(H h, const raw_hash_set& s) { |
3408 | | return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), |
3409 | | s.size()); |
3410 | | } |
3411 | | |
3412 | | friend void swap(raw_hash_set& a, |
3413 | | raw_hash_set& b) noexcept(noexcept(a.swap(b))) { |
3414 | | a.swap(b); |
3415 | | } |
3416 | | |
3417 | | private: |
3418 | | template <class Container, typename Enabler> |
3419 | | friend struct absl::container_internal::hashtable_debug_internal:: |
3420 | | HashtableDebugAccess; |
3421 | | |
3422 | | friend struct absl::container_internal::HashtableFreeFunctionsAccess; |
3423 | | |
3424 | | struct FindElement { |
3425 | | template <class K, class... Args> |
3426 | 16 | const_iterator operator()(const K& key, Args&&...) const { |
3427 | 16 | return s.find(key); |
3428 | 16 | } |
3429 | | const raw_hash_set& s; |
3430 | | }; |
3431 | | |
3432 | | struct HashElement { |
3433 | | template <class K, class... Args> |
3434 | 16 | size_t operator()(const K& key, Args&&...) const { |
3435 | 16 | return h(key); |
3436 | 16 | } |
3437 | | const hasher& h; |
3438 | | }; |
3439 | | |
3440 | | template <class K1> |
3441 | | struct EqualElement { |
3442 | | template <class K2, class... Args> |
3443 | 146 | bool operator()(const K2& lhs, Args&&...) const { |
3444 | 146 | return eq(lhs, rhs); |
3445 | 146 | } |
3446 | | const K1& rhs; |
3447 | | const key_equal& eq; |
3448 | | }; |
3449 | | |
3450 | | struct EmplaceDecomposable { |
3451 | | template <class K, class... Args> |
3452 | 16 | std::pair<iterator, bool> operator()(const K& key, Args&&... args) const { |
3453 | 16 | auto res = s.find_or_prepare_insert(key); |
3454 | 16 | if (res.second) { |
3455 | 16 | s.emplace_at(res.first, std::forward<Args>(args)...); |
3456 | 16 | } |
3457 | 16 | return res; |
3458 | 16 | }
3459 | | raw_hash_set& s; |
3460 | | }; |
3461 | | |
3462 | | template <bool do_destroy> |
3463 | | struct InsertSlot { |
3464 | | template <class K, class... Args> |
3465 | | std::pair<iterator, bool> operator()(const K& key, Args&&...) && { |
3466 | | auto res = s.find_or_prepare_insert(key); |
3467 | | if (res.second) { |
3468 | | s.transfer(res.first.slot(), &slot); |
3469 | | } else if (do_destroy) { |
3470 | | s.destroy(&slot); |
3471 | | } |
3472 | | return res; |
3473 | | } |
3474 | | raw_hash_set& s; |
3475 | | // Constructed slot. Either moved into place or destroyed. |
3476 | | slot_type&& slot; |
3477 | | }; |
3478 | | |
3479 | | // TODO(b/303305702): re-enable reentrant validation. |
3480 | | template <typename... Args> |
3481 | 16 | inline void construct(slot_type* slot, Args&&... args) { |
3482 | 16 | PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...); |
3483 | 16 | }
3484 | 0 | inline void destroy(slot_type* slot) { |
3485 | 0 | PolicyTraits::destroy(&alloc_ref(), slot); |
3486 | 0 | } |
3487 | 0 | inline void transfer(slot_type* to, slot_type* from) { |
3488 | 0 | PolicyTraits::transfer(&alloc_ref(), to, from); |
3489 | 0 | } |
3490 | | |
3491 | | // TODO(b/289225379): consider having a helper class that has the impls for |
3492 | | // SOO functionality. |
3493 | | template <class K = key_type> |
3494 | 0 | iterator find_soo(const key_arg<K>& key) { |
3495 | 0 | assert(is_soo()); |
3496 | 0 | return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, |
3497 | 0 | PolicyTraits::element(soo_slot())) |
3498 | 0 | ? end() |
3499 | 0 | : soo_iterator(); |
3500 | 0 | } |
3501 | | |
3502 | | template <class K = key_type> |
3503 | 16 | iterator find_non_soo(const key_arg<K>& key, size_t hash) { |
3504 | 16 | assert(!is_soo()); |
3505 | 16 | auto seq = probe(common(), hash); |
3506 | 16 | const ctrl_t* ctrl = control(); |
3507 | 16 | while (true) { |
3508 | 16 | Group g{ctrl + seq.offset()}; |
3509 | 17 | for (uint32_t i : g.Match(H2(hash))) { |
3510 | 17 | if (ABSL_PREDICT_TRUE(PolicyTraits::apply( |
3511 | 17 | EqualElement<K>{key, eq_ref()}, |
3512 | 17 | PolicyTraits::element(slot_array() + seq.offset(i))))) |
3513 | 16 | return iterator_at(seq.offset(i)); |
3514 | 17 | } |
3515 | 0 | if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); |
3516 | 0 | seq.next(); |
3517 | 0 | assert(seq.index() <= capacity() && "full table!"); |
3518 | 0 | } |
3519 | 16 | } |
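 | | // A scalar (non-SIMD) sketch of the probe loop above, illustrative only;
 | | // `KeysEqual` and `kNotFound` are hypothetical helpers. Each group is first
 | | // scanned for control bytes matching the 7-bit tag H2(hash); if the group
 | | // contains any kEmpty byte, the key cannot appear later in its probe
 | | // sequence, so the search stops:
 | | //
 | | //   size_t find_scalar(size_t hash) {
 | | //     auto seq = probe(common(), hash);
 | | //     while (true) {
 | | //       for (size_t i = 0; i < Group::kWidth; ++i) {  // tag matches first
 | | //         const size_t idx = seq.offset(i);
 | | //         if (control()[idx] == static_cast<ctrl_t>(H2(hash)) &&
 | | //             KeysEqual(idx)) {
 | | //           return idx;
 | | //         }
 | | //       }
 | | //       for (size_t i = 0; i < Group::kWidth; ++i) {  // then empties
 | | //         if (control()[seq.offset(i)] == ctrl_t::kEmpty) return kNotFound;
 | | //       }
 | | //       seq.next();  // next group in the quadratic probe sequence
 | | //     }
 | | //   }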
3520 | | |
3521 | | // Conditionally samples hashtablez for SOO tables. This should be called on |
3522 | | // insertion into an empty SOO table and in copy construction when the size |
3523 | | // can fit in SOO capacity. |
3524 | 0 | inline HashtablezInfoHandle try_sample_soo() { |
3525 | 0 | assert(is_soo()); |
3526 | 0 | if (!ShouldSampleHashtablezInfo<CharAlloc>()) return HashtablezInfoHandle{}; |
3527 | 0 | return Sample(sizeof(slot_type), sizeof(key_type), sizeof(value_type), |
3528 | 0 | SooCapacity()); |
3529 | 0 | } |
3530 | | |
3531 | 0 | inline void destroy_slots() { |
3532 | 0 | assert(!is_soo()); |
3533 | 0 | if (PolicyTraits::template destroy_is_trivial<Alloc>()) return; |
3534 | 0 | IterateOverFullSlots( |
3535 | 0 | common(), slot_array(), |
3536 | 0 | [&](const ctrl_t*, slot_type* slot) |
3537 | 0 | ABSL_ATTRIBUTE_ALWAYS_INLINE { this->destroy(slot); }); |
3538 | 0 | } |
3539 | | |
3540 | 0 | inline void dealloc() { |
3541 | 0 | assert(capacity() != 0); |
3542 | | // Unpoison before returning the memory to the allocator. |
3543 | 0 | SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity()); |
3544 | 0 | infoz().Unregister(); |
3545 | 0 | Deallocate<BackingArrayAlignment(alignof(slot_type))>( |
3546 | 0 | &alloc_ref(), common().backing_array_start(), |
3547 | 0 | common().alloc_size(sizeof(slot_type), alignof(slot_type))); |
3548 | 0 | } |
3549 | | |
3550 | 0 | inline void destructor_impl() { |
3551 | 0 | if (capacity() == 0) return; |
3552 | 0 | if (is_soo()) { |
3553 | 0 | if (!empty()) { |
3554 | 0 | ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(soo_slot())); |
3555 | 0 | } |
3556 | 0 | return; |
3557 | 0 | } |
3558 | 0 | destroy_slots(); |
3559 | 0 | dealloc(); |
3560 | 0 | } |
3561 | | |
3562 | | // Erases, but does not destroy, the value pointed to by `it`. |
3563 | | // |
3564 | | // This merely updates the pertinent control byte. This can be used in |
3565 | | // conjunction with Policy::transfer to move the object to another place. |
3566 | | void erase_meta_only(const_iterator it) { |
3567 | | assert(!is_soo()); |
3568 | | EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()), |
3569 | | sizeof(slot_type)); |
3570 | | } |
3571 | | |
3572 | 0 | size_t hash_of(slot_type* slot) const { |
3573 | 0 | return PolicyTraits::apply(HashElement{hash_ref()}, |
3574 | 0 | PolicyTraits::element(slot)); |
3575 | 0 | } |
3576 | | |
3577 | | // Resizes the table to the new capacity and moves all elements to their
3578 | | // new positions accordingly.
3579 | | // |
3580 | | // Note that, for better performance, instead of
3581 | | // find_first_non_full(common(), hash),
3582 | | // HashSetResizeHelper::FindFirstNonFullAfterResize(
3583 | | // common(), old_capacity, hash)
3584 | | // can be called right after `resize`.
3585 | 0 | void resize(size_t new_capacity) { |
3586 | 0 | raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{}); |
3587 | 0 | } |
3588 | | |
3589 | | // As above, except that we also accept a pre-sampled, forced infoz for |
3590 | | // SOO tables, since they need to switch from SOO to heap in order to |
3591 | | // store the infoz. |
3592 | 0 | void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) { |
3593 | 0 | assert(forced_infoz.IsSampled()); |
3594 | 0 | raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()), |
3595 | 0 | forced_infoz); |
3596 | 0 | } |
3597 | | |
3598 | | // Resizes set to the new capacity. |
3599 | | // It is a static function in order to use its pointer in GetPolicyFunctions. |
3600 | | ABSL_ATTRIBUTE_NOINLINE static void resize_impl( |
3601 | | CommonFields& common, size_t new_capacity, |
3602 | 8 | HashtablezInfoHandle forced_infoz) { |
3603 | 8 | raw_hash_set* set = reinterpret_cast<raw_hash_set*>(&common); |
3604 | 8 | assert(IsValidCapacity(new_capacity)); |
3605 | 8 | assert(!set->fits_in_soo(new_capacity)); |
3606 | 8 | const bool was_soo = set->is_soo(); |
3607 | 8 | const bool had_soo_slot = was_soo && !set->empty(); |
3608 | 8 | const ctrl_t soo_slot_h2 = |
3609 | 8 | had_soo_slot ? static_cast<ctrl_t>(H2(set->hash_of(set->soo_slot()))) |
3610 | 8 | : ctrl_t::kEmpty; |
3611 | 8 | HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot, |
3612 | 8 | forced_infoz); |
3613 | | // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in |
3614 | | // HashSetResizeHelper constructor because it can't transfer slots when |
3615 | | // transfer_uses_memcpy is false. |
3616 | | // TODO(b/289225379): try to handle more of the SOO cases inside |
3617 | | // InitializeSlots. See comment on cl/555990034 snapshot #63. |
3618 | 8 | if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) { |
3619 | 8 | resize_helper.old_heap_or_soo() = common.heap_or_soo(); |
3620 | 8 | } else { |
3621 | 0 | set->transfer(set->to_slot(resize_helper.old_soo_data()), |
3622 | 0 | set->soo_slot()); |
3623 | 0 | } |
3624 | 8 | common.set_capacity(new_capacity); |
3625 | | // Note that `InitializeSlots` performs a different number of initialization
3626 | | // steps depending on the values of `transfer_uses_memcpy` and the capacities.
3627 | | // Refer to the comment in `InitializeSlots` for more details. |
3628 | 8 | const bool grow_single_group = |
3629 | 8 | resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type), |
3630 | 8 | PolicyTraits::transfer_uses_memcpy(), |
3631 | 8 | SooEnabled(), alignof(slot_type)>( |
3632 | 8 | common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type), |
3633 | 8 | sizeof(value_type)); |
3634 | | |
3635 | | // In the SooEnabled() case, capacity is never 0 so we don't check. |
3636 | 8 | if (!SooEnabled() && resize_helper.old_capacity() == 0) { |
3637 | | // InitializeSlots did all the work including infoz().RecordRehash(). |
3638 | 2 | return; |
3639 | 2 | } |
3640 | 6 | assert(resize_helper.old_capacity() > 0); |
3641 | | // Nothing more to do in this case. |
3642 | 6 | if (was_soo && !had_soo_slot) return; |
3643 | | |
3644 | 6 | slot_type* new_slots = set->slot_array(); |
3645 | 6 | if (grow_single_group) { |
3646 | 6 | if (PolicyTraits::transfer_uses_memcpy()) { |
3647 | | // InitializeSlots did all the work. |
3648 | 6 | return; |
3649 | 6 | } |
3650 | 0 | if (was_soo) { |
3651 | 0 | set->transfer(new_slots + resize_helper.SooSlotIndex(), |
3652 | 0 | to_slot(resize_helper.old_soo_data())); |
3653 | 0 | return; |
3654 | 0 | } else { |
3655 | | // We want GrowSizeIntoSingleGroup to be called here in order to make |
3656 | | // InitializeSlots not depend on PolicyTraits. |
3657 | 0 | resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common, |
3658 | 0 | set->alloc_ref()); |
3659 | 0 | } |
3660 | 0 | } else { |
3661 | | // InitializeSlots prepares control bytes to correspond to an empty table.
3662 | 0 | const auto insert_slot = [&](slot_type* slot) { |
3663 | 0 | size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()}, |
3664 | 0 | PolicyTraits::element(slot)); |
3665 | 0 | auto target = find_first_non_full(common, hash); |
3666 | 0 | SetCtrl(common, target.offset, H2(hash), sizeof(slot_type)); |
3667 | 0 | set->transfer(new_slots + target.offset, slot); |
3668 | 0 | return target.probe_length; |
3669 | 0 | }; |
3670 | 0 | if (was_soo) { |
3671 | 0 | insert_slot(to_slot(resize_helper.old_soo_data())); |
3672 | 0 | return; |
3673 | 0 | } else { |
3674 | 0 | auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots()); |
3675 | 0 | size_t total_probe_length = 0; |
3676 | 0 | for (size_t i = 0; i != resize_helper.old_capacity(); ++i) { |
3677 | 0 | if (IsFull(resize_helper.old_ctrl()[i])) { |
3678 | 0 | total_probe_length += insert_slot(old_slots + i); |
3679 | 0 | } |
3680 | 0 | } |
3681 | 0 | common.infoz().RecordRehash(total_probe_length); |
3682 | 0 | } |
3683 | 0 | } |
3684 | 0 | resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(set->alloc_ref()), |
3685 | 0 | sizeof(slot_type)); |
3686 | 0 | } |
3687 | | |
3688 | | // Casting directly from e.g. char* to slot_type* can cause compilation errors |
3689 | | // on Objective-C. This function converts to void* first, avoiding the issue.
3690 | 0 | static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); } |
3691 | | |
3692 | | // Requires that lhs does not have a full SOO slot. |
3693 | | static void move_common(bool that_is_full_soo, allocator_type& rhs_alloc, |
3694 | | CommonFields& lhs, CommonFields&& rhs) { |
3695 | | if (PolicyTraits::transfer_uses_memcpy() || !that_is_full_soo) { |
3696 | | lhs = std::move(rhs); |
3697 | | } else { |
3698 | | lhs.move_non_heap_or_soo_fields(rhs); |
3699 | | // TODO(b/303305702): add reentrancy guard. |
3700 | | PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()), |
3701 | | to_slot(rhs.soo_data())); |
3702 | | } |
3703 | | } |
3704 | | |
3705 | | // Swaps common fields making sure to avoid memcpy'ing a full SOO slot if we |
3706 | | // aren't allowed to do so. |
3707 | | void swap_common(raw_hash_set& that) { |
3708 | | using std::swap; |
3709 | | if (PolicyTraits::transfer_uses_memcpy()) { |
3710 | | swap(common(), that.common()); |
3711 | | return; |
3712 | | } |
3713 | | CommonFields tmp = CommonFields::CreateDefault<SooEnabled()>(); |
3714 | | const bool that_is_full_soo = that.is_full_soo(); |
3715 | | move_common(that_is_full_soo, that.alloc_ref(), tmp, |
3716 | | std::move(that.common())); |
3717 | | move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common())); |
3718 | | move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp)); |
3719 | | } |
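 | | // Note on the three `move_common` calls above: they form a standard
 | | // three-way swap through `tmp` (that -> tmp, this -> that, tmp -> this),
 | | // transferring a full SOO slot element-wise when memcpy is not allowed.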
3720 | | |
3721 | 0 | void maybe_increment_generation_or_rehash_on_move() { |
3722 | 0 | if (!SwisstableGenerationsEnabled() || capacity() == 0 || is_soo()) { |
3723 | 0 | return; |
3724 | 0 | } |
3725 | 0 | common().increment_generation(); |
3726 | 0 | if (!empty() && common().should_rehash_for_bug_detection_on_move()) { |
3727 | 0 | resize(capacity()); |
3728 | 0 | } |
3729 | 0 | } |
3730 | | |
3731 | | template <bool propagate_alloc> |
3732 | | raw_hash_set& assign_impl(raw_hash_set&& that) { |
3733 | | // We don't bother checking for this/that aliasing. We just need to avoid |
3734 | | // breaking the invariants in that case. |
3735 | | destructor_impl(); |
3736 | | move_common(that.is_full_soo(), that.alloc_ref(), common(), |
3737 | | std::move(that.common())); |
3738 | | // TODO(b/296061262): move instead of copying hash/eq/alloc. |
3739 | | hash_ref() = that.hash_ref(); |
3740 | | eq_ref() = that.eq_ref(); |
3741 | | CopyAlloc(alloc_ref(), that.alloc_ref(), |
3742 | | std::integral_constant<bool, propagate_alloc>()); |
3743 | | that.common() = CommonFields::CreateDefault<SooEnabled()>(); |
3744 | | maybe_increment_generation_or_rehash_on_move(); |
3745 | | return *this; |
3746 | | } |
3747 | | |
3748 | | raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) { |
3749 | | const size_t size = that.size(); |
3750 | | if (size == 0) return *this; |
3751 | | reserve(size); |
3752 | | for (iterator it = that.begin(); it != that.end(); ++it) { |
3753 | | insert(std::move(PolicyTraits::element(it.slot()))); |
3754 | | that.destroy(it.slot()); |
3755 | | } |
3756 | | if (!that.is_soo()) that.dealloc(); |
3757 | | that.common() = CommonFields::CreateDefault<SooEnabled()>(); |
3758 | | maybe_increment_generation_or_rehash_on_move(); |
3759 | | return *this; |
3760 | | } |
3761 | | |
3762 | | raw_hash_set& move_assign(raw_hash_set&& that, |
3763 | | std::true_type /*propagate_alloc*/) { |
3764 | | return assign_impl<true>(std::move(that)); |
3765 | | } |
3766 | | raw_hash_set& move_assign(raw_hash_set&& that, |
3767 | | std::false_type /*propagate_alloc*/) { |
3768 | | if (alloc_ref() == that.alloc_ref()) { |
3769 | | return assign_impl<false>(std::move(that)); |
3770 | | } |
3771 | | // Aliasing can't happen here because allocs would compare equal above. |
3772 | | assert(this != &that); |
3773 | | destructor_impl(); |
3774 | | // We can't take over `that`'s memory, so we need to move each element.
3775 | | // While moving elements, `this` should have `that`'s hash/eq, so copy
3776 | | // hash/eq before moving elements.
3777 | | // TODO(b/296061262): move instead of copying hash/eq. |
3778 | | hash_ref() = that.hash_ref(); |
3779 | | eq_ref() = that.eq_ref(); |
3780 | | return move_elements_allocs_unequal(std::move(that)); |
3781 | | } |
3782 | | |
3783 | | template <class K> |
3784 | 0 | std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) { |
3785 | 0 | if (empty()) { |
3786 | 0 | const HashtablezInfoHandle infoz = try_sample_soo(); |
3787 | 0 | if (infoz.IsSampled()) { |
3788 | 0 | resize_with_soo_infoz(infoz); |
3789 | 0 | } else { |
3790 | 0 | common().set_full_soo(); |
3791 | 0 | return {soo_iterator(), true}; |
3792 | 0 | } |
3793 | 0 | } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, |
3794 | 0 | PolicyTraits::element(soo_slot()))) { |
3795 | 0 | return {soo_iterator(), false}; |
3796 | 0 | } else { |
3797 | 0 | resize(NextCapacity(SooCapacity())); |
3798 | 0 | } |
3799 | 0 | const size_t index = |
3800 | 0 | PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common()); |
3801 | 0 | return {iterator_at(index), true}; |
3802 | 0 | } |
3803 | | |
3804 | | template <class K> |
3805 | 16 | std::pair<iterator, bool> find_or_prepare_insert_non_soo(const K& key) { |
3806 | 16 | assert(!is_soo()); |
3807 | 16 | prefetch_heap_block(); |
3808 | 16 | auto hash = hash_ref()(key); |
3809 | 16 | auto seq = probe(common(), hash); |
3810 | 16 | const ctrl_t* ctrl = control(); |
3811 | 16 | while (true) { |
3812 | 16 | Group g{ctrl + seq.offset()}; |
3813 | 16 | for (uint32_t i : g.Match(H2(hash))) { |
3814 | 1 | if (ABSL_PREDICT_TRUE(PolicyTraits::apply( |
3815 | 1 | EqualElement<K>{key, eq_ref()}, |
3816 | 1 | PolicyTraits::element(slot_array() + seq.offset(i))))) |
3817 | 0 | return {iterator_at(seq.offset(i)), false}; |
3818 | 1 | } |
3819 | 16 | auto mask_empty = g.MaskEmpty(); |
3820 | 16 | if (ABSL_PREDICT_TRUE(mask_empty)) { |
3821 | 16 | size_t target = seq.offset( |
3822 | 16 | GetInsertionOffset(mask_empty, capacity(), hash, control())); |
3823 | 16 | return {iterator_at(PrepareInsertNonSoo(common(), hash, |
3824 | 16 | FindInfo{target, seq.index()}, |
3825 | 16 | GetPolicyFunctions())), |
3826 | 16 | true}; |
3827 | 16 | } |
3828 | 0 | seq.next(); |
3829 | 0 | assert(seq.index() <= capacity() && "full table!"); |
3830 | 0 | } |
3831 | 16 | } |
3832 | | |
3833 | | protected: |
3834 | | // Asserts that hash and equal functors provided by the user are consistent, |
3835 | | // meaning that `eq(k1, k2)` implies `hash(k1)==hash(k2)`. |
3836 | | template <class K> |
3837 | 32 | void AssertHashEqConsistent(ABSL_ATTRIBUTE_UNUSED const K& key) { |
3838 | 32 | #ifndef NDEBUG |
3839 | 32 | if (empty()) return; |
3840 | | |
3841 | 30 | const size_t hash_of_arg = hash_ref()(key); |
3842 | 128 | const auto assert_consistent = [&](const ctrl_t*, slot_type* slot) { |
3843 | 128 | const value_type& element = PolicyTraits::element(slot); |
3844 | 128 | const bool is_key_equal = |
3845 | 128 | PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element); |
3846 | 128 | if (!is_key_equal) return; |
3847 | | |
3848 | 16 | const size_t hash_of_slot = |
3849 | 16 | PolicyTraits::apply(HashElement{hash_ref()}, element); |
3850 | 16 | const bool is_hash_equal = hash_of_arg == hash_of_slot; |
3851 | 16 | if (!is_hash_equal) { |
3852 | | // In this case, we're going to crash. Do a couple of other checks for |
3853 | | // idempotence issues. Recalculating hash/eq here is also convenient for |
3854 | | // debugging with gdb/lldb. |
3855 | 0 | const size_t once_more_hash_arg = hash_ref()(key); |
3856 | 0 | assert(hash_of_arg == once_more_hash_arg && "hash is not idempotent."); |
3857 | 0 | const size_t once_more_hash_slot = |
3858 | 0 | PolicyTraits::apply(HashElement{hash_ref()}, element); |
3859 | 0 | assert(hash_of_slot == once_more_hash_slot && |
3860 | 0 | "hash is not idempotent."); |
3861 | 0 | const bool once_more_eq = |
3862 | 0 | PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element); |
3863 | 0 | assert(is_key_equal == once_more_eq && "equality is not idempotent."); |
3864 | 0 | } |
3865 | 16 | assert((!is_key_equal || is_hash_equal) && |
3866 | 16 | "eq(k1, k2) must imply that hash(k1) == hash(k2). " |
3867 | 16 | "hash/eq functors are inconsistent."); |
3868 | 16 | }; |
3869 | | |
3870 | 30 | if (is_soo()) { |
3871 | 0 | assert_consistent(/*unused*/ nullptr, soo_slot()); |
3872 | 0 | return; |
3873 | 0 | } |
3874 | | // We only do validation for small tables so that it's constant time. |
3875 | 30 | if (capacity() > 16) return; |
3876 | 30 | IterateOverFullSlots(common(), slot_array(), assert_consistent); |
3877 | 30 | #endif |
3878 | 30 | } |
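 | | // An example of the inconsistency this check catches (assuming
 | | // absl::EqualsIgnoreCase from absl/strings/match.h):
 | | //
 | | //   struct BadEq {
 | | //     bool operator()(absl::string_view a, absl::string_view b) const {
 | | //       return absl::EqualsIgnoreCase(a, b);  // case-insensitive equality
 | | //     }
 | | //   };
 | | //
 | | // Paired with the default, case-sensitive hasher, eq("abc", "ABC") holds
 | | // while the hashes differ, so lookups can silently miss; this assertion
 | | // fires in debug builds instead.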
3879 | | |
3880 | | // Attempts to find `key` in the table; if it isn't found, returns an iterator |
3881 | | // to where the value can be inserted, with the control byte already set to
3882 | | // `key`'s H2. Returns a bool indicating whether an insertion can take place. |
3883 | | template <class K> |
3884 | 16 | std::pair<iterator, bool> find_or_prepare_insert(const K& key) { |
3885 | 16 | AssertHashEqConsistent(key); |
3886 | 16 | if (is_soo()) return find_or_prepare_insert_soo(key); |
3887 | 16 | return find_or_prepare_insert_non_soo(key); |
3888 | 16 | } |
3889 | | |
3890 | | // Constructs the value in the space pointed by the iterator. This only works |
3891 | | // after an unsuccessful find_or_prepare_insert() and before any other |
3892 | | // modifications happen in the raw_hash_set. |
3893 | | // |
3894 | | // PRECONDITION: iter was returned from find_or_prepare_insert(k), where k is |
3895 | | // the key decomposed from `forward<Args>(args)...`, and the bool returned by |
3896 | | // find_or_prepare_insert(k) was true. |
3897 | | // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...). |
3898 | | template <class... Args> |
3899 | 16 | void emplace_at(iterator iter, Args&&... args) { |
3900 | 16 | construct(iter.slot(), std::forward<Args>(args)...); |
3901 | | |
3902 | 16 | assert(PolicyTraits::apply(FindElement{*this}, *iter) == iter && |
3903 | 16 | "constructed value does not match the lookup key"); |
3904 | 16 | }
3905 | | |
3906 | 32 | iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3907 | 32 | return {control() + i, slot_array() + i, common().generation_ptr()}; |
3908 | 32 | } |
3909 | | const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND { |
3910 | | return const_cast<raw_hash_set*>(this)->iterator_at(i); |
3911 | | } |
3912 | | |
3913 | | reference unchecked_deref(iterator it) { return it.unchecked_deref(); } |
3914 | | |
3915 | | private: |
3916 | | friend struct RawHashSetTestOnlyAccess; |
3917 | | |
3918 | | // The number of slots we can still fill without needing to rehash. |
3919 | | // |
3920 | | // This is stored separately due to tombstones: we do not include tombstones |
3921 | | // in the growth capacity, because we'd like to rehash when the table is |
3922 | | // otherwise filled with tombstones: otherwise, probe sequences might get |
3923 | | // unacceptably long without triggering a rehash. Callers can also force a |
3924 | | // rehash via the standard `rehash(0)`, which will recompute this value as a |
3925 | | // side-effect. |
3926 | | // |
3927 | | // See `CapacityToGrowth()`. |
3928 | | size_t growth_left() const { |
3929 | | assert(!is_soo()); |
3930 | | return common().growth_left(); |
3931 | | } |
3932 | | |
3933 | | GrowthInfo& growth_info() { |
3934 | | assert(!is_soo()); |
3935 | | return common().growth_info(); |
3936 | | } |
3937 | | GrowthInfo growth_info() const { |
3938 | | assert(!is_soo()); |
3939 | | return common().growth_info(); |
3940 | | } |
3941 | | |
3942 | | // Prefetch the heap-allocated memory region to resolve potential TLB and
3943 | | // cache misses. This is intended to overlap with the computation of a
3944 | | // key's hash.
3945 | 32 | void prefetch_heap_block() const { |
3946 | 32 | assert(!is_soo()); |
3947 | 32 | #if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) |
3948 | 32 | __builtin_prefetch(control(), 0, 1); |
3949 | 32 | #endif |
3950 | 32 | } |
3951 | | |
3952 | 110 | CommonFields& common() { return settings_.template get<0>(); } |
3953 | 608 | const CommonFields& common() const { return settings_.template get<0>(); } |
3954 | | |
3955 | 112 | ctrl_t* control() const { |
3956 | 112 | assert(!is_soo()); |
3957 | 112 | return common().control(); |
3958 | 112 | } |
3959 | 86 | slot_type* slot_array() const { |
3960 | 86 | assert(!is_soo()); |
3961 | 86 | return static_cast<slot_type*>(common().slot_array()); |
3962 | 86 | } |
3963 | 0 | slot_type* soo_slot() { |
3964 | 0 | assert(is_soo()); |
3965 | 0 | return static_cast<slot_type*>(common().soo_data()); |
3966 | 0 | } |
3967 | | const slot_type* soo_slot() const { |
3968 | | return const_cast<raw_hash_set*>(this)->soo_slot(); |
3969 | | } |
3970 | 0 | iterator soo_iterator() { |
3971 | 0 | return {SooControl(), soo_slot(), common().generation_ptr()}; |
3972 | 0 | } |
3973 | | const_iterator soo_iterator() const { |
3974 | | return const_cast<raw_hash_set*>(this)->soo_iterator(); |
3975 | | } |
3976 | 0 | HashtablezInfoHandle infoz() { |
3977 | 0 | assert(!is_soo()); |
3978 | 0 | return common().infoz(); |
3979 | 0 | } |
3980 | | |
3981 | 78 | hasher& hash_ref() { return settings_.template get<1>(); } |
3982 | 0 | const hasher& hash_ref() const { return settings_.template get<1>(); } |
3983 | 146 | key_equal& eq_ref() { return settings_.template get<2>(); } |
3984 | | const key_equal& eq_ref() const { return settings_.template get<2>(); } |
3985 | 24 | allocator_type& alloc_ref() { return settings_.template get<3>(); } |
3986 | | const allocator_type& alloc_ref() const { |
3987 | | return settings_.template get<3>(); |
3988 | | } |
3989 | | |
3990 | 0 | static const void* get_hash_ref_fn(const CommonFields& common) { |
3991 | 0 | auto* h = reinterpret_cast<const raw_hash_set*>(&common); |
3992 | 0 | return &h->hash_ref(); |
3993 | 0 | } |
3994 | 0 | static void transfer_slot_fn(void* set, void* dst, void* src) { |
3995 | 0 | auto* h = static_cast<raw_hash_set*>(set); |
3996 | 0 | h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src)); |
3997 | 0 | } |
3998 | | // Note: dealloc_fn will only be used if we have a non-standard allocator. |
3999 | 0 | static void dealloc_fn(CommonFields& common, const PolicyFunctions&) { |
4000 | 0 | auto* set = reinterpret_cast<raw_hash_set*>(&common); |
4001 | 0 |
4002 | 0 | // Unpoison before returning the memory to the allocator. |
4003 | 0 | SanitizerUnpoisonMemoryRegion(common.slot_array(), |
4004 | 0 | sizeof(slot_type) * common.capacity()); |
4005 | 0 |
4006 | 0 | common.infoz().Unregister(); |
4007 | 0 | Deallocate<BackingArrayAlignment(alignof(slot_type))>( |
4008 | 0 | &set->alloc_ref(), common.backing_array_start(), |
4009 | 0 | common.alloc_size(sizeof(slot_type), alignof(slot_type))); |
4010 | 0 | } |
4011 | | |
4012 | 16 | static const PolicyFunctions& GetPolicyFunctions() { |
4013 | 16 | static constexpr PolicyFunctions value = { |
4014 | 16 | sizeof(slot_type), |
4015 | | // TODO(b/328722020): try to type erase |
4016 | | // for standard layout and alignof(Hash) <= alignof(CommonFields). |
4017 | 16 | std::is_empty<hasher>::value ? &GetHashRefForEmptyHasher |
4018 | 16 | : &raw_hash_set::get_hash_ref_fn, |
4019 | 16 | PolicyTraits::template get_hash_slot_fn<hasher>(), |
4020 | 16 | PolicyTraits::transfer_uses_memcpy() |
4021 | 16 | ? TransferRelocatable<sizeof(slot_type)> |
4022 | 16 | : &raw_hash_set::transfer_slot_fn, |
4023 | 16 | (std::is_same<SlotAlloc, std::allocator<slot_type>>::value |
4024 | 16 | ? &DeallocateStandard<alignof(slot_type)> |
4025 | 16 | : &raw_hash_set::dealloc_fn), |
4026 | 16 | &raw_hash_set::resize_impl, |
4027 | 16 | }; |
4028 | 16 | return value; |
4029 | 16 | } |
4030 | | |
4031 | | // Bundle together CommonFields plus other objects which might be empty. |
4032 | | // CompressedTuple will ensure that sizeof is not affected by any of the empty |
4033 | | // fields that occur after CommonFields. |
4034 | | absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal, |
4035 | | allocator_type> |
4036 | | settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{}, |
4037 | | key_equal{}, allocator_type{}}; |
4038 | | }; |
4039 | | |
4040 | | // Friend access for free functions in raw_hash_set.h. |
4041 | | struct HashtableFreeFunctionsAccess { |
4042 | | template <class Predicate, typename Set> |
4043 | | static typename Set::size_type EraseIf(Predicate& pred, Set* c) { |
4044 | | if (c->empty()) { |
4045 | | return 0; |
4046 | | } |
4047 | | if (c->is_soo()) { |
4048 | | auto it = c->soo_iterator(); |
4049 | | if (!pred(*it)) { |
4050 | | assert(c->size() == 1 && "hash table was modified unexpectedly"); |
4051 | | return 0; |
4052 | | } |
4053 | | c->destroy(it.slot()); |
4054 | | c->common().set_empty_soo(); |
4055 | | return 1; |
4056 | | } |
4057 | | ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size(); |
4058 | | size_t num_deleted = 0; |
4059 | | IterateOverFullSlots( |
4060 | | c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) { |
4061 | | if (pred(Set::PolicyTraits::element(slot))) { |
4062 | | c->destroy(slot); |
4063 | | EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()), |
4064 | | sizeof(*slot)); |
4065 | | ++num_deleted; |
4066 | | } |
4067 | | }); |
4068 | | // NOTE: IterateOverFullSlots allows removal of the current element, so we
4069 | | // additionally verify the size here.
4070 | | assert(original_size_for_assert - num_deleted == c->size() && |
4071 | | "hash table was modified unexpectedly"); |
4072 | | return num_deleted; |
4073 | | } |
4074 | | |
4075 | | template <class Callback, typename Set> |
4076 | | static void ForEach(Callback& cb, Set* c) { |
4077 | | if (c->empty()) { |
4078 | | return; |
4079 | | } |
4080 | | if (c->is_soo()) { |
4081 | | cb(*c->soo_iterator()); |
4082 | | return; |
4083 | | } |
4084 | | using ElementTypeWithConstness = decltype(*c->begin()); |
4085 | | IterateOverFullSlots( |
4086 | | c->common(), c->slot_array(), [&cb](const ctrl_t*, auto* slot) { |
4087 | | ElementTypeWithConstness& element = Set::PolicyTraits::element(slot); |
4088 | | cb(element); |
4089 | | }); |
4090 | | } |
4091 | | }; |
4092 | | |
4093 | | // Erases all elements that satisfy the predicate `pred` from the container `c`. |
4094 | | template <typename P, typename H, typename E, typename A, typename Predicate> |
4095 | | typename raw_hash_set<P, H, E, A>::size_type EraseIf( |
4096 | | Predicate& pred, raw_hash_set<P, H, E, A>* c) { |
4097 | | return HashtableFreeFunctionsAccess::EraseIf(pred, c); |
4098 | | } |
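 | | // A minimal sketch of typical use through the public `absl::erase_if`
 | | // overloads declared in the container headers, which forward here:
 | | //
 | | //   absl::flat_hash_map<int, std::string> m = ...;
 | | //   size_t removed =
 | | //       absl::erase_if(m, [](const auto& kv) { return kv.first % 2 == 0; });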
4099 | | |
4100 | | // Calls `cb` for all elements in the container `c`. |
4101 | | template <typename P, typename H, typename E, typename A, typename Callback> |
4102 | | void ForEach(Callback& cb, raw_hash_set<P, H, E, A>* c) { |
4103 | | return HashtableFreeFunctionsAccess::ForEach(cb, c); |
4104 | | } |
4105 | | template <typename P, typename H, typename E, typename A, typename Callback> |
4106 | | void ForEach(Callback& cb, const raw_hash_set<P, H, E, A>* c) { |
4107 | | return HashtableFreeFunctionsAccess::ForEach(cb, c); |
4108 | | } |
4109 | | |
4110 | | namespace hashtable_debug_internal { |
4111 | | template <typename Set> |
4112 | | struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> { |
4113 | | using Traits = typename Set::PolicyTraits; |
4114 | | using Slot = typename Traits::slot_type; |
4115 | | |
4116 | | static size_t GetNumProbes(const Set& set, |
4117 | | const typename Set::key_type& key) { |
4118 | | if (set.is_soo()) return 0; |
4119 | | size_t num_probes = 0; |
4120 | | size_t hash = set.hash_ref()(key); |
4121 | | auto seq = probe(set.common(), hash); |
4122 | | const ctrl_t* ctrl = set.control(); |
4123 | | while (true) { |
4124 | | container_internal::Group g{ctrl + seq.offset()}; |
4125 | | for (uint32_t i : g.Match(container_internal::H2(hash))) { |
4126 | | if (Traits::apply( |
4127 | | typename Set::template EqualElement<typename Set::key_type>{ |
4128 | | key, set.eq_ref()}, |
4129 | | Traits::element(set.slot_array() + seq.offset(i)))) |
4130 | | return num_probes; |
4131 | | ++num_probes; |
4132 | | } |
4133 | | if (g.MaskEmpty()) return num_probes; |
4134 | | seq.next(); |
4135 | | ++num_probes; |
4136 | | } |
4137 | | } |
4138 | | |
4139 | | static size_t AllocatedByteSize(const Set& c) { |
4140 | | size_t capacity = c.capacity(); |
4141 | | if (capacity == 0) return 0; |
4142 | | size_t m = |
4143 | | c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot)); |
4144 | | |
4145 | | size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr)); |
4146 | | if (per_slot != ~size_t{}) { |
4147 | | m += per_slot * c.size(); |
4148 | | } else { |
4149 | | for (auto it = c.begin(); it != c.end(); ++it) { |
4150 | | m += Traits::space_used(it.slot()); |
4151 | | } |
4152 | | } |
4153 | | return m; |
4154 | | } |
4155 | | }; |
4156 | | |
4157 | | } // namespace hashtable_debug_internal |
4158 | | } // namespace container_internal |
4159 | | ABSL_NAMESPACE_END |
4160 | | } // namespace absl |
4161 | | |
4162 | | #undef ABSL_SWISSTABLE_ENABLE_GENERATIONS |
4163 | | #undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED |
4164 | | #undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN |
4165 | | |
4166 | | #endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ |