Coverage Report

Created: 2025-08-25 06:55

/src/abseil-cpp/absl/container/internal/raw_hash_set.h
Line
Count
Source
1
// Copyright 2018 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// An open-addressing
16
// hashtable with quadratic probing.
17
//
18
// This is a low level hashtable on top of which different interfaces can be
19
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
20
//
21
// The table interface is similar to that of std::unordered_set. Notable
22
// differences are that most member functions support heterogeneous keys when
23
// BOTH the hash and eq functions are marked as transparent. They do so by
24
// providing a typedef called `is_transparent`.
25
//
26
// When heterogeneous lookup is enabled, functions that take key_type act as if
27
// they have an overload set like:
28
//
29
//   iterator find(const key_type& key);
30
//   template <class K>
31
//   iterator find(const K& key);
32
//
33
//   size_type erase(const key_type& key);
34
//   template <class K>
35
//   size_type erase(const K& key);
36
//
37
//   std::pair<iterator, iterator> equal_range(const key_type& key);
38
//   template <class K>
39
//   std::pair<iterator, iterator> equal_range(const K& key);
40
//
41
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
42
// exist.
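//
// For illustration only (not part of this file): containers built on this
// table, such as `absl::flat_hash_set<std::string>`, use transparent default
// hash and equality functors, so a lookup can avoid constructing a temporary
// `std::string`:
//
//   absl::flat_hash_set<std::string> names = {"alpha", "beta"};
//   absl::string_view key = "alpha";
//   auto it = names.find(key);  // calls the `find(const K&)` overload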
43
//
44
// In addition, the pointer-to-element and iterator stability guarantees are
45
// weaker: all iterators and pointers are invalidated after a new element is
46
// inserted.
47
//
48
// IMPLEMENTATION DETAILS
49
//
50
// # Table Layout
51
//
52
// A raw_hash_set's backing array consists of control bytes followed by slots
53
// that may or may not contain objects.
54
//
55
// The layout of the backing array, for `capacity` slots, is thus, as a
56
// pseudo-struct:
57
//
58
//   struct BackingArray {
59
//     // Sampling handler. This field isn't present when the sampling is
60
//     // disabled or this allocation hasn't been selected for sampling.
61
//     HashtablezInfoHandle infoz_;
62
//     // The number of elements we can insert before growing the capacity.
63
//     size_t growth_left;
64
//     // Control bytes for the "real" slots.
65
//     ctrl_t ctrl[capacity];
66
//     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
67
//     // stop and serves no other purpose.
68
//     ctrl_t sentinel;
69
//     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
70
//     // that if a probe sequence picks a value near the end of `ctrl`,
71
//     // `Group` will have valid control bytes to look at.
72
//     ctrl_t clones[kWidth - 1];
73
//     // The actual slot data.
74
//     slot_type slots[capacity];
75
//   };
76
//
77
// The length of this array is computed by `RawHashSetLayout::alloc_size` below.
78
//
79
// Control bytes (`ctrl_t`) are bytes (collected into groups of a
80
// platform-specific size) that define the state of the corresponding slot in
81
// the slot array. Group manipulation is tightly optimized to be as efficient
82
// as possible: SSE and friends on x86, clever bit operations on other arches.
83
//
84
//      Group 1         Group 2        Group 3
85
// +---------------+---------------+---------------+
86
// | | | | | | | | | | | | | | | | | | | | | | | | |
87
// +---------------+---------------+---------------+
88
//
89
// Each control byte is either a special value for empty slots, deleted slots
90
// (sometimes called *tombstones*), or a special end-of-table marker used by
91
// iterators, or, if occupied, seven bits (H2) from the hash of the value in the
92
// corresponding slot.
93
//
94
// Storing control bytes in a separate array also has beneficial cache effects,
95
// since more logical slots will fit into a cache line.
96
//
97
// # Small Object Optimization (SOO)
98
//
99
// When the size/alignment of the value_type and the capacity of the table are
100
// small, we enable small object optimization and store the values inline in
101
// the raw_hash_set object. This optimization allows us to avoid
102
// allocation/deallocation as well as cache/dTLB misses.
103
//
104
// # Hashing
105
//
106
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
107
// `H1(hash(x))` is an index into `slots`, and essentially the starting point
108
// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
109
// objects that cannot possibly be the one we are looking for.
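//
// As an illustrative sketch (the actual helpers `H1` and `H2` are defined
// later in this file), for a 64-bit `size_t`:
//
//   size_t h  = hash(x);
//   size_t h1 = h;              // H1: reduced modulo the capacity to pick a start
//   h2_t   h2 = h >> (64 - 7);  // H2: the 7 most significant bits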
110
//
111
// # Table operations.
112
//
113
// The key operations are `insert`, `find`, and `erase`.
114
//
115
// Since `insert` and `erase` are implemented in terms of `find`, we describe
116
// `find` first. To `find` a value `x`, we compute `hash(x)`. From
117
// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
118
// group of slots in some interesting order.
119
//
120
// We now walk through these indices. At each index, we select the entire group
121
// starting with that index and extract potential candidates: occupied slots
122
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
123
// group, we stop and return an error. Each candidate slot `y` is compared with
124
// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
125
// next probe index. Tombstones effectively behave like full slots that never
126
// match the value we're looking for.
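//
// In pseudocode (illustrative only; the real implementation below is templated
// and heavily optimized):
//
//   probe_seq<Group::kWidth> seq(H1(hash(x)), capacity);
//   while (true) {
//     Group g(ctrl + seq.offset());
//     for (uint32_t i : g.Match(H2(hash(x)))) {
//       if (x == slots[seq.offset(i)]) return &slots[seq.offset(i)];
//     }
//     if (g.MaskEmpty()) return nullptr;  // hit an empty slot: not present
//     seq.next();
//   }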
127
//
128
// The `H2` bits ensure when we compare a slot to an object with `==`, we are
129
// likely to have actually found the object.  That is, the chance is low that
130
// `==` is called and returns `false`.  Thus, when we search for an object, we
131
// are unlikely to call `==` many times.  This likelihood can be analyzed as
132
// follows (assuming that H2 is a random enough hash function).
133
//
134
// Let's assume that there are `k` "wrong" objects that must be examined in a
135
// probe sequence.  For example, when doing a `find` on an object that is in the
136
// table, `k` is the number of objects between the start of the probe sequence
137
// and the final found object (not including the final found object).  The
138
// expected number of objects with an H2 match is then `k/128`.  Measurements
139
// and analysis indicate that even at high load factors, `k` is less than 32,
140
// meaning that the number of "false positive" comparisons we must perform is
141
// less than 1/4 per `find`.
142
143
// `insert` is implemented in terms of `unchecked_insert`, which inserts a
144
// value presumed to not be in the table (violating this requirement will cause
145
// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
146
// it, we construct a `probe_seq` once again, and use it to find the first
147
// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
148
// first such slot in the group and mark it as full with `x`'s H2.
149
//
150
// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
151
// perform a `find` to see if it's already present; if it is, we're done. If
152
// it's not, we may decide the table is getting overcrowded (i.e. the load
153
// factor is greater than 7/8 for big tables; tables smaller than one probing
154
// group use a max load factor of 1); in this case, we allocate a bigger array,
155
// `unchecked_insert` each element of the table into the new array (we know that
156
// no insertion here will insert an already-present value), and discard the old
157
// backing array. At this point, we may `unchecked_insert` the value `x`.
158
//
159
// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
160
// presents a viable, initialized slot pointee to the caller.
161
//
162
// `erase` is implemented in terms of `erase_at`, which takes an index to a
163
// slot. Given an offset, we simply create a tombstone and destroy its contents.
164
// If we can prove that the slot would not appear in a probe sequence, we can
165
// mark the slot as empty, instead. We can prove this by observing that if a
166
// group has any empty slots, it has never been full (assuming we never create
167
// an empty slot in a group with no empties, which this heuristic guarantees we
168
// never do) and find would stop at this group anyway (since it does not probe
169
// beyond groups with empties).
170
//
171
// `erase` is `erase_at` composed with `find`: if we
172
// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
173
// slot.
174
//
175
// To iterate, we simply traverse the array, skipping empty and deleted slots
176
// and stopping when we hit a `kSentinel`.
177
178
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
179
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
180
181
#include <algorithm>
182
#include <cassert>
183
#include <cmath>
184
#include <cstddef>
185
#include <cstdint>
186
#include <cstring>
187
#include <functional>
188
#include <initializer_list>
189
#include <iterator>
190
#include <limits>
191
#include <memory>
192
#include <tuple>
193
#include <type_traits>
194
#include <utility>
195
196
#include "absl/base/attributes.h"
197
#include "absl/base/casts.h"
198
#include "absl/base/config.h"
199
#include "absl/base/internal/endian.h"
200
#include "absl/base/internal/iterator_traits.h"
201
#include "absl/base/internal/raw_logging.h"
202
#include "absl/base/macros.h"
203
#include "absl/base/optimization.h"
204
#include "absl/base/options.h"
205
#include "absl/base/port.h"
206
#include "absl/base/prefetch.h"
207
#include "absl/container/internal/common.h"  // IWYU pragma: export // for node_handle
208
#include "absl/container/internal/common_policy_traits.h"
209
#include "absl/container/internal/compressed_tuple.h"
210
#include "absl/container/internal/container_memory.h"
211
#include "absl/container/internal/hash_function_defaults.h"
212
#include "absl/container/internal/hash_policy_traits.h"
213
#include "absl/container/internal/hashtable_control_bytes.h"
214
#include "absl/container/internal/hashtable_debug_hooks.h"
215
#include "absl/container/internal/hashtablez_sampler.h"
216
#include "absl/functional/function_ref.h"
217
#include "absl/hash/hash.h"
218
#include "absl/hash/internal/weakly_mixed_integer.h"
219
#include "absl/memory/memory.h"
220
#include "absl/meta/type_traits.h"
221
#include "absl/numeric/bits.h"
222
#include "absl/utility/utility.h"
223
224
namespace absl {
225
ABSL_NAMESPACE_BEGIN
226
namespace container_internal {
227
228
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
229
#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
230
#elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) ||   \
231
       defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
232
       defined(ABSL_HAVE_MEMORY_SANITIZER)) &&   \
233
    !defined(NDEBUG_SANITIZER)  // If defined, performance is important.
234
// When compiled in sanitizer mode, we add generation integers to the backing
235
// array and iterators. In the backing array, we store the generation between
236
// the control bytes and the slots. When iterators are dereferenced, we assert
237
// that the container has not been mutated in a way that could cause iterator
238
// invalidation since the iterator was initialized.
239
#define ABSL_SWISSTABLE_ENABLE_GENERATIONS
240
#endif
241
242
#ifdef ABSL_SWISSTABLE_ASSERT
243
#error ABSL_SWISSTABLE_ASSERT cannot be directly set
244
#else
245
// We use this macro for assertions that users may see when the table is in an
246
// invalid state that sanitizers may help diagnose.
247
#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
248
580
  assert((CONDITION) && "Try enabling sanitizers.")
249
#endif
250
251
// We use uint8_t so we don't need to worry about padding.
252
using GenerationType = uint8_t;
253
254
// A sentinel value for empty generations. Using 0 makes it easy to constexpr
255
// initialize an array of this value.
256
8
constexpr GenerationType SentinelEmptyGeneration() { return 0; }
257
258
8
constexpr GenerationType NextGeneration(GenerationType generation) {
259
8
  return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
260
8
}
261
262
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
263
constexpr bool SwisstableGenerationsEnabled() { return true; }
264
constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
265
#else
266
0
constexpr bool SwisstableGenerationsEnabled() { return false; }
267
14
constexpr size_t NumGenerationBytes() { return 0; }
268
#endif
269
270
// Returns true if we should assert that the table is not accessed after it has
271
// been destroyed or during the destruction of the table.
272
0
constexpr bool SwisstableAssertAccessToDestroyedTable() {
273
0
#ifndef NDEBUG
274
0
  return true;
275
0
#endif
276
0
  return SwisstableGenerationsEnabled();
277
0
}
278
279
template <typename AllocType>
280
void SwapAlloc(AllocType& lhs, AllocType& rhs,
281
               std::true_type /* propagate_on_container_swap */) {
282
  using std::swap;
283
  swap(lhs, rhs);
284
}
285
template <typename AllocType>
286
void SwapAlloc([[maybe_unused]] AllocType& lhs, [[maybe_unused]] AllocType& rhs,
287
               std::false_type /* propagate_on_container_swap */) {
288
  assert(lhs == rhs &&
289
         "It's UB to call swap with unequal non-propagating allocators.");
290
}
291
292
template <typename AllocType>
293
void CopyAlloc(AllocType& lhs, AllocType& rhs,
294
               std::true_type /* propagate_alloc */) {
295
  lhs = rhs;
296
}
297
template <typename AllocType>
298
void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}
299
300
// The state for a probe sequence.
301
//
302
// Currently, the sequence is a triangular progression of the form
303
//
304
//   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
305
//
306
// The use of `Width` ensures that each probe step does not overlap groups;
307
// the sequence effectively outputs the addresses of *groups* (although not
308
// necessarily aligned to any boundary). The `Group` machinery allows us
309
// to check an entire group with minimal branching.
310
//
311
// Wrapping around at `mask + 1` is important, but not for the obvious reason.
312
// As described above, the first few entries of the control byte array
313
// are mirrored at the end of the array, which `Group` will find and use
314
// for selecting candidates. However, when those candidates' slots are
315
// actually inspected, there are no corresponding slots for the cloned bytes,
316
// so we need to make sure we've treated those offsets as "wrapping around".
317
//
318
// It turns out that this probe sequence visits every group exactly once if the
319
// number of groups is a power of two, since (i^2+i)/2 is a bijection in
320
// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
321
template <size_t Width>
322
class probe_seq {
323
 public:
324
  // Creates a new probe sequence using `hash` as the initial value of the
325
  // sequence and `mask` (usually the capacity of the table) as the mask to
326
  // apply to each value in the progression.
327
26
  probe_seq(size_t hash, size_t mask) {
328
26
    ABSL_SWISSTABLE_ASSERT(((mask + 1) & mask) == 0 && "not a mask");
329
26
    mask_ = mask;
330
26
    offset_ = hash & mask_;
331
26
  }
332
333
  // The offset within the table, i.e., the value `p(i)` above.
334
52
  size_t offset() const { return offset_; }
335
40
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }
336
337
0
  void next() {
338
0
    index_ += Width;
339
0
    offset_ += index_;
340
0
    offset_ &= mask_;
341
0
  }
342
  // 0-based probe index, a multiple of `Width`.
343
12
  size_t index() const { return index_; }
344
345
 private:
346
  size_t mask_;
347
  size_t offset_;
348
  size_t index_ = 0;
349
};
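// Illustrative sketch (not part of this file): successive probe offsets for a
// 16-wide group and a table with capacity 63 (so `mask == 63`).
//
//   probe_seq<16> seq(/*hash=*/5, /*mask=*/63);
//   // seq.offset() == 5
//   seq.next();  // offset() == (5 + 16) & 63 == 21
//   seq.next();  // offset() == (21 + 32) & 63 == 53
//   seq.next();  // offset() == (53 + 48) & 63 == 37, and so on.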
350
351
template <class ContainerKey, class Hash, class Eq>
352
struct RequireUsableKey {
353
  template <class PassedKey, class... Args>
354
  std::pair<
355
      decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
356
      decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
357
                                         std::declval<const PassedKey&>()))>*
358
  operator()(const PassedKey&, const Args&...) const;
359
};
360
361
template <class E, class Policy, class Hash, class Eq, class... Ts>
362
struct IsDecomposable : std::false_type {};
363
364
template <class Policy, class Hash, class Eq, class... Ts>
365
struct IsDecomposable<
366
    absl::void_t<decltype(Policy::apply(
367
        RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
368
        std::declval<Ts>()...))>,
369
    Policy, Hash, Eq, Ts...> : std::true_type {};
370
371
ABSL_DLL extern ctrl_t kDefaultIterControl;
372
373
// We use these sentinel capacity values in debug mode to indicate different
374
// classes of bugs.
375
enum InvalidCapacity : size_t {
376
  kAboveMaxValidCapacity = ~size_t{} - 100,
377
  kReentrance,
378
  kDestroyed,
379
380
  // These two must be last because we use `>= kMovedFrom` to mean moved-from.
381
  kMovedFrom,
382
  kSelfMovedFrom,
383
};
384
385
// Returns a pointer to a control byte that can be used by default-constructed
386
// iterators. We don't expect this pointer to be dereferenced.
387
70
inline ctrl_t* DefaultIterControl() { return &kDefaultIterControl; }
388
389
// For use in SOO iterators.
390
// TODO(b/289225379): we could potentially get rid of this by adding an is_soo
391
// bit in iterators. This would add branches but reduce cache misses.
392
ABSL_DLL extern const ctrl_t kSooControl[2];
393
394
// Returns a pointer to a full byte followed by a sentinel byte.
395
30
inline ctrl_t* SooControl() {
396
  // Const must be cast away here; no uses of this function will actually write
397
  // to it because it is only used for SOO iterators.
398
30
  return const_cast<ctrl_t*>(kSooControl);
399
30
}
400
// Whether ctrl is from the SooControl array.
401
28
inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); }
402
403
// Returns a pointer to a generation to use for an empty hashtable.
404
GenerationType* EmptyGeneration();
405
406
// Returns whether `generation` is a generation for an empty hashtable that
407
// could be returned by EmptyGeneration().
408
0
inline bool IsEmptyGeneration(const GenerationType* generation) {
409
0
  return *generation == SentinelEmptyGeneration();
410
0
}
411
412
// We only allow a maximum of 1 SOO element, which makes the implementation
413
// much simpler. Complications with multiple SOO elements include:
414
// - Satisfying the guarantee that erasing one element doesn't invalidate
415
//   iterators to other elements means we would probably need actual SOO
416
//   control bytes.
417
// - In order to prevent user code from depending on iteration order for small
418
//   tables, we would need to randomize the iteration order somehow.
419
8
constexpr size_t SooCapacity() { return 1; }
420
// Sentinel type to indicate SOO CommonFields construction.
421
struct soo_tag_t {};
422
// Sentinel type to indicate SOO CommonFields construction with full size.
423
struct full_soo_tag_t {};
424
// Sentinel type to indicate non-SOO CommonFields construction.
425
struct non_soo_tag_t {};
426
// Sentinel value to indicate an uninitialized value explicitly.
427
struct uninitialized_tag_t {};
428
// Sentinel value to indicate creation of an empty table without a seed.
429
struct no_seed_empty_tag_t {};
430
431
// Per table hash salt. This gets mixed into H1 to randomize iteration order
432
// per-table.
433
// The seed is needed to ensure non-determinism of iteration order.
434
class PerTableSeed {
435
 public:
436
  // The number of bits in the seed.
437
  // It is big enough to ensure non-determinism of iteration order.
438
  // We store the seed inside a uint64_t together with size and other metadata.
439
  // Using 16 bits allows us to save one `and` instruction in H1 (we use
440
  // sign-extended move instead of mov+and).
441
  static constexpr size_t kBitCount = 16;
442
  static constexpr size_t kSignBit = uint64_t{1} << (kBitCount - 1);
443
444
  // Returns the seed for the table.
445
36
  size_t seed() const {
446
    // We use a sign-extended load to ensure high bits are non-zero.
447
36
    int16_t seed_signed = absl::bit_cast<int16_t>(seed_);
448
36
    auto seed_sign_extended =
449
36
        static_cast<std::make_signed_t<size_t>>(seed_signed);
450
36
    return absl::bit_cast<size_t>(seed_sign_extended);
451
36
  }
452
453
 private:
454
  friend class HashtableSize;
455
36
  explicit PerTableSeed(uint16_t seed) : seed_(seed) {
456
36
    ABSL_SWISSTABLE_ASSERT((seed & kSignBit) != 0 || seed == 0);
457
36
  }
458
459
  // The most significant bit of the seed is always 1 when there is a non-zero
460
  // seed. This way, when sign-extended the seed has non-zero high bits.
461
  const uint16_t seed_;
462
};
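// Illustrative sketch (not part of this file): because a non-zero stored seed
// always has its sign bit set, a sign-extending 16-bit load yields non-zero
// high bits without a separate mask instruction.
//
//   uint16_t stored = 0x8003;
//   auto wide = static_cast<std::make_signed_t<size_t>>(
//       absl::bit_cast<int16_t>(stored));
//   // On a 64-bit platform, absl::bit_cast<size_t>(wide) == 0xffffffffffff8003.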
463
464
// Returns next per-table seed.
465
2
inline uint16_t NextSeed() {
466
2
  static_assert(PerTableSeed::kBitCount == 16);
467
2
  thread_local uint16_t seed =
468
2
      static_cast<uint16_t>(reinterpret_cast<uintptr_t>(&seed));
469
2
  seed += uint16_t{0xad53};
470
2
  return seed;
471
2
}
472
473
// Stores the size of the table and additionally has
474
// 1) one bit that stores whether we have infoz.
475
// 2) PerTableSeed::kBitCount bits for the seed.
476
class HashtableSize {
477
 public:
478
  static constexpr size_t kSizeBitCount = 64 - PerTableSeed::kBitCount - 1;
479
480
0
  explicit HashtableSize(uninitialized_tag_t) {}
481
2
  explicit HashtableSize(no_seed_empty_tag_t) : data_(0) {}
482
0
  explicit HashtableSize(full_soo_tag_t) : data_(kSizeOneNoMetadata) {}
483
484
  // Returns actual size of the table.
485
30
  size_t size() const { return static_cast<size_t>(data_ >> kSizeShift); }
486
16
  void increment_size() { data_ += kSizeOneNoMetadata; }
487
0
  void increment_size(size_t size) {
488
0
    data_ += static_cast<uint64_t>(size) * kSizeOneNoMetadata;
489
0
  }
490
0
  void decrement_size() { data_ -= kSizeOneNoMetadata; }
491
  // Returns true if the table is empty.
492
4
  bool empty() const { return data_ < kSizeOneNoMetadata; }
493
  // Sets the size to zero, but keeps all the metadata bits.
494
0
  void set_size_to_zero_keep_metadata() { data_ = data_ & kMetadataMask; }
495
496
36
  PerTableSeed seed() const {
497
36
    return PerTableSeed(static_cast<size_t>(data_) & kSeedMask);
498
36
  }
499
500
2
  void generate_new_seed() { set_seed(NextSeed()); }
501
502
  // We need to use a constant seed when the table is sampled so that sampled
503
  // hashes use the same seed and can e.g. identify stuck bits accurately.
504
0
  void set_sampled_seed() { set_seed(PerTableSeed::kSignBit); }
505
506
0
  bool is_sampled_seed() const {
507
0
    return (data_ & kSeedMask) == PerTableSeed::kSignBit;
508
0
  }
509
510
  // Returns true if the table has infoz.
511
14
  bool has_infoz() const {
512
14
    return ABSL_PREDICT_FALSE((data_ & kHasInfozMask) != 0);
513
14
  }
514
515
  // Sets the has_infoz bit.
516
0
  void set_has_infoz() { data_ |= kHasInfozMask; }
517
518
0
  void set_no_seed_for_testing() { data_ &= ~kSeedMask; }
519
520
 private:
521
2
  void set_seed(uint16_t seed) {
522
2
    data_ = (data_ & ~kSeedMask) | (seed | PerTableSeed::kSignBit);
523
2
  }
524
  static constexpr size_t kSizeShift = 64 - kSizeBitCount;
525
  static constexpr uint64_t kSizeOneNoMetadata = uint64_t{1} << kSizeShift;
526
  static constexpr uint64_t kMetadataMask = kSizeOneNoMetadata - 1;
527
  static constexpr uint64_t kSeedMask =
528
      (uint64_t{1} << PerTableSeed::kBitCount) - 1;
529
  // The next bit after the seed.
530
  static constexpr uint64_t kHasInfozMask = kSeedMask + 1;
531
  uint64_t data_;
532
};
533
534
// H1 is just the low bits of the hash.
535
26
inline size_t H1(size_t hash) { return hash; }
536
537
// Extracts the H2 portion of a hash: the 7 most significant bits.
538
//
539
// These are used as an occupied control byte.
540
42
inline h2_t H2(size_t hash) { return hash >> (sizeof(size_t) * 8 - 7); }
541
542
// When there is an insertion with no reserved growth, we rehash with
543
// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
544
// constant divided by capacity ensures that inserting N elements is still O(N)
545
// in the average case. Using the constant 16 means that we expect to rehash ~8
546
// times more often than when generations are disabled. We are adding expected
547
// rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
548
// 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
549
0
inline size_t RehashProbabilityConstant() { return 16; }
550
551
class CommonFieldsGenerationInfoEnabled {
552
  // A sentinel value for reserved_growth_ indicating that we just ran out of
553
  // reserved growth on the last insertion. When reserve is called and then
554
  // insertions take place, reserved_growth_'s state machine is N, ..., 1,
555
  // kReservedGrowthJustRanOut, 0.
556
  static constexpr size_t kReservedGrowthJustRanOut =
557
      (std::numeric_limits<size_t>::max)();
558
559
 public:
560
  CommonFieldsGenerationInfoEnabled() = default;
561
  CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
562
      : reserved_growth_(that.reserved_growth_),
563
        reservation_size_(that.reservation_size_),
564
0
        generation_(that.generation_) {
565
0
    that.reserved_growth_ = 0;
566
0
    that.reservation_size_ = 0;
567
0
    that.generation_ = EmptyGeneration();
568
0
  }
569
  CommonFieldsGenerationInfoEnabled& operator=(
570
      CommonFieldsGenerationInfoEnabled&&) = default;
571
572
  // Whether we should rehash on insert in order to detect bugs of using invalid
573
  // references. We rehash on the first insertion after reserved_growth_ reaches
574
  // 0 after a call to reserve. We also do a rehash with low probability
575
  // whenever reserved_growth_ is zero.
576
  bool should_rehash_for_bug_detection_on_insert(size_t capacity) const;
577
  // Similar to above, except that we don't depend on reserved_growth_.
578
  bool should_rehash_for_bug_detection_on_move(size_t capacity) const;
579
0
  void maybe_increment_generation_on_insert() {
580
0
    if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
581
0
582
0
    if (reserved_growth_ > 0) {
583
0
      if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
584
0
    } else {
585
0
      increment_generation();
586
0
    }
587
0
  }
588
0
  void increment_generation() { *generation_ = NextGeneration(*generation_); }
589
0
  void reset_reserved_growth(size_t reservation, size_t size) {
590
0
    reserved_growth_ = reservation - size;
591
0
  }
592
0
  size_t reserved_growth() const { return reserved_growth_; }
593
0
  void set_reserved_growth(size_t r) { reserved_growth_ = r; }
594
0
  size_t reservation_size() const { return reservation_size_; }
595
0
  void set_reservation_size(size_t r) { reservation_size_ = r; }
596
0
  GenerationType generation() const { return *generation_; }
597
0
  void set_generation(GenerationType g) { *generation_ = g; }
598
0
  GenerationType* generation_ptr() const { return generation_; }
599
0
  void set_generation_ptr(GenerationType* g) { generation_ = g; }
600
601
 private:
602
  // The number of insertions remaining that are guaranteed to not rehash due to
603
  // a prior call to reserve. Note: we store reserved growth in addition to
604
  // reservation size because calls to erase() decrease size_ but don't decrease
605
  // reserved growth.
606
  size_t reserved_growth_ = 0;
607
  // The maximum argument to reserve() since the container was cleared. We need
608
  // to keep track of this, in addition to reserved growth, because we reset
609
  // reserved growth to this when erase(begin(), end()) is called.
610
  size_t reservation_size_ = 0;
611
  // Pointer to the generation counter, which is used to validate iterators and
612
  // is stored in the backing array between the control bytes and the slots.
613
  // Note that we can't store the generation inside the container itself and
614
  // keep a pointer to the container in the iterators because iterators must
615
  // remain valid when the container is moved.
616
  // Note: we could derive this pointer from the control pointer, but it makes
617
  // the code more complicated, and there's a benefit in having the sizes of
618
  // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
619
  // which is that tests are less likely to rely on the size remaining the same.
620
  GenerationType* generation_ = EmptyGeneration();
621
};
622
623
class CommonFieldsGenerationInfoDisabled {
624
 public:
625
  CommonFieldsGenerationInfoDisabled() = default;
626
  CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
627
      default;
628
  CommonFieldsGenerationInfoDisabled& operator=(
629
      CommonFieldsGenerationInfoDisabled&&) = default;
630
631
0
  bool should_rehash_for_bug_detection_on_insert(size_t) const { return false; }
632
0
  bool should_rehash_for_bug_detection_on_move(size_t) const { return false; }
633
16
  void maybe_increment_generation_on_insert() {}
634
0
  void increment_generation() {}
635
0
  void reset_reserved_growth(size_t, size_t) {}
636
0
  size_t reserved_growth() const { return 0; }
637
0
  void set_reserved_growth(size_t) {}
638
0
  size_t reservation_size() const { return 0; }
639
0
  void set_reservation_size(size_t) {}
640
8
  GenerationType generation() const { return 0; }
641
8
  void set_generation(GenerationType) {}
642
30
  GenerationType* generation_ptr() const { return nullptr; }
643
8
  void set_generation_ptr(GenerationType*) {}
644
};
645
646
class HashSetIteratorGenerationInfoEnabled {
647
 public:
648
  HashSetIteratorGenerationInfoEnabled() = default;
649
  explicit HashSetIteratorGenerationInfoEnabled(
650
      const GenerationType* generation_ptr)
651
0
      : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
652
653
0
  GenerationType generation() const { return generation_; }
654
0
  void reset_generation() { generation_ = *generation_ptr_; }
655
0
  const GenerationType* generation_ptr() const { return generation_ptr_; }
656
0
  void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
657
658
 private:
659
  const GenerationType* generation_ptr_ = EmptyGeneration();
660
  GenerationType generation_ = *generation_ptr_;
661
};
662
663
class HashSetIteratorGenerationInfoDisabled {
664
 public:
665
  HashSetIteratorGenerationInfoDisabled() = default;
666
30
  explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
667
668
42
  GenerationType generation() const { return 0; }
669
0
  void reset_generation() {}
670
70
  const GenerationType* generation_ptr() const { return nullptr; }
671
0
  void set_generation_ptr(const GenerationType*) {}
672
};
673
674
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
675
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
676
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
677
#else
678
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
679
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
680
#endif
681
682
// Stores information regarding the number of slots we can still fill
683
// without needing to rehash.
684
//
685
// We want to ensure sufficient number of empty slots in the table in order
686
// to keep probe sequences relatively short. An empty slot in the probe group
687
// is required to stop probing.
688
//
689
// Tombstones (kDeleted slots) are not included in the growth capacity,
690
// because we'd like to rehash when the table is filled with tombstones and/or
691
// full slots.
692
//
693
// GrowthInfo also stores a bit that encodes whether the table may have any
694
// deleted slots.
695
// Most of the tables (>95%) have no deleted slots, so some functions can
696
// be more efficient with this information.
697
//
698
// Callers can also force a rehash via the standard `rehash(0)`,
699
// which will recompute this value as a side-effect.
700
//
701
// See also `CapacityToGrowth()`.
702
class GrowthInfo {
703
 public:
704
  // Leaves data member uninitialized.
705
  GrowthInfo() = default;
706
707
  // Initializes the GrowthInfo assuming we can grow `growth_left` elements
708
  // and there are no kDeleted slots in the table.
709
6
  void InitGrowthLeftNoDeleted(size_t growth_left) {
710
6
    growth_left_info_ = growth_left;
711
6
  }
712
713
  // Overwrites single full slot with an empty slot.
714
0
  void OverwriteFullAsEmpty() { ++growth_left_info_; }
715
716
  // Overwrites single empty slot with a full slot.
717
8
  void OverwriteEmptyAsFull() {
718
8
    ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() > 0);
719
8
    --growth_left_info_;
720
8
  }
721
722
  // Overwrites several empty slots with full slots.
723
0
  void OverwriteManyEmptyAsFull(size_t count) {
724
0
    ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() >= count);
725
0
    growth_left_info_ -= count;
726
0
  }
727
728
  // Overwrites specified control element with full slot.
729
0
  void OverwriteControlAsFull(ctrl_t ctrl) {
730
0
    ABSL_SWISSTABLE_ASSERT(GetGrowthLeft() >=
731
0
                           static_cast<size_t>(IsEmpty(ctrl)));
732
0
    growth_left_info_ -= static_cast<size_t>(IsEmpty(ctrl));
733
0
  }
734
735
  // Overwrites single full slot with a deleted slot.
736
0
  void OverwriteFullAsDeleted() { growth_left_info_ |= kDeletedBit; }
737
738
  // Returns true if table satisfies two properties:
739
  // 1. Guaranteed to have no kDeleted slots.
740
  // 2. There is a place for at least one element to grow.
741
16
  bool HasNoDeletedAndGrowthLeft() const {
742
16
    return static_cast<std::make_signed_t<size_t>>(growth_left_info_) > 0;
743
16
  }
744
745
  // Returns true if the table satisfies two properties:
746
  // 1. Guaranteed to have no kDeleted slots.
747
  // 2. There is no growth left.
748
4
  bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
749
750
  // Returns true if GetGrowthLeft() == 0, but must be called only if
751
  // HasNoDeleted() is false. It is slightly more efficient.
752
0
  bool HasNoGrowthLeftAssumingMayHaveDeleted() const {
753
0
    ABSL_SWISSTABLE_ASSERT(!HasNoDeleted());
754
0
    return growth_left_info_ == kDeletedBit;
755
0
  }
756
757
  // Returns true if table guaranteed to have no kDeleted slots.
758
4
  bool HasNoDeleted() const {
759
4
    return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0;
760
4
  }
761
762
  // Returns the number of elements left to grow.
763
16
  size_t GetGrowthLeft() const { return growth_left_info_ & kGrowthLeftMask; }
764
765
 private:
766
  static constexpr size_t kGrowthLeftMask = ((~size_t{}) >> 1);
767
  static constexpr size_t kDeletedBit = ~kGrowthLeftMask;
768
  // Topmost bit signal whenever there are deleted slots.
769
  size_t growth_left_info_;
770
};
771
772
static_assert(sizeof(GrowthInfo) == sizeof(size_t), "");
773
static_assert(alignof(GrowthInfo) == alignof(size_t), "");
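// Illustrative sketch (not part of this file): the growth counter lives in the
// low bits and the "may have deleted slots" flag in the top bit, so one signed
// comparison answers both questions.
//
//   GrowthInfo gi;
//   gi.InitGrowthLeftNoDeleted(5);  // growth left == 5, no deleted slots
//   gi.OverwriteEmptyAsFull();      // growth left == 4
//   gi.OverwriteFullAsDeleted();    // sets the top bit
//   // gi.GetGrowthLeft() == 4, gi.HasNoDeleted() == false,
//   // gi.HasNoDeletedAndGrowthLeft() == false.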
774
775
// Returns whether `n` is a valid capacity (i.e., number of slots).
776
//
777
// A valid capacity is a non-zero integer `2^m - 1`.
778
62
constexpr bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
779
780
// Whether a table is small enough that we don't need to hash any keys.
781
264
constexpr bool IsSmallCapacity(size_t capacity) { return capacity <= 1; }
782
783
// Returns the number of "cloned control bytes".
784
//
785
// This is the number of control bytes that are present both at the beginning
786
// of the control byte array and at the end, such that we can create a
787
// `Group::kWidth`-width probe window starting from any control byte.
788
148
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
789
790
// Returns the number of control bytes including cloned.
791
138
constexpr size_t NumControlBytes(size_t capacity) {
792
138
  return IsSmallCapacity(capacity) ? 0 : capacity + 1 + NumClonedBytes();
793
138
}
794
795
// Computes the offset from the start of the backing allocation of control.
796
// infoz and growth_info are stored at the beginning of the backing array.
797
14
constexpr size_t ControlOffset(bool has_infoz) {
798
14
  return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
799
14
}
800
801
// Returns the offset of the next item after `offset` that is aligned to `align`
802
// bytes. `align` must be a power of two.
803
14
constexpr size_t AlignUpTo(size_t offset, size_t align) {
804
14
  return (offset + align - 1) & (~align + 1);
805
14
}
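// For example (illustrative only): `~align + 1` is the two's-complement
// negation of `align`, i.e. a mask that clears the low bits.
//
//   static_assert(AlignUpTo(13, 8) == 16);
//   static_assert(AlignUpTo(16, 8) == 16);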
806
807
// Helper class for computing offsets and allocation size of hash set fields.
808
class RawHashSetLayout {
809
 public:
810
  // TODO(b/413062340): maybe don't allocate growth info for capacity 1 tables.
811
  // Doing so may require additional branches/complexity so it might not be
812
  // worth it.
813
  explicit RawHashSetLayout(size_t capacity, size_t slot_size,
814
                            size_t slot_align, bool has_infoz)
815
14
      : control_offset_(ControlOffset(has_infoz)),
816
14
        generation_offset_(control_offset_ + NumControlBytes(capacity)),
817
        slot_offset_(
818
14
            AlignUpTo(generation_offset_ + NumGenerationBytes(), slot_align)),
819
14
        alloc_size_(slot_offset_ + capacity * slot_size) {
820
14
    ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
821
14
    ABSL_SWISSTABLE_ASSERT(
822
14
        slot_size <=
823
14
        ((std::numeric_limits<size_t>::max)() - slot_offset_) / capacity);
824
14
  }
825
826
  // Returns precomputed offset from the start of the backing allocation of
827
  // control.
828
14
  size_t control_offset() const { return control_offset_; }
829
830
  // Given the capacity of a table, computes the offset (from the start of the
831
  // backing allocation) of the generation counter (if it exists).
832
8
  size_t generation_offset() const { return generation_offset_; }
833
834
  // Given the capacity of a table, computes the offset (from the start of the
835
  // backing allocation) at which the slots begin.
836
8
  size_t slot_offset() const { return slot_offset_; }
837
838
  // Given the capacity of a table, computes the total size of the backing
839
  // array.
840
20
  size_t alloc_size() const { return alloc_size_; }
841
842
 private:
843
  size_t control_offset_;
844
  size_t generation_offset_;
845
  size_t slot_offset_;
846
  size_t alloc_size_;
847
};
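// Illustrative example (not part of this file), assuming a 64-bit platform, a
// 16-wide group, and generations disabled:
//
//   RawHashSetLayout layout(/*capacity=*/15, /*slot_size=*/8,
//                           /*slot_align=*/8, /*has_infoz=*/false);
//   // layout.control_offset()    == sizeof(GrowthInfo)  == 8
//   // layout.generation_offset() == 8 + (15 + 1 + 15)   == 39
//   // layout.slot_offset()       == AlignUpTo(39, 8)    == 40
//   // layout.alloc_size()        == 40 + 15 * 8         == 160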
848
849
struct HashtableFreeFunctionsAccess;
850
851
// This allows us to work around an uninitialized memory warning when
852
// constructing begin() iterators in empty hashtables.
853
template <typename T>
854
union MaybeInitializedPtr {
855
210
  T* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); }
856
16
  void set(T* ptr) { p = ptr; }
857
858
  T* p;
859
};
860
861
struct HeapPtrs {
862
  // The control bytes (and, also, a pointer near to the base of the backing
863
  // array).
864
  //
865
  // This contains `capacity + 1 + NumClonedBytes()` entries.
866
  //
867
  // Note that growth_info is stored immediately before this pointer.
868
  // May be uninitialized for small tables.
869
  MaybeInitializedPtr<ctrl_t> control;
870
871
  // The beginning of the slots, located at `SlotOffset()` bytes after
872
  // `control`. May be uninitialized for empty tables.
873
  // Note: we can't use `slots` because Qt defines "slots" as a macro.
874
  MaybeInitializedPtr<void> slot_array;
875
};
876
877
// Returns the maximum size of the SOO slot.
878
0
constexpr size_t MaxSooSlotSize() { return sizeof(HeapPtrs); }
879
880
// Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
881
// is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
882
union HeapOrSoo {
883
8
  MaybeInitializedPtr<ctrl_t>& control() {
884
8
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
885
8
  }
886
124
  MaybeInitializedPtr<ctrl_t> control() const {
887
124
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
888
124
  }
889
8
  MaybeInitializedPtr<void>& slot_array() {
890
8
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
891
8
  }
892
86
  MaybeInitializedPtr<void> slot_array() const {
893
86
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
894
86
  }
895
0
  void* get_soo_data() {
896
0
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
897
0
  }
898
0
  const void* get_soo_data() const {
899
0
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
900
0
  }
901
902
  HeapPtrs heap;
903
  unsigned char soo_data[MaxSooSlotSize()];
904
};
905
906
// Returns a reference to the GrowthInfo object stored immediately before
907
// `control`.
908
34
inline GrowthInfo& GetGrowthInfoFromControl(ctrl_t* control) {
909
34
  auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control) - 1;
910
34
  ABSL_SWISSTABLE_ASSERT(
911
34
      reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
912
34
  return *gl_ptr;
913
34
}
914
915
// CommonFields hold the fields in raw_hash_set that do not depend
916
// on template parameters. This allows us to conveniently pass all
917
// of this state to helper functions as a single argument.
918
class CommonFields : public CommonFieldsGenerationInfo {
919
 public:
920
  explicit CommonFields(soo_tag_t)
921
0
      : capacity_(SooCapacity()), size_(no_seed_empty_tag_t{}) {}
922
  explicit CommonFields(full_soo_tag_t)
923
0
      : capacity_(SooCapacity()), size_(full_soo_tag_t{}) {}
924
  explicit CommonFields(non_soo_tag_t)
925
2
      : capacity_(0), size_(no_seed_empty_tag_t{}) {}
926
  // For use in swapping.
927
0
  explicit CommonFields(uninitialized_tag_t) : size_(uninitialized_tag_t{}) {}
928
929
  // Not copyable
930
  CommonFields(const CommonFields&) = delete;
931
  CommonFields& operator=(const CommonFields&) = delete;
932
933
  // Copy with guarantee that it is not SOO.
934
  CommonFields(non_soo_tag_t, const CommonFields& that)
935
      : capacity_(that.capacity_),
936
        size_(that.size_),
937
0
        heap_or_soo_(that.heap_or_soo_) {
938
0
  }
939
940
  // Movable
941
  CommonFields(CommonFields&& that) = default;
942
  CommonFields& operator=(CommonFields&&) = default;
943
944
  template <bool kSooEnabled>
945
2
  static CommonFields CreateDefault() {
946
2
    return kSooEnabled ? CommonFields{soo_tag_t{}}
947
2
                       : CommonFields{non_soo_tag_t{}};
948
2
  }
949
950
  // The inline data for SOO is written on top of control_/slots_.
951
0
  const void* soo_data() const { return heap_or_soo_.get_soo_data(); }
952
0
  void* soo_data() { return heap_or_soo_.get_soo_data(); }
953
954
124
  ctrl_t* control() const {
955
124
    ABSL_SWISSTABLE_ASSERT(capacity() > 0);
956
    // Assume that the control bytes don't alias `this`.
957
124
    ctrl_t* ctrl = heap_or_soo_.control().get();
958
124
    [[maybe_unused]] size_t num_control_bytes = NumControlBytes(capacity());
959
124
    ABSL_ASSUME(reinterpret_cast<uintptr_t>(ctrl + num_control_bytes) <=
960
124
                    reinterpret_cast<uintptr_t>(this) ||
961
124
                reinterpret_cast<uintptr_t>(this + 1) <=
962
124
                    reinterpret_cast<uintptr_t>(ctrl));
963
124
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(ctrl);
964
124
  }
965
966
8
  void set_control(ctrl_t* c) { heap_or_soo_.control().set(c); }
967
968
  // Note: we can't use slots() because Qt defines "slots" as a macro.
969
86
  void* slot_array() const { return heap_or_soo_.slot_array().get(); }
970
0
  MaybeInitializedPtr<void> slots_union() const {
971
0
    return heap_or_soo_.slot_array();
972
0
  }
973
8
  void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
974
975
  // The number of filled slots.
976
30
  size_t size() const { return size_.size(); }
977
  // Sets the size to zero, but keeps the has_infoz bit and seed.
978
0
  void set_size_to_zero() { size_.set_size_to_zero_keep_metadata(); }
979
0
  void set_empty_soo() {
980
0
    AssertInSooMode();
981
0
    size_ = HashtableSize(no_seed_empty_tag_t{});
982
0
  }
983
0
  void set_full_soo() {
984
0
    AssertInSooMode();
985
0
    size_ = HashtableSize(full_soo_tag_t{});
986
0
  }
987
16
  void increment_size() {
988
16
    ABSL_SWISSTABLE_ASSERT(size() < capacity());
989
16
    size_.increment_size();
990
16
  }
991
0
  void increment_size(size_t n) {
992
0
    ABSL_SWISSTABLE_ASSERT(size() + n <= capacity());
993
0
    size_.increment_size(n);
994
0
  }
995
0
  void decrement_size() {
996
0
    ABSL_SWISSTABLE_ASSERT(!empty());
997
0
    size_.decrement_size();
998
0
  }
999
4
  bool empty() const { return size_.empty(); }
1000
1001
  // The seed used for the hash function.
1002
36
  PerTableSeed seed() const { return size_.seed(); }
1003
  // Generates a new seed for the hash function.
1004
  // The table will be invalidated if `!empty()` because hash is being changed.
1005
  // In such cases, we will need to rehash the table.
1006
2
  void generate_new_seed(bool has_infoz) {
1007
    // Note: we can't use has_infoz() here because we set has_infoz later than
1008
    // we generate the seed.
1009
2
    if (ABSL_PREDICT_FALSE(has_infoz)) {
1010
0
      size_.set_sampled_seed();
1011
0
      return;
1012
0
    }
1013
2
    size_.generate_new_seed();
1014
2
  }
1015
0
  void set_no_seed_for_testing() { size_.set_no_seed_for_testing(); }
1016
1017
  // The total number of available slots.
1018
572
  size_t capacity() const { return capacity_; }
1019
40
  void set_capacity(size_t c) {
1020
    // We allow setting above the max valid capacity for debugging purposes.
1021
40
    ABSL_SWISSTABLE_ASSERT(c == 0 || IsValidCapacity(c) ||
1022
40
                           c > kAboveMaxValidCapacity);
1023
40
    capacity_ = c;
1024
40
  }
1025
122
  bool is_small() const { return IsSmallCapacity(capacity_); }
1026
1027
  // The number of slots we can still fill without needing to rehash.
1028
  // This is stored in the heap allocation before the control bytes.
1029
  // TODO(b/289225379): experiment with moving growth_info back inline to
1030
  // increase room for SOO.
1031
4
  size_t growth_left() const { return growth_info().GetGrowthLeft(); }
1032
1033
28
  GrowthInfo& growth_info() {
1034
28
    ABSL_SWISSTABLE_ASSERT(!is_small());
1035
28
    return GetGrowthInfoFromControl(control());
1036
28
  }
1037
4
  GrowthInfo growth_info() const {
1038
4
    return const_cast<CommonFields*>(this)->growth_info();
1039
4
  }
1040
1041
14
  bool has_infoz() const { return size_.has_infoz(); }
1042
0
  void set_has_infoz() {
1043
0
    ABSL_SWISSTABLE_ASSERT(size_.is_sampled_seed());
1044
0
    size_.set_has_infoz();
1045
0
  }
1046
1047
0
  HashtablezInfoHandle* infoz_ptr() const {
1048
    // growth_info is stored before control bytes.
1049
0
    ABSL_SWISSTABLE_ASSERT(
1050
0
        reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
1051
0
    ABSL_SWISSTABLE_ASSERT(has_infoz());
1052
0
    return reinterpret_cast<HashtablezInfoHandle*>(
1053
0
        control() - ControlOffset(/*has_infoz=*/true));
1054
0
  }
1055
1056
14
  HashtablezInfoHandle infoz() {
1057
14
    return has_infoz() ? *infoz_ptr() : HashtablezInfoHandle();
1058
14
  }
1059
0
  void set_infoz(HashtablezInfoHandle infoz) {
1060
0
    ABSL_SWISSTABLE_ASSERT(has_infoz());
1061
0
    *infoz_ptr() = infoz;
1062
0
  }
1063
1064
0
  bool should_rehash_for_bug_detection_on_insert() const {
1065
0
    if constexpr (!SwisstableGenerationsEnabled()) {
1066
0
      return false;
1067
0
    }
1068
0
    // As an optimization, we avoid calling ShouldRehashForBugDetection if we
1069
0
    // will end up rehashing anyways.
1070
0
    if (growth_left() == 0) return false;
1071
0
    return CommonFieldsGenerationInfo::
1072
0
        should_rehash_for_bug_detection_on_insert(capacity());
1073
0
  }
1074
0
  bool should_rehash_for_bug_detection_on_move() const {
1075
0
    return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
1076
0
        capacity());
1077
0
  }
1078
0
  void reset_reserved_growth(size_t reservation) {
1079
0
    CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
1080
0
  }
1081
1082
  // The size of the backing array allocation.
1083
0
  size_t alloc_size(size_t slot_size, size_t slot_align) const {
1084
0
    return RawHashSetLayout(capacity(), slot_size, slot_align, has_infoz())
1085
0
        .alloc_size();
1086
0
  }
1087
1088
  // Move fields other than heap_or_soo_.
1089
0
  void move_non_heap_or_soo_fields(CommonFields& that) {
1090
0
    static_cast<CommonFieldsGenerationInfo&>(*this) =
1091
0
        std::move(static_cast<CommonFieldsGenerationInfo&>(that));
1092
0
    capacity_ = that.capacity_;
1093
0
    size_ = that.size_;
1094
0
  }
1095
1096
  // Returns the number of control bytes set to kDeleted. For testing only.
1097
0
  size_t TombstonesCount() const {
1098
0
    return static_cast<size_t>(
1099
0
        std::count(control(), control() + capacity(), ctrl_t::kDeleted));
1100
0
  }
1101
1102
  // Helper to enable sanitizer mode validation to protect against reentrant
1103
  // calls during element constructor/destructor.
1104
  template <typename F>
1105
16
  void RunWithReentrancyGuard(F f) {
1106
#ifdef NDEBUG
1107
    f();
1108
    return;
1109
#endif
1110
16
    const size_t cap = capacity();
1111
16
    set_capacity(InvalidCapacity::kReentrance);
1112
16
    f();
1113
16
    set_capacity(cap);
1114
16
  }
1115
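The guard above works by temporarily storing an invalid capacity marker while `f()` runs, so any reentrant access to the table during the callback trips the debug capacity checks; with NDEBUG the bookkeeping is skipped entirely. A standalone sketch of the same pattern (illustration only; the names below are hypothetical, not the library's):

#include <cstddef>

struct GuardedTable {
  static constexpr size_t kReentranceMarker = ~size_t{0};
  size_t capacity_ = 0;

  template <class F>
  void RunWithReentrancyGuard(F f) {
#ifdef NDEBUG
    f();  // Release builds: run the callback with no bookkeeping.
#else
    const size_t saved = capacity_;
    capacity_ = kReentranceMarker;  // Reentrant readers now see an invalid capacity.
    f();
    capacity_ = saved;
#endif
  }
};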
1116
 private:
1117
  // We store the has_infoz bit in the lowest bit of size_.
1118
0
  static constexpr size_t HasInfozShift() { return 1; }
1119
0
  static constexpr size_t HasInfozMask() {
1120
0
    return (size_t{1} << HasInfozShift()) - 1;
1121
0
  }
1122
1123
  // We can't assert that SOO is enabled because we don't have SooEnabled(), but
1124
  // we assert what we can.
1125
0
  void AssertInSooMode() const {
1126
0
    ABSL_SWISSTABLE_ASSERT(capacity() == SooCapacity());
1127
0
    ABSL_SWISSTABLE_ASSERT(!has_infoz());
1128
0
  }
1129
1130
  // The number of slots in the backing array. This is always 2^N-1 for an
1131
  // integer N. NOTE: we tried experimenting with compressing the capacity and
1132
  // storing it together with size_: (a) using 6 bits to store the corresponding
1133
  // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
1134
  // size_ and storing size in the low bits. Both of these experiments were
1135
  // regressions, presumably because we need capacity to do find operations.
1136
  size_t capacity_;
1137
1138
  // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
1139
  // encode the size in SOO case. We would be making size()/capacity() more
1140
  // expensive in order to have more SOO space.
1141
  HashtableSize size_;
1142
1143
  // Either the control/slots pointers or the SOO slot.
1144
  HeapOrSoo heap_or_soo_;
1145
};
1146
1147
template <class Policy, class Hash, class Eq, class Alloc>
1148
class raw_hash_set;
1149
1150
// Returns the next valid capacity after `n`.
1151
4
constexpr size_t NextCapacity(size_t n) {
1152
4
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n) || n == 0);
1153
4
  return n * 2 + 1;
1154
4
}
1155
1156
// Returns the previous valid capacity before `n`.
1157
0
constexpr size_t PreviousCapacity(size_t n) {
1158
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(n));
1159
0
  return n / 2;
1160
0
}
1161
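As an aside (illustration only, not part of the header): since valid capacities have the form 2^N - 1, these two helpers simply walk that sequence up and down.

static_assert(NextCapacity(1) == 3);
static_assert(NextCapacity(3) == 7);
static_assert(NextCapacity(7) == 15);
static_assert(PreviousCapacity(15) == 7);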
1162
// Applies the following mapping to every byte in the control array:
1163
//   * kDeleted -> kEmpty
1164
//   * kEmpty -> kEmpty
1165
//   * _ -> kDeleted
1166
// PRECONDITION:
1167
//   IsValidCapacity(capacity)
1168
//   ctrl[capacity] == ctrl_t::kSentinel
1169
//   ctrl[i] != ctrl_t::kSentinel for all i < capacity
1170
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
1171
1172
// Converts `n` into the next valid capacity, per `IsValidCapacity`.
1173
0
constexpr size_t NormalizeCapacity(size_t n) {
1174
0
  return n ? ~size_t{} >> countl_zero(n) : 1;
1175
0
}
1176
1177
// General notes on capacity/growth methods below:
1178
// - We use 27/32 as maximum load factor. For 16-wide groups, that gives an
1179
//   average of 2.5 empty slots per group.
1180
// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
1181
//   never need to probe (the whole table fits in one group) so we don't need a
1182
//   load factor less than 1.
1183
// - For (capacity+1) == Group::kWidth, growth is capacity - 1 since we need
1184
//   at least one empty slot for the probing algorithm.
1185
// - For (capacity+1) > Group::kWidth, growth is 27/32*capacity.
1186
1187
// Given `capacity`, applies the load factor; i.e., it returns the maximum
1188
// number of values we should put into the table before a resizing rehash.
1189
4
constexpr size_t CapacityToGrowth(size_t capacity) {
1190
4
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
1191
  // `capacity*27/32`
1192
4
  if (Group::kWidth == 8 && capacity == 7) {
1193
    // The formula does not work when capacity == 7.
1194
0
    return 6;
1195
0
  }
1196
4
  return capacity - capacity / 8 - capacity / 32;
1197
4
}
1198
1199
// Given `size`, "unapplies" the load factor to find how large the capacity
1200
// should be to stay within the load factor.
1201
//
1202
// For size == 0, returns 0.
1203
// For other values, returns the same as `NormalizeCapacity(size*32/27)`.
1204
0
constexpr size_t SizeToCapacity(size_t size) {
1205
0
  if (size == 0) {
1206
0
    return 0;
1207
0
  }
1208
  // The minimum possible capacity is NormalizeCapacity(size).
1209
  // Shifting right `~size_t{}` by `leading_zeros` yields
1210
  // NormalizeCapacity(size).
1211
0
  int leading_zeros = absl::countl_zero(size);
1212
0
  size_t next_capacity = ~size_t{} >> leading_zeros;
1213
0
  size_t max_size_for_next_capacity = CapacityToGrowth(next_capacity);
1214
  // Decrease shift if size is too big for the minimum capacity.
1215
0
  leading_zeros -= static_cast<int>(size > max_size_for_next_capacity);
1216
0
  return (~size_t{}) >> leading_zeros;
1217
0
}
1218
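A worked illustration (not part of the header; assumes a 64-bit size_t) of how the helpers above round-trip at the load-factor boundary:

static_assert(NormalizeCapacity(10) == 15);   // rounds up to the 2^N - 1 shape
static_assert(CapacityToGrowth(63) == 55);    // 63 - 63/8 - 63/32
static_assert(SizeToCapacity(55) == 63);      // 55 elements still fit in capacity 63
static_assert(SizeToCapacity(56) == 127);     // one more element forces the next capacity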
1219
template <class InputIter>
1220
size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
1221
                                     size_t bucket_count) {
1222
  if (bucket_count != 0) {
1223
    return bucket_count;
1224
  }
1225
  if (base_internal::IsAtLeastIterator<std::random_access_iterator_tag,
1226
                                       InputIter>()) {
1227
    return SizeToCapacity(static_cast<size_t>(std::distance(first, last)));
1228
  }
1229
  return 0;
1230
}
1231
1232
0
constexpr bool SwisstableDebugEnabled() {
1233
0
#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
1234
0
    ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
1235
0
  return true;
1236
0
#else
1237
0
  return false;
1238
0
#endif
1239
0
}
1240
1241
inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
1242
                         const GenerationType* generation_ptr,
1243
14
                         const char* operation) {
1244
14
  if (!SwisstableDebugEnabled()) return;
1245
  // `SwisstableDebugEnabled()` is also true for release builds with hardening
1246
  // enabled. To minimize their impact in those builds:
1247
  // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1248
  // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1249
  //   the chances that the hot paths will be inlined.
1250
14
  if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
1251
0
    ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
1252
0
  }
1253
14
  if (ABSL_PREDICT_FALSE(ctrl == DefaultIterControl())) {
1254
0
    ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
1255
0
                 operation);
1256
0
  }
1257
14
  if (SwisstableGenerationsEnabled()) {
1258
0
    if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
1259
0
      ABSL_RAW_LOG(FATAL,
1260
0
                   "%s called on invalid iterator. The table could have "
1261
0
                   "rehashed or moved since this iterator was initialized.",
1262
0
                   operation);
1263
0
    }
1264
0
    if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
1265
0
      ABSL_RAW_LOG(
1266
0
          FATAL,
1267
0
          "%s called on invalid iterator. The element was likely erased.",
1268
0
          operation);
1269
0
    }
1270
14
  } else {
1271
14
    if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
1272
0
      ABSL_RAW_LOG(
1273
0
          FATAL,
1274
0
          "%s called on invalid iterator. The element might have been erased "
1275
0
          "or the table might have rehashed. Consider running with "
1276
0
          "--config=asan to diagnose rehashing issues.",
1277
0
          operation);
1278
0
    }
1279
14
  }
1280
14
}
1281
1282
// Note that for comparisons, null/end iterators are valid.
1283
inline void AssertIsValidForComparison(const ctrl_t* ctrl,
1284
                                       GenerationType generation,
1285
28
                                       const GenerationType* generation_ptr) {
1286
28
  if (!SwisstableDebugEnabled()) return;
1287
28
  const bool ctrl_is_valid_for_comparison =
1288
28
      ctrl == nullptr || ctrl == DefaultIterControl() || IsFull(*ctrl);
1289
28
  if (SwisstableGenerationsEnabled()) {
1290
0
    if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
1291
0
      ABSL_RAW_LOG(FATAL,
1292
0
                   "Invalid iterator comparison. The table could have rehashed "
1293
0
                   "or moved since this iterator was initialized.");
1294
0
    }
1295
0
    if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
1296
0
      ABSL_RAW_LOG(
1297
0
          FATAL, "Invalid iterator comparison. The element was likely erased.");
1298
0
    }
1299
28
  } else {
1300
28
    ABSL_HARDENING_ASSERT_SLOW(
1301
28
        ctrl_is_valid_for_comparison &&
1302
28
        "Invalid iterator comparison. The element might have been erased or "
1303
28
        "the table might have rehashed. Consider running with --config=asan to "
1304
28
        "diagnose rehashing issues.");
1305
28
  }
1306
28
}
1307
1308
// If the two iterators come from the same container, then their pointers will
1309
// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b, or vice versa.
1310
// Note: we take slots by reference so that it's not UB if they're uninitialized
1311
// as long as we don't read them (when ctrl is null).
1312
inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
1313
                                      const ctrl_t* ctrl_b,
1314
                                      const void* const& slot_a,
1315
14
                                      const void* const& slot_b) {
1316
  // If either control byte is null, then we can't tell.
1317
14
  if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
1318
14
  const bool a_is_soo = IsSooControl(ctrl_a);
1319
14
  if (a_is_soo != IsSooControl(ctrl_b)) return false;
1320
14
  if (a_is_soo) return slot_a == slot_b;
1321
1322
14
  const void* low_slot = slot_a;
1323
14
  const void* hi_slot = slot_b;
1324
14
  if (ctrl_a > ctrl_b) {
1325
0
    std::swap(ctrl_a, ctrl_b);
1326
0
    std::swap(low_slot, hi_slot);
1327
0
  }
1328
14
  return ctrl_b < low_slot && low_slot <= hi_slot;
1329
14
}
1330
1331
// Asserts that two iterators come from the same container.
1332
// Note: we take slots by reference so that it's not UB if they're uninitialized
1333
// as long as we don't read them (when ctrl is null).
1334
inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
1335
                                const void* const& slot_a,
1336
                                const void* const& slot_b,
1337
                                const GenerationType* generation_ptr_a,
1338
14
                                const GenerationType* generation_ptr_b) {
1339
14
  if (!SwisstableDebugEnabled()) return;
1340
  // `SwisstableDebugEnabled()` is also true for release builds with hardening
1341
  // enabled. To minimize their impact in those builds:
1342
  // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1343
  // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1344
  //   the chances that the hot paths will be inlined.
1345
1346
  // fail_if(is_invalid, message) crashes when is_invalid is true and provides
1347
  // an error message based on `message`.
1348
14
  const auto fail_if = [](bool is_invalid, const char* message) {
1349
14
    if (ABSL_PREDICT_FALSE(is_invalid)) {
1350
0
      ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message);
1351
0
    }
1352
14
  };
1353
1354
14
  const bool a_is_default = ctrl_a == DefaultIterControl();
1355
14
  const bool b_is_default = ctrl_b == DefaultIterControl();
1356
14
  if (a_is_default && b_is_default) return;
1357
14
  fail_if(a_is_default != b_is_default,
1358
14
          "Comparing default-constructed hashtable iterator with a "
1359
14
          "non-default-constructed hashtable iterator.");
1360
1361
14
  if (SwisstableGenerationsEnabled()) {
1362
0
    if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
1363
0
    const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
1364
0
    const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
1365
0
    fail_if(a_is_empty != b_is_empty,
1366
0
            "Comparing an iterator from an empty hashtable with an iterator "
1367
0
            "from a non-empty hashtable.");
1368
0
    fail_if(a_is_empty && b_is_empty,
1369
0
            "Comparing iterators from different empty hashtables.");
1370
1371
0
    const bool a_is_end = ctrl_a == nullptr;
1372
0
    const bool b_is_end = ctrl_b == nullptr;
1373
0
    fail_if(a_is_end || b_is_end,
1374
0
            "Comparing iterator with an end() iterator from a different "
1375
0
            "hashtable.");
1376
0
    fail_if(true, "Comparing non-end() iterators from different hashtables.");
1377
14
  } else {
1378
14
    ABSL_HARDENING_ASSERT_SLOW(
1379
14
        AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
1380
14
        "Invalid iterator comparison. The iterators may be from different "
1381
14
        "containers or the container might have rehashed or moved. Consider "
1382
14
        "running with --config=asan to diagnose issues.");
1383
14
  }
1384
14
}
1385
1386
struct FindInfo {
1387
  size_t offset;
1388
  size_t probe_length;
1389
};
1390
1391
// Whether a table fits entirely into a probing group.
1392
// Arbitrary order of elements in such tables is correct.
1393
12
constexpr bool is_single_group(size_t capacity) {
1394
12
  return capacity <= Group::kWidth;
1395
12
}
1396
1397
// Begins a probing operation on `common.control`, using `hash`.
1398
26
inline probe_seq<Group::kWidth> probe_h1(size_t capacity, size_t h1) {
1399
26
  return probe_seq<Group::kWidth>(h1, capacity);
1400
26
}
1401
26
inline probe_seq<Group::kWidth> probe(size_t capacity, size_t hash) {
1402
26
  return probe_h1(capacity, H1(hash));
1403
26
}
1404
26
inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
1405
26
  return probe(common.capacity(), hash);
1406
26
}
1407
1408
constexpr size_t kProbedElementIndexSentinel = ~size_t{};
1409
1410
// Implementation detail of transfer_unprobed_elements_to_next_capacity_fn.
1411
// Tries to find the new index for an element whose hash corresponds to
1412
// `h1` for growth to the next capacity.
1413
// Returns kProbedElementIndexSentinel if full probing is required.
1414
//
1415
// If element is located in the first probing group in the table before growth,
1416
// returns one of two positions: `old_index` or `old_index + old_capacity + 1`.
1417
//
1418
// Otherwise, we will try to insert it into the first probe group of the new
1419
// table. We only attempt to do so if the first probe group is already
1420
// initialized.
1421
template <typename = void>
1422
inline size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
1423
                                            size_t old_capacity,
1424
                                            ctrl_t* new_ctrl,
1425
0
                                            size_t new_capacity) {
1426
0
  size_t index_diff = old_index - h1;
1427
  // The first probe group starts with h1 & capacity.
1428
  // All following groups start at (h1 + Group::kWidth * K) & capacity.
1429
  // We can find an index within the floating group as index_diff modulo
1430
  // Group::kWidth.
1431
  // Both old and new capacity are larger than Group::kWidth so we can avoid
1432
  // computing `& capacity`.
1433
0
  size_t in_floating_group_index = index_diff & (Group::kWidth - 1);
1434
  // By subtracting we will get the difference between the first probe group
1435
  // and the probe group corresponding to old_index.
1436
0
  index_diff -= in_floating_group_index;
1437
0
  if (ABSL_PREDICT_TRUE((index_diff & old_capacity) == 0)) {
1438
0
    size_t new_index = (h1 + in_floating_group_index) & new_capacity;
1439
0
    ABSL_ASSUME(new_index != kProbedElementIndexSentinel);
1440
0
    return new_index;
1441
0
  }
1442
0
  ABSL_SWISSTABLE_ASSERT(((old_index - h1) & old_capacity) >= Group::kWidth);
1443
  // Try to insert element into the first probe group.
1444
  // new_ctrl is not yet fully initialized so we can't use regular search via
1445
  // find_first_non_full.
1446
1447
  // We can search in the first probe group only if it is located in already
1448
  // initialized part of the table.
1449
0
  if (ABSL_PREDICT_FALSE((h1 & old_capacity) >= old_index)) {
1450
0
    return kProbedElementIndexSentinel;
1451
0
  }
1452
0
  size_t offset = h1 & new_capacity;
1453
0
  Group new_g(new_ctrl + offset);
1454
0
  if (auto mask = new_g.MaskNonFull(); ABSL_PREDICT_TRUE(mask)) {
1455
0
    size_t result = offset + mask.LowestBitSet();
1456
0
    ABSL_ASSUME(result != kProbedElementIndexSentinel);
1457
0
    return result;
1458
0
  }
1459
0
  return kProbedElementIndexSentinel;
1460
0
}
1461
1462
// An extern template for an inline function keeps the possibility of inlining.
1463
// When the compiler decides not to inline, no symbols will be added to the
1464
// corresponding translation unit.
1465
extern template size_t TryFindNewIndexWithoutProbing(size_t h1,
1466
                                                     size_t old_index,
1467
                                                     size_t old_capacity,
1468
                                                     ctrl_t* new_ctrl,
1469
                                                     size_t new_capacity);
1470
1471
// Sets sanitizer poisoning for the slot corresponding to the control byte being set.
1472
inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
1473
12
                                size_t slot_size) {
1474
12
  ABSL_SWISSTABLE_ASSERT(i < c.capacity());
1475
12
  auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
1476
12
  if (IsFull(h)) {
1477
12
    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
1478
12
  } else {
1479
0
    SanitizerPoisonMemoryRegion(slot_i, slot_size);
1480
0
  }
1481
12
}
1482
1483
// Sets `ctrl[i]` to `h`.
1484
//
1485
// Unlike setting it directly, this function will perform bounds checks and
1486
// mirror the value to the cloned tail if necessary.
1487
inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
1488
8
                    size_t slot_size) {
1489
8
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
1490
8
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
1491
8
  ctrl_t* ctrl = c.control();
1492
8
  ctrl[i] = h;
1493
8
  ctrl[((i - NumClonedBytes()) & c.capacity()) +
1494
8
       (NumClonedBytes() & c.capacity())] = h;
1495
8
}
1496
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
1497
8
inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
1498
8
  SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
1499
8
}
1500
1501
// Like SetCtrl, but in a single group table, we can save some operations when
1502
// setting the cloned control byte.
1503
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
1504
4
                                      size_t slot_size) {
1505
4
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
1506
4
  ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity()));
1507
4
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
1508
4
  ctrl_t* ctrl = c.control();
1509
4
  ctrl[i] = h;
1510
4
  ctrl[i + c.capacity() + 1] = h;
1511
4
}
1512
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
1513
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
1514
4
                                      size_t slot_size) {
1515
4
  SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
1516
4
}
1517
1518
// Like SetCtrl, but in a table with capacity >= Group::kWidth - 1,
1519
// we can save some operations when setting the cloned control byte.
1520
inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, ctrl_t h,
1521
0
                                size_t slot_size) {
1522
0
  ABSL_SWISSTABLE_ASSERT(c.capacity() >= Group::kWidth - 1);
1523
0
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
1524
0
  ctrl_t* ctrl = c.control();
1525
0
  ctrl[i] = h;
1526
0
  ctrl[((i - NumClonedBytes()) & c.capacity()) + NumClonedBytes()] = h;
1527
0
}
1528
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
1529
inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, h2_t h,
1530
0
                                size_t slot_size) {
1531
0
  SetCtrlInLargeTable(c, i, static_cast<ctrl_t>(h), slot_size);
1532
0
}
1533
1534
// growth_info (which is a size_t) is stored with the backing array.
1535
0
constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
1536
0
  return (std::max)(align_of_slot, alignof(GrowthInfo));
1537
0
}
1538
1539
// Returns the address of the ith slot in slots where each slot occupies
1540
// slot_size.
1541
2
inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
1542
2
  return static_cast<void*>(static_cast<char*>(slot_array) +
1543
2
                            (slot * slot_size));
1544
2
}
1545
1546
// Iterates over all full slots and calls `cb(const ctrl_t*, void*)`.
1547
// No insertion into the table is allowed during the `cb` call.
1548
// Erasure is allowed only for the element passed to the callback.
1549
// The table must not be in SOO mode.
1550
void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
1551
                          absl::FunctionRef<void(const ctrl_t*, void*)> cb);
1552
1553
template <typename CharAlloc>
1554
0
constexpr bool ShouldSampleHashtablezInfoForAlloc() {
1555
0
  // Folks with custom allocators often make unwarranted assumptions about the
1556
0
// behavior of their classes vis-a-vis trivial destructibility and what
1557
0
  // calls they will or won't make.  Avoid sampling for people with custom
1558
0
  // allocators to get us out of this mess.  This is not a hard guarantee but
1559
0
  // a workaround while we plan the exact guarantee we want to provide.
1560
0
  return std::is_same_v<CharAlloc, std::allocator<char>>;
1561
0
}
1562
1563
// Allocates `n` bytes for a backing array.
1564
template <size_t AlignOfBackingArray, typename Alloc>
1565
8
ABSL_ATTRIBUTE_NOINLINE void* AllocateBackingArray(void* alloc, size_t n) {
1566
8
  return Allocate<AlignOfBackingArray>(static_cast<Alloc*>(alloc), n);
1567
8
}
1568
1569
template <size_t AlignOfBackingArray, typename Alloc>
1570
ABSL_ATTRIBUTE_NOINLINE void DeallocateBackingArray(
1571
    void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
1572
6
    size_t slot_align, bool had_infoz) {
1573
6
  RawHashSetLayout layout(capacity, slot_size, slot_align, had_infoz);
1574
6
  void* backing_array = ctrl - layout.control_offset();
1575
  // Unpoison before returning the memory to the allocator.
1576
6
  SanitizerUnpoisonMemoryRegion(backing_array, layout.alloc_size());
1577
6
  Deallocate<AlignOfBackingArray>(static_cast<Alloc*>(alloc), backing_array,
1578
6
                                  layout.alloc_size());
1579
6
}
1580
1581
// PolicyFunctions bundles together some information for a particular
1582
// raw_hash_set<T, ...> instantiation. This information is passed to
1583
// type-erased functions that want to do small amounts of type-specific
1584
// work.
1585
struct PolicyFunctions {
1586
  uint32_t key_size;
1587
  uint32_t value_size;
1588
  uint32_t slot_size;
1589
  uint16_t slot_align;
1590
  bool soo_enabled;
1591
  bool is_hashtablez_eligible;
1592
1593
  // Returns the pointer to the hash function stored in the set.
1594
  void* (*hash_fn)(CommonFields& common);
1595
1596
  // Returns the hash of the pointed-to slot.
1597
  HashSlotFn hash_slot;
1598
1599
  // Transfers the contents of `count` slots from src_slot to dst_slot.
1600
// We use the ability to transfer several slots during single-group table growth.
1601
  void (*transfer_n)(void* set, void* dst_slot, void* src_slot, size_t count);
1602
1603
  // Returns the pointer to the CharAlloc stored in the set.
1604
  void* (*get_char_alloc)(CommonFields& common);
1605
1606
  // Allocates n bytes for the backing store for common.
1607
  void* (*alloc)(void* alloc, size_t n);
1608
1609
  // Deallocates the backing store from common.
1610
  void (*dealloc)(void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
1611
                  size_t slot_align, bool had_infoz);
1612
1613
  // Implementation detail of GrowToNextCapacity.
1614
  // Iterates over all full slots and transfers unprobed elements.
1615
  // Initializes the new control bytes except mirrored bytes and kSentinel.
1616
  // Caller must finish the initialization.
1617
  // All slots corresponding to the full control bytes are transferred.
1618
  // Probed elements are reported by `encode_probed_element` callback.
1619
// encode_probed_element may overwrite the old_ctrl buffer up to source_offset.
1620
  // Different encoding is used depending on the capacity of the table.
1621
  // See ProbedItem*Bytes classes for details.
1622
  void (*transfer_unprobed_elements_to_next_capacity)(
1623
      CommonFields& common, const ctrl_t* old_ctrl, void* old_slots,
1624
      // TODO(b/382423690): Try to use absl::FunctionRef here.
1625
      void* probed_storage,
1626
      void (*encode_probed_element)(void* probed_storage, h2_t h2,
1627
                                    size_t source_offset, size_t h1));
1628
1629
8
  uint8_t soo_capacity() const {
1630
8
    return static_cast<uint8_t>(soo_enabled ? SooCapacity() : 0);
1631
8
  }
1632
};
1633
1634
// Returns the maximum valid size for a table with 1-byte slots.
1635
// This function is a utility shared by MaxValidSize and IsAboveValidSize.
1636
// Template parameter is only used to enable testing.
1637
template <size_t kSizeOfSizeT = sizeof(size_t)>
1638
0
constexpr size_t MaxValidSizeFor1ByteSlot() {
1639
0
  if constexpr (kSizeOfSizeT == 8) {
1640
0
    return CapacityToGrowth(
1641
0
        static_cast<size_t>(uint64_t{1} << HashtableSize::kSizeBitCount) - 1);
1642
  } else {
1643
    static_assert(kSizeOfSizeT == 4);
1644
    return CapacityToGrowth((size_t{1} << (kSizeOfSizeT * 8 - 2)) - 1);
1645
  }
1646
0
}
1647
1648
// Returns the maximum valid size for a table with provided slot size.
1649
// Template parameter is only used to enable testing.
1650
template <size_t kSizeOfSizeT = sizeof(size_t)>
1651
0
constexpr size_t MaxValidSize(size_t slot_size) {
1652
0
  if constexpr (kSizeOfSizeT == 8) {
1653
    // For small slot sizes we are limited by HashtableSize::kSizeBitCount.
1654
0
    if (slot_size < size_t{1} << (64 - HashtableSize::kSizeBitCount)) {
1655
0
      return MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
1656
0
    }
1657
0
    return (size_t{1} << (kSizeOfSizeT * 8 - 2)) / slot_size;
1658
  } else {
1659
    return MaxValidSizeFor1ByteSlot<kSizeOfSizeT>() / slot_size;
1660
  }
1661
0
}
1662
1663
// Returns true if size is larger than the maximum valid size.
1664
// It is an optimization to avoid the division operation in the common case.
1665
// Template parameter is only used to enable testing.
1666
template <size_t kSizeOfSizeT = sizeof(size_t)>
1667
0
constexpr bool IsAboveValidSize(size_t size, size_t slot_size) {
1668
0
  if constexpr (kSizeOfSizeT == 8) {
1669
    // For small slot sizes we are limited by HashtableSize::kSizeBitCount.
1670
0
    if (ABSL_PREDICT_TRUE(slot_size <
1671
0
                          (size_t{1} << (64 - HashtableSize::kSizeBitCount)))) {
1672
0
      return size > MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
1673
0
    }
1674
0
    return size > MaxValidSize<kSizeOfSizeT>(slot_size);
1675
  } else {
1676
    return uint64_t{size} * slot_size >
1677
           MaxValidSizeFor1ByteSlot<kSizeOfSizeT>();
1678
  }
1679
0
}
1680
1681
// Returns the index of the SOO slot when growing from SOO to non-SOO in a
1682
// single group. See also InitializeSmallControlBytesAfterSoo(). It's important
1683
// to use index 1 so that when resizing from capacity 1 to 3, we can still have
1684
// random iteration order between the first two inserted elements.
1685
// I.e. it allows inserting the second element at either index 0 or 2.
1686
2
constexpr size_t SooSlotIndex() { return 1; }
1687
1688
// Maximum capacity for the small-table-after-SOO algorithm.
1689
// Note that typical size after SOO is 3, but we allow up to 7.
1690
// Allowing up to 16 would require an additional store that can be avoided.
1691
0
constexpr size_t MaxSmallAfterSooCapacity() { return 7; }
1692
1693
// Type erased version of raw_hash_set::reserve.
1694
// Requires: `new_size > policy.soo_capacity`.
1695
void ReserveTableToFitNewSize(CommonFields& common,
1696
                              const PolicyFunctions& policy, size_t new_size);
1697
1698
// Resizes empty non-allocated table to the next valid capacity after
1699
// `bucket_count`. Requires:
1700
//   1. `c.capacity() == policy.soo_capacity`.
1701
//   2. `c.empty()`.
1702
//   3. `new_size > policy.soo_capacity`.
1703
// An attempt will be made to sample the table.
1704
void ReserveEmptyNonAllocatedTableToFitBucketCount(
1705
    CommonFields& common, const PolicyFunctions& policy, size_t bucket_count);
1706
1707
// Type erased version of raw_hash_set::rehash.
1708
void Rehash(CommonFields& common, const PolicyFunctions& policy, size_t n);
1709
1710
// Type erased version of copy constructor.
1711
void Copy(CommonFields& common, const PolicyFunctions& policy,
1712
          const CommonFields& other,
1713
          absl::FunctionRef<void(void*, const void*)> copy_fn);
1714
1715
// Returns the optimal size for memcpy when transferring the SOO slot.
1716
// Otherwise, returns the optimal size for memcpy SOO slot transfer
1717
// to SooSlotIndex().
1718
// At the destination we are allowed to copy up to twice as many bytes,
1719
// because there is at least one more slot after SooSlotIndex().
1720
// The result must not exceed MaxSooSlotSize().
1721
// Some of the cases are merged to minimize the number of function
1722
// instantiations.
1723
constexpr size_t OptimalMemcpySizeForSooSlotTransfer(
1724
0
    size_t slot_size, size_t max_soo_slot_size = MaxSooSlotSize()) {
1725
0
  static_assert(MaxSooSlotSize() >= 8, "unexpectedly small SOO slot size");
1726
0
  if (slot_size == 1) {
1727
0
    return 1;
1728
0
  }
1729
0
  if (slot_size <= 3) {
1730
0
    return 4;
1731
0
  }
1732
0
  // We are merging 4 and 8 into one case because we expect them to be the
1733
0
  // hottest cases. Copying 8 bytes is as fast on common architectures.
1734
0
  if (slot_size <= 8) {
1735
0
    return 8;
1736
0
  }
1737
0
  if (max_soo_slot_size <= 16) {
1738
0
    return max_soo_slot_size;
1739
0
  }
1740
0
  if (slot_size <= 16) {
1741
0
    return 16;
1742
0
  }
1743
0
  if (max_soo_slot_size <= 24) {
1744
0
    return max_soo_slot_size;
1745
0
  }
1746
0
  static_assert(MaxSooSlotSize() <= 24, "unexpectedly large SOO slot size");
1747
0
  return 24;
1748
0
}
1749
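For illustration only (passing max_soo_slot_size explicitly so the values do not depend on the actual MaxSooSlotSize()), the bucketing above works out to:

static_assert(OptimalMemcpySizeForSooSlotTransfer(1, 24) == 1);
static_assert(OptimalMemcpySizeForSooSlotTransfer(3, 24) == 4);
static_assert(OptimalMemcpySizeForSooSlotTransfer(8, 24) == 8);
static_assert(OptimalMemcpySizeForSooSlotTransfer(12, 24) == 16);
static_assert(OptimalMemcpySizeForSooSlotTransfer(20, 24) == 24);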
1750
// Resizes an SOO table to NextCapacity(SooCapacity()) and prepares an insert for
1751
// the given new_hash. Returns the offset of the new element.
1752
// All possible template combinations are defined in the .cc file to improve
1753
// compilation time.
1754
template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
1755
size_t GrowSooTableToNextCapacityAndPrepareInsert(
1756
    CommonFields& common, const PolicyFunctions& policy,
1757
    absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling);
1758
1759
// PrepareInsert for small tables (is_small()==true).
1760
// Returns the new control and the new slot.
1761
// The hash is only computed if the table is sampled or grew to a large size
1762
// (is_small()==false).
1763
std::pair<ctrl_t*, void*> PrepareInsertSmallNonSoo(
1764
    CommonFields& common, const PolicyFunctions& policy,
1765
    absl::FunctionRef<size_t(size_t)> get_hash);
1766
1767
// Resizes a table with allocated slots and changes the table seed.
1768
// Tables with SOO enabled must have capacity > policy.soo_capacity.
1769
// No sampling will be performed since table is already allocated.
1770
void ResizeAllocatedTableWithSeedChange(CommonFields& common,
1771
                                        const PolicyFunctions& policy,
1772
                                        size_t new_capacity);
1773
1774
// ClearBackingArray clears the backing array, either modifying it in place,
1775
// or creating a new one based on the value of "reuse".
1776
// REQUIRES: c.capacity > 0
1777
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
1778
                       void* alloc, bool reuse, bool soo_enabled);
1779
1780
// Type-erased versions of raw_hash_set::erase_meta_only_{small,large}.
1781
void EraseMetaOnlySmall(CommonFields& c, bool soo_enabled, size_t slot_size);
1782
void EraseMetaOnlyLarge(CommonFields& c, const ctrl_t* ctrl, size_t slot_size);
1783
1784
// For trivially relocatable types we use memcpy directly. This allows us to
1785
// share the same function body for raw_hash_set instantiations that have the
1786
// same slot size as long as they are relocatable.
1787
// A separate function for relocating a single slot would cause significant binary bloat.
1788
template <size_t SizeOfSlot>
1789
ABSL_ATTRIBUTE_NOINLINE void TransferNRelocatable(void*, void* dst, void* src,
1790
6
                                                  size_t count) {
1791
  // TODO(b/382423690): Experiment with making specializations for power-of-2 and
1792
  // non-power-of-2. This would require passing the size of the slot.
1793
6
  memcpy(dst, src, SizeOfSlot * count);
1794
6
}
1795
1796
// Returns a pointer to `common`. This is used to implement type erased
1797
// raw_hash_set::get_hash_ref_fn and raw_hash_set::get_alloc_ref_fn for the
1798
// empty class cases.
1799
void* GetRefForEmptyClass(CommonFields& common);
1800
1801
// Given the hash of a value not currently in the table and the first empty
1802
// slot in the probe sequence, finds a viable slot index to insert it at.
1803
//
1804
// In case there's no space left, the table can be resized or rehashed
1805
// (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash).
1806
//
1807
// In the case of absence of deleted slots and positive growth_left, the element
1808
// can be inserted in the provided `target` position.
1809
//
1810
// When the table has deleted slots (according to GrowthInfo), the target
1811
// position will be searched one more time using `find_first_non_full`.
1812
//
1813
// REQUIRES: `!common.is_small()`.
1814
// REQUIRES: At least one non-full slot available.
1815
// REQUIRES: `target` is a valid empty position to insert.
1816
size_t PrepareInsertLarge(CommonFields& common, const PolicyFunctions& policy,
1817
                          size_t hash, FindInfo target);
1818
1819
// Same as above, but with generations enabled, we may end up changing the seed,
1820
// which means we need to be able to recompute the hash.
1821
size_t PrepareInsertLargeGenerationsEnabled(
1822
    CommonFields& common, const PolicyFunctions& policy, size_t hash,
1823
    FindInfo target, absl::FunctionRef<size_t(size_t)> recompute_hash);
1824
1825
// A SwissTable.
1826
//
1827
// Policy: a policy defines how to perform different operations on
1828
// the slots of the hashtable (see hash_policy_traits.h for the full interface
1829
// of policy).
1830
//
1831
// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
1832
// functor should accept a key and return a size_t hash. For best performance
1833
// it is important that the hash function provides high entropy across all bits
1834
// of the hash.
1835
//
1836
// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
1837
// should accept two (of possibly different type) keys and return a bool: true
1838
// if they are equal, false if they are not. If two keys compare equal, then
1839
// their hash values as defined by Hash MUST be equal.
1840
//
1841
// Allocator: an Allocator
1842
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
1843
// the storage of the hashtable will be allocated and the elements will be
1844
// constructed and destroyed.
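As a concrete (purely illustrative, hypothetical) example of the Hash/Eq contract above: a case-insensitive string functor pair. Both are transparent, and keys that compare equal under Eq hash identically because the hash is computed over the lower-cased text.

#include <string>
#include "absl/hash/hash.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"

struct CaseInsensitiveHash {
  using is_transparent = void;
  size_t operator()(absl::string_view s) const {
    std::string lowered(s);
    absl::AsciiStrToLower(&lowered);  // hash the canonical (lower-cased) form
    return absl::Hash<std::string>{}(lowered);
  }
};

struct CaseInsensitiveEq {
  using is_transparent = void;
  bool operator()(absl::string_view a, absl::string_view b) const {
    return absl::EqualsIgnoreCase(a, b);  // equal keys share a lower-cased form
  }
};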
1845
template <class Policy, class Hash, class Eq, class Alloc>
1846
class raw_hash_set {
1847
  using PolicyTraits = hash_policy_traits<Policy>;
1848
  using KeyArgImpl =
1849
      KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
1850
1851
 public:
1852
  using init_type = typename PolicyTraits::init_type;
1853
  using key_type = typename PolicyTraits::key_type;
1854
  using allocator_type = Alloc;
1855
  using size_type = size_t;
1856
  using difference_type = ptrdiff_t;
1857
  using hasher = Hash;
1858
  using key_equal = Eq;
1859
  using policy_type = Policy;
1860
  using value_type = typename PolicyTraits::value_type;
1861
  using reference = value_type&;
1862
  using const_reference = const value_type&;
1863
  using pointer = typename absl::allocator_traits<
1864
      allocator_type>::template rebind_traits<value_type>::pointer;
1865
  using const_pointer = typename absl::allocator_traits<
1866
      allocator_type>::template rebind_traits<value_type>::const_pointer;
1867
1868
 private:
1869
  // Alias used for heterogeneous lookup functions.
1870
  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
1871
  // `key_type` otherwise. It permits template argument deduction on `K` for the
1872
  // transparent case.
1873
  template <class K>
1874
  using key_arg = typename KeyArgImpl::template type<K, key_type>;
1875
1876
  using slot_type = typename PolicyTraits::slot_type;
1877
1878
  constexpr static bool kIsDefaultHash =
1879
      std::is_same_v<hasher, absl::Hash<key_type>> ||
1880
      std::is_same_v<hasher, absl::container_internal::StringHash>;
1881
1882
  // TODO(b/289225379): we could add extra SOO space inside raw_hash_set
1883
  // after CommonFields to allow inlining larger slot_types (e.g. std::string),
1884
  // but it's a bit complicated if we want to support incomplete mapped_type in
1885
  // flat_hash_map. We could potentially do this for flat_hash_set and for an
1886
  // allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic
1887
  // types, strings, cords, and pairs/tuples of allowlisted types.
1888
0
  constexpr static bool SooEnabled() {
1889
0
    return PolicyTraits::soo_enabled() &&
1890
0
           sizeof(slot_type) <= sizeof(HeapOrSoo) &&
1891
0
           alignof(slot_type) <= alignof(HeapOrSoo);
1892
0
  }
1893
1894
0
  constexpr static size_t DefaultCapacity() {
1895
0
    return SooEnabled() ? SooCapacity() : 0;
1896
0
  }
1897
1898
  // Whether `size` fits in the SOO capacity of this table.
1899
184
  bool fits_in_soo(size_t size) const {
1900
184
    return SooEnabled() && size <= SooCapacity();
1901
184
  }
1902
  // Whether this table is in SOO mode or non-SOO mode.
1903
184
  bool is_soo() const { return fits_in_soo(capacity()); }
1904
0
  bool is_full_soo() const { return is_soo() && !empty(); }
1905
1906
66
  bool is_small() const { return common().is_small(); }
1907
1908
  // Give an early error when key_type is not hashable/eq.
1909
  auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
1910
  auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
1911
1912
  // Try to be helpful when the hasher returns an unreasonable type.
1913
  using key_hash_result =
1914
      absl::remove_cvref_t<decltype(std::declval<const Hash&>()(
1915
          std::declval<const key_type&>()))>;
1916
  static_assert(sizeof(key_hash_result) >= sizeof(size_t),
1917
                "`Hash::operator()` should return a `size_t`");
1918
1919
  using AllocTraits = absl::allocator_traits<allocator_type>;
1920
  using SlotAlloc = typename absl::allocator_traits<
1921
      allocator_type>::template rebind_alloc<slot_type>;
1922
  // People are often sloppy with the exact type of their allocator (sometimes
1923
  // it has an extra const or is missing the pair, but rebinds made it work
1924
  // anyway).
1925
  using CharAlloc =
1926
      typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
1927
  using SlotAllocTraits = typename absl::allocator_traits<
1928
      allocator_type>::template rebind_traits<slot_type>;
1929
1930
  static_assert(std::is_lvalue_reference<reference>::value,
1931
                "Policy::element() must return a reference");
1932
1933
  // An enabler for insert(T&&): T must be convertible to init_type or be the
1934
  // same as [cv] value_type [ref].
1935
  template <class T>
1936
  using Insertable = absl::disjunction<
1937
      std::is_same<absl::remove_cvref_t<reference>, absl::remove_cvref_t<T>>,
1938
      std::is_convertible<T, init_type>>;
1939
  template <class T>
1940
  using IsNotBitField = std::is_pointer<T*>;
1941
1942
  // RequiresNotInit is a workaround for gcc prior to 7.1.
1943
  // See https://godbolt.org/g/Y4xsUh.
1944
  template <class T>
1945
  using RequiresNotInit =
1946
      typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
1947
1948
  template <class... Ts>
1949
  using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
1950
1951
  template <class T>
1952
  using IsDecomposableAndInsertable =
1953
      IsDecomposable<std::enable_if_t<Insertable<T>::value, T>>;
1954
1955
  // Evaluates to true if an assignment from the given type would require the
1956
  // source object to remain alive for the life of the element.
1957
  template <class U>
1958
  using IsLifetimeBoundAssignmentFrom = std::conditional_t<
1959
      policy_trait_element_is_owner<Policy>::value, std::false_type,
1960
      type_traits_internal::IsLifetimeBoundAssignment<init_type, U>>;
1961
1962
 public:
1963
  static_assert(std::is_same<pointer, value_type*>::value,
1964
                "Allocators with custom pointer types are not supported");
1965
  static_assert(std::is_same<const_pointer, const value_type*>::value,
1966
                "Allocators with custom pointer types are not supported");
1967
1968
  class iterator : private HashSetIteratorGenerationInfo {
1969
    friend class raw_hash_set;
1970
    friend struct HashtableFreeFunctionsAccess;
1971
1972
   public:
1973
    using iterator_category = std::forward_iterator_tag;
1974
    using value_type = typename raw_hash_set::value_type;
1975
    using reference =
1976
        absl::conditional_t<PolicyTraits::constant_iterators::value,
1977
                            const value_type&, value_type&>;
1978
    using pointer = absl::remove_reference_t<reference>*;
1979
    using difference_type = typename raw_hash_set::difference_type;
1980
1981
    iterator() {}
1982
1983
    // PRECONDITION: not an end() iterator.
1984
14
    reference operator*() const {
1985
14
      assert_is_full("operator*()");
1986
14
      return unchecked_deref();
1987
14
    }
1988
1989
    // PRECONDITION: not an end() iterator.
1990
0
    pointer operator->() const {
1991
0
      assert_is_full("operator->");
1992
0
      return &operator*();
1993
0
    }
1994
1995
    // PRECONDITION: not an end() iterator.
1996
0
    iterator& operator++() {
1997
0
      assert_is_full("operator++");
1998
0
      ++ctrl_;
1999
0
      ++slot_;
2000
0
      skip_empty_or_deleted();
2001
0
      if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
2002
0
      return *this;
2003
0
    }
2004
    // PRECONDITION: not an end() iterator.
2005
    iterator operator++(int) {
2006
      auto tmp = *this;
2007
      ++*this;
2008
      return tmp;
2009
    }
2010
2011
14
    friend bool operator==(const iterator& a, const iterator& b) {
2012
14
      AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
2013
14
      AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
2014
14
      AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
2015
14
                          a.generation_ptr(), b.generation_ptr());
2016
14
      return a.ctrl_ == b.ctrl_;
2017
14
    }
2018
0
    friend bool operator!=(const iterator& a, const iterator& b) {
2019
0
      return !(a == b);
2020
0
    }
2021
2022
   private:
2023
    iterator(ctrl_t* ctrl, slot_type* slot,
2024
             const GenerationType* generation_ptr)
2025
30
        : HashSetIteratorGenerationInfo(generation_ptr),
2026
30
          ctrl_(ctrl),
2027
30
          slot_(slot) {
2028
      // This assumption helps the compiler know that any non-end iterator is
2029
      // not equal to any end iterator.
2030
30
      ABSL_ASSUME(ctrl != nullptr);
2031
30
    }
2032
    // This constructor is used in begin() to avoid an MSan
2033
    // use-of-uninitialized-value error. Delegating from this constructor to
2034
    // the previous one doesn't avoid the error.
2035
    iterator(ctrl_t* ctrl, MaybeInitializedPtr<void> slot,
2036
             const GenerationType* generation_ptr)
2037
0
        : HashSetIteratorGenerationInfo(generation_ptr),
2038
0
          ctrl_(ctrl),
2039
0
          slot_(to_slot(slot.get())) {
2040
      // This assumption helps the compiler know that any non-end iterator is
2041
      // not equal to any end iterator.
2042
0
      ABSL_ASSUME(ctrl != nullptr);
2043
0
    }
2044
    // For end() iterators.
2045
    explicit iterator(const GenerationType* generation_ptr)
2046
0
        : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
2047
2048
14
    void assert_is_full(const char* operation) const {
2049
14
      AssertIsFull(ctrl_, generation(), generation_ptr(), operation);
2050
14
    }
2051
2052
    // Fixes up `ctrl_` to point to a full or sentinel by advancing `ctrl_` and
2053
    // `slot_` until they reach one.
2054
0
    void skip_empty_or_deleted() {
2055
0
      while (IsEmptyOrDeleted(*ctrl_)) {
2056
0
        ++ctrl_;
2057
0
        ++slot_;
2058
0
      }
2059
0
    }
2060
2061
    // An equality check which skips ABSL Hardening iterator invalidation
2062
    // checks.
2063
    // Should be used when the lifetimes of the iterators are well-enough
2064
    // understood to prove that they cannot be invalid.
2065
    bool unchecked_equals(const iterator& b) const {
2066
      return ctrl_ == b.control();
2067
    }
2068
2069
    // Dereferences the iterator without ABSL Hardening iterator invalidation
2070
    // checks.
2071
14
    reference unchecked_deref() const { return PolicyTraits::element(slot_); }
2072
2073
0
    ctrl_t* control() const { return ctrl_; }
2074
16
    slot_type* slot() const { return slot_; }
2075
2076
    // We use DefaultIterControl() for default-constructed iterators so that
2077
    // they can be distinguished from end iterators, which have nullptr ctrl_.
2078
    ctrl_t* ctrl_ = DefaultIterControl();
2079
    // To avoid uninitialized member warnings, put slot_ in an anonymous union.
2080
    // The member is not initialized on singleton and end iterators.
2081
    union {
2082
      slot_type* slot_;
2083
    };
2084
  };
2085
2086
  class const_iterator {
2087
    friend class raw_hash_set;
2088
    template <class Container, typename Enabler>
2089
    friend struct absl::container_internal::hashtable_debug_internal::
2090
        HashtableDebugAccess;
2091
2092
   public:
2093
    using iterator_category = typename iterator::iterator_category;
2094
    using value_type = typename raw_hash_set::value_type;
2095
    using reference = typename raw_hash_set::const_reference;
2096
    using pointer = typename raw_hash_set::const_pointer;
2097
    using difference_type = typename raw_hash_set::difference_type;
2098
2099
    const_iterator() = default;
2100
    // Implicit construction from iterator.
2101
28
    const_iterator(iterator i) : inner_(std::move(i)) {}  // NOLINT
2102
2103
    reference operator*() const { return *inner_; }
2104
    pointer operator->() const { return inner_.operator->(); }
2105
2106
    const_iterator& operator++() {
2107
      ++inner_;
2108
      return *this;
2109
    }
2110
    const_iterator operator++(int) { return inner_++; }
2111
2112
14
    friend bool operator==(const const_iterator& a, const const_iterator& b) {
2113
14
      return a.inner_ == b.inner_;
2114
14
    }
2115
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
2116
      return !(a == b);
2117
    }
2118
2119
   private:
2120
    const_iterator(const ctrl_t* ctrl, const slot_type* slot,
2121
                   const GenerationType* gen)
2122
        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
2123
    }
2124
    bool unchecked_equals(const const_iterator& b) const {
2125
      return inner_.unchecked_equals(b.inner_);
2126
    }
2127
    ctrl_t* control() const { return inner_.control(); }
2128
    slot_type* slot() const { return inner_.slot(); }
2129
2130
    iterator inner_;
2131
  };
2132
2133
  using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
2134
  using insert_return_type = InsertReturnType<iterator, node_type>;
2135
2136
  // Note: can't use `= default` due to non-default noexcept (causes
2137
  // problems for some compilers). NOLINTNEXTLINE
2138
  raw_hash_set() noexcept(
2139
      std::is_nothrow_default_constructible<hasher>::value &&
2140
      std::is_nothrow_default_constructible<key_equal>::value &&
2141
2
      std::is_nothrow_default_constructible<allocator_type>::value) {}
2142
2143
  explicit raw_hash_set(
2144
      size_t bucket_count, const hasher& hash = hasher(),
2145
      const key_equal& eq = key_equal(),
2146
      const allocator_type& alloc = allocator_type())
2147
      : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
2148
                  alloc) {
2149
    if (bucket_count > DefaultCapacity()) {
2150
      ReserveEmptyNonAllocatedTableToFitBucketCount(
2151
          common(), GetPolicyFunctions(), bucket_count);
2152
    }
2153
  }
2154
2155
  raw_hash_set(size_t bucket_count, const hasher& hash,
2156
               const allocator_type& alloc)
2157
      : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
2158
2159
  raw_hash_set(size_t bucket_count, const allocator_type& alloc)
2160
      : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
2161
2162
  explicit raw_hash_set(const allocator_type& alloc)
2163
      : raw_hash_set(0, hasher(), key_equal(), alloc) {}
2164
2165
  template <class InputIter>
2166
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
2167
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
2168
               const allocator_type& alloc = allocator_type())
2169
      : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
2170
                     hash, eq, alloc) {
2171
    insert(first, last);
2172
  }
2173
2174
  template <class InputIter>
2175
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
2176
               const hasher& hash, const allocator_type& alloc)
2177
      : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
2178
2179
  template <class InputIter>
2180
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
2181
               const allocator_type& alloc)
2182
      : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
2183
2184
  template <class InputIter>
2185
  raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
2186
      : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
2187
2188
  // Instead of accepting std::initializer_list<value_type> as the first
2189
  // argument like std::unordered_set<value_type> does, we have two overloads
2190
  // that accept std::initializer_list<T> and std::initializer_list<init_type>.
2191
  // This is advantageous for performance.
2192
  //
2193
  //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
2194
  //   // copies the strings into the set.
2195
  //   std::unordered_set<std::string> s = {"abc", "def"};
2196
  //
2197
  //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
2198
  //   // constructs the strings in place in the set.
2199
  //   absl::flat_hash_set<std::string> s = {"abc", "def"};
2200
  //
2201
  // The same trick is used in insert().
2202
  //
2203
  // The enabler is necessary to prevent this constructor from triggering where
2204
  // the copy constructor is meant to be called.
2205
  //
2206
  //   absl::flat_hash_set<int> a, b{a};
2207
  //
2208
  // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
2209
  template <class T, RequiresNotInit<T> = 0,
2210
            std::enable_if_t<Insertable<T>::value, int> = 0>
2211
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
2212
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
2213
               const allocator_type& alloc = allocator_type())
2214
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
2215
2216
  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
2217
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
2218
               const allocator_type& alloc = allocator_type())
2219
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
2220
2221
  template <class T, RequiresNotInit<T> = 0,
2222
            std::enable_if_t<Insertable<T>::value, int> = 0>
2223
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
2224
               const hasher& hash, const allocator_type& alloc)
2225
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
2226
2227
  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
2228
               const hasher& hash, const allocator_type& alloc)
2229
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
2230
2231
  template <class T, RequiresNotInit<T> = 0,
2232
            std::enable_if_t<Insertable<T>::value, int> = 0>
2233
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
2234
               const allocator_type& alloc)
2235
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
2236
2237
  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
2238
               const allocator_type& alloc)
2239
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
2240
2241
  template <class T, RequiresNotInit<T> = 0,
2242
            std::enable_if_t<Insertable<T>::value, int> = 0>
2243
  raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
2244
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
2245
2246
  raw_hash_set(std::initializer_list<init_type> init,
2247
               const allocator_type& alloc)
2248
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
2249
2250
  raw_hash_set(const raw_hash_set& that)
2251
      : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
2252
                               allocator_type(that.char_alloc_ref()))) {}
2253
2254
  raw_hash_set(const raw_hash_set& that, const allocator_type& a)
2255
      : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
2256
    that.AssertNotDebugCapacity();
2257
    if (that.empty()) return;
2258
    Copy(common(), GetPolicyFunctions(), that.common(),
2259
         [this](void* dst, const void* src) {
2260
           // TODO(b/413598253): type erase for trivially copyable types via
2261
           // PolicyTraits.
2262
           construct(to_slot(dst),
2263
                     PolicyTraits::element(
2264
                         static_cast<slot_type*>(const_cast<void*>(src))));
2265
         });
2266
  }
2267
2268
  ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
2269
      std::is_nothrow_copy_constructible<hasher>::value &&
2270
      std::is_nothrow_copy_constructible<key_equal>::value &&
2271
      std::is_nothrow_copy_constructible<allocator_type>::value)
2272
      :  // Hash, equality and allocator are copied instead of moved because
2273
         // `that` must be left valid. If Hash is std::function<Key>, moving it
2274
         // would create a nullptr functor that cannot be called.
2275
         // Note: we avoid using exchange for better generated code.
2276
        settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
2277
                      ? std::move(that.common())
2278
                      : CommonFields{full_soo_tag_t{}},
2279
                  that.hash_ref(), that.eq_ref(), that.char_alloc_ref()) {
2280
    if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
2281
      transfer(soo_slot(), that.soo_slot());
2282
    }
2283
    that.common() = CommonFields::CreateDefault<SooEnabled()>();
2284
    annotate_for_bug_detection_on_move(that);
2285
  }
2286
2287
  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
2288
      : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
2289
                  that.eq_ref(), a) {
2290
    if (CharAlloc(a) == that.char_alloc_ref()) {
2291
      swap_common(that);
2292
      annotate_for_bug_detection_on_move(that);
2293
    } else {
2294
      move_elements_allocs_unequal(std::move(that));
2295
    }
2296
  }
2297
2298
  raw_hash_set& operator=(const raw_hash_set& that) {
2299
    that.AssertNotDebugCapacity();
2300
    if (ABSL_PREDICT_FALSE(this == &that)) return *this;
2301
    constexpr bool propagate_alloc =
2302
        AllocTraits::propagate_on_container_copy_assignment::value;
2303
    // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
2304
    // is an exact match for that.size(). If this->capacity() is too big, then
2305
    // it would make iteration very slow to reuse the allocation. Maybe we can
2306
    // do the same heuristic as clear() and reuse if it's small enough.
2307
    allocator_type alloc(propagate_alloc ? that.char_alloc_ref()
2308
                                         : char_alloc_ref());
2309
    raw_hash_set tmp(that, alloc);
2310
    // NOLINTNEXTLINE: not returning *this for performance.
2311
    return assign_impl<propagate_alloc>(std::move(tmp));
2312
  }
2313
2314
  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
2315
      AllocTraits::is_always_equal::value &&
2316
      std::is_nothrow_move_assignable<hasher>::value &&
2317
      std::is_nothrow_move_assignable<key_equal>::value) {
2318
    // TODO(sbenza): We should only use the operations from the noexcept clause
2319
    // to make sure we actually adhere to that contract.
2320
    // NOLINTNEXTLINE: not returning *this for performance.
2321
    return move_assign(
2322
        std::move(that),
2323
        typename AllocTraits::propagate_on_container_move_assignment());
2324
  }
2325
2326
0
  ~raw_hash_set() {
2327
0
    destructor_impl();
2328
0
    if constexpr (SwisstableAssertAccessToDestroyedTable()) {
2329
0
      common().set_capacity(InvalidCapacity::kDestroyed);
2330
0
    }
2331
0
  }
2332
2333
0
  iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
2334
0
    if (ABSL_PREDICT_FALSE(empty())) return end();
2335
0
    if (is_small()) return single_iterator();
2336
0
    iterator it = {control(), common().slots_union(),
2337
0
                   common().generation_ptr()};
2338
0
    it.skip_empty_or_deleted();
2339
0
    ABSL_SWISSTABLE_ASSERT(IsFull(*it.control()));
2340
0
    return it;
2341
0
  }
2342
0
  iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
2343
0
    AssertNotDebugCapacity();
2344
0
    return iterator(common().generation_ptr());
2345
0
  }
2346
2347
  const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2348
    return const_cast<raw_hash_set*>(this)->begin();
2349
  }
2350
  const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2351
    return const_cast<raw_hash_set*>(this)->end();
2352
  }
2353
  const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2354
    return begin();
2355
  }
2356
  const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
2357
2358
4
  bool empty() const { return !size(); }
2359
4
  size_t size() const {
2360
4
    AssertNotDebugCapacity();
2361
4
    return common().size();
2362
4
  }
2363
218
  size_t capacity() const {
2364
218
    const size_t cap = common().capacity();
2365
    // Compiler complains when using functions in ASSUME, so use a local variable.
2366
218
    [[maybe_unused]] static constexpr size_t kDefaultCapacity =
2367
218
        DefaultCapacity();
2368
218
    ABSL_ASSUME(cap >= kDefaultCapacity);
2369
218
    return cap;
2370
218
  }
2371
  size_t max_size() const { return MaxValidSize(sizeof(slot_type)); }
2372
2373
0
  ABSL_ATTRIBUTE_REINITIALIZES void clear() {
2374
0
    if (SwisstableGenerationsEnabled() &&
2375
0
        capacity() >= InvalidCapacity::kMovedFrom) {
2376
0
      common().set_capacity(DefaultCapacity());
2377
0
    }
2378
0
    AssertNotDebugCapacity();
2379
    // Iterating over this container is O(bucket_count()). When bucket_count()
2380
    // is much greater than size(), iteration becomes prohibitively expensive.
2381
    // For clear() it is more important to reuse the allocated array when the
2382
    // container is small because allocation takes a long time
2383
    // compared to destruction of the elements of the container. So we pick the
2384
    // largest bucket_count() threshold for which iteration is still fast and
2385
    // past that we simply deallocate the array.
2386
0
    const size_t cap = capacity();
2387
0
    if (cap == 0) {
2388
      // Already guaranteed to be empty, so nothing to do.
2389
0
    } else if (is_small()) {
2390
0
      if (!empty()) {
2391
0
        destroy(single_slot());
2392
0
        decrement_small_size();
2393
0
      }
2394
0
    } else {
2395
0
      destroy_slots();
2396
0
      clear_backing_array(/*reuse=*/cap < 128);
2397
0
    }
2398
0
    common().set_reserved_growth(0);
2399
0
    common().set_reservation_size(0);
2400
0
  }
2401
2402
  // This overload kicks in when the argument is an rvalue of insertable and
2403
  // decomposable type other than init_type.
2404
  //
2405
  //   flat_hash_map<std::string, int> m;
2406
  //   m.insert(std::make_pair("abc", 42));
2407
  template <class T,
2408
            int = std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
2409
                                       IsNotBitField<T>::value &&
2410
                                       !IsLifetimeBoundAssignmentFrom<T>::value,
2411
                                   int>()>
2412
16
  std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2413
16
    return emplace(std::forward<T>(value));
2414
16
  }
2415
2416
  template <class T, int&...,
2417
            std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
2418
                                 IsNotBitField<T>::value &&
2419
                                 IsLifetimeBoundAssignmentFrom<T>::value,
2420
                             int> = 0>
2421
  std::pair<iterator, bool> insert(
2422
      T&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
2423
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2424
    return this->template insert<T, 0>(std::forward<T>(value));
2425
  }
2426
2427
  // This overload kicks in when the argument is a bitfield or an lvalue of
2428
  // insertable and decomposable type.
2429
  //
2430
  //   union { int n : 1; };
2431
  //   flat_hash_set<int> s;
2432
  //   s.insert(n);
2433
  //
2434
  //   flat_hash_set<std::string> s;
2435
  //   const char* p = "hello";
2436
  //   s.insert(p);
2437
  //
2438
  template <class T, int = std::enable_if_t<
2439
                         IsDecomposableAndInsertable<const T&>::value &&
2440
                             !IsLifetimeBoundAssignmentFrom<const T&>::value,
2441
                         int>()>
2442
  std::pair<iterator, bool> insert(const T& value)
2443
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2444
    return emplace(value);
2445
  }
2446
  template <class T, int&...,
2447
            std::enable_if_t<IsDecomposableAndInsertable<const T&>::value &&
2448
                                 IsLifetimeBoundAssignmentFrom<const T&>::value,
2449
                             int> = 0>
2450
  std::pair<iterator, bool> insert(
2451
      const T& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
2452
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2453
    return this->template insert<T, 0>(value);
2454
  }
2455
2456
  // This overload kicks in when the argument is an rvalue of init_type. Its
2457
  // purpose is to handle brace-init-list arguments.
2458
  //
2459
  //   flat_hash_map<std::string, int> s;
2460
  //   s.insert({"abc", 42});
2461
  std::pair<iterator, bool> insert(init_type&& value)
2462
      ABSL_ATTRIBUTE_LIFETIME_BOUND
2463
#if __cplusplus >= 202002L
2464
    requires(!IsLifetimeBoundAssignmentFrom<init_type>::value)
2465
#endif
2466
0
  {
2467
0
    return emplace(std::move(value));
2468
0
  }
2469
#if __cplusplus >= 202002L
2470
  std::pair<iterator, bool> insert(
2471
      init_type&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
2472
      ABSL_ATTRIBUTE_LIFETIME_BOUND
2473
    requires(IsLifetimeBoundAssignmentFrom<init_type>::value)
2474
  {
2475
    return emplace(std::move(value));
2476
  }
2477
#endif
2478
2479
  template <class T,
2480
            int = std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
2481
                                       IsNotBitField<T>::value &&
2482
                                       !IsLifetimeBoundAssignmentFrom<T>::value,
2483
                                   int>()>
2484
  iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2485
    return insert(std::forward<T>(value)).first;
2486
  }
2487
  template <class T, int&...,
2488
            std::enable_if_t<IsDecomposableAndInsertable<T>::value &&
2489
                                 IsNotBitField<T>::value &&
2490
                                 IsLifetimeBoundAssignmentFrom<T>::value,
2491
                             int> = 0>
2492
  iterator insert(const_iterator hint,
2493
                  T&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
2494
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2495
    return this->template insert<T, 0>(hint, std::forward<T>(value));
2496
  }
2497
2498
  template <class T, std::enable_if_t<
2499
                         IsDecomposableAndInsertable<const T&>::value, int> = 0>
2500
  iterator insert(const_iterator,
2501
                  const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2502
    return insert(value).first;
2503
  }
2504
2505
  iterator insert(const_iterator,
2506
                  init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2507
    return insert(std::move(value)).first;
2508
  }
2509
2510
  template <class InputIt>
2511
  void insert(InputIt first, InputIt last) {
2512
    for (; first != last; ++first) emplace(*first);
2513
  }
2514
2515
  template <class T, RequiresNotInit<T> = 0,
2516
            std::enable_if_t<Insertable<const T&>::value, int> = 0>
2517
  void insert(std::initializer_list<T> ilist) {
2518
    insert(ilist.begin(), ilist.end());
2519
  }
2520
2521
  void insert(std::initializer_list<init_type> ilist) {
2522
    insert(ilist.begin(), ilist.end());
2523
  }
2524
2525
  insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2526
    if (!node) return {end(), false, node_type()};
2527
    const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
2528
    auto res = PolicyTraits::apply(
2529
        InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
2530
        elem);
2531
    if (res.second) {
2532
      CommonAccess::Reset(&node);
2533
      return {res.first, true, node_type()};
2534
    } else {
2535
      return {res.first, false, std::move(node)};
2536
    }
2537
  }
2538
2539
  iterator insert(const_iterator,
2540
                  node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2541
    auto res = insert(std::move(node));
2542
    node = std::move(res.node);
2543
    return res.position;
2544
  }
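  // Illustrative sketch (added for exposition, not part of the measured
  // source): how the node-handle overloads above might be used to move an
  // element between tables without copying it. Assumes an
  // absl::flat_hash_set<std::string>.
  //
  //   absl::flat_hash_set<std::string> src = {"a", "b"};
  //   absl::flat_hash_set<std::string> dst = {"b"};
  //   auto node = src.extract("a");            // detach "a" from src
  //   auto res = dst.insert(std::move(node));  // res.inserted == true
  //   // Had "a" already been in dst, res.node would hand the element back.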
2545
2546
  // This overload kicks in if we can deduce the key from args. This enables us
2547
  // to avoid constructing value_type if an entry with the same key already
2548
  // exists.
2549
  //
2550
  // For example:
2551
  //
2552
  //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
2553
  //   // Creates no std::string copies and makes no heap allocations.
2554
  //   m.emplace("abc", "xyz");
2555
  template <class... Args,
2556
            std::enable_if_t<IsDecomposable<Args...>::value, int> = 0>
2557
  std::pair<iterator, bool> emplace(Args&&... args)
2558
16
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2559
16
    return PolicyTraits::apply(EmplaceDecomposable{*this},
2560
16
                               std::forward<Args>(args)...);
2561
16
  }
_ZN4absl18container_internal12raw_hash_setINS0_17FlatHashMapPolicyINSt3__117basic_string_viewIcNS3_11char_traitsIcEEEEPNS_15CommandLineFlagEEENS0_10StringHashENS0_8StringEqENS3_9allocatorINS3_4pairIKS7_S9_EEEEE7emplaceIJSG_ETnNS3_9enable_ifIXsr14IsDecomposableIDpT_EE5valueEiE4typeELi0EEENSE_INSI_8iteratorEbEEDpOSL_
Line
Count
Source
2558
16
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2559
16
    return PolicyTraits::apply(EmplaceDecomposable{*this},
2560
16
                               std::forward<Args>(args)...);
2561
16
  }
Unexecuted instantiation: _ZN4absl18container_internal12raw_hash_setINS0_17FlatHashMapPolicyINSt3__117basic_string_viewIcNS3_11char_traitsIcEEEEPNS_15CommandLineFlagEEENS0_10StringHashENS0_8StringEqENS3_9allocatorINS3_4pairIKS7_S9_EEEEE7emplaceIJNSE_IS7_S9_EEETnNS3_9enable_ifIXsr14IsDecomposableIDpT_EE5valueEiE4typeELi0EEENSE_INSI_8iteratorEbEEDpOSM_
2562
2563
  // This overload kicks in if we cannot deduce the key from args. It constructs
2564
  // value_type unconditionally and then either moves it into the table or
2565
  // destroys.
2566
  template <class... Args,
2567
            std::enable_if_t<!IsDecomposable<Args...>::value, int> = 0>
2568
  std::pair<iterator, bool> emplace(Args&&... args)
2569
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2570
    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
2571
    slot_type* slot = to_slot(&raw);
2572
2573
    construct(slot, std::forward<Args>(args)...);
2574
    const auto& elem = PolicyTraits::element(slot);
2575
    return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
2576
  }
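  // Illustrative sketch (added for exposition, not part of the measured
  // source): a call where the key cannot be deduced from the arguments, so
  // the overload above builds the value first and destroys it if the key is
  // already present. Assumes an absl::flat_hash_set<std::string>.
  //
  //   absl::flat_hash_set<std::string> s;
  //   s.emplace(5, 'a');  // constructs std::string(5, 'a') and inserts "aaaaa"
  //   s.emplace(5, 'a');  // constructs a temporary, finds "aaaaa", destroys it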
2577
2578
  template <class... Args>
2579
  iterator emplace_hint(const_iterator,
2580
                        Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2581
    return emplace(std::forward<Args>(args)...).first;
2582
  }
2583
2584
  // Extension API: support for lazy emplace.
2585
  //
2586
  // Looks up key in the table. If found, returns the iterator to the element.
2587
  // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
2588
  // and returns an iterator to the new element.
2589
  //
2590
  // `f` must abide by several restrictions:
2591
  //  - it MUST call `raw_hash_set::constructor` with arguments as if a
2592
  //    `raw_hash_set::value_type` is constructed,
2593
  //  - it MUST NOT access the container before the call to
2594
  //    `raw_hash_set::constructor`, and
2595
  //  - it MUST NOT erase the lazily emplaced element.
2596
  // Doing any of these is undefined behavior.
2597
  //
2598
  // For example:
2599
  //
2600
  //   std::unordered_set<ArenaString> s;
2601
  //   // Makes an ArenaString even if "abc" is in the set.
2602
  //   s.insert(ArenaString(&arena, "abc"));
2603
  //
2604
  //   flat_hash_set<ArenaString> s;
2605
  //   // Makes an ArenaString only if "abc" is not in the set.
2606
  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
2607
  //     ctor(&arena, "abc");
2608
  //   });
2609
  //
2610
  // WARNING: This API is currently experimental. If there is a way to implement
2611
  // the same thing with the rest of the API, prefer that.
2612
  class constructor {
2613
    friend class raw_hash_set;
2614
2615
   public:
2616
    template <class... Args>
2617
    void operator()(Args&&... args) const {
2618
      ABSL_SWISSTABLE_ASSERT(*slot_);
2619
      PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
2620
      *slot_ = nullptr;
2621
    }
2622
2623
   private:
2624
    constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
2625
2626
    allocator_type* alloc_;
2627
    slot_type** slot_;
2628
  };
2629
2630
  template <class K = key_type, class F>
2631
  iterator lazy_emplace(const key_arg<K>& key,
2632
                        F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2633
    auto res = find_or_prepare_insert(key);
2634
    if (res.second) {
2635
      slot_type* slot = res.first.slot();
2636
      allocator_type alloc(char_alloc_ref());
2637
      std::forward<F>(f)(constructor(&alloc, &slot));
2638
      ABSL_SWISSTABLE_ASSERT(!slot);
2639
    }
2640
    return res.first;
2641
  }
2642
2643
  // Extension API: support for heterogeneous keys.
2644
  //
2645
  //   std::unordered_set<std::string> s;
2646
  //   // Turns "abc" into std::string.
2647
  //   s.erase("abc");
2648
  //
2649
  //   flat_hash_set<std::string> s;
2650
  //   // Uses "abc" directly without copying it into std::string.
2651
  //   s.erase("abc");
2652
  template <class K = key_type>
2653
  size_type erase(const key_arg<K>& key) {
2654
    auto it = find(key);
2655
    if (it == end()) return 0;
2656
    erase(it);
2657
    return 1;
2658
  }
2659
2660
  // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
2661
  // this method returns void to reduce algorithmic complexity to O(1). The
2662
  // iterator is invalidated, so any increment should be done before calling
2663
  // erase (e.g. `erase(it++)`).
2664
  void erase(const_iterator cit) { erase(cit.inner_); }
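  // Illustrative sketch (added for exposition, not part of the measured
  // source): because erase() returns void, erasing while iterating uses the
  // post-increment idiom mentioned above. Assumes an absl::flat_hash_set<int>.
  //
  //   absl::flat_hash_set<int> s = {1, 2, 3, 4};
  //   for (auto it = s.begin(); it != s.end();) {
  //     if (*it % 2 == 0) {
  //       s.erase(it++);  // advance before the erased iterator is invalidated
  //     } else {
  //       ++it;
  //     }
  //   }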
2665
2666
  // This overload is necessary because otherwise erase<K>(const K&) would be
2667
  // a better match if non-const iterator is passed as an argument.
2668
  void erase(iterator it) {
2669
    ABSL_SWISSTABLE_ASSERT(capacity() > 0);
2670
    AssertNotDebugCapacity();
2671
    it.assert_is_full("erase()");
2672
    destroy(it.slot());
2673
    erase_meta_only(it);
2674
  }
2675
2676
  iterator erase(const_iterator first,
2677
                 const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2678
    AssertNotDebugCapacity();
2679
    // We check for empty first because clear_backing_array requires that
2680
    // capacity() > 0 as a precondition.
2681
    if (empty()) return end();
2682
    if (first == last) return last.inner_;
2683
    if (is_small()) {
2684
      destroy(single_slot());
2685
      erase_meta_only_small();
2686
      return end();
2687
    }
2688
    if (first == begin() && last == end()) {
2689
      // TODO(ezb): we access control bytes in destroy_slots so it could make
2690
      // sense to combine destroy_slots and clear_backing_array to avoid cache
2691
      // misses when the table is large. Note that we also do this in clear().
2692
      destroy_slots();
2693
      clear_backing_array(/*reuse=*/true);
2694
      common().set_reserved_growth(common().reservation_size());
2695
      return end();
2696
    }
2697
    while (first != last) {
2698
      erase(first++);
2699
    }
2700
    return last.inner_;
2701
  }
2702
2703
  // Moves elements from `src` into `this`.
2704
  // If the element already exists in `this`, it is left unmodified in `src`.
2705
  template <typename H, typename E>
2706
  void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
2707
    AssertNotDebugCapacity();
2708
    src.AssertNotDebugCapacity();
2709
    assert(this != &src);
2710
    // Returns whether insertion took place.
2711
    const auto insert_slot = [this](slot_type* src_slot) {
2712
      return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)},
2713
                                 PolicyTraits::element(src_slot))
2714
          .second;
2715
    };
2716
2717
    if (src.is_small()) {
2718
      if (src.empty()) return;
2719
      if (insert_slot(src.single_slot()))
2720
        src.erase_meta_only_small();
2721
      return;
2722
    }
2723
    for (auto it = src.begin(), e = src.end(); it != e;) {
2724
      auto next = std::next(it);
2725
      if (insert_slot(it.slot())) src.erase_meta_only_large(it);
2726
      it = next;
2727
    }
2728
  }
2729
2730
  template <typename H, typename E>
2731
  void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
2732
    merge(src);
2733
  }
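  // Illustrative sketch (added for exposition, not part of the measured
  // source): merge() moves over only the keys that are missing from the
  // destination; duplicates stay behind in `src`. Assumes
  // absl::flat_hash_set<std::string>.
  //
  //   absl::flat_hash_set<std::string> dst = {"a"};
  //   absl::flat_hash_set<std::string> src = {"a", "b"};
  //   dst.merge(src);
  //   // dst == {"a", "b"}; src == {"a"} since "a" already existed in dst.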
2734
2735
  node_type extract(const_iterator position) {
2736
    AssertNotDebugCapacity();
2737
    position.inner_.assert_is_full("extract()");
2738
    allocator_type alloc(char_alloc_ref());
2739
    auto node = CommonAccess::Transfer<node_type>(alloc, position.slot());
2740
    erase_meta_only(position);
2741
    return node;
2742
  }
2743
2744
  template <class K = key_type,
2745
            std::enable_if_t<!std::is_same<K, iterator>::value, int> = 0>
2746
  node_type extract(const key_arg<K>& key) {
2747
    auto it = find(key);
2748
    return it == end() ? node_type() : extract(const_iterator{it});
2749
  }
2750
2751
  void swap(raw_hash_set& that) noexcept(
2752
      AllocTraits::is_always_equal::value &&
2753
      std::is_nothrow_swappable<hasher>::value &&
2754
      std::is_nothrow_swappable<key_equal>::value) {
2755
    AssertNotDebugCapacity();
2756
    that.AssertNotDebugCapacity();
2757
    using std::swap;
2758
    swap_common(that);
2759
    swap(hash_ref(), that.hash_ref());
2760
    swap(eq_ref(), that.eq_ref());
2761
    SwapAlloc(char_alloc_ref(), that.char_alloc_ref(),
2762
              typename AllocTraits::propagate_on_container_swap{});
2763
  }
2764
2765
  void rehash(size_t n) { Rehash(common(), GetPolicyFunctions(), n); }
2766
2767
  void reserve(size_t n) {
2768
    if (ABSL_PREDICT_TRUE(n > DefaultCapacity())) {
2769
      ReserveTableToFitNewSize(common(), GetPolicyFunctions(), n);
2770
    }
2771
  }
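  // Illustrative sketch (added for exposition, not part of the measured
  // source): reserving before a bulk insert avoids intermediate rehashes.
  //
  //   absl::flat_hash_set<int> s;
  //   s.reserve(1000);  // one allocation sized for 1000 elements
  //   for (int i = 0; i < 1000; ++i) s.insert(i);  // no rehash along the way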
2772
2773
  // Extension API: support for heterogeneous keys.
2774
  //
2775
  //   std::unordered_set<std::string> s;
2776
  //   // Turns "abc" into std::string.
2777
  //   s.count("abc");
2778
  //
2779
  //   ch_set<std::string> s;
2780
  //   // Uses "abc" directly without copying it into std::string.
2781
  //   s.count("abc");
2782
  template <class K = key_type>
2783
  size_t count(const key_arg<K>& key) const {
2784
    return find(key) == end() ? 0 : 1;
2785
  }
2786
2787
  // Issues CPU prefetch instructions for the memory needed to find or insert
2788
  // a key.  Like all lookup functions, this supports heterogeneous keys.
2789
  //
2790
  // NOTE: This is a very low level operation and should not be used without
2791
  // specific benchmarks indicating its importance.
2792
  template <class K = key_type>
2793
  void prefetch([[maybe_unused]] const key_arg<K>& key) const {
2794
    if (capacity() == DefaultCapacity()) return;
2795
    // Avoid probing if we won't be able to prefetch the addresses received.
2796
#ifdef ABSL_HAVE_PREFETCH
2797
    prefetch_heap_block();
2798
    if (is_small()) return;
2799
    auto seq = probe(common(), hash_of(key));
2800
    PrefetchToLocalCache(control() + seq.offset());
2801
    PrefetchToLocalCache(slot_array() + seq.offset());
2802
#endif  // ABSL_HAVE_PREFETCH
2803
  }
2804
2805
  template <class K = key_type>
2806
  ABSL_DEPRECATE_AND_INLINE()
2807
  iterator find(const key_arg<K>& key,
2808
                size_t) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2809
    return find(key);
2810
  }
2811
  // The API of find() has one extension: the type of the key argument doesn't
2812
  // have to be key_type. This is so called heterogeneous key support.
2813
  template <class K = key_type>
2814
14
  iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2815
14
    AssertOnFind(key);
2816
14
    if (is_small()) return find_small(key);
2817
14
    prefetch_heap_block();
2818
14
    return find_large(key, hash_of(key));
2819
14
  }
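  // Illustrative sketch (added for exposition, not part of the measured
  // source): with the default (transparent) hash and equality for
  // std::string, find() accepts a string_view and avoids materializing a
  // temporary std::string for the lookup.
  //
  //   absl::flat_hash_set<std::string> s = {"abc"};
  //   absl::string_view key = "abc";
  //   auto it = s.find(key);  // heterogeneous lookup, no std::string copy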
2820
2821
  template <class K = key_type>
2822
  ABSL_DEPRECATE_AND_INLINE()
2823
  const_iterator find(const key_arg<K>& key,
2824
                      size_t) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2825
    return find(key);
2826
  }
2827
  template <class K = key_type>
2828
  const_iterator find(const key_arg<K>& key) const
2829
14
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2830
14
    return const_cast<raw_hash_set*>(this)->find(key);
2831
14
  }
2832
2833
  template <class K = key_type>
2834
  bool contains(const key_arg<K>& key) const {
2835
    // Here neither the iterator returned by `find()` nor `end()` can be invalid
2836
    // outside of potential thread-safety issues.
2837
    // `find()`'s return value is constructed, used, and then destructed
2838
    // all in this context.
2839
    return !find(key).unchecked_equals(end());
2840
  }
2841
2842
  template <class K = key_type>
2843
  std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
2844
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2845
    auto it = find(key);
2846
    if (it != end()) return {it, std::next(it)};
2847
    return {it, it};
2848
  }
2849
  template <class K = key_type>
2850
  std::pair<const_iterator, const_iterator> equal_range(
2851
      const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2852
    auto it = find(key);
2853
    if (it != end()) return {it, std::next(it)};
2854
    return {it, it};
2855
  }
2856
2857
  size_t bucket_count() const { return capacity(); }
2858
  float load_factor() const {
2859
    return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
2860
  }
2861
  float max_load_factor() const { return 1.0f; }
2862
  void max_load_factor(float) {
2863
    // Does nothing.
2864
  }
2865
2866
  hasher hash_function() const { return hash_ref(); }
2867
  key_equal key_eq() const { return eq_ref(); }
2868
  allocator_type get_allocator() const {
2869
    return allocator_type(char_alloc_ref());
2870
  }
2871
2872
  friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
2873
    if (a.size() != b.size()) return false;
2874
    const raw_hash_set* outer = &a;
2875
    const raw_hash_set* inner = &b;
2876
    if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
2877
    for (const value_type& elem : *outer) {
2878
      auto it = PolicyTraits::apply(FindElement{*inner}, elem);
2879
      if (it == inner->end()) return false;
2880
      // Note: we used key_equal to check for key equality in FindElement, but
2881
      // we may need to do an additional comparison using
2882
      // value_type::operator==. E.g. the keys could be equal and the
2883
      // mapped_types could be unequal in a map; or, even in a set, key_equal
2884
      // could ignore some fields that aren't ignored by operator==.
2885
      static constexpr bool kKeyEqIsValueEq =
2886
          std::is_same<key_type, value_type>::value &&
2887
          std::is_same<key_equal, hash_default_eq<key_type>>::value;
2888
      if (!kKeyEqIsValueEq && !(*it == elem)) return false;
2889
    }
2890
    return true;
2891
  }
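  // Illustrative sketch (added for exposition, not part of the measured
  // source): why the extra operator== check above matters. In a map, two
  // elements can have equal keys (so FindElement succeeds) while the mapped
  // values differ, and the containers must then compare unequal.
  //
  //   absl::flat_hash_map<std::string, int> a = {{"k", 1}};
  //   absl::flat_hash_map<std::string, int> b = {{"k", 2}};
  //   // The key "k" is found in both tables, but the value_type comparison
  //   // fails, so (a == b) is false.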
2892
2893
  friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
2894
    return !(a == b);
2895
  }
2896
2897
  template <typename H>
2898
  friend typename std::enable_if<H::template is_hashable<value_type>::value,
2899
                                 H>::type
2900
  AbslHashValue(H h, const raw_hash_set& s) {
2901
    return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
2902
                      hash_internal::WeaklyMixedInteger{s.size()});
2903
  }
2904
2905
  friend void swap(raw_hash_set& a,
2906
                   raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
2907
    a.swap(b);
2908
  }
2909
2910
 private:
2911
  template <class Container, typename Enabler>
2912
  friend struct absl::container_internal::hashtable_debug_internal::
2913
      HashtableDebugAccess;
2914
2915
  friend struct absl::container_internal::HashtableFreeFunctionsAccess;
2916
2917
  struct FindElement {
2918
    template <class K, class... Args>
2919
14
    const_iterator operator()(const K& key, Args&&...) const {
2920
14
      return s.find(key);
2921
14
    }
2922
    const raw_hash_set& s;
2923
  };
2924
2925
  struct EmplaceDecomposable {
2926
    template <class K, class... Args>
2927
16
    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
2928
16
      auto res = s.find_or_prepare_insert(key);
2929
16
      if (res.second) {
2930
16
        s.emplace_at(res.first, std::forward<Args>(args)...);
2931
16
      }
2932
16
      return res;
2933
16
    }
std::__1::pair<absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::iterator, bool> absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::EmplaceDecomposable::operator()<std::__1::basic_string_view<char, std::__1::char_traits<char> >, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&) const
Line
Count
Source
2927
16
    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
2928
16
      auto res = s.find_or_prepare_insert(key);
2929
16
      if (res.second) {
2930
16
        s.emplace_at(res.first, std::forward<Args>(args)...);
2931
16
      }
2932
16
      return res;
2933
16
    }
Unexecuted instantiation: std::__1::pair<absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::iterator, bool> absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::EmplaceDecomposable::operator()<std::__1::basic_string_view<char, std::__1::char_traits<char> >, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&) const
2934
    raw_hash_set& s;
2935
  };
2936
2937
  template <bool do_destroy>
2938
  struct InsertSlot {
2939
    template <class K, class... Args>
2940
    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
2941
      auto res = s.find_or_prepare_insert(key);
2942
      if (res.second) {
2943
        s.transfer(res.first.slot(), &slot);
2944
      } else if (do_destroy) {
2945
        s.destroy(&slot);
2946
      }
2947
      return res;
2948
    }
2949
    raw_hash_set& s;
2950
    // Constructed slot. Either moved into place or destroyed.
2951
    slot_type&& slot;
2952
  };
2953
2954
  template <typename... Args>
2955
16
  inline void construct(slot_type* slot, Args&&... args) {
2956
16
    common().RunWithReentrancyGuard([&] {
2957
16
      allocator_type alloc(char_alloc_ref());
2958
16
      PolicyTraits::construct(&alloc, slot, std::forward<Args>(args)...);
2959
16
    });
absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::construct<std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&)::{lambda()#1}::operator()() const
Line
Count
Source
2956
16
    common().RunWithReentrancyGuard([&] {
2957
16
      allocator_type alloc(char_alloc_ref());
2958
16
      PolicyTraits::construct(&alloc, slot, std::forward<Args>(args)...);
2959
16
    });
Unexecuted instantiation: absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::construct<std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&)::{lambda()#1}::operator()() const
2960
16
  }
void absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::construct<std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&)
Line
Count
Source
2955
16
  inline void construct(slot_type* slot, Args&&... args) {
2956
16
    common().RunWithReentrancyGuard([&] {
2957
16
      allocator_type alloc(char_alloc_ref());
2958
16
      PolicyTraits::construct(&alloc, slot, std::forward<Args>(args)...);
2959
16
    });
2960
16
  }
Unexecuted instantiation: void absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::construct<std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&)
2961
0
  inline void destroy(slot_type* slot) {
2962
0
    common().RunWithReentrancyGuard([&] {
2963
0
      allocator_type alloc(char_alloc_ref());
2964
0
      PolicyTraits::destroy(&alloc, slot);
2965
0
    });
2966
0
  }
2967
0
  inline void transfer(slot_type* to, slot_type* from) {
2968
0
    common().RunWithReentrancyGuard([&] {
2969
0
      allocator_type alloc(char_alloc_ref());
2970
0
      PolicyTraits::transfer(&alloc, to, from);
2971
0
    });
2972
0
  }
2973
2974
  // TODO(b/289225379): consider having a helper class that has the impls for
2975
  // SOO functionality.
2976
  template <class K = key_type>
2977
0
  iterator find_small(const key_arg<K>& key) {
2978
0
    ABSL_SWISSTABLE_ASSERT(is_small());
2979
0
    return empty() || !equal_to(key, single_slot()) ? end() : single_iterator();
2980
0
  }
2981
2982
  template <class K = key_type>
2983
14
  iterator find_large(const key_arg<K>& key, size_t hash) {
2984
14
    ABSL_SWISSTABLE_ASSERT(!is_small());
2985
14
    auto seq = probe(common(), hash);
2986
14
    const h2_t h2 = H2(hash);
2987
14
    const ctrl_t* ctrl = control();
2988
14
    while (true) {
2989
14
#ifndef ABSL_HAVE_MEMORY_SANITIZER
2990
14
      absl::PrefetchToLocalCache(slot_array() + seq.offset());
2991
14
#endif
2992
14
      Group g{ctrl + seq.offset()};
2993
14
      for (uint32_t i : g.Match(h2)) {
2994
14
        if (ABSL_PREDICT_TRUE(equal_to(key, slot_array() + seq.offset(i))))
2995
14
          return iterator_at(seq.offset(i));
2996
14
      }
2997
0
      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
2998
0
      seq.next();
2999
0
      ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity() && "full table!");
3000
0
    }
3001
14
  }
3002
3003
  // Returns true if the table needs to be sampled.
3004
  // This should be called on insertion into an empty SOO table and in copy
3005
  // construction when the size can fit in SOO capacity.
3006
  bool should_sample_soo() const {
3007
    ABSL_SWISSTABLE_ASSERT(is_soo());
3008
    if (!ShouldSampleHashtablezInfoForAlloc<CharAlloc>()) return false;
3009
    return ABSL_PREDICT_FALSE(ShouldSampleNextTable());
3010
  }
3011
3012
0
  void clear_backing_array(bool reuse) {
3013
0
    ABSL_SWISSTABLE_ASSERT(capacity() > DefaultCapacity());
3014
0
    ClearBackingArray(common(), GetPolicyFunctions(), &char_alloc_ref(), reuse,
3015
0
                      SooEnabled());
3016
0
  }
3017
3018
0
  void destroy_slots() {
3019
0
    ABSL_SWISSTABLE_ASSERT(!is_small());
3020
0
    if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
3021
0
    auto destroy_slot = [&](const ctrl_t*, void* slot) {
3022
0
      this->destroy(static_cast<slot_type*>(slot));
3023
0
    };
3024
0
    if constexpr (SwisstableAssertAccessToDestroyedTable()) {
3025
0
      CommonFields common_copy(non_soo_tag_t{}, this->common());
3026
0
      common().set_capacity(InvalidCapacity::kDestroyed);
3027
0
      IterateOverFullSlots(common_copy, sizeof(slot_type), destroy_slot);
3028
0
      common().set_capacity(common_copy.capacity());
3029
    } else {
3030
      IterateOverFullSlots(common(), sizeof(slot_type), destroy_slot);
3031
    }
3032
0
  }
3033
3034
0
  void dealloc() {
3035
0
    ABSL_SWISSTABLE_ASSERT(capacity() > DefaultCapacity());
3036
    // Unpoison before returning the memory to the allocator.
3037
0
    SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
3038
0
    infoz().Unregister();
3039
0
    DeallocateBackingArray<BackingArrayAlignment(alignof(slot_type)),
3040
0
                           CharAlloc>(&char_alloc_ref(), capacity(), control(),
3041
0
                                      sizeof(slot_type), alignof(slot_type),
3042
0
                                      common().has_infoz());
3043
0
  }
3044
3045
0
  void destructor_impl() {
3046
0
    if (SwisstableGenerationsEnabled() &&
3047
0
        capacity() >= InvalidCapacity::kMovedFrom) {
3048
0
      return;
3049
0
    }
3050
0
    if (capacity() == 0) return;
3051
0
    if (is_small()) {
3052
0
      if (!empty()) {
3053
0
        ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(single_slot()));
3054
0
      }
3055
      if constexpr (SooEnabled()) return;
3056
0
    } else {
3057
0
      destroy_slots();
3058
0
    }
3059
0
    dealloc();
3060
0
  }
3061
3062
  // Erases, but does not destroy, the value pointed to by `it`.
3063
  //
3064
  // This merely updates the pertinent control byte. This can be used in
3065
  // conjunction with Policy::transfer to move the object to another place.
3066
  void erase_meta_only(const_iterator it) {
3067
    if (is_small()) {
3068
      erase_meta_only_small();
3069
      return;
3070
    }
3071
    erase_meta_only_large(it);
3072
  }
3073
  void erase_meta_only_small() {
3074
    EraseMetaOnlySmall(common(), SooEnabled(), sizeof(slot_type));
3075
  }
3076
  void erase_meta_only_large(const_iterator it) {
3077
    EraseMetaOnlyLarge(common(), it.control(), sizeof(slot_type));
3078
  }
3079
3080
  template <class K>
3081
  ABSL_ATTRIBUTE_ALWAYS_INLINE bool equal_to(const K& key,
3082
16
                                             slot_type* slot) const {
3083
16
    return PolicyTraits::apply(EqualElement<K, key_equal>{key, eq_ref()},
3084
16
                               PolicyTraits::element(slot));
3085
16
  }
3086
  template <class K>
3087
26
  ABSL_ATTRIBUTE_ALWAYS_INLINE size_t hash_of(const K& key) const {
3088
26
    return HashElement<hasher, kIsDefaultHash>{hash_ref(),
3089
26
                                               common().seed().seed()}(key);
3090
26
  }
3091
0
  ABSL_ATTRIBUTE_ALWAYS_INLINE size_t hash_of(slot_type* slot) const {
3092
0
    return PolicyTraits::apply(
3093
0
        HashElement<hasher, kIsDefaultHash>{hash_ref(), common().seed().seed()},
3094
0
        PolicyTraits::element(slot));
3095
0
  }
3096
3097
  // Casting directly from e.g. char* to slot_type* can cause compilation errors
3098
  // on Objective-C. This function converts to void* first, avoiding the issue.
3099
4
  static ABSL_ATTRIBUTE_ALWAYS_INLINE slot_type* to_slot(void* buf) {
3100
4
    return static_cast<slot_type*>(buf);
3101
4
  }
3102
3103
  // Requires that lhs does not have a full SOO slot.
3104
  static void move_common(bool rhs_is_full_soo, CharAlloc& rhs_alloc,
3105
                          CommonFields& lhs, CommonFields&& rhs) {
3106
    if (PolicyTraits::transfer_uses_memcpy() || !rhs_is_full_soo) {
3107
      lhs = std::move(rhs);
3108
    } else {
3109
      lhs.move_non_heap_or_soo_fields(rhs);
3110
      rhs.RunWithReentrancyGuard([&] {
3111
        lhs.RunWithReentrancyGuard([&] {
3112
          PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()),
3113
                                 to_slot(rhs.soo_data()));
3114
        });
3115
      });
3116
    }
3117
  }
3118
3119
  // Swaps common fields making sure to avoid memcpy'ing a full SOO slot if we
3120
  // aren't allowed to do so.
3121
  void swap_common(raw_hash_set& that) {
3122
    using std::swap;
3123
    if (PolicyTraits::transfer_uses_memcpy()) {
3124
      swap(common(), that.common());
3125
      return;
3126
    }
3127
    CommonFields tmp = CommonFields(uninitialized_tag_t{});
3128
    const bool that_is_full_soo = that.is_full_soo();
3129
    move_common(that_is_full_soo, that.char_alloc_ref(), tmp,
3130
                std::move(that.common()));
3131
    move_common(is_full_soo(), char_alloc_ref(), that.common(),
3132
                std::move(common()));
3133
    move_common(that_is_full_soo, that.char_alloc_ref(), common(),
3134
                std::move(tmp));
3135
  }
3136
3137
0
  void annotate_for_bug_detection_on_move([[maybe_unused]] raw_hash_set& that) {
3138
0
    // We only enable moved-from validation when generations are enabled (rather
3139
0
    // than using NDEBUG) to avoid issues in which NDEBUG is enabled in some
3140
0
    // translation units but not in others.
3141
0
    if (SwisstableGenerationsEnabled()) {
3142
0
      that.common().set_capacity(this == &that ? InvalidCapacity::kSelfMovedFrom
3143
0
                                               : InvalidCapacity::kMovedFrom);
3144
0
    }
3145
0
    if (!SwisstableGenerationsEnabled() || capacity() == DefaultCapacity() ||
3146
0
        capacity() > kAboveMaxValidCapacity) {
3147
0
      return;
3148
0
    }
3149
0
    common().increment_generation();
3150
0
    if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
3151
0
      ResizeAllocatedTableWithSeedChange(common(), GetPolicyFunctions(),
3152
0
                                         capacity());
3153
0
    }
3154
0
  }
3155
3156
  template <bool propagate_alloc>
3157
  raw_hash_set& assign_impl(raw_hash_set&& that) {
3158
    // We don't bother checking for this/that aliasing. We just need to avoid
3159
    // breaking the invariants in that case.
3160
    destructor_impl();
3161
    move_common(that.is_full_soo(), that.char_alloc_ref(), common(),
3162
                std::move(that.common()));
3163
    hash_ref() = that.hash_ref();
3164
    eq_ref() = that.eq_ref();
3165
    CopyAlloc(char_alloc_ref(), that.char_alloc_ref(),
3166
              std::integral_constant<bool, propagate_alloc>());
3167
    that.common() = CommonFields::CreateDefault<SooEnabled()>();
3168
    annotate_for_bug_detection_on_move(that);
3169
    return *this;
3170
  }
3171
3172
  raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
3173
    const size_t size = that.size();
3174
    if (size == 0) return *this;
3175
    reserve(size);
3176
    for (iterator it = that.begin(); it != that.end(); ++it) {
3177
      insert(std::move(PolicyTraits::element(it.slot())));
3178
      that.destroy(it.slot());
3179
    }
3180
    if (!that.is_soo()) that.dealloc();
3181
    that.common() = CommonFields::CreateDefault<SooEnabled()>();
3182
    annotate_for_bug_detection_on_move(that);
3183
    return *this;
3184
  }
3185
3186
  raw_hash_set& move_assign(raw_hash_set&& that,
3187
                            std::true_type /*propagate_alloc*/) {
3188
    return assign_impl<true>(std::move(that));
3189
  }
3190
  raw_hash_set& move_assign(raw_hash_set&& that,
3191
                            std::false_type /*propagate_alloc*/) {
3192
    if (char_alloc_ref() == that.char_alloc_ref()) {
3193
      return assign_impl<false>(std::move(that));
3194
    }
3195
    // Aliasing can't happen here because allocs would compare equal above.
3196
    assert(this != &that);
3197
    destructor_impl();
3198
    // We can't take over `that`'s memory, so we need to move each element.
3199
    // While moving elements, `this` should have `that`'s hash/eq, so copy hash/eq
3200
    // before moving elements.
3201
    hash_ref() = that.hash_ref();
3202
    eq_ref() = that.eq_ref();
3203
    return move_elements_allocs_unequal(std::move(that));
3204
  }
3205
3206
  template <class K>
3207
  std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
3208
    ABSL_SWISSTABLE_ASSERT(is_soo());
3209
    bool force_sampling;
3210
    if (empty()) {
3211
      if (!should_sample_soo()) {
3212
        common().set_full_soo();
3213
        return {single_iterator(), true};
3214
      }
3215
      force_sampling = true;
3216
    } else if (equal_to(key, single_slot())) {
3217
      return {single_iterator(), false};
3218
    } else {
3219
      force_sampling = false;
3220
    }
3221
    ABSL_SWISSTABLE_ASSERT(capacity() == 1);
3222
    constexpr bool kUseMemcpy =
3223
        PolicyTraits::transfer_uses_memcpy() && SooEnabled();
3224
    size_t index = GrowSooTableToNextCapacityAndPrepareInsert<
3225
        kUseMemcpy ? OptimalMemcpySizeForSooSlotTransfer(sizeof(slot_type)) : 0,
3226
        kUseMemcpy>(common(), GetPolicyFunctions(),
3227
                    HashKey<hasher, K, kIsDefaultHash>{hash_ref(), key},
3228
                    force_sampling);
3229
    return {iterator_at(index), true};
3230
  }
3231
3232
  template <class K>
3233
4
  std::pair<iterator, bool> find_or_prepare_insert_small(const K& key) {
3234
4
    ABSL_SWISSTABLE_ASSERT(is_small());
3235
    if constexpr (SooEnabled()) {
3236
      return find_or_prepare_insert_soo(key);
3237
    }
3238
4
    if (!empty()) {
3239
2
      if (equal_to(key, single_slot())) {
3240
0
        return {single_iterator(), false};
3241
0
      }
3242
2
    }
3243
4
    return {iterator_at_ptr(PrepareInsertSmallNonSoo(
3244
4
                common(), GetPolicyFunctions(),
3245
4
                HashKey<hasher, K, kIsDefaultHash>{hash_ref(), key})),
3246
4
            true};
3247
4
  }
3248
3249
  template <class K>
3250
12
  std::pair<iterator, bool> find_or_prepare_insert_large(const K& key) {
3251
12
    ABSL_SWISSTABLE_ASSERT(!is_soo());
3252
12
    prefetch_heap_block();
3253
12
    const size_t hash = hash_of(key);
3254
12
    auto seq = probe(common(), hash);
3255
12
    const h2_t h2 = H2(hash);
3256
12
    const ctrl_t* ctrl = control();
3257
12
    while (true) {
3258
12
#ifndef ABSL_HAVE_MEMORY_SANITIZER
3259
12
      absl::PrefetchToLocalCache(slot_array() + seq.offset());
3260
12
#endif
3261
12
      Group g{ctrl + seq.offset()};
3262
12
      for (uint32_t i : g.Match(h2)) {
3263
0
        if (ABSL_PREDICT_TRUE(equal_to(key, slot_array() + seq.offset(i))))
3264
0
          return {iterator_at(seq.offset(i)), false};
3265
0
      }
3266
12
      auto mask_empty = g.MaskEmpty();
3267
12
      if (ABSL_PREDICT_TRUE(mask_empty)) {
3268
12
        size_t target = seq.offset(mask_empty.LowestBitSet());
3269
12
        size_t index =
3270
12
            SwisstableGenerationsEnabled()
3271
12
                ? PrepareInsertLargeGenerationsEnabled(
3272
0
                      common(), GetPolicyFunctions(), hash,
3273
0
                      FindInfo{target, seq.index()},
3274
0
                      HashKey<hasher, K, kIsDefaultHash>{hash_ref(), key})
3275
12
                : PrepareInsertLarge(common(), GetPolicyFunctions(), hash,
3276
12
                                     FindInfo{target, seq.index()});
3277
12
        return {iterator_at(index), true};
3278
12
      }
3279
0
      seq.next();
3280
0
      ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity() && "full table!");
3281
0
    }
3282
12
  }
3283
3284
 protected:
3285
  // Correctness assertions that we run on find()/find_or_prepare_insert().
3286
  template <class K>
3287
30
  void AssertOnFind([[maybe_unused]] const K& key) {
3288
30
    AssertHashEqConsistent(key);
3289
30
    AssertNotDebugCapacity();
3290
30
  }
3291
3292
  // Asserts that the capacity is not a sentinel invalid value.
3293
34
  void AssertNotDebugCapacity() const {
3294
#ifdef NDEBUG
3295
    if (!SwisstableGenerationsEnabled()) {
3296
      return;
3297
    }
3298
#endif
3299
34
    if (ABSL_PREDICT_TRUE(capacity() <
3300
34
                          InvalidCapacity::kAboveMaxValidCapacity)) {
3301
34
      return;
3302
34
    }
3303
0
    assert(capacity() != InvalidCapacity::kReentrance &&
3304
0
           "Reentrant container access during element construction/destruction "
3305
0
           "is not allowed.");
3306
0
    if constexpr (SwisstableAssertAccessToDestroyedTable()) {
3307
0
      if (capacity() == InvalidCapacity::kDestroyed) {
3308
0
        ABSL_RAW_LOG(FATAL, "Use of destroyed hash table.");
3309
0
      }
3310
0
    }
3311
0
    if (SwisstableGenerationsEnabled() &&
3312
0
        ABSL_PREDICT_FALSE(capacity() >= InvalidCapacity::kMovedFrom)) {
3313
0
      if (capacity() == InvalidCapacity::kSelfMovedFrom) {
3314
        // If this log triggers, then a hash table was move-assigned to itself
3315
        // and then used again later without being reinitialized.
3316
0
        ABSL_RAW_LOG(FATAL, "Use of self-move-assigned hash table.");
3317
0
      }
3318
0
      ABSL_RAW_LOG(FATAL, "Use of moved-from hash table.");
3319
0
    }
3320
0
  }
3321
3322
  // Asserts that hash and equal functors provided by the user are consistent,
3323
  // meaning that `eq(k1, k2)` implies `hash(k1)==hash(k2)`.
3324
  template <class K>
3325
30
  void AssertHashEqConsistent(const K& key) {
3326
#ifdef NDEBUG
3327
    return;
3328
#endif
3329
    // If the hash/eq functors are known to be consistent, then skip validation.
3330
30
    if (std::is_same<hasher, absl::container_internal::StringHash>::value &&
3331
30
        std::is_same<key_equal, absl::container_internal::StringEq>::value) {
3332
30
      return;
3333
30
    }
3334
0
    if (std::is_scalar<key_type>::value &&
3335
0
        std::is_same<hasher, absl::Hash<key_type>>::value &&
3336
0
        std::is_same<key_equal, std::equal_to<key_type>>::value) {
3337
0
      return;
3338
0
    }
3339
0
    if (empty()) return;
3340
3341
0
    const size_t hash_of_arg = hash_of(key);
3342
0
    const auto assert_consistent = [&](const ctrl_t*, void* slot) {
3343
0
      const bool is_key_equal = equal_to(key, to_slot(slot));
3344
0
      if (!is_key_equal) return;
3345
3346
0
      [[maybe_unused]] const bool is_hash_equal =
3347
0
          hash_of_arg == hash_of(to_slot(slot));
3348
0
      assert((!is_key_equal || is_hash_equal) &&
3349
0
             "eq(k1, k2) must imply that hash(k1) == hash(k2). "
3350
0
             "hash/eq functors are inconsistent.");
3351
0
    };
3352
3353
0
    if (is_small()) {
3354
0
      assert_consistent(/*unused*/ nullptr, single_slot());
3355
0
      return;
3356
0
    }
3357
    // We only do validation for small tables so that it's constant time.
3358
0
    if (capacity() > 16) return;
3359
0
    IterateOverFullSlots(common(), sizeof(slot_type), assert_consistent);
3360
0
  }
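  // Illustrative sketch (added for exposition, not part of the measured
  // source): the contract this check enforces. A hypothetical
  // case-insensitive functor pair that keeps hash and equality consistent by
  // hashing a lower-cased copy of the key:
  //
  //   struct CaseInsensitiveEq {
  //     bool operator()(absl::string_view a, absl::string_view b) const {
  //       return absl::EqualsIgnoreCase(a, b);
  //     }
  //   };
  //   struct CaseInsensitiveHash {
  //     size_t operator()(absl::string_view s) const {
  //       return absl::Hash<std::string>{}(absl::AsciiStrToLower(s));
  //     }
  //   };
  //   // Strings that compare equal ignoring case lower-case to the same
  //   // value, so eq(k1, k2) implies hash(k1) == hash(k2).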
3361
3362
  // Attempts to find `key` in the table; if it isn't found, returns an iterator
3363
  // to the slot where the value can be inserted, with the control byte already set to
3364
  // `key`'s H2. Returns a bool indicating whether an insertion can take place.
3365
  template <class K>
3366
16
  std::pair<iterator, bool> find_or_prepare_insert(const K& key) {
3367
16
    AssertOnFind(key);
3368
16
    if (is_small()) return find_or_prepare_insert_small(key);
3369
12
    return find_or_prepare_insert_large(key);
3370
16
  }
3371
3372
  // Constructs the value in the space pointed by the iterator. This only works
3373
  // after an unsuccessful find_or_prepare_insert() and before any other
3374
  // modifications happen in the raw_hash_set.
3375
  //
3376
  // PRECONDITION: iter was returned from find_or_prepare_insert(k), where k is
3377
  // the key decomposed from `forward<Args>(args)...`, and the bool returned by
3378
  // find_or_prepare_insert(k) was true.
3379
  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
3380
  template <class... Args>
3381
16
  void emplace_at(iterator iter, Args&&... args) {
3382
16
    construct(iter.slot(), std::forward<Args>(args)...);
3383
3384
    // When is_small, find calls find_small and if size is 0, then it will
3385
    // return an end iterator. This can happen in the raw_hash_set copy ctor.
3386
16
    assert((is_small() ||
3387
16
            PolicyTraits::apply(FindElement{*this}, *iter) == iter) &&
3388
16
           "constructed value does not match the lookup key");
3389
16
  }
void absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::emplace_at<std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::iterator, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&)
Line
Count
Source
3381
16
  void emplace_at(iterator iter, Args&&... args) {
3382
16
    construct(iter.slot(), std::forward<Args>(args)...);
3383
3384
    // When is_small, find calls find_small and if size is 0, then it will
3385
    // return an end iterator. This can happen in the raw_hash_set copy ctor.
3386
16
    assert((is_small() ||
3387
16
            PolicyTraits::apply(FindElement{*this}, *iter) == iter) &&
3388
16
           "constructed value does not match the lookup key");
3389
16
  }
Unexecuted instantiation: void absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::emplace_at<std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::iterator, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&)
3390
3391
26
  iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3392
26
    return {control() + i, slot_array() + i, common().generation_ptr()};
3393
26
  }
3394
  const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
3395
    return const_cast<raw_hash_set*>(this)->iterator_at(i);
3396
  }
3397
  iterator iterator_at_ptr(std::pair<ctrl_t*, void*> ptrs)
3398
4
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
3399
4
    return {ptrs.first, to_slot(ptrs.second), common().generation_ptr()};
3400
4
  }
3401
3402
  reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
3403
3404
 private:
3405
  friend struct RawHashSetTestOnlyAccess;
3406
3407
  // The number of slots we can still fill without needing to rehash.
3408
  //
3409
  // This is stored separately due to tombstones: we do not include tombstones
3410
  // in the growth capacity, because we'd like to rehash when the table is
3411
  // largely filled with tombstones: without this, probe sequences might get
3412
  // unacceptably long without triggering a rehash. Callers can also force a
3413
  // rehash via the standard `rehash(0)`, which will recompute this value as a
3414
  // side-effect.
3415
  //
3416
  // See `CapacityToGrowth()`.
3417
  size_t growth_left() const {
3418
    return common().growth_left();
3419
  }
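A usage sketch of the rehash(0) escape hatch mentioned in the comment above, through the public flat_hash_map interface (the element counts are illustrative):

#include <cstdio>
#include "absl/container/flat_hash_map.h"

int main() {
  absl::flat_hash_map<int, int> m;
  for (int i = 0; i < 1000; ++i) m[i] = i;
  for (int i = 0; i < 999; ++i) m.erase(i);  // May leave tombstones behind.
  // Force a rehash: growth_left is recomputed and tombstones are cleaned up.
  m.rehash(0);
  std::printf("size=%zu capacity=%zu\n", m.size(), m.bucket_count());
}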
3420
3421
  GrowthInfo& growth_info() {
3422
    return common().growth_info();
3423
  }
3424
  GrowthInfo growth_info() const {
3425
    return common().growth_info();
3426
  }
3427
3428
  // Prefetch the heap-allocated memory region to resolve potential TLB and
3429
  // cache misses. This is intended to overlap with the computation of the
3430
  // hash for a key.
3431
26
  void prefetch_heap_block() const {
3432
26
    ABSL_SWISSTABLE_ASSERT(!is_soo());
3433
26
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
3434
26
    __builtin_prefetch(control(), 0, 1);
3435
26
#endif
3436
26
  }
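A minimal sketch (not Abseil code) of the pattern prefetch_heap_block() follows: issue a prefetch for the table's backing memory, then compute the key's hash so the memory access and the hashing overlap. hash_with_prefetch is an illustrative name.

#include <cstddef>
#include <cstdio>
#include <functional>
#include <string>

size_t hash_with_prefetch(const void* table_memory, const std::string& key) {
#if defined(__GNUC__)
  // Read prefetch with low temporal locality, as in prefetch_heap_block().
  __builtin_prefetch(table_memory, /*rw=*/0, /*locality=*/1);
#endif
  // The hash is computed while the prefetch is (potentially) in flight.
  return std::hash<std::string>{}(key);
}

int main() {
  char table[64] = {};
  std::printf("%zu\n", hash_with_prefetch(table, "key"));
}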
3437
3438
88
  CommonFields& common() { return settings_.template get<0>(); }
3439
460
  const CommonFields& common() const { return settings_.template get<0>(); }
3440
3441
78
  ctrl_t* control() const {
3442
78
    ABSL_SWISSTABLE_ASSERT(!is_soo());
3443
78
    return common().control();
3444
78
  }
3445
68
  slot_type* slot_array() const {
3446
68
    ABSL_SWISSTABLE_ASSERT(!is_soo());
3447
68
    return static_cast<slot_type*>(common().slot_array());
3448
68
  }
3449
0
  slot_type* soo_slot() {
3450
0
    ABSL_SWISSTABLE_ASSERT(is_soo());
3451
0
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(
3452
0
        static_cast<slot_type*>(common().soo_data()));
3453
0
  }
3454
  const slot_type* soo_slot() const {
3455
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(
3456
        const_cast<raw_hash_set*>(this)->soo_slot());
3457
  }
3458
2
  slot_type* single_slot() {
3459
2
    ABSL_SWISSTABLE_ASSERT(is_small());
3460
2
    return SooEnabled() ? soo_slot() : slot_array();
3461
2
  }
3462
  const slot_type* single_slot() const {
3463
    return const_cast<raw_hash_set*>(this)->single_slot();
3464
  }
3465
0
  void decrement_small_size() {
3466
0
    ABSL_SWISSTABLE_ASSERT(is_small());
3467
0
    SooEnabled() ? common().set_empty_soo() : common().decrement_size();
3468
0
    if (!SooEnabled()) {
3469
0
      SanitizerPoisonObject(single_slot());
3470
0
    }
3471
0
  }
3472
0
  iterator single_iterator() {
3473
0
    return {SooControl(), single_slot(), common().generation_ptr()};
3474
0
  }
3475
  const_iterator single_iterator() const {
3476
    return const_cast<raw_hash_set*>(this)->single_iterator();
3477
  }
3478
0
  HashtablezInfoHandle infoz() {
3479
0
    ABSL_SWISSTABLE_ASSERT(!is_soo());
3480
0
    return common().infoz();
3481
0
  }
3482
3483
4
  hasher& hash_ref() { return settings_.template get<1>(); }
3484
26
  const hasher& hash_ref() const { return settings_.template get<1>(); }
3485
0
  key_equal& eq_ref() { return settings_.template get<2>(); }
3486
16
  const key_equal& eq_ref() const { return settings_.template get<2>(); }
3487
16
  CharAlloc& char_alloc_ref() { return settings_.template get<3>(); }
3488
  const CharAlloc& char_alloc_ref() const {
3489
    return settings_.template get<3>();
3490
  }
3491
3492
0
  static void* get_char_alloc_ref_fn(CommonFields& common) {
3493
0
    auto* h = reinterpret_cast<raw_hash_set*>(&common);
3494
0
    return &h->char_alloc_ref();
3495
0
  }
3496
0
  static void* get_hash_ref_fn(CommonFields& common) {
3497
0
    auto* h = reinterpret_cast<raw_hash_set*>(&common);
3498
0
    // TODO(b/397453582): Remove support for const hasher.
3499
0
    return const_cast<std::remove_const_t<hasher>*>(&h->hash_ref());
3500
0
  }
3501
  static void transfer_n_slots_fn(void* set, void* dst, void* src,
3502
0
                                  size_t count) {
3503
0
    auto* src_slot = to_slot(src);
3504
0
    auto* dst_slot = to_slot(dst);
3505
0
3506
0
    auto* h = static_cast<raw_hash_set*>(set);
3507
0
    for (; count > 0; --count, ++src_slot, ++dst_slot) {
3508
0
      h->transfer(dst_slot, src_slot);
3509
0
    }
3510
0
  }
3511
3512
  // TODO(b/382423690): Try to type erase entire function or at least type erase
3513
  // by GetKey + Hash for memcpyable types.
3514
  // TODO(b/382423690): Try to type erase for big slots: sizeof(slot_type) > 16.
3515
  static void transfer_unprobed_elements_to_next_capacity_fn(
3516
      CommonFields& common, const ctrl_t* old_ctrl, void* old_slots,
3517
      void* probed_storage,
3518
      void (*encode_probed_element)(void* probed_storage, h2_t h2,
3519
0
                                    size_t source_offset, size_t h1)) {
3520
0
    const size_t new_capacity = common.capacity();
3521
0
    const size_t old_capacity = PreviousCapacity(new_capacity);
3522
0
    ABSL_ASSUME(old_capacity + 1 >= Group::kWidth);
3523
0
    ABSL_ASSUME((old_capacity + 1) % Group::kWidth == 0);
3524
3525
0
    auto* set = reinterpret_cast<raw_hash_set*>(&common);
3526
0
    slot_type* old_slots_ptr = to_slot(old_slots);
3527
0
    ctrl_t* new_ctrl = common.control();
3528
0
    slot_type* new_slots = set->slot_array();
3529
3530
0
    for (size_t group_index = 0; group_index < old_capacity;
3531
0
         group_index += Group::kWidth) {
3532
0
      GroupFullEmptyOrDeleted old_g(old_ctrl + group_index);
3533
0
      std::memset(new_ctrl + group_index, static_cast<int8_t>(ctrl_t::kEmpty),
3534
0
                  Group::kWidth);
3535
0
      std::memset(new_ctrl + group_index + old_capacity + 1,
3536
0
                  static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
3537
      // TODO(b/382423690): try to type erase everything outside of the loop.
3538
      // We will share a lot of code at the expense of one function call per group.
3539
0
      for (auto in_fixed_group_index : old_g.MaskFull()) {
3540
0
        size_t old_index = group_index + in_fixed_group_index;
3541
0
        slot_type* old_slot = old_slots_ptr + old_index;
3542
        // TODO(b/382423690): try to avoid entire hash calculation since we need
3543
        // only one new bit of h1.
3544
0
        size_t hash = set->hash_of(old_slot);
3545
0
        size_t h1 = H1(hash);
3546
0
        h2_t h2 = H2(hash);
3547
0
        size_t new_index = TryFindNewIndexWithoutProbing(
3548
0
            h1, old_index, old_capacity, new_ctrl, new_capacity);
3549
        // Note that encode_probed_element is allowed to use old_ctrl buffer
3550
        // up to and including old_index.
3551
0
        if (ABSL_PREDICT_FALSE(new_index == kProbedElementIndexSentinel)) {
3552
0
          encode_probed_element(probed_storage, h2, old_index, h1);
3553
0
          continue;
3554
0
        }
3555
0
        ABSL_SWISSTABLE_ASSERT((new_index & old_capacity) <= old_index);
3556
0
        ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_index]));
3557
0
        new_ctrl[new_index] = static_cast<ctrl_t>(h2);
3558
0
        auto* new_slot = new_slots + new_index;
3559
0
        SanitizerUnpoisonMemoryRegion(new_slot, sizeof(slot_type));
3560
0
        set->transfer(new_slot, old_slot);
3561
0
        SanitizerPoisonMemoryRegion(old_slot, sizeof(slot_type));
3562
0
      }
3563
0
    }
3564
0
  }
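A hedged illustration of the index arithmetic above, assuming probe starts are H1 masked by the capacity (which has the form 2^k - 1): doubling the table reveals exactly one new H1 bit, so an unprobed element at old offset i can only land at i or i + old_capacity + 1, which is what the assertion (new_index & old_capacity) <= old_index relies on. new_home below is an illustrative helper, not Abseil's TryFindNewIndexWithoutProbing.

#include <cstddef>
#include <cstdio>

// Picks which half of the doubled table an unprobed element moves to, based
// on the newly revealed H1 bit (old_capacity + 1 is the new power-of-two bit).
size_t new_home(size_t h1, size_t old_index, size_t old_capacity) {
  return old_index + (h1 & (old_capacity + 1));
}

int main() {
  const size_t old_capacity = 15;  // 2^4 - 1
  // 0x5A has bit 4 (value 16) set, so the element moves to 3 + 16 = 19.
  std::printf("%zu\n", new_home(/*h1=*/0x5A, /*old_index=*/3, old_capacity));
}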
3565
3566
16
  static const PolicyFunctions& GetPolicyFunctions() {
3567
16
    static_assert(sizeof(slot_type) <= (std::numeric_limits<uint32_t>::max)(),
3568
16
                  "Slot size is too large. Use std::unique_ptr for value type "
3569
16
                  "or use absl::node_hash_{map,set}.");
3570
16
    static_assert(alignof(slot_type) <=
3571
16
                  size_t{(std::numeric_limits<uint16_t>::max)()});
3572
16
    static_assert(sizeof(key_type) <=
3573
16
                  size_t{(std::numeric_limits<uint32_t>::max)()});
3574
16
    static_assert(sizeof(value_type) <=
3575
16
                  size_t{(std::numeric_limits<uint32_t>::max)()});
3576
16
    static constexpr size_t kBackingArrayAlignment =
3577
16
        BackingArrayAlignment(alignof(slot_type));
3578
16
    static constexpr PolicyFunctions value = {
3579
16
        static_cast<uint32_t>(sizeof(key_type)),
3580
16
        static_cast<uint32_t>(sizeof(value_type)),
3581
16
        static_cast<uint32_t>(sizeof(slot_type)),
3582
16
        static_cast<uint16_t>(alignof(slot_type)), SooEnabled(),
3583
16
        ShouldSampleHashtablezInfoForAlloc<CharAlloc>(),
3584
        // TODO(b/328722020): try to type erase
3585
        // for standard layout and alignof(Hash) <= alignof(CommonFields).
3586
16
        std::is_empty_v<hasher> ? &GetRefForEmptyClass
3587
16
                                : &raw_hash_set::get_hash_ref_fn,
3588
16
        PolicyTraits::template get_hash_slot_fn<hasher, kIsDefaultHash>(),
3589
16
        PolicyTraits::transfer_uses_memcpy()
3590
16
            ? TransferNRelocatable<sizeof(slot_type)>
3591
16
            : &raw_hash_set::transfer_n_slots_fn,
3592
16
        std::is_empty_v<Alloc> ? &GetRefForEmptyClass
3593
16
                               : &raw_hash_set::get_char_alloc_ref_fn,
3594
16
        &AllocateBackingArray<kBackingArrayAlignment, CharAlloc>,
3595
16
        &DeallocateBackingArray<kBackingArrayAlignment, CharAlloc>,
3596
16
        &raw_hash_set::transfer_unprobed_elements_to_next_capacity_fn};
3597
16
    return value;
3598
16
  }
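GetPolicyFunctions() builds one static table of sizes and function pointers per instantiation so that type-agnostic grow/transfer logic can live in raw_hash_set.cc and dispatch through it. A self-contained sketch of that pattern (not Abseil's PolicyFunctions; names are illustrative):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Type-erased per-instantiation policy: sizes plus function pointers.
struct Policy {
  uint32_t slot_size;
  void (*transfer)(void* dst, const void* src);
};

template <class T>
void MemcpyTransfer(void* dst, const void* src) {
  std::memcpy(dst, src, sizeof(T));  // Fine for trivially copyable T.
}

template <class T>
const Policy& GetPolicy() {
  static constexpr Policy value = {sizeof(T), &MemcpyTransfer<T>};
  return value;
}

// A shared, non-template routine that only needs the erased policy.
void TransferOne(const Policy& p, void* dst, const void* src) {
  p.transfer(dst, src);
}

int main() {
  int a = 7, b = 0;
  TransferOne(GetPolicy<int>(), &b, &a);
  std::printf("slot_size=%u b=%d\n", GetPolicy<int>().slot_size, b);
}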
3599
3600
  // Bundle together CommonFields plus other objects which might be empty.
3601
  // CompressedTuple will ensure that sizeof is not affected by any of the empty
3602
  // fields that occur after CommonFields.
3603
  absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
3604
                                            CharAlloc>
3605
      settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
3606
                key_equal{}, CharAlloc{}};
3607
};
3608
3609
// Friend access for free functions in raw_hash_set.h.
3610
struct HashtableFreeFunctionsAccess {
3611
  template <class Predicate, typename Set>
3612
  static typename Set::size_type EraseIf(Predicate& pred, Set* c) {
3613
    if (c->empty()) {
3614
      return 0;
3615
    }
3616
    if (c->is_small()) {
3617
      auto it = c->single_iterator();
3618
      if (!pred(*it)) {
3619
        ABSL_SWISSTABLE_ASSERT(c->size() == 1 &&
3620
                               "hash table was modified unexpectedly");
3621
        return 0;
3622
      }
3623
      c->destroy(it.slot());
3624
      c->erase_meta_only_small();
3625
      return 1;
3626
    }
3627
    [[maybe_unused]] const size_t original_size_for_assert = c->size();
3628
    size_t num_deleted = 0;
3629
    using SlotType = typename Set::slot_type;
3630
    IterateOverFullSlots(
3631
        c->common(), sizeof(SlotType),
3632
        [&](const ctrl_t* ctrl, void* slot_void) {
3633
          auto* slot = static_cast<SlotType*>(slot_void);
3634
          if (pred(Set::PolicyTraits::element(slot))) {
3635
            c->destroy(slot);
3636
            EraseMetaOnlyLarge(c->common(), ctrl, sizeof(*slot));
3637
            ++num_deleted;
3638
          }
3639
        });
3640
    // NOTE: IterateOverFullSlots allows removal of the current element, so we
3641
    // additionally verify the size here.
3642
    ABSL_SWISSTABLE_ASSERT(original_size_for_assert - num_deleted ==
3643
                               c->size() &&
3644
                           "hash table was modified unexpectedly");
3645
    return num_deleted;
3646
  }
3647
3648
  template <class Callback, typename Set>
3649
  static void ForEach(Callback& cb, Set* c) {
3650
    if (c->empty()) {
3651
      return;
3652
    }
3653
    if (c->is_small()) {
3654
      cb(*c->single_iterator());
3655
      return;
3656
    }
3657
    using SlotType = typename Set::slot_type;
3658
    using ElementTypeWithConstness = decltype(*c->begin());
3659
    IterateOverFullSlots(
3660
        c->common(), sizeof(SlotType), [&cb](const ctrl_t*, void* slot) {
3661
          ElementTypeWithConstness& element =
3662
              Set::PolicyTraits::element(static_cast<SlotType*>(slot));
3663
          cb(element);
3664
        });
3665
  }
3666
};
3667
3668
// Erases all elements that satisfy the predicate `pred` from the container `c`.
3669
template <typename P, typename H, typename E, typename A, typename Predicate>
3670
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
3671
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
3672
  return HashtableFreeFunctionsAccess::EraseIf(pred, c);
3673
}
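The public absl::erase_if overloads for the Swiss-table containers dispatch to this function; a usage sketch through that public interface:

#include <cstdio>
#include "absl/container/flat_hash_map.h"

int main() {
  absl::flat_hash_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}};
  // Erase the odd keys; absl::erase_if returns the number of erased elements.
  auto erased =
      absl::erase_if(m, [](const auto& kv) { return kv.first % 2 != 0; });
  std::printf("erased=%zu remaining=%zu\n", static_cast<size_t>(erased),
              m.size());
}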
3674
3675
// Calls `cb` for all elements in the container `c`.
3676
template <typename P, typename H, typename E, typename A, typename Callback>
3677
void ForEach(Callback& cb, raw_hash_set<P, H, E, A>* c) {
3678
  return HashtableFreeFunctionsAccess::ForEach(cb, c);
3679
}
3680
template <typename P, typename H, typename E, typename A, typename Callback>
3681
void ForEach(Callback& cb, const raw_hash_set<P, H, E, A>* c) {
3682
  return HashtableFreeFunctionsAccess::ForEach(cb, c);
3683
}
3684
3685
namespace hashtable_debug_internal {
3686
template <typename Set>
3687
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
3688
  using Traits = typename Set::PolicyTraits;
3689
  using Slot = typename Traits::slot_type;
3690
3691
  constexpr static bool kIsDefaultHash = Set::kIsDefaultHash;
3692
3693
  static size_t GetNumProbes(const Set& set,
3694
                             const typename Set::key_type& key) {
3695
    if (set.is_small()) return 0;
3696
    size_t num_probes = 0;
3697
    const size_t hash = set.hash_of(key);
3698
    auto seq = probe(set.common(), hash);
3699
    const h2_t h2 = H2(hash);
3700
    const ctrl_t* ctrl = set.control();
3701
    while (true) {
3702
      container_internal::Group g{ctrl + seq.offset()};
3703
      for (uint32_t i : g.Match(h2)) {
3704
        if (set.equal_to(key, set.slot_array() + seq.offset(i)))
3705
          return num_probes;
3706
        ++num_probes;
3707
      }
3708
      if (g.MaskEmpty()) return num_probes;
3709
      seq.next();
3710
      ++num_probes;
3711
    }
3712
  }
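GetNumProbes() replays the same group-by-group probe sequence as a lookup, counting candidates examined. A hedged usage sketch, assuming the test-only helper GetHashtableDebugNumProbes declared in absl/container/internal/hashtable_debug.h forwards to this accessor:

#include <cstdio>
#include "absl/container/flat_hash_set.h"
#include "absl/container/internal/hashtable_debug.h"

int main() {
  absl::flat_hash_set<int> s;
  for (int i = 0; i < 100; ++i) s.insert(i);
  size_t probes =
      absl::container_internal::GetHashtableDebugNumProbes(s, /*key=*/42);
  std::printf("probes for key 42: %zu\n", probes);
}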
3713
3714
  static size_t AllocatedByteSize(const Set& c) {
3715
    size_t capacity = c.capacity();
3716
    if (capacity == 0) return 0;
3717
    size_t m =
3718
        c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot));
3719
3720
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
3721
    if (per_slot != ~size_t{}) {
3722
      m += per_slot * c.size();
3723
    } else {
3724
      for (auto it = c.begin(); it != c.end(); ++it) {
3725
        m += Traits::space_used(it.slot());
3726
      }
3727
    }
3728
    return m;
3729
  }
3730
};
3731
3732
}  // namespace hashtable_debug_internal
3733
3734
// Extern template instantiations reduce binary size and linker input size.
3735
// Function definition is in raw_hash_set.cc.
3736
extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
3737
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
3738
    bool);
3739
extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<1, true>(
3740
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
3741
    bool);
3742
extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<4, true>(
3743
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
3744
    bool);
3745
extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<8, true>(
3746
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
3747
    bool);
3748
#if UINTPTR_MAX == UINT64_MAX
3749
extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<16, true>(
3750
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
3751
    bool);
3752
#endif
3753
3754
}  // namespace container_internal
3755
ABSL_NAMESPACE_END
3756
}  // namespace absl
3757
3758
#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
3759
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
3760
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN
3761
#undef ABSL_SWISSTABLE_ASSERT
3762
3763
#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_