Coverage Report

Created: 2024-09-23 06:29

/src/abseil-cpp/absl/container/internal/raw_hash_set.h
Line
Count
Source
1
// Copyright 2018 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// An open-addressing
16
// hashtable with quadratic probing.
17
//
18
// This is a low level hashtable on top of which different interfaces can be
19
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
20
//
21
// The table interface is similar to that of std::unordered_set. Notable
22
// differences are that most member functions support heterogeneous keys when
23
// BOTH the hash and eq functions are marked as transparent. They do so by
24
// providing a typedef called `is_transparent`.
25
//
26
// When heterogeneous lookup is enabled, functions that take key_type act as if
27
// they have an overload set like:
28
//
29
//   iterator find(const key_type& key);
30
//   template <class K>
31
//   iterator find(const K& key);
32
//
33
//   size_type erase(const key_type& key);
34
//   template <class K>
35
//   size_type erase(const K& key);
36
//
37
//   std::pair<iterator, iterator> equal_range(const key_type& key);
38
//   template <class K>
39
//   std::pair<iterator, iterator> equal_range(const K& key);
40
//
41
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
42
// exist.
43
//
44
// find() also supports passing the hash explicitly:
45
//
46
//   iterator find(const key_type& key, size_t hash);
47
//   template <class U>
48
//   iterator find(const U& key, size_t hash);
49
//
50
// In addition the pointer to element and iterator stability guarantees are
51
// weaker: all iterators and pointers are invalidated after a new element is
52
// inserted.
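
To make the heterogeneous-lookup contract above concrete, here is a small usage
sketch (hypothetical, not part of this header: it uses absl::flat_hash_set,
which is layered on this table, and the functor names StringHash/StringEq are
illustrative):

#include <cstddef>
#include <string>
#include <string_view>
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"

// Both functors expose `is_transparent`, so the templated find(const K&)
// overload described above participates and no temporary std::string is built.
struct StringHash {
  using is_transparent = void;
  std::size_t operator()(std::string_view v) const {
    return absl::Hash<std::string_view>{}(v);
  }
};
struct StringEq {
  using is_transparent = void;
  bool operator()(std::string_view a, std::string_view b) const { return a == b; }
};

void HeterogeneousLookupSketch() {
  absl::flat_hash_set<std::string, StringHash, StringEq> s = {"key"};
  const bool found = s.find(std::string_view("key")) != s.end();
  (void)found;
}
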
53
//
54
// IMPLEMENTATION DETAILS
55
//
56
// # Table Layout
57
//
58
// A raw_hash_set's backing array consists of control bytes followed by slots
59
// that may or may not contain objects.
60
//
61
// The layout of the backing array, for `capacity` slots, is thus, as a
62
// pseudo-struct:
63
//
64
//   struct BackingArray {
65
//     // Sampling handler. This field isn't present when the sampling is
66
//     // disabled or this allocation hasn't been selected for sampling.
67
//     HashtablezInfoHandle infoz_;
68
//     // The number of elements we can insert before growing the capacity.
69
//     size_t growth_left;
70
//     // Control bytes for the "real" slots.
71
//     ctrl_t ctrl[capacity];
72
//     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
73
//     // stop and serves no other purpose.
74
//     ctrl_t sentinel;
75
//     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
76
//     // that if a probe sequence picks a value near the end of `ctrl`,
77
//     // `Group` will have valid control bytes to look at.
78
//     ctrl_t clones[kWidth - 1];
79
//     // The actual slot data.
80
//     slot_type slots[capacity];
81
//   };
82
//
83
// The length of this array is computed by `RawHashSetLayout::alloc_size` below.
84
//
85
// Control bytes (`ctrl_t`) are bytes (collected into groups of a
86
// platform-specific size) that define the state of the corresponding slot in
87
// the slot array. Group manipulation is tightly optimized to be as efficient
88
// as possible: SSE and friends on x86, clever bit operations on other arches.
89
//
90
//      Group 1         Group 2        Group 3
91
// +---------------+---------------+---------------+
92
// | | | | | | | | | | | | | | | | | | | | | | | | |
93
// +---------------+---------------+---------------+
94
//
95
// Each control byte is either a special value for empty slots, deleted slots
96
// (sometimes called *tombstones*), and a special end-of-table marker used by
97
// iterators, or, if occupied, seven bits (H2) from the hash of the value in the
98
// corresponding slot.
99
//
100
// Storing control bytes in a separate array also has beneficial cache effects,
101
// since more logical slots will fit into a cache line.
102
//
103
// # Small Object Optimization (SOO)
104
//
105
// When the size/alignment of the value_type and the capacity of the table are
106
// small, we enable small object optimization and store the values inline in
107
// the raw_hash_set object. This optimization allows us to avoid
108
// allocation/deallocation as well as cache/dTLB misses.
109
//
110
// # Hashing
111
//
112
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
113
// `H1(hash(x))` is an index into `slots`, and essentially the starting point
114
// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
115
// objects that cannot possibly be the one we are looking for.
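
A minimal sketch of that split (illustrative only; the real H1(), defined later
in this file, additionally mixes a per-table salt into the result):

#include <cstddef>
#include <cstdint>

// H1: the bits above the low 7, used to pick the starting point of the probe.
inline std::size_t SketchH1(std::size_t hash) { return hash >> 7; }
// H2: the low 7 bits, stored in the control byte of a full slot.
inline std::uint8_t SketchH2(std::size_t hash) {
  return static_cast<std::uint8_t>(hash & 0x7F);
}
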
116
//
117
// # Table operations.
118
//
119
// The key operations are `insert`, `find`, and `erase`.
120
//
121
// Since `insert` and `erase` are implemented in terms of `find`, we describe
122
// `find` first. To `find` a value `x`, we compute `hash(x)`. From
123
// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
124
// group of slots in some interesting order.
125
//
126
// We now walk through these indices. At each index, we select the entire group
127
// starting with that index and extract potential candidates: occupied slots
128
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
129
// group, we stop and return an error. Each candidate slot `y` is compared with
130
// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
131
// next probe index. Tombstones effectively behave like full slots that never
132
// match the value we're looking for.
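
The following self-contained toy mirrors the search just described, probing one
slot at a time instead of whole SIMD groups (illustrative only, not the real
implementation; the control-byte encoding matches the ctrl_t values defined
later in this file: -128 empty, -2 deleted, otherwise the slot's 7-bit H2 tag):

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

std::optional<std::size_t> ToyFind(const std::vector<std::int8_t>& ctrl,
                                   const std::vector<std::uint64_t>& slots,
                                   std::uint64_t key, std::size_t hash) {
  const std::size_t mask = ctrl.size() - 1;      // ctrl.size() is a power of two
  const std::int8_t h2 = static_cast<std::int8_t>(hash & 0x7F);
  std::size_t offset = (hash >> 7) & mask;       // H1 picks the starting slot
  for (std::size_t step = 1; step <= ctrl.size(); ++step) {
    if (ctrl[offset] == h2 && slots[offset] == key) return offset;  // confirmed with ==
    if (ctrl[offset] == -128) return std::nullopt;  // empty slot: key is absent
    // A tombstone (-2) falls through here: it acts like a full slot that never matches.
    offset = (offset + step) & mask;             // triangular probing
  }
  return std::nullopt;  // toy table contained no empty slot at all
}
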
133
//
134
// The `H2` bits ensure when we compare a slot to an object with `==`, we are
135
// likely to have actually found the object.  That is, the chance is low that
136
// `==` is called and returns `false`.  Thus, when we search for an object, we
137
// are unlikely to call `==` many times.  This likelihood can be analyzed as
138
// follows (assuming that H2 is a random enough hash function).
139
//
140
// Let's assume that there are `k` "wrong" objects that must be examined in a
141
// probe sequence.  For example, when doing a `find` on an object that is in the
142
// table, `k` is the number of objects between the start of the probe sequence
143
// and the final found object (not including the final found object).  The
144
// expected number of objects with an H2 match is then `k/128`.  Measurements
145
// and analysis indicate that even at high load factors, `k` is less than 32,
146
// meaning that the number of "false positive" comparisons we must perform is
147
// less than 1/8 per `find`.
148
149
// `insert` is implemented in terms of `unchecked_insert`, which inserts a
150
// value presumed to not be in the table (violating this requirement will cause
151
// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
152
// it, we construct a `probe_seq` once again, and use it to find the first
153
// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
154
// first such slot in the group and mark it as full with `x`'s H2.
155
//
156
// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
157
// perform a `find` to see if it's already present; if it is, we're done. If
158
// it's not, we may decide the table is getting overcrowded (i.e. the load
159
// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
160
// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
161
// each element of the table into the new array (we know that no insertion here
162
// will insert an already-present value), and discard the old backing array. At
163
// this point, we may `unchecked_insert` the value `x`.
164
//
165
// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
166
// presents a viable, initialized slot pointee to the caller.
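
A toy companion to ToyFind above (same includes and control-byte encoding),
sketching the unchecked-insert step: the caller must already know the key is
absent, and the routine claims the first empty or deleted slot on the probe
sequence and marks it full with the key's H2 tag:

std::size_t ToyUncheckedInsert(std::vector<std::int8_t>& ctrl,
                               std::vector<std::uint64_t>& slots,
                               std::uint64_t key, std::size_t hash) {
  const std::size_t mask = ctrl.size() - 1;
  std::size_t offset = (hash >> 7) & mask;
  for (std::size_t step = 1;; ++step) {
    if (ctrl[offset] == -128 || ctrl[offset] == -2) {        // empty or tombstone
      ctrl[offset] = static_cast<std::int8_t>(hash & 0x7F);  // record H2
      slots[offset] = key;
      return offset;
    }
    offset = (offset + step) & mask;
  }
}
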
167
//
168
// `erase` is implemented in terms of `erase_at`, which takes an index to a
169
// slot. Given an offset, we simply create a tombstone and destroy its contents.
170
// If we can prove that the slot would not appear in a probe sequence, we can
171
// mark the slot as empty, instead. We can prove this by observing that if a
172
// group has any empty slots, it has never been full (assuming we never create
173
// an empty slot in a group with no empties, which this heuristic guarantees we
174
// never do) and find would stop at this group anyway (since it does not probe
175
// beyond groups with empties).
176
//
177
// `erase` is `erase_at` composed with `find`: if we
178
// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
179
// slot.
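
And the matching toy erase step (same setup as above). The group-has-an-empty
shortcut described earlier degenerates with one-slot groups, so this sketch
always leaves a tombstone:

void ToyEraseAt(std::vector<std::int8_t>& ctrl, std::size_t offset) {
  // The real erase_at() may mark the slot empty when its group still contains
  // an empty slot; this toy conservatively always writes the tombstone value.
  ctrl[offset] = -2;
}
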
180
//
181
// To iterate, we simply traverse the array, skipping empty and deleted slots
182
// and stopping when we hit a `kSentinel`.
183
184
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
185
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
186
187
#include <algorithm>
188
#include <cassert>
189
#include <cmath>
190
#include <cstddef>
191
#include <cstdint>
192
#include <cstring>
193
#include <functional>
194
#include <initializer_list>
195
#include <iterator>
196
#include <limits>
197
#include <memory>
198
#include <tuple>
199
#include <type_traits>
200
#include <utility>
201
202
#include "absl/base/attributes.h"
203
#include "absl/base/config.h"
204
#include "absl/base/internal/endian.h"
205
#include "absl/base/internal/raw_logging.h"
206
#include "absl/base/macros.h"
207
#include "absl/base/optimization.h"
208
#include "absl/base/options.h"
209
#include "absl/base/port.h"
210
#include "absl/base/prefetch.h"
211
#include "absl/container/internal/common.h"  // IWYU pragma: export // for node_handle
212
#include "absl/container/internal/compressed_tuple.h"
213
#include "absl/container/internal/container_memory.h"
214
#include "absl/container/internal/hash_function_defaults.h"
215
#include "absl/container/internal/hash_policy_traits.h"
216
#include "absl/container/internal/hashtable_debug_hooks.h"
217
#include "absl/container/internal/hashtablez_sampler.h"
218
#include "absl/hash/hash.h"
219
#include "absl/memory/memory.h"
220
#include "absl/meta/type_traits.h"
221
#include "absl/numeric/bits.h"
222
#include "absl/utility/utility.h"
223
224
#ifdef ABSL_INTERNAL_HAVE_SSE2
225
#include <emmintrin.h>
226
#endif
227
228
#ifdef ABSL_INTERNAL_HAVE_SSSE3
229
#include <tmmintrin.h>
230
#endif
231
232
#ifdef _MSC_VER
233
#include <intrin.h>
234
#endif
235
236
#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
237
#include <arm_neon.h>
238
#endif
239
240
namespace absl {
241
ABSL_NAMESPACE_BEGIN
242
namespace container_internal {
243
244
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
245
#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
246
#elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) ||   \
247
       defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
248
       defined(ABSL_HAVE_MEMORY_SANITIZER)) &&   \
249
    !defined(NDEBUG_SANITIZER)  // If defined, performance is important.
250
// When compiled in sanitizer mode, we add generation integers to the backing
251
// array and iterators. In the backing array, we store the generation between
252
// the control bytes and the slots. When iterators are dereferenced, we assert
253
// that the container has not been mutated in a way that could cause iterator
254
// invalidation since the iterator was initialized.
255
#define ABSL_SWISSTABLE_ENABLE_GENERATIONS
256
#endif
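
For intuition, the kind of misuse the generation mechanism described above is
designed to catch (a hypothetical sketch; in sanitizer builds with generations
enabled, the final dereference would typically trip the iterator-validity
assertion instead of silently reading a stale slot):

#include "absl/container/flat_hash_set.h"

void GenerationCheckSketch() {
  absl::flat_hash_set<int> s = {1, 2, 3};
  auto it = s.find(2);
  s.insert(4);        // the insertion may rehash and bumps the table's generation
  const int v = *it;  // invalidated iterator: caught when generations are enabled
  (void)v;
}
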
257
258
// We use uint8_t so we don't need to worry about padding.
259
using GenerationType = uint8_t;
260
261
// A sentinel value for empty generations. Using 0 makes it easy to constexpr
262
// initialize an array of this value.
263
8
constexpr GenerationType SentinelEmptyGeneration() { return 0; }
264
265
8
constexpr GenerationType NextGeneration(GenerationType generation) {
266
8
  return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
267
8
}
268
269
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
270
constexpr bool SwisstableGenerationsEnabled() { return true; }
271
constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
272
#else
273
0
constexpr bool SwisstableGenerationsEnabled() { return false; }
274
14
constexpr size_t NumGenerationBytes() { return 0; }
275
#endif
276
277
template <typename AllocType>
278
void SwapAlloc(AllocType& lhs, AllocType& rhs,
279
               std::true_type /* propagate_on_container_swap */) {
280
  using std::swap;
281
  swap(lhs, rhs);
282
}
283
template <typename AllocType>
284
void SwapAlloc(AllocType& lhs, AllocType& rhs,
285
               std::false_type /* propagate_on_container_swap */) {
286
  (void)lhs;
287
  (void)rhs;
288
  assert(lhs == rhs &&
289
         "It's UB to call swap with unequal non-propagating allocators.");
290
}
291
292
template <typename AllocType>
293
void CopyAlloc(AllocType& lhs, AllocType& rhs,
294
               std::true_type /* propagate_alloc */) {
295
  lhs = rhs;
296
}
297
template <typename AllocType>
298
void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}
299
300
// The state for a probe sequence.
301
//
302
// Currently, the sequence is a triangular progression of the form
303
//
304
//   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
305
//
306
// The use of `Width` ensures that each probe step does not overlap groups;
307
// the sequence effectively outputs the addresses of *groups* (although not
308
// necessarily aligned to any boundary). The `Group` machinery allows us
309
// to check an entire group with minimal branching.
310
//
311
// Wrapping around at `mask + 1` is important, but not for the obvious reason.
312
// As described above, the first few entries of the control byte array
313
// are mirrored at the end of the array, which `Group` will find and use
314
// for selecting candidates. However, when those candidates' slots are
315
// actually inspected, there are no corresponding slots for the cloned bytes,
316
// so we need to make sure we've treated those offsets as "wrapping around".
317
//
318
// It turns out that this probe sequence visits every group exactly once if the
319
// number of groups is a power of two, since (i^2+i)/2 is a bijection in
320
// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
321
template <size_t Width>
322
class probe_seq {
323
 public:
324
  // Creates a new probe sequence using `hash` as the initial value of the
325
  // sequence and `mask` (usually the capacity of the table) as the mask to
326
  // apply to each value in the progression.
327
40
  probe_seq(size_t hash, size_t mask) {
328
40
    assert(((mask + 1) & mask) == 0 && "not a mask");
329
40
    mask_ = mask;
330
40
    offset_ = hash & mask_;
331
40
  }
332
333
  // The offset within the table, i.e., the value `p(i)` above.
334
40
  size_t offset() const { return offset_; }
335
51
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }
336
337
0
  void next() {
338
0
    index_ += Width;
339
0
    offset_ += index_;
340
0
    offset_ &= mask_;
341
0
  }
342
  // 0-based probe index, a multiple of `Width`.
343
16
  size_t index() const { return index_; }
344
345
 private:
346
  size_t mask_;
347
  size_t offset_;
348
  size_t index_ = 0;
349
};
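
A hypothetical driver (not part of this header) that prints the first few
offsets produced by the triangular progression documented above, for Width = 16
and mask = 63; the four printed offsets start each of the four 16-wide probe
windows exactly once:

#include <cstdio>
#include "absl/container/internal/raw_hash_set.h"

void ProbeSeqDemo() {
  absl::container_internal::probe_seq<16> seq(/*hash=*/27, /*mask=*/63);
  for (int i = 0; i < 4; ++i) {
    std::printf("p(%d) = %zu\n", i, seq.offset());  // prints 27, 43, 11, 59
    seq.next();
  }
}
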
350
351
template <class ContainerKey, class Hash, class Eq>
352
struct RequireUsableKey {
353
  template <class PassedKey, class... Args>
354
  std::pair<
355
      decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
356
      decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
357
                                         std::declval<const PassedKey&>()))>*
358
  operator()(const PassedKey&, const Args&...) const;
359
};
360
361
template <class E, class Policy, class Hash, class Eq, class... Ts>
362
struct IsDecomposable : std::false_type {};
363
364
template <class Policy, class Hash, class Eq, class... Ts>
365
struct IsDecomposable<
366
    absl::void_t<decltype(Policy::apply(
367
        RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
368
        std::declval<Ts>()...))>,
369
    Policy, Hash, Eq, Ts...> : std::true_type {};
370
371
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
372
template <class T>
373
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
374
  using std::swap;
375
  return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
376
}
377
template <class T>
378
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
379
  return false;
380
}
381
382
template <typename T>
383
35
uint32_t TrailingZeros(T x) {
384
35
  ABSL_ASSUME(x != 0);
385
35
  return static_cast<uint32_t>(countr_zero(x));
386
35
}
unsigned int absl::container_internal::TrailingZeros<unsigned short>(unsigned short)
Line
Count
Source
383
35
uint32_t TrailingZeros(T x) {
384
35
  ABSL_ASSUME(x != 0);
385
35
  return static_cast<uint32_t>(countr_zero(x));
386
35
}
Unexecuted instantiation: unsigned int absl::container_internal::TrailingZeros<unsigned int>(unsigned int)
Unexecuted instantiation: unsigned int absl::container_internal::TrailingZeros<unsigned long>(unsigned long)
387
388
// An 8-byte bitmask with the most significant bit set in every byte.
389
constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
390
391
// An abstract bitmask, such as that emitted by a SIMD instruction.
392
//
393
// Specifically, this type implements a simple bitset whose representation is
394
// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
395
// of abstract bits in the bitset, while `Shift` is the log-base-two of the
396
// width of an abstract bit in the representation.
397
// This mask provides operations for any number of real bits set in an abstract
398
// bit. To add iteration on top of that, implementation must guarantee no more
399
// than the most significant real bit is set in a set abstract bit.
400
template <class T, int SignificantBits, int Shift = 0>
401
class NonIterableBitMask {
402
 public:
403
80
  explicit NonIterableBitMask(T mask) : mask_(mask) {}
404
405
16
  explicit operator bool() const { return this->mask_ != 0; }
406
407
  // Returns the index of the lowest *abstract* bit set in `self`.
408
35
  uint32_t LowestBitSet() const {
409
35
    return container_internal::TrailingZeros(mask_) >> Shift;
410
35
  }
absl::container_internal::NonIterableBitMask<unsigned short, 16, 0>::LowestBitSet() const
Line
Count
Source
408
35
  uint32_t LowestBitSet() const {
409
35
    return container_internal::TrailingZeros(mask_) >> Shift;
410
35
  }
Unexecuted instantiation: absl::container_internal::NonIterableBitMask<unsigned long, 8, 3>::LowestBitSet() const
411
412
  // Returns the index of the highest *abstract* bit set in `self`.
413
0
  uint32_t HighestBitSet() const {
414
0
    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
415
0
  }
416
417
  // Returns the number of trailing zero *abstract* bits.
418
0
  uint32_t TrailingZeros() const {
419
0
    return container_internal::TrailingZeros(mask_) >> Shift;
420
0
  }
421
422
  // Returns the number of leading zero *abstract* bits.
423
0
  uint32_t LeadingZeros() const {
424
0
    constexpr int total_significant_bits = SignificantBits << Shift;
425
0
    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
426
0
    return static_cast<uint32_t>(
427
0
               countl_zero(static_cast<T>(mask_ << extra_bits))) >>
428
0
           Shift;
429
0
  }
430
431
  T mask_;
432
};
433
434
// A mask that can be iterated over.
435
//
436
// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
437
// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
438
// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
439
// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
440
// If NullifyBitsOnIteration is true (only allowed for Shift == 3),
441
// a non-zero abstract bit is allowed to have additional bits set
442
// (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
443
//
444
// For example:
445
//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
446
//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
447
template <class T, int SignificantBits, int Shift = 0,
448
          bool NullifyBitsOnIteration = false>
449
class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
450
  using Base = NonIterableBitMask<T, SignificantBits, Shift>;
451
  static_assert(std::is_unsigned<T>::value, "");
452
  static_assert(Shift == 0 || Shift == 3, "");
453
  static_assert(!NullifyBitsOnIteration || Shift == 3, "");
454
455
 public:
456
64
  explicit BitMask(T mask) : Base(mask) {
457
64
    if (Shift == 3 && !NullifyBitsOnIteration) {
458
0
      assert(this->mask_ == (this->mask_ & kMsbs8Bytes));
459
0
    }
460
64
  }
461
  // BitMask is an iterator over the indices of its abstract bits.
462
  using value_type = int;
463
  using iterator = BitMask;
464
  using const_iterator = BitMask;
465
466
3
  BitMask& operator++() {
467
3
    if (Shift == 3 && NullifyBitsOnIteration) {
468
0
      this->mask_ &= kMsbs8Bytes;
469
0
    }
470
3
    this->mask_ &= (this->mask_ - 1);
471
3
    return *this;
472
3
  }
absl::container_internal::BitMask<unsigned short, 16, 0, false>::operator++()
Line
Count
Source
466
3
  BitMask& operator++() {
467
3
    if (Shift == 3 && NullifyBitsOnIteration) {
468
0
      this->mask_ &= kMsbs8Bytes;
469
0
    }
470
3
    this->mask_ &= (this->mask_ - 1);
471
3
    return *this;
472
3
  }
Unexecuted instantiation: absl::container_internal::BitMask<unsigned long, 8, 3, false>::operator++()
473
474
19
  uint32_t operator*() const { return Base::LowestBitSet(); }
absl::container_internal::BitMask<unsigned short, 16, 0, false>::operator*() const
Line
Count
Source
474
19
  uint32_t operator*() const { return Base::LowestBitSet(); }
Unexecuted instantiation: absl::container_internal::BitMask<unsigned long, 8, 3, false>::operator*() const
475
476
32
  BitMask begin() const { return *this; }
absl::container_internal::BitMask<unsigned short, 16, 0, false>::begin() const
Line
Count
Source
476
32
  BitMask begin() const { return *this; }
Unexecuted instantiation: absl::container_internal::BitMask<unsigned long, 8, 3, false>::begin() const
477
32
  BitMask end() const { return BitMask(0); }
absl::container_internal::BitMask<unsigned short, 16, 0, false>::end() const
Line
Count
Source
477
32
  BitMask end() const { return BitMask(0); }
Unexecuted instantiation: absl::container_internal::BitMask<unsigned long, 8, 3, false>::end() const
478
479
 private:
480
  friend bool operator==(const BitMask& a, const BitMask& b) {
481
    return a.mask_ == b.mask_;
482
  }
483
35
  friend bool operator!=(const BitMask& a, const BitMask& b) {
484
35
    return a.mask_ != b.mask_;
485
35
  }
absl::container_internal::operator!=(absl::container_internal::BitMask<unsigned short, 16, 0, false> const&, absl::container_internal::BitMask<unsigned short, 16, 0, false> const&)
Line
Count
Source
483
35
  friend bool operator!=(const BitMask& a, const BitMask& b) {
484
35
    return a.mask_ != b.mask_;
485
35
  }
Unexecuted instantiation: absl::container_internal::operator!=(absl::container_internal::BitMask<unsigned long, 8, 3, false> const&, absl::container_internal::BitMask<unsigned long, 8, 3, false> const&)
486
};
487
488
using h2_t = uint8_t;
489
490
// The values here are selected for maximum performance. See the static asserts
491
// below for details.
492
493
// A `ctrl_t` is a single control byte, which can have one of four
494
// states: empty, deleted, full (which has an associated seven-bit h2_t value)
495
// and the sentinel. They have the following bit patterns:
496
//
497
//      empty: 1 0 0 0 0 0 0 0
498
//    deleted: 1 1 1 1 1 1 1 0
499
//       full: 0 h h h h h h h  // h represents the hash bits.
500
//   sentinel: 1 1 1 1 1 1 1 1
501
//
502
// These values are specifically tuned for SSE-flavored SIMD.
503
// The static_asserts below detail the source of these choices.
504
//
505
// We use an enum class so that when strict aliasing is enabled, the compiler
506
// knows ctrl_t doesn't alias other types.
507
enum class ctrl_t : int8_t {
508
  kEmpty = -128,   // 0b10000000
509
  kDeleted = -2,   // 0b11111110
510
  kSentinel = -1,  // 0b11111111
511
};
512
static_assert(
513
    (static_cast<int8_t>(ctrl_t::kEmpty) &
514
     static_cast<int8_t>(ctrl_t::kDeleted) &
515
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
516
    "Special markers need to have the MSB to make checking for them efficient");
517
static_assert(
518
    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
519
    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
520
    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
521
static_assert(
522
    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
523
    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
524
    "registers (pcmpeqd xmm, xmm)");
525
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
526
              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
527
              "existence efficient (psignb xmm, xmm)");
528
static_assert(
529
    (~static_cast<int8_t>(ctrl_t::kEmpty) &
530
     ~static_cast<int8_t>(ctrl_t::kDeleted) &
531
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
532
    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
533
    "shared by ctrl_t::kSentinel to make the scalar test for "
534
    "MaskEmptyOrDeleted() efficient");
535
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
536
              "ctrl_t::kDeleted must be -2 to make the implementation of "
537
              "ConvertSpecialToEmptyAndFullToDeleted efficient");
538
539
// See definition comment for why this is size 32.
540
ABSL_DLL extern const ctrl_t kEmptyGroup[32];
541
542
// We use these sentinel capacity values in debug mode to indicate different
543
// classes of bugs.
544
enum InvalidCapacity : size_t {
545
  kAboveMaxValidCapacity = ~size_t{} - 100,
546
  kReentrance,
547
  kDestroyed,
548
549
  // These two must be last because we use `>= kMovedFrom` to mean moved-from.
550
  kMovedFrom,
551
  kSelfMovedFrom,
552
};
553
554
// Returns a pointer to a control byte group that can be used by empty tables.
555
82
inline ctrl_t* EmptyGroup() {
556
  // Const must be cast away here; no uses of this function will actually write
557
  // to it because it is only used for empty tables.
558
82
  return const_cast<ctrl_t*>(kEmptyGroup + 16);
559
82
}
560
561
// For use in SOO iterators.
562
// TODO(b/289225379): we could potentially get rid of this by adding an is_soo
563
// bit in iterators. This would add branches but reduce cache misses.
564
ABSL_DLL extern const ctrl_t kSooControl[17];
565
566
// Returns a pointer to a full byte followed by a sentinel byte.
567
32
inline ctrl_t* SooControl() {
568
  // Const must be cast away here; no uses of this function will actually write
569
  // to it because it is only used for SOO iterators.
570
32
  return const_cast<ctrl_t*>(kSooControl);
571
32
}
572
// Whether ctrl is from the SooControl array.
573
32
inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); }
574
575
// Returns a pointer to a generation to use for an empty hashtable.
576
GenerationType* EmptyGeneration();
577
578
// Returns whether `generation` is a generation for an empty hashtable that
579
// could be returned by EmptyGeneration().
580
0
inline bool IsEmptyGeneration(const GenerationType* generation) {
581
0
  return *generation == SentinelEmptyGeneration();
582
0
}
583
584
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
585
// randomize insertion order within groups.
586
bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
587
                                   const ctrl_t* ctrl);
588
589
ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
590
    ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
591
0
    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
592
#if defined(NDEBUG)
593
  return false;
594
#else
595
0
  return ShouldInsertBackwardsForDebug(capacity, hash, ctrl);
596
0
#endif
597
0
}
598
599
// Returns insert position for the given mask.
600
// We want to add entropy even when ASLR is not enabled.
601
// In debug builds we will randomly insert in either the front or back of
602
// the group.
603
// TODO(kfm,sbenza): revisit after we do unconditional mixing
604
template <class Mask>
605
ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
606
    Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
607
    ABSL_ATTRIBUTE_UNUSED size_t hash,
608
16
    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {
609
#if defined(NDEBUG)
610
  return mask.LowestBitSet();
611
#else
612
16
  return ShouldInsertBackwardsForDebug(capacity, hash, ctrl)
613
16
             ? mask.HighestBitSet()
614
16
             : mask.LowestBitSet();
615
16
#endif
616
16
}
617
618
// Returns a per-table hash salt, which changes on resize. This gets mixed into
619
// H1 to randomize iteration order per-table.
620
//
621
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
622
// non-determinism of iteration order in most cases.
623
40
inline size_t PerTableSalt(const ctrl_t* ctrl) {
624
  // The low bits of the pointer have little or no entropy because of
625
  // alignment. We shift the pointer to try to use higher entropy bits. A
626
  // good number seems to be 12 bits, because that aligns with page size.
627
40
  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
628
40
}
629
// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
630
40
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
631
40
  return (hash >> 7) ^ PerTableSalt(ctrl);
632
40
}
633
634
// Extracts the H2 portion of a hash: the 7 bits not used for H1.
635
//
636
// These are used as an occupied control byte.
637
48
inline h2_t H2(size_t hash) { return hash & 0x7F; }
638
639
// Helpers for checking the state of a control byte.
640
40
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
641
114
inline bool IsFull(ctrl_t c) {
642
  // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
643
  // is not a value in the enum. Both ways are equivalent, but this way makes
644
  // linters happier.
645
114
  return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
646
114
}
647
0
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
648
0
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
649
650
#ifdef ABSL_INTERNAL_HAVE_SSE2
651
// Quick reference guide for intrinsics used below:
652
//
653
// * __m128i: An XMM (128-bit) word.
654
//
655
// * _mm_setzero_si128: Returns a zero vector.
656
// * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
657
//
658
// * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
659
// * _mm_and_si128:    Ands two i128s together.
660
// * _mm_or_si128:     Ors two i128s together.
661
// * _mm_andnot_si128: And-nots two i128s together.
662
//
663
// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
664
//                   filling each lane with 0x00 or 0xff.
665
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
666
//
667
// * _mm_loadu_si128:  Performs an unaligned load of an i128.
668
// * _mm_storeu_si128: Performs an unaligned store of an i128.
669
//
670
// * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
671
//                      argument if the corresponding lane of the second
672
//                      argument is positive, negative, or zero, respectively.
673
// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
674
//                      bitmask consisting of those bits.
675
// * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
676
//                      four bits of each i8 lane in the second argument as
677
//                      indices.
678
679
// https://github.com/abseil/abseil-cpp/issues/209
680
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
681
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
682
// Work around this by using the portable implementation of Group
683
// when using -funsigned-char under GCC.
684
0
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
685
#if defined(__GNUC__) && !defined(__clang__)
686
  if (std::is_unsigned<char>::value) {
687
    const __m128i mask = _mm_set1_epi8(0x80);
688
    const __m128i diff = _mm_subs_epi8(b, a);
689
    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
690
  }
691
#endif
692
0
  return _mm_cmpgt_epi8(a, b);
693
0
}
694
695
struct GroupSse2Impl {
696
  static constexpr size_t kWidth = 16;  // the number of slots per group
697
698
32
  explicit GroupSse2Impl(const ctrl_t* pos) {
699
32
    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
700
32
  }
701
702
  // Returns a bitmask representing the positions of slots that match hash.
703
32
  BitMask<uint16_t, kWidth> Match(h2_t hash) const {
704
32
    auto match = _mm_set1_epi8(static_cast<char>(hash));
705
32
    return BitMask<uint16_t, kWidth>(
706
32
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
707
32
  }
708
709
  // Returns a bitmask representing the positions of empty slots.
710
16
  NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const {
711
#ifdef ABSL_INTERNAL_HAVE_SSSE3
712
    // This only works because ctrl_t::kEmpty is -128.
713
    return NonIterableBitMask<uint16_t, kWidth>(
714
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
715
#else
716
16
    auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
717
16
    return NonIterableBitMask<uint16_t, kWidth>(
718
16
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
719
16
#endif
720
16
  }
721
722
  // Returns a bitmask representing the positions of full slots.
723
  // Note: for `is_small()` tables group may contain the "same" slot twice:
724
  // original and mirrored.
725
0
  BitMask<uint16_t, kWidth> MaskFull() const {
726
0
    return BitMask<uint16_t, kWidth>(
727
0
        static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
728
0
  }
729
730
  // Returns a bitmask representing the positions of non full slots.
731
  // Note: this includes: kEmpty, kDeleted, kSentinel.
732
  // It is useful in contexts when kSentinel is not present.
733
0
  auto MaskNonFull() const {
734
0
    return BitMask<uint16_t, kWidth>(
735
0
        static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
736
0
  }
737
738
  // Returns a bitmask representing the positions of empty or deleted slots.
739
0
  NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
740
0
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
741
0
    return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
742
0
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
743
0
  }
744
745
  // Returns the number of leading empty or deleted elements in the group.
746
0
  uint32_t CountLeadingEmptyOrDeleted() const {
747
0
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
748
0
    return TrailingZeros(static_cast<uint32_t>(
749
0
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
750
0
  }
751
752
0
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
753
0
    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
754
0
    auto x126 = _mm_set1_epi8(126);
755
#ifdef ABSL_INTERNAL_HAVE_SSSE3
756
    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
757
#else
758
0
    auto zero = _mm_setzero_si128();
759
0
    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
760
0
    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
761
0
#endif
762
0
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
763
0
  }
764
765
  __m128i ctrl;
766
};
767
#endif  // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
768
769
#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
770
struct GroupAArch64Impl {
771
  static constexpr size_t kWidth = 8;
772
773
  explicit GroupAArch64Impl(const ctrl_t* pos) {
774
    ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
775
  }
776
777
  auto Match(h2_t hash) const {
778
    uint8x8_t dup = vdup_n_u8(hash);
779
    auto mask = vceq_u8(ctrl, dup);
780
    return BitMask<uint64_t, kWidth, /*Shift=*/3,
781
                   /*NullifyBitsOnIteration=*/true>(
782
        vget_lane_u64(vreinterpret_u64_u8(mask), 0));
783
  }
784
785
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
786
    uint64_t mask =
787
        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
788
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
789
                          vreinterpret_s8_u8(ctrl))),
790
                      0);
791
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
792
  }
793
794
  // Returns a bitmask representing the positions of full slots.
795
  // Note: for `is_small()` tables group may contain the "same" slot twice:
796
  // original and mirrored.
797
  auto MaskFull() const {
798
    uint64_t mask = vget_lane_u64(
799
        vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
800
                                    vdup_n_s8(static_cast<int8_t>(0)))),
801
        0);
802
    return BitMask<uint64_t, kWidth, /*Shift=*/3,
803
                   /*NullifyBitsOnIteration=*/true>(mask);
804
  }
805
806
  // Returns a bitmask representing the positions of non full slots.
807
  // Note: this includes: kEmpty, kDeleted, kSentinel.
808
  // It is useful in contexts when kSentinel is not present.
809
  auto MaskNonFull() const {
810
    uint64_t mask = vget_lane_u64(
811
        vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
812
                                    vdup_n_s8(static_cast<int8_t>(0)))),
813
        0);
814
    return BitMask<uint64_t, kWidth, /*Shift=*/3,
815
                   /*NullifyBitsOnIteration=*/true>(mask);
816
  }
817
818
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
819
    uint64_t mask =
820
        vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
821
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
822
                          vreinterpret_s8_u8(ctrl))),
823
                      0);
824
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
825
  }
826
827
  uint32_t CountLeadingEmptyOrDeleted() const {
828
    uint64_t mask =
829
        vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
830
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
831
                          vreinterpret_s8_u8(ctrl))),
832
                      0);
833
    // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
834
    // produced bitfield. We then count number of trailing zeros.
835
    // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
836
    // so we should be fine.
837
    return static_cast<uint32_t>(countr_zero(mask)) >> 3;
838
  }
839
840
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
841
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
842
    constexpr uint64_t slsbs = 0x0202020202020202ULL;
843
    constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
844
    auto x = slsbs & (mask >> 6);
845
    auto res = (x + midbs) | kMsbs8Bytes;
846
    little_endian::Store64(dst, res);
847
  }
848
849
  uint8x8_t ctrl;
850
};
851
#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
852
853
struct GroupPortableImpl {
854
  static constexpr size_t kWidth = 8;
855
856
  explicit GroupPortableImpl(const ctrl_t* pos)
857
0
      : ctrl(little_endian::Load64(pos)) {}
858
859
0
  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
860
0
    // For the technique, see:
861
0
    // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
862
0
    // (Determine if a word has a byte equal to n).
863
0
    //
864
0
    // Caveat: there are false positives but:
865
0
    // - they only occur if there is a real match
866
0
    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
867
0
    // - they will be handled gracefully by subsequent checks in code
868
0
    //
869
0
    // Example:
870
0
    //   v = 0x1716151413121110
871
0
    //   hash = 0x12
872
0
    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
873
0
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
874
0
    auto x = ctrl ^ (lsbs * hash);
875
0
    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & kMsbs8Bytes);
876
0
  }
877
878
0
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
879
0
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
880
0
                                                   kMsbs8Bytes);
881
0
  }
882
883
  // Returns a bitmask representing the positions of full slots.
884
  // Note: for `is_small()` tables group may contain the "same" slot twice:
885
  // original and mirrored.
886
0
  BitMask<uint64_t, kWidth, 3> MaskFull() const {
887
0
    return BitMask<uint64_t, kWidth, 3>((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
888
0
  }
889
890
  // Returns a bitmask representing the positions of non full slots.
891
  // Note: this includes: kEmpty, kDeleted, kSentinel.
892
  // It is useful in contexts when kSentinel is not present.
893
0
  auto MaskNonFull() const {
894
0
    return BitMask<uint64_t, kWidth, 3>(ctrl & kMsbs8Bytes);
895
0
  }
896
897
0
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
898
0
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
899
0
                                                   kMsbs8Bytes);
900
0
  }
901
902
0
  uint32_t CountLeadingEmptyOrDeleted() const {
903
0
    // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
904
0
    // kDeleted. We lower all other bits and count number of trailing zeros.
905
0
    constexpr uint64_t bits = 0x0101010101010101ULL;
906
0
    return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
907
0
                                 3);
908
0
  }
909
910
0
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
911
0
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
912
0
    auto x = ctrl & kMsbs8Bytes;
913
0
    auto res = (~x + (x >> 7)) & ~lsbs;
914
0
    little_endian::Store64(dst, res);
915
0
  }
916
917
  uint64_t ctrl;
918
};
919
920
#ifdef ABSL_INTERNAL_HAVE_SSE2
921
using Group = GroupSse2Impl;
922
using GroupFullEmptyOrDeleted = GroupSse2Impl;
923
#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
924
using Group = GroupAArch64Impl;
925
// For Aarch64, we use the portable implementation for counting and masking
926
// full, empty or deleted group elements. This is to avoid the latency of moving
927
// between data GPRs and Neon registers when it does not provide a benefit.
928
// Using Neon is profitable when we call Match(), but is not when we don't,
929
// which is the case when we do *EmptyOrDeleted and MaskFull operations.
930
// It is difficult to make a similar approach beneficial on other architectures
931
// such as x86 since they have much lower GPR <-> vector register transfer
932
// latency and 16-wide Groups.
933
using GroupFullEmptyOrDeleted = GroupPortableImpl;
934
#else
935
using Group = GroupPortableImpl;
936
using GroupFullEmptyOrDeleted = GroupPortableImpl;
937
#endif
938
939
// When there is an insertion with no reserved growth, we rehash with
940
// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
941
// constant divided by capacity ensures that inserting N elements is still O(N)
942
// in the average case. Using the constant 16 means that we expect to rehash ~8
943
// times more often than when generations are disabled. We are adding expected
944
// rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
945
// 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
946
0
inline size_t RehashProbabilityConstant() { return 16; }
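
One way to read the arithmetic above as an expected-value computation (a
sketch; "per capacity growth" is the span between two growths, during which
roughly (7/8 - 7/16) * capacity insertions are accepted):

  expected extra rehashes per growth ~= (16 / capacity) * (7/8 - 7/16) * capacity = 7
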
947
948
class CommonFieldsGenerationInfoEnabled {
949
  // A sentinel value for reserved_growth_ indicating that we just ran out of
950
  // reserved growth on the last insertion. When reserve is called and then
951
  // insertions take place, reserved_growth_'s state machine is N, ..., 1,
952
  // kReservedGrowthJustRanOut, 0.
953
  static constexpr size_t kReservedGrowthJustRanOut =
954
      (std::numeric_limits<size_t>::max)();
955
956
 public:
957
  CommonFieldsGenerationInfoEnabled() = default;
958
  CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
959
      : reserved_growth_(that.reserved_growth_),
960
        reservation_size_(that.reservation_size_),
961
0
        generation_(that.generation_) {
962
0
    that.reserved_growth_ = 0;
963
0
    that.reservation_size_ = 0;
964
0
    that.generation_ = EmptyGeneration();
965
0
  }
966
  CommonFieldsGenerationInfoEnabled& operator=(
967
      CommonFieldsGenerationInfoEnabled&&) = default;
968
969
  // Whether we should rehash on insert in order to detect bugs caused by using invalid
970
  // references. We rehash on the first insertion after reserved_growth_ reaches
971
  // 0 after a call to reserve. We also do a rehash with low probability
972
  // whenever reserved_growth_ is zero.
973
  bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
974
                                                 size_t capacity) const;
975
  // Similar to above, except that we don't depend on reserved_growth_.
976
  bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
977
                                               size_t capacity) const;
978
0
  void maybe_increment_generation_on_insert() {
979
0
    if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
980
0
981
0
    if (reserved_growth_ > 0) {
982
0
      if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
983
0
    } else {
984
0
      increment_generation();
985
0
    }
986
0
  }
987
0
  void increment_generation() { *generation_ = NextGeneration(*generation_); }
988
0
  void reset_reserved_growth(size_t reservation, size_t size) {
989
0
    reserved_growth_ = reservation - size;
990
0
  }
991
0
  size_t reserved_growth() const { return reserved_growth_; }
992
0
  void set_reserved_growth(size_t r) { reserved_growth_ = r; }
993
0
  size_t reservation_size() const { return reservation_size_; }
994
0
  void set_reservation_size(size_t r) { reservation_size_ = r; }
995
0
  GenerationType generation() const { return *generation_; }
996
0
  void set_generation(GenerationType g) { *generation_ = g; }
997
0
  GenerationType* generation_ptr() const { return generation_; }
998
0
  void set_generation_ptr(GenerationType* g) { generation_ = g; }
999
1000
 private:
1001
  // The number of insertions remaining that are guaranteed to not rehash due to
1002
  // a prior call to reserve. Note: we store reserved growth in addition to
1003
  // reservation size because calls to erase() decrease size_ but don't decrease
1004
  // reserved growth.
1005
  size_t reserved_growth_ = 0;
1006
  // The maximum argument to reserve() since the container was cleared. We need
1007
  // to keep track of this, in addition to reserved growth, because we reset
1008
  // reserved growth to this when erase(begin(), end()) is called.
1009
  size_t reservation_size_ = 0;
1010
  // Pointer to the generation counter, which is used to validate iterators and
1011
  // is stored in the backing array between the control bytes and the slots.
1012
  // Note that we can't store the generation inside the container itself and
1013
  // keep a pointer to the container in the iterators because iterators must
1014
  // remain valid when the container is moved.
1015
  // Note: we could derive this pointer from the control pointer, but it makes
1016
  // the code more complicated, and there's a benefit in having the sizes of
1017
  // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
1018
  // which is that tests are less likely to rely on the size remaining the same.
1019
  GenerationType* generation_ = EmptyGeneration();
1020
};
1021
1022
class CommonFieldsGenerationInfoDisabled {
1023
 public:
1024
  CommonFieldsGenerationInfoDisabled() = default;
1025
  CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
1026
      default;
1027
  CommonFieldsGenerationInfoDisabled& operator=(
1028
      CommonFieldsGenerationInfoDisabled&&) = default;
1029
1030
0
  bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
1031
0
    return false;
1032
0
  }
1033
0
  bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
1034
0
    return false;
1035
0
  }
1036
16
  void maybe_increment_generation_on_insert() {}
1037
0
  void increment_generation() {}
1038
0
  void reset_reserved_growth(size_t, size_t) {}
1039
0
  size_t reserved_growth() const { return 0; }
1040
0
  void set_reserved_growth(size_t) {}
1041
0
  size_t reservation_size() const { return 0; }
1042
0
  void set_reservation_size(size_t) {}
1043
8
  GenerationType generation() const { return 0; }
1044
8
  void set_generation(GenerationType) {}
1045
32
  GenerationType* generation_ptr() const { return nullptr; }
1046
8
  void set_generation_ptr(GenerationType*) {}
1047
};
1048
1049
class HashSetIteratorGenerationInfoEnabled {
1050
 public:
1051
  HashSetIteratorGenerationInfoEnabled() = default;
1052
  explicit HashSetIteratorGenerationInfoEnabled(
1053
      const GenerationType* generation_ptr)
1054
0
      : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
1055
1056
0
  GenerationType generation() const { return generation_; }
1057
0
  void reset_generation() { generation_ = *generation_ptr_; }
1058
0
  const GenerationType* generation_ptr() const { return generation_ptr_; }
1059
0
  void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
1060
1061
 private:
1062
  const GenerationType* generation_ptr_ = EmptyGeneration();
1063
  GenerationType generation_ = *generation_ptr_;
1064
};
1065
1066
class HashSetIteratorGenerationInfoDisabled {
1067
 public:
1068
  HashSetIteratorGenerationInfoDisabled() = default;
1069
32
  explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
1070
1071
48
  GenerationType generation() const { return 0; }
1072
0
  void reset_generation() {}
1073
80
  const GenerationType* generation_ptr() const { return nullptr; }
1074
0
  void set_generation_ptr(const GenerationType*) {}
1075
};
1076
1077
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
1078
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
1079
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
1080
#else
1081
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
1082
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
1083
#endif
1084
1085
// Stores information regarding the number of slots we can still fill
1086
// without needing to rehash.
1087
//
1088
// We want to ensure a sufficient number of empty slots in the table in order
1089
// to keep probe sequences relatively short. An empty slot in the probe group
1090
// is required to stop probing.
1091
//
1092
// Tombstones (kDeleted slots) are not included in the growth capacity,
1093
// because we'd like to rehash when the table is filled with tombstones and/or
1094
// full slots.
1095
//
1096
// GrowthInfo also stores a bit that encodes whether the table may have any
1097
// deleted slots.
1098
// Most of the tables (>95%) have no deleted slots, so some functions can
1099
// be more efficient with this information.
1100
//
1101
// Callers can also force a rehash via the standard `rehash(0)`,
1102
// which will recompute this value as a side-effect.
1103
//
1104
// See also `CapacityToGrowth()`.
1105
class GrowthInfo {
1106
 public:
1107
  // Leaves data member uninitialized.
1108
  GrowthInfo() = default;
1109
1110
  // Initializes the GrowthInfo assuming we can grow `growth_left` elements
1111
  // and there are no kDeleted slots in the table.
1112
8
  void InitGrowthLeftNoDeleted(size_t growth_left) {
1113
8
    growth_left_info_ = growth_left;
1114
8
  }
1115
1116
  // Overwrites single full slot with an empty slot.
1117
0
  void OverwriteFullAsEmpty() { ++growth_left_info_; }
1118
1119
  // Overwrites single empty slot with a full slot.
1120
0
  void OverwriteEmptyAsFull() {
1121
0
    assert(GetGrowthLeft() > 0);
1122
0
    --growth_left_info_;
1123
0
  }
1124
1125
  // Overwrites several empty slots with full slots.
1126
0
  void OverwriteManyEmptyAsFull(size_t cnt) {
1127
0
    assert(GetGrowthLeft() >= cnt);
1128
0
    growth_left_info_ -= cnt;
1129
0
  }
1130
1131
  // Overwrites specified control element with full slot.
1132
16
  void OverwriteControlAsFull(ctrl_t ctrl) {
1133
16
    assert(GetGrowthLeft() >= static_cast<size_t>(IsEmpty(ctrl)));
1134
16
    growth_left_info_ -= static_cast<size_t>(IsEmpty(ctrl));
1135
16
  }
1136
1137
  // Overwrites single full slot with a deleted slot.
1138
0
  void OverwriteFullAsDeleted() { growth_left_info_ |= kDeletedBit; }
1139
1140
  // Returns true if table satisfies two properties:
1141
  // 1. Guaranteed to have no kDeleted slots.
1142
  // 2. There is a place for at least one element to grow.
1143
16
  bool HasNoDeletedAndGrowthLeft() const {
1144
16
    return static_cast<std::make_signed_t<size_t>>(growth_left_info_) > 0;
1145
16
  }
1146
1147
  // Returns true if the table satisfies two properties:
1148
  // 1. Guaranteed to have no kDeleted slots.
1149
  // 2. There is no growth left.
1150
8
  bool HasNoGrowthLeftAndNoDeleted() const { return growth_left_info_ == 0; }
1151
1152
  // Returns true if the table is guaranteed to have no kDeleted slots.
1153
0
  bool HasNoDeleted() const {
1154
0
    return static_cast<std::make_signed_t<size_t>>(growth_left_info_) >= 0;
1155
0
  }
1156
1157
  // Returns the number of elements left to grow.
1158
16
  size_t GetGrowthLeft() const { return growth_left_info_ & kGrowthLeftMask; }
1159
1160
 private:
1161
  static constexpr size_t kGrowthLeftMask = ((~size_t{}) >> 1);
1162
  static constexpr size_t kDeletedBit = ~kGrowthLeftMask;
1163
  // The topmost bit signals whether there are deleted slots.
1164
  size_t growth_left_info_;
1165
};
1166
1167
static_assert(sizeof(GrowthInfo) == sizeof(size_t), "");
1168
static_assert(alignof(GrowthInfo) == alignof(size_t), "");
1169
1170
// Returns whether `n` is a valid capacity (i.e., number of slots).
1171
//
1172
// A valid capacity is a non-zero integer `2^m - 1`.
1173
78
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
1174
1175
// Returns the number of "cloned control bytes".
1176
//
1177
// This is the number of control bytes that are present both at the beginning
1178
// of the control byte array and at the end, such that we can create a
1179
// `Group::kWidth`-width probe window starting from any control byte.
1180
54
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
1181
1182
// Returns the number of control bytes including cloned.
1183
20
constexpr size_t NumControlBytes(size_t capacity) {
1184
20
  return capacity + 1 + NumClonedBytes();
1185
20
}
1186
1187
// Computes the offset of the control bytes from the start of the backing allocation.
1188
// infoz and growth_info are stored at the beginning of the backing array.
1189
0
inline static size_t ControlOffset(bool has_infoz) {
1190
0
  return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(GrowthInfo);
1191
0
}
Unexecuted instantiation: reflection.cc:absl::container_internal::ControlOffset(bool)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::ControlOffset(bool)
1192
1193
// Helper class for computing offsets and allocation size of hash set fields.
1194
class RawHashSetLayout {
1195
 public:
1196
  explicit RawHashSetLayout(size_t capacity, size_t slot_align, bool has_infoz)
1197
      : capacity_(capacity),
1198
        control_offset_(ControlOffset(has_infoz)),
1199
        generation_offset_(control_offset_ + NumControlBytes(capacity)),
1200
        slot_offset_(
1201
            (generation_offset_ + NumGenerationBytes() + slot_align - 1) &
1202
14
            (~slot_align + 1)) {
1203
14
    assert(IsValidCapacity(capacity));
1204
14
  }
1205
1206
  // Returns the capacity of a table.
1207
8
  size_t capacity() const { return capacity_; }
1208
1209
  // Returns precomputed offset from the start of the backing allocation of
1210
  // control.
1211
14
  size_t control_offset() const { return control_offset_; }
1212
1213
  // Given the capacity of a table, computes the offset (from the start of the
1214
  // backing allocation) of the generation counter (if it exists).
1215
8
  size_t generation_offset() const { return generation_offset_; }
1216
1217
  // Given the capacity of a table, computes the offset (from the start of the
1218
  // backing allocation) at which the slots begin.
1219
8
  size_t slot_offset() const { return slot_offset_; }
1220
1221
  // Given the capacity of a table, computes the total size of the backing
1222
  // array.
1223
14
  size_t alloc_size(size_t slot_size) const {
1224
14
    return slot_offset_ + capacity_ * slot_size;
1225
14
  }
1226
1227
 private:
1228
  size_t capacity_;
1229
  size_t control_offset_;
1230
  size_t generation_offset_;
1231
  size_t slot_offset_;
1232
};
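// A worked example of the layout above (a sketch, assuming a 64-bit size_t,
// Group::kWidth == 16, capacity == 15, slot_align == 8, has_infoz == false,
// and generations disabled so NumGenerationBytes() == 0):
//
//   control_offset()    == sizeof(GrowthInfo)            == 8
//   NumControlBytes(15) == 15 + 1 + 15                    == 31
//   generation_offset() == 8 + 31                         == 39
//   slot_offset()       == (39 + 0 + 8 - 1) & ~size_t{7}  == 40
//   alloc_size(8)       == 40 + 15 * 8                    == 160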
1233
1234
struct HashtableFreeFunctionsAccess;
1235
1236
// We only allow a maximum of 1 SOO element, which makes the implementation
1237
// much simpler. Complications with multiple SOO elements include:
1238
// - Satisfying the guarantee that erasing one element doesn't invalidate
1239
//   iterators to other elements means we would probably need actual SOO
1240
//   control bytes.
1241
// - In order to prevent user code from depending on iteration order for small
1242
//   tables, we would need to randomize the iteration order somehow.
1243
0
constexpr size_t SooCapacity() { return 1; }
1244
// Sentinel type to indicate SOO CommonFields construction.
1245
struct soo_tag_t {};
1246
// Sentinel type to indicate SOO CommonFields construction with full size.
1247
struct full_soo_tag_t {};
1248
// Sentinel type to indicate non-SOO CommonFields construction.
1249
struct non_soo_tag_t {};
1250
// Sentinel value to indicate non-SOO construction for moved-from values.
1251
struct moved_from_non_soo_tag_t {};
1252
// Sentinel value to indicate an uninitialized CommonFields for use in swapping.
1253
struct uninitialized_tag_t {};
1254
1255
// Suppress erroneous uninitialized memory errors on GCC. For example, GCC
1256
// thinks that the call to slot_array() in find_or_prepare_insert() is reading
1257
// uninitialized memory, but slot_array is only called there when the table is
1258
// non-empty and this memory is initialized when the table is non-empty.
1259
#if !defined(__clang__) && defined(__GNUC__)
1260
#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x)                    \
1261
  _Pragma("GCC diagnostic push")                                   \
1262
      _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")  \
1263
          _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \
1264
  _Pragma("GCC diagnostic pop")
1265
#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \
1266
  ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x)
1267
#else
1268
0
#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x
1269
592
#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x
1270
#endif
1271
1272
// This allows us to work around an uninitialized memory warning when
1273
// constructing begin() iterators in empty hashtables.
1274
union MaybeInitializedPtr {
1275
133
  void* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); }
1276
8
  void set(void* ptr) { p = ptr; }
1277
1278
  void* p;
1279
};
1280
1281
struct HeapPtrs {
1282
  HeapPtrs() = default;
1283
2
  explicit HeapPtrs(ctrl_t* c) : control(c) {}
1284
1285
  // The control bytes (and, also, a pointer near to the base of the backing
1286
  // array).
1287
  //
1288
  // This contains `capacity + 1 + NumClonedBytes()` entries, even
1289
  // when the table is empty (hence EmptyGroup).
1290
  //
1291
  // Note that growth_info is stored immediately before this pointer.
1292
  // May be uninitialized for SOO tables.
1293
  ctrl_t* control;
1294
1295
  // The beginning of the slots, located at `SlotOffset()` bytes after
1296
  // `control`. May be uninitialized for empty tables.
1297
  // Note: we can't use `slots` because Qt defines "slots" as a macro.
1298
  MaybeInitializedPtr slot_array;
1299
};
1300
1301
// Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
1302
// is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
1303
union HeapOrSoo {
1304
  HeapOrSoo() = default;
1305
2
  explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
1306
1307
8
  ctrl_t*& control() {
1308
8
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
1309
8
  }
1310
310
  ctrl_t* control() const {
1311
310
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control);
1312
310
  }
1313
8
  MaybeInitializedPtr& slot_array() {
1314
8
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
1315
8
  }
1316
133
  MaybeInitializedPtr slot_array() const {
1317
133
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array);
1318
133
  }
1319
0
  void* get_soo_data() {
1320
0
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
1321
0
  }
1322
0
  const void* get_soo_data() const {
1323
0
    ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(soo_data);
1324
0
  }
1325
1326
  HeapPtrs heap;
1327
  unsigned char soo_data[sizeof(HeapPtrs)];
1328
};
1329
1330
// CommonFields hold the fields in raw_hash_set that do not depend
1331
// on template parameters. This allows us to conveniently pass all
1332
// of this state to helper functions as a single argument.
1333
class CommonFields : public CommonFieldsGenerationInfo {
1334
 public:
1335
0
  explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
1336
  explicit CommonFields(full_soo_tag_t)
1337
0
      : capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
1338
  explicit CommonFields(non_soo_tag_t)
1339
2
      : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
1340
  // For non-SOO moved-from values, we only need to initialize capacity_.
1341
0
  explicit CommonFields(moved_from_non_soo_tag_t) : capacity_(0) {}
1342
  // For use in swapping.
1343
0
  explicit CommonFields(uninitialized_tag_t) {}
1344
1345
  // Not copyable
1346
  CommonFields(const CommonFields&) = delete;
1347
  CommonFields& operator=(const CommonFields&) = delete;
1348
1349
  // Movable
1350
  CommonFields(CommonFields&& that) = default;
1351
  CommonFields& operator=(CommonFields&&) = default;
1352
1353
  template <bool kSooEnabled>
1354
2
  static CommonFields CreateDefault() {
1355
2
    return kSooEnabled ? CommonFields{soo_tag_t{}}
1356
2
                       : CommonFields{non_soo_tag_t{}};
1357
2
  }
1358
  template <bool kSooEnabled>
1359
0
  static CommonFields CreateMovedFrom() {
1360
0
    return CreateDefault<kSooEnabled>();
1361
0
  }
1362
1363
  // The inline data for SOO is written on top of control_/slots_.
1364
0
  const void* soo_data() const { return heap_or_soo_.get_soo_data(); }
1365
0
  void* soo_data() { return heap_or_soo_.get_soo_data(); }
1366
1367
8
  HeapOrSoo heap_or_soo() const { return heap_or_soo_; }
1368
0
  const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; }
1369
1370
298
  ctrl_t* control() const { return heap_or_soo_.control(); }
1371
8
  void set_control(ctrl_t* c) { heap_or_soo_.control() = c; }
1372
0
  void* backing_array_start() const {
1373
    // growth_info (and maybe infoz) is stored before control bytes.
1374
0
    assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
1375
0
    return control() - ControlOffset(has_infoz());
1376
0
  }
1377
1378
  // Note: we can't use slots() because Qt defines "slots" as a macro.
1379
109
  void* slot_array() const { return heap_or_soo_.slot_array().get(); }
1380
0
  MaybeInitializedPtr slots_union() const { return heap_or_soo_.slot_array(); }
1381
8
  void set_slots(void* s) { heap_or_soo_.slot_array().set(s); }
1382
1383
  // The number of filled slots.
1384
32
  size_t size() const { return size_ >> HasInfozShift(); }
1385
0
  void set_size(size_t s) {
1386
0
    size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
1387
0
  }
1388
0
  void set_empty_soo() {
1389
0
    AssertInSooMode();
1390
0
    size_ = 0;
1391
0
  }
1392
0
  void set_full_soo() {
1393
0
    AssertInSooMode();
1394
0
    size_ = size_t{1} << HasInfozShift();
1395
0
  }
1396
16
  void increment_size() {
1397
16
    assert(size() < capacity());
1398
16
    size_ += size_t{1} << HasInfozShift();
1399
16
  }
1400
0
  void decrement_size() {
1401
0
    assert(size() > 0);
1402
0
    size_ -= size_t{1} << HasInfozShift();
1403
0
  }
1404
1405
  // The total number of available slots.
1406
565
  size_t capacity() const { return capacity_; }
1407
40
  void set_capacity(size_t c) {
1408
    // We allow setting above the max valid capacity for debugging purposes.
1409
40
    assert(c == 0 || IsValidCapacity(c) || c > kAboveMaxValidCapacity);
1410
40
    capacity_ = c;
1411
40
  }
1412
1413
  // The number of slots we can still fill without needing to rehash.
1414
  // This is stored in the heap allocation before the control bytes.
1415
  // TODO(b/289225379): experiment with moving growth_info back inline to
1416
  // increase room for SOO.
1417
0
  size_t growth_left() const { return growth_info().GetGrowthLeft(); }
1418
1419
48
  GrowthInfo& growth_info() {
1420
48
    auto* gl_ptr = reinterpret_cast<GrowthInfo*>(control()) - 1;
1421
48
    assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(GrowthInfo) == 0);
1422
48
    return *gl_ptr;
1423
48
  }
1424
0
  GrowthInfo growth_info() const {
1425
0
    return const_cast<CommonFields*>(this)->growth_info();
1426
0
  }
1427
1428
30
  bool has_infoz() const {
1429
30
    return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
1430
30
  }
1431
8
  void set_has_infoz(bool has_infoz) {
1432
8
    size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
1433
8
  }
1434
1435
22
  HashtablezInfoHandle infoz() {
1436
22
    return has_infoz()
1437
22
               ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
1438
22
               : HashtablezInfoHandle();
1439
22
  }
1440
0
  void set_infoz(HashtablezInfoHandle infoz) {
1441
0
    assert(has_infoz());
1442
0
    *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
1443
0
  }
1444
1445
0
  bool should_rehash_for_bug_detection_on_insert() const {
1446
0
    return CommonFieldsGenerationInfo::
1447
0
        should_rehash_for_bug_detection_on_insert(control(), capacity());
1448
0
  }
1449
0
  bool should_rehash_for_bug_detection_on_move() const {
1450
0
    return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move(
1451
0
        control(), capacity());
1452
0
  }
1453
0
  void reset_reserved_growth(size_t reservation) {
1454
0
    CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
1455
0
  }
1456
1457
  // The size of the backing array allocation.
1458
0
  size_t alloc_size(size_t slot_size, size_t slot_align) const {
1459
0
    return RawHashSetLayout(capacity(), slot_align, has_infoz())
1460
0
        .alloc_size(slot_size);
1461
0
  }
1462
1463
  // Initialize fields that are left uninitialized by moved-from constructor.
1464
0
  void reinitialize_moved_from_non_soo() {
1465
0
    size_ = 0;
1466
0
    heap_or_soo_ = HeapOrSoo(EmptyGroup());
1467
0
  }
1468
1469
  // Move fields other than heap_or_soo_.
1470
0
  void move_non_heap_or_soo_fields(CommonFields& that) {
1471
0
    static_cast<CommonFieldsGenerationInfo&>(*this) =
1472
0
        std::move(static_cast<CommonFieldsGenerationInfo&>(that));
1473
0
    capacity_ = that.capacity_;
1474
0
    size_ = that.size_;
1475
0
  }
1476
1477
  // Returns the number of control bytes set to kDeleted. For testing only.
1478
0
  size_t TombstonesCount() const {
1479
0
    return static_cast<size_t>(
1480
0
        std::count(control(), control() + capacity(), ctrl_t::kDeleted));
1481
0
  }
1482
1483
  // Helper to enable sanitizer mode validation to protect against reentrant
1484
  // calls during element constructor/destructor.
1485
  template <typename F>
1486
16
  void RunWithReentrancyGuard(F f) {
1487
#ifdef NDEBUG
1488
    f();
1489
    return;
1490
#endif
1491
16
    const size_t cap = capacity();
1492
16
    set_capacity(InvalidCapacity::kReentrance);
1493
16
    f();
1494
16
    set_capacity(cap);
1495
16
  }
Unexecuted instantiation: void absl::container_internal::CommonFields::RunWithReentrancyGuard<absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::destroy(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*)::{lambda()#1}>(absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::destroy(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*)::{lambda()#1})
Unexecuted instantiation: void absl::container_internal::CommonFields::RunWithReentrancyGuard<absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::transfer(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*)::{lambda()#1}>(absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::transfer(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*)::{lambda()#1})
Unexecuted instantiation: void absl::container_internal::CommonFields::RunWithReentrancyGuard<absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::construct<std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&)::{lambda()#1}>(absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::construct<std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>, std::__1::tuple<absl::CommandLineFlag*&&> >(absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, std::__1::piecewise_construct_t const&, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> >&&>&&, std::__1::tuple<absl::CommandLineFlag*&&>&&)::{lambda()#1})
1496
1497
 private:
1498
  // We store the has_infoz bit in the lowest bit of size_.
1499
86
  static constexpr size_t HasInfozShift() { return 1; }
1500
30
  static constexpr size_t HasInfozMask() {
1501
30
    return (size_t{1} << HasInfozShift()) - 1;
1502
30
  }
1503
1504
  // We can't assert that SOO is enabled because we don't have SooEnabled(), but
1505
  // we assert what we can.
1506
0
  void AssertInSooMode() const {
1507
0
    assert(capacity() == SooCapacity());
1508
0
    assert(!has_infoz());
1509
0
  }
1510
1511
  // The number of slots in the backing array. This is always 2^N-1 for an
1512
  // integer N. NOTE: we tried experimenting with compressing the capacity and
1513
  // storing it together with size_: (a) using 6 bits to store the corresponding
1514
  // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
1515
  // size_ and storing size in the low bits. Both of these experiments were
1516
  // regressions, presumably because we need capacity to do find operations.
1517
  size_t capacity_;
1518
1519
  // The size and also has one bit that stores whether we have infoz.
1520
  // TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
1521
  // encode the size in SOO case. We would be making size()/capacity() more
1522
  // expensive in order to have more SOO space.
1523
  size_t size_;
1524
1525
  // Either the control/slots pointers or the SOO slot.
1526
  HeapOrSoo heap_or_soo_;
1527
};
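// A worked example of the size_/has_infoz encoding (an illustration, not a new
// API): with HasInfozShift() == 1, a sampled table holding 5 elements stores
// size_ == (5 << 1) | 1 == 11 (0b1011), so size() == 11 >> 1 == 5 and
// has_infoz() is true because the low bit is set. increment_size() adds
// size_t{1} << 1, which leaves the infoz bit untouched.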
1528
1529
template <class Policy, class Hash, class Eq, class Alloc>
1530
class raw_hash_set;
1531
1532
// Returns the next valid capacity after `n`.
1533
8
inline size_t NextCapacity(size_t n) {
1534
8
  assert(IsValidCapacity(n) || n == 0);
1535
8
  return n * 2 + 1;
1536
8
}
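// For example, starting from an empty table and repeatedly growing:
// 0 -> 1 -> 3 -> 7 -> 15 -> 31 -> ..., i.e. each step yields the next value of
// the form 2^m - 1, which IsValidCapacity accepts.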
1537
1538
// Applies the following mapping to every byte in the control array:
1539
//   * kDeleted -> kEmpty
1540
//   * kEmpty -> kEmpty
1541
//   * _ -> kDeleted
1542
// PRECONDITION:
1543
//   IsValidCapacity(capacity)
1544
//   ctrl[capacity] == ctrl_t::kSentinel
1545
//   ctrl[i] != ctrl_t::kSentinel for all i < capacity
1546
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
1547
1548
// Converts `n` into the next valid capacity, per `IsValidCapacity`.
1549
0
inline size_t NormalizeCapacity(size_t n) {
1550
0
  return n ? ~size_t{} >> countl_zero(n) : 1;
1551
0
}
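// For example, assuming a 64-bit size_t: NormalizeCapacity(0) == 1,
// NormalizeCapacity(5) == ~size_t{} >> countl_zero(5) == ~size_t{} >> 61 == 7,
// and NormalizeCapacity(8) == 15. Values that are already valid capacities
// (2^m - 1) map to themselves.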
1552
1553
// General notes on capacity/growth methods below:
1554
// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
1555
//   average of two empty slots per group.
1556
// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
1557
// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
1558
//   never need to probe (the whole table fits in one group) so we don't need a
1559
//   load factor less than 1.
1560
1561
// Given `capacity`, applies the load factor; i.e., it returns the maximum
1562
// number of values we should put into the table before a resizing rehash.
1563
8
inline size_t CapacityToGrowth(size_t capacity) {
1564
8
  assert(IsValidCapacity(capacity));
1565
  // `capacity*7/8`
1566
8
  if (Group::kWidth == 8 && capacity == 7) {
1567
    // x-x/8 does not work when x==7.
1568
0
    return 6;
1569
0
  }
1570
8
  return capacity - capacity / 8;
1571
8
}
1572
1573
// Given `growth`, "unapplies" the load factor to find how large the capacity
1574
// should be to stay within the load factor.
1575
//
1576
// This might not be a valid capacity and `NormalizeCapacity()` should be
1577
// called on this.
1578
0
inline size_t GrowthToLowerboundCapacity(size_t growth) {
1579
0
  // `growth*8/7`
1580
0
  if (Group::kWidth == 8 && growth == 7) {
1581
0
    // x+(x-1)/7 does not work when x==7.
1582
0
    return 8;
1583
0
  }
1584
0
  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
1585
0
}
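// A worked example of the two helpers above (assuming Group::kWidth == 16):
// CapacityToGrowth(15) == 15 - 15 / 8 == 14, so a table with 15 slots may hold
// 14 elements before the next resizing rehash; the 7/8 load factor leaves
// about two empty control bytes per 16-wide group, as noted above. In the
// other direction, GrowthToLowerboundCapacity(14) == 14 + (14 - 1) / 7 == 15,
// which NormalizeCapacity keeps at 15.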
1586
1587
template <class InputIter>
1588
size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
1589
                                     size_t bucket_count) {
1590
  if (bucket_count != 0) {
1591
    return bucket_count;
1592
  }
1593
  using InputIterCategory =
1594
      typename std::iterator_traits<InputIter>::iterator_category;
1595
  if (std::is_base_of<std::random_access_iterator_tag,
1596
                      InputIterCategory>::value) {
1597
    return GrowthToLowerboundCapacity(
1598
        static_cast<size_t>(std::distance(first, last)));
1599
  }
1600
  return 0;
1601
}
1602
1603
0
constexpr bool SwisstableDebugEnabled() {
1604
0
#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
1605
0
    ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
1606
0
  return true;
1607
0
#else
1608
0
  return false;
1609
0
#endif
1610
0
}
1611
1612
inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
1613
                         const GenerationType* generation_ptr,
1614
16
                         const char* operation) {
1615
16
  if (!SwisstableDebugEnabled()) return;
1616
  // `SwisstableDebugEnabled()` is also true for release builds with hardening
1617
  // enabled. To minimize their impact in those builds:
1618
  // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1619
  // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1620
  //   the chances that the hot paths will be inlined.
1621
16
  if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
1622
0
    ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
1623
0
  }
1624
16
  if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
1625
0
    ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
1626
0
                 operation);
1627
0
  }
1628
16
  if (SwisstableGenerationsEnabled()) {
1629
0
    if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
1630
0
      ABSL_RAW_LOG(FATAL,
1631
0
                   "%s called on invalid iterator. The table could have "
1632
0
                   "rehashed or moved since this iterator was initialized.",
1633
0
                   operation);
1634
0
    }
1635
0
    if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
1636
0
      ABSL_RAW_LOG(
1637
0
          FATAL,
1638
0
          "%s called on invalid iterator. The element was likely erased.",
1639
0
          operation);
1640
0
    }
1641
16
  } else {
1642
16
    if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
1643
0
      ABSL_RAW_LOG(
1644
0
          FATAL,
1645
0
          "%s called on invalid iterator. The element might have been erased "
1646
0
          "or the table might have rehashed. Consider running with "
1647
0
          "--config=asan to diagnose rehashing issues.",
1648
0
          operation);
1649
0
    }
1650
16
  }
1651
16
}
1652
1653
// Note that for comparisons, null/end iterators are valid.
1654
inline void AssertIsValidForComparison(const ctrl_t* ctrl,
1655
                                       GenerationType generation,
1656
32
                                       const GenerationType* generation_ptr) {
1657
32
  if (!SwisstableDebugEnabled()) return;
1658
32
  const bool ctrl_is_valid_for_comparison =
1659
32
      ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
1660
32
  if (SwisstableGenerationsEnabled()) {
1661
0
    if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
1662
0
      ABSL_RAW_LOG(FATAL,
1663
0
                   "Invalid iterator comparison. The table could have rehashed "
1664
0
                   "or moved since this iterator was initialized.");
1665
0
    }
1666
0
    if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
1667
0
      ABSL_RAW_LOG(
1668
0
          FATAL, "Invalid iterator comparison. The element was likely erased.");
1669
0
    }
1670
32
  } else {
1671
32
    ABSL_HARDENING_ASSERT(
1672
32
        ctrl_is_valid_for_comparison &&
1673
32
        "Invalid iterator comparison. The element might have been erased or "
1674
32
        "the table might have rehashed. Consider running with --config=asan to "
1675
32
        "diagnose rehashing issues.");
1676
32
  }
1677
32
}
1678
1679
// If the two iterators come from the same container, then their pointers will
1680
// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice/versa.
1681
// Note: we take slots by reference so that it's not UB if they're uninitialized
1682
// as long as we don't read them (when ctrl is null).
1683
inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
1684
                                      const ctrl_t* ctrl_b,
1685
                                      const void* const& slot_a,
1686
16
                                      const void* const& slot_b) {
1687
  // If either control byte is null, then we can't tell.
1688
16
  if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
1689
16
  const bool a_is_soo = IsSooControl(ctrl_a);
1690
16
  if (a_is_soo != IsSooControl(ctrl_b)) return false;
1691
16
  if (a_is_soo) return slot_a == slot_b;
1692
1693
16
  const void* low_slot = slot_a;
1694
16
  const void* hi_slot = slot_b;
1695
16
  if (ctrl_a > ctrl_b) {
1696
0
    std::swap(ctrl_a, ctrl_b);
1697
0
    std::swap(low_slot, hi_slot);
1698
0
  }
1699
16
  return ctrl_b < low_slot && low_slot <= hi_slot;
1700
16
}
1701
1702
// Asserts that two iterators come from the same container.
1703
// Note: we take slots by reference so that it's not UB if they're uninitialized
1704
// as long as we don't read them (when ctrl is null).
1705
inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
1706
                                const void* const& slot_a,
1707
                                const void* const& slot_b,
1708
                                const GenerationType* generation_ptr_a,
1709
16
                                const GenerationType* generation_ptr_b) {
1710
16
  if (!SwisstableDebugEnabled()) return;
1711
  // `SwisstableDebugEnabled()` is also true for release builds with hardening
1712
  // enabled. To minimize their impact in those builds:
1713
  // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1714
  // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1715
  //   the chances that the hot paths will be inlined.
1716
1717
  // fail_if(is_invalid, message) crashes when is_invalid is true and provides
1718
  // an error message based on `message`.
1719
16
  const auto fail_if = [](bool is_invalid, const char* message) {
1720
16
    if (ABSL_PREDICT_FALSE(is_invalid)) {
1721
0
      ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message);
1722
0
    }
1723
16
  };
1724
1725
16
  const bool a_is_default = ctrl_a == EmptyGroup();
1726
16
  const bool b_is_default = ctrl_b == EmptyGroup();
1727
16
  if (a_is_default && b_is_default) return;
1728
16
  fail_if(a_is_default != b_is_default,
1729
16
          "Comparing default-constructed hashtable iterator with a "
1730
16
          "non-default-constructed hashtable iterator.");
1731
1732
16
  if (SwisstableGenerationsEnabled()) {
1733
0
    if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
1734
    // Users don't need to know whether the tables are SOO so don't mention SOO
1735
    // in the debug message.
1736
0
    const bool a_is_soo = IsSooControl(ctrl_a);
1737
0
    const bool b_is_soo = IsSooControl(ctrl_b);
1738
0
    fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo),
1739
0
            "Comparing iterators from different hashtables.");
1740
1741
0
    const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
1742
0
    const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
1743
0
    fail_if(a_is_empty != b_is_empty,
1744
0
            "Comparing an iterator from an empty hashtable with an iterator "
1745
0
            "from a non-empty hashtable.");
1746
0
    fail_if(a_is_empty && b_is_empty,
1747
0
            "Comparing iterators from different empty hashtables.");
1748
1749
0
    const bool a_is_end = ctrl_a == nullptr;
1750
0
    const bool b_is_end = ctrl_b == nullptr;
1751
0
    fail_if(a_is_end || b_is_end,
1752
0
            "Comparing iterator with an end() iterator from a different "
1753
0
            "hashtable.");
1754
0
    fail_if(true, "Comparing non-end() iterators from different hashtables.");
1755
16
  } else {
1756
16
    ABSL_HARDENING_ASSERT(
1757
16
        AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
1758
16
        "Invalid iterator comparison. The iterators may be from different "
1759
16
        "containers or the container might have rehashed or moved. Consider "
1760
16
        "running with --config=asan to diagnose issues.");
1761
16
  }
1762
16
}
1763
1764
struct FindInfo {
1765
  size_t offset;
1766
  size_t probe_length;
1767
};
1768
1769
// Whether a table is "small". A small table fits entirely into a probing
1770
// group, i.e., has a capacity < `Group::kWidth`.
1771
//
1772
// In small mode we are able to use the whole capacity. The extra control
1773
// bytes give us at least one "empty" control byte to stop the iteration.
1774
// This is important to make 1 a valid capacity.
1775
//
1776
// In small mode only the first `capacity` control bytes after the sentinel
1777
// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
1778
// represent a real slot. This is important to take into account on
1779
// `find_first_non_full()`, where we never try
1780
// `ShouldInsertBackwards()` for small tables.
1781
16
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
1782
1783
// Whether a table fits entirely into a probing group.
1784
// Arbitrary order of elements in such tables is correct.
1785
34
inline bool is_single_group(size_t capacity) {
1786
34
  return capacity <= Group::kWidth;
1787
34
}
1788
1789
// Begins a probing operation on `common.control`, using `hash`.
1790
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
1791
40
                                      size_t hash) {
1792
40
  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
1793
40
}
1794
40
inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
1795
40
  return probe(common.control(), common.capacity(), hash);
1796
40
}
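// An illustration of the probing loop driven by probe_seq (a sketch under the
// assumption that probe_seq advances by triangular numbers of groups, i.e. the
// group offsets are H1, H1 + kWidth, H1 + 3 * kWidth, H1 + 6 * kWidth, ...,
// all reduced with the capacity mask; the real class is defined earlier in
// this header):
//
//   size_t mask = common.capacity();      // capacity is 2^m - 1, i.e. a mask
//   size_t offset = H1(hash, ctrl) & mask;
//   size_t index = 0;
//   while (true) {
//     GroupFullEmptyOrDeleted g{ctrl + offset};
//     // ... examine the group starting at `offset` ...
//     index += Group::kWidth;
//     offset = (offset + index) & mask;
//   }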
1797
1798
// Probes an array of control bits using a probe sequence derived from `hash`,
1799
// and returns the offset corresponding to the first deleted or empty slot.
1800
//
1801
// Behavior when the entire table is full is undefined.
1802
//
1803
// NOTE: this function must work with tables having both empty and deleted
1804
// slots in the same group. Such tables appear during `erase()`.
1805
template <typename = void>
1806
0
inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
1807
0
  auto seq = probe(common, hash);
1808
0
  const ctrl_t* ctrl = common.control();
1809
0
  if (IsEmptyOrDeleted(ctrl[seq.offset()]) &&
1810
0
      !ShouldInsertBackwards(common.capacity(), hash, ctrl)) {
1811
0
    return {seq.offset(), /*probe_length=*/0};
1812
0
  }
1813
0
  while (true) {
1814
0
    GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
1815
0
    auto mask = g.MaskEmptyOrDeleted();
1816
0
    if (mask) {
1817
0
      return {
1818
0
          seq.offset(GetInsertionOffset(mask, common.capacity(), hash, ctrl)),
1819
0
          seq.index()};
1820
0
    }
1821
0
    seq.next();
1822
0
    assert(seq.index() <= common.capacity() && "full table!");
1823
0
  }
1824
0
}
1825
1826
// Extern template for the inline function keeps the possibility of inlining.
1827
// When the compiler decides not to inline, no symbols will be added to the
1828
// corresponding translation unit.
1829
extern template FindInfo find_first_non_full(const CommonFields&, size_t);
1830
1831
// Non-inlined version of find_first_non_full for use in less
1832
// performance critical routines.
1833
FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
1834
1835
8
inline void ResetGrowthLeft(CommonFields& common) {
1836
8
  common.growth_info().InitGrowthLeftNoDeleted(
1837
8
      CapacityToGrowth(common.capacity()) - common.size());
1838
8
}
1839
1840
// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
1841
// array as empty.
1842
2
inline void ResetCtrl(CommonFields& common, size_t slot_size) {
1843
2
  const size_t capacity = common.capacity();
1844
2
  ctrl_t* ctrl = common.control();
1845
2
  std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
1846
2
              capacity + 1 + NumClonedBytes());
1847
2
  ctrl[capacity] = ctrl_t::kSentinel;
1848
2
  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
1849
2
}
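// For example, with capacity == 3 and Group::kWidth == 16, ResetCtrl writes
// 3 + 1 + 15 == 19 control bytes:
//   E E E S E E E E E E E E E E E E E E E
// where E is ctrl_t::kEmpty and S is the ctrl_t::kSentinel placed at index
// `capacity`.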
1850
1851
// Sets sanitizer poisoning for slot corresponding to control byte being set.
1852
inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
1853
16
                                size_t slot_size) {
1854
16
  assert(i < c.capacity());
1855
16
  auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
1856
16
  if (IsFull(h)) {
1857
16
    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
1858
16
  } else {
1859
0
    SanitizerPoisonMemoryRegion(slot_i, slot_size);
1860
0
  }
1861
16
}
1862
1863
// Sets `ctrl[i]` to `h`.
1864
//
1865
// Unlike setting it directly, this function will perform bounds checks and
1866
// mirror the value to the cloned tail if necessary.
1867
inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
1868
16
                    size_t slot_size) {
1869
16
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
1870
16
  ctrl_t* ctrl = c.control();
1871
16
  ctrl[i] = h;
1872
16
  ctrl[((i - NumClonedBytes()) & c.capacity()) +
1873
16
       (NumClonedBytes() & c.capacity())] = h;
1874
16
}
1875
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
1876
16
inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
1877
16
  SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
1878
16
}
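// A worked example of the clone-mirroring arithmetic in SetCtrl above
// (assuming Group::kWidth == 16, so NumClonedBytes() == 15): for a small table
// with capacity == 7 and i == 2, the mirrored index is
//   ((2 - 15) & 7) + (15 & 7) == 3 + 7 == 10 == capacity + 1 + i,
// and for a large table with capacity == 15 and i == 2 it is
//   ((2 - 15) & 15) + (15 & 15) == 3 + 15 == 18 == capacity + 1 + i,
// so the control byte is stored both at ctrl[i] and at its cloned position.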
1879
1880
// Like SetCtrl, but in a single group table, we can save some operations when
1881
// setting the cloned control byte.
1882
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
1883
0
                                      size_t slot_size) {
1884
0
  assert(is_single_group(c.capacity()));
1885
0
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
1886
0
  ctrl_t* ctrl = c.control();
1887
0
  ctrl[i] = h;
1888
0
  ctrl[i + c.capacity() + 1] = h;
1889
0
}
1890
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
1891
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
1892
0
                                      size_t slot_size) {
1893
0
  SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
1894
0
}
1895
1896
// growth_info (which is a size_t) is stored with the backing array.
1897
0
constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
1898
0
  return (std::max)(align_of_slot, alignof(GrowthInfo));
1899
0
}
1900
1901
// Returns the address of the ith slot in slots where each slot occupies
1902
// slot_size.
1903
40
inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
1904
40
  return static_cast<void*>(static_cast<char*>(slot_array) +
1905
40
                            (slot * slot_size));
1906
40
}
1907
1908
// Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
1909
// No insertion to the table allowed during Callback call.
1910
// Erasure is allowed only for the element passed to the callback.
1911
template <class SlotType, class Callback>
1912
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
1913
0
    const CommonFields& c, SlotType* slot, Callback cb) {
1914
0
  const size_t cap = c.capacity();
1915
0
  const ctrl_t* ctrl = c.control();
1916
0
  if (is_small(cap)) {
1917
0
    // Mirrored/cloned control bytes in a small table are also located in the
1918
0
    // first group (starting from position 0). We are taking the group from position
1919
0
    // `capacity` in order to avoid duplicates.
1920
0
1921
0
    // A small table's capacity fits into the portable group, where
1922
0
    // GroupPortableImpl::MaskFull is more efficient for the
1923
0
    // capacity <= GroupPortableImpl::kWidth.
1924
0
    assert(cap <= GroupPortableImpl::kWidth &&
1925
0
           "unexpectedly large small capacity");
1926
0
    static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
1927
0
                  "unexpected group width");
1928
0
    // Group starts from kSentinel slot, so indices in the mask will
1929
0
    // be increased by 1.
1930
0
    const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
1931
0
    --ctrl;
1932
0
    --slot;
1933
0
    for (uint32_t i : mask) {
1934
0
      cb(ctrl + i, slot + i);
1935
0
    }
1936
0
    return;
1937
0
  }
1938
0
  size_t remaining = c.size();
1939
0
  ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
1940
0
  while (remaining != 0) {
1941
0
    for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
1942
0
      assert(IsFull(ctrl[i]) && "hash table was modified unexpectedly");
1943
0
      cb(ctrl + i, slot + i);
1944
0
      --remaining;
1945
0
    }
1946
0
    ctrl += Group::kWidth;
1947
0
    slot += Group::kWidth;
1948
0
    assert((remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
1949
0
           "hash table was modified unexpectedly");
1950
0
  }
1951
0
  // NOTE: erasure of the current element is allowed in callback for
1952
0
  // absl::erase_if specialization. So we use `>=`.
1953
0
  assert(original_size_for_assert >= c.size() &&
1954
0
         "hash table was modified unexpectedly");
1955
0
}
Unexecuted instantiation: void absl::container_internal::IterateOverFullSlots<absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::AssertHashEqConsistent<std::__1::basic_string_view<char, std::__1::char_traits<char> > >(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&)::{lambda(absl::container_internal::ctrl_t const*, absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*)#1}>(absl::container_internal::CommonFields const&, absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::AssertHashEqConsistent<std::__1::basic_string_view<char, std::__1::char_traits<char> > >(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&)::{lambda(absl::container_internal::ctrl_t const*, absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*)#1})
Unexecuted instantiation: void absl::container_internal::IterateOverFullSlots<absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::destroy_slots()::{lambda(absl::container_internal::ctrl_t const*, absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*)#1}>(absl::container_internal::CommonFields const&, absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*, absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<std::__1::basic_string_view<char, std::__1::char_traits<char> > const, absl::CommandLineFlag*> > >::destroy_slots()::{lambda(absl::container_internal::ctrl_t const*, absl::container_internal::map_slot_type<std::__1::basic_string_view<char, std::__1::char_traits<char> >, absl::CommandLineFlag*>*)#1})
1956
1957
template <typename CharAlloc>
1958
8
constexpr bool ShouldSampleHashtablezInfo() {
1959
  // Folks with custom allocators often make unwarranted assumptions about the
1960
  // behavior of their classes vis-a-vis trivial destructibility and what
1961
  // calls they will or won't make.  Avoid sampling for people with custom
1962
  // allocators to get us out of this mess.  This is not a hard guarantee but
1963
  // a workaround while we plan the exact guarantee we want to provide.
1964
8
  return std::is_same<CharAlloc, std::allocator<char>>::value;
1965
8
}
1966
1967
template <bool kSooEnabled>
1968
HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key,
1969
                                          size_t sizeof_value,
1970
                                          size_t old_capacity, bool was_soo,
1971
                                          HashtablezInfoHandle forced_infoz,
1972
8
                                          CommonFields& c) {
1973
8
  if (forced_infoz.IsSampled()) return forced_infoz;
1974
  // In SOO, we sample on the first insertion so if this is an empty SOO case
1975
  // (e.g. when reserve is called), then we still need to sample.
1976
8
  if (kSooEnabled && was_soo && c.size() == 0) {
1977
0
    return Sample(sizeof_slot, sizeof_key, sizeof_value, SooCapacity());
1978
0
  }
1979
  // For non-SOO cases, we sample whenever the capacity is increasing from zero
1980
  // to non-zero.
1981
8
  if (!kSooEnabled && old_capacity == 0) {
1982
2
    return Sample(sizeof_slot, sizeof_key, sizeof_value, 0);
1983
2
  }
1984
6
  return c.infoz();
1985
8
}
1986
1987
// Helper class to perform resize of the hash set.
1988
//
1989
// It contains special optimizations for small group resizes.
1990
// See GrowIntoSingleGroupShuffleControlBytes for details.
1991
class HashSetResizeHelper {
1992
 public:
1993
  explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot,
1994
                               HashtablezInfoHandle forced_infoz)
1995
      : old_capacity_(c.capacity()),
1996
        had_infoz_(c.has_infoz()),
1997
        was_soo_(was_soo),
1998
        had_soo_slot_(had_soo_slot),
1999
8
        forced_infoz_(forced_infoz) {}
2000
2001
  // Version of `find_first_non_full` optimized for small groups.
2002
  // Beneficial only right after calling `raw_hash_set::resize`.
2003
  // It is safe to call when the capacity is big or was not changed, but there
2004
  // will be no performance benefit.
2005
  // It implicitly assumes that `resize` will call
2006
  // `GrowSizeIntoSingleGroup*` when `IsGrowingIntoSingleGroupApplicable` holds.
2007
  // Falls back to `find_first_non_full` in case of big groups.
2008
  static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
2009
                                              size_t old_capacity,
2010
8
                                              size_t hash) {
2011
8
    if (!IsGrowingIntoSingleGroupApplicable(old_capacity, c.capacity())) {
2012
0
      return find_first_non_full(c, hash);
2013
0
    }
2014
    // Find a location for the new element non-deterministically.
2015
    // Note that any position is correct.
2016
    // It will be located at `half_old_capacity` or one of the other
2017
    // empty slots with approximately 50% probability each.
2018
8
    size_t offset = probe(c, hash).offset();
2019
2020
    // Note that we intentionally use unsigned int underflow.
2021
8
    if (offset - (old_capacity + 1) >= old_capacity) {
2022
      // Offset falls on kSentinel or into the mostly occupied first half.
2023
5
      offset = old_capacity / 2;
2024
5
    }
2025
8
    assert(IsEmpty(c.control()[offset]));
2026
8
    return FindInfo{offset, 0};
2027
8
  }
2028
2029
8
  HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; }
2030
0
  void* old_soo_data() { return old_heap_or_soo_.get_soo_data(); }
2031
12
  ctrl_t* old_ctrl() const {
2032
12
    assert(!was_soo_);
2033
12
    return old_heap_or_soo_.control();
2034
12
  }
2035
24
  void* old_slots() const {
2036
24
    assert(!was_soo_);
2037
24
    return old_heap_or_soo_.slot_array().get();
2038
24
  }
2039
14
  size_t old_capacity() const { return old_capacity_; }
2040
2041
  // Returns the index of the SOO slot when growing from SOO to non-SOO in a
2042
  // single group. See also InitControlBytesAfterSoo(). It's important to use
2043
  // index 1 so that when resizing from capacity 1 to 3, we can still have
2044
  // random iteration order between the first two inserted elements.
2045
  // I.e. it allows inserting the second element at either index 0 or 2.
2046
0
  static size_t SooSlotIndex() { return 1; }
2047
2048
  // Allocates a backing array for the hashtable.
2049
  // Reads `capacity` and updates all other fields based on the result of
2050
  // the allocation.
2051
  //
2052
  // It also may do the following actions:
2053
  // 1. initialize control bytes
2054
  // 2. initialize slots
2055
  // 3. deallocate old slots.
2056
  //
2057
  // We are bundling a lot of functionality
2058
  // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code
2059
  // duplication in raw_hash_set<>::resize.
2060
  //
2061
  // `c.capacity()` must be nonzero.
2062
  // POSTCONDITIONS:
2063
  //  1. CommonFields is initialized.
2064
  //
2065
  //  if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy
2066
  //    Both control bytes and slots are fully initialized.
2067
  //    old_slots are deallocated.
2068
  //    infoz.RecordRehash is called.
2069
  //
2070
  //  if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy
2071
  //    Control bytes are fully initialized.
2072
  //    infoz.RecordRehash is called.
2073
  //    GrowSizeIntoSingleGroup must be called to finish slots initialization.
2074
  //
2075
  //  if !IsGrowingIntoSingleGroupApplicable
2076
  //    Control bytes are initialized to empty table via ResetCtrl.
2077
  //    raw_hash_set<>::resize must insert elements regularly.
2078
  //    infoz.RecordRehash is called if old_capacity == 0.
2079
  //
2080
  //  Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
2081
  template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
2082
            bool SooEnabled, size_t AlignOfSlot>
2083
  ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc,
2084
                                               ctrl_t soo_slot_h2,
2085
                                               size_t key_size,
2086
8
                                               size_t value_size) {
2087
8
    assert(c.capacity());
2088
8
    HashtablezInfoHandle infoz =
2089
8
        ShouldSampleHashtablezInfo<Alloc>()
2090
8
            ? SampleHashtablezInfo<SooEnabled>(SizeOfSlot, key_size, value_size,
2091
8
                                               old_capacity_, was_soo_,
2092
8
                                               forced_infoz_, c)
2093
8
            : HashtablezInfoHandle{};
2094
2095
8
    const bool has_infoz = infoz.IsSampled();
2096
8
    RawHashSetLayout layout(c.capacity(), AlignOfSlot, has_infoz);
2097
8
    char* mem = static_cast<char*>(Allocate<BackingArrayAlignment(AlignOfSlot)>(
2098
8
        &alloc, layout.alloc_size(SizeOfSlot)));
2099
8
    const GenerationType old_generation = c.generation();
2100
8
    c.set_generation_ptr(
2101
8
        reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
2102
8
    c.set_generation(NextGeneration(old_generation));
2103
8
    c.set_control(reinterpret_cast<ctrl_t*>(mem + layout.control_offset()));
2104
8
    c.set_slots(mem + layout.slot_offset());
2105
8
    ResetGrowthLeft(c);
2106
2107
8
    const bool grow_single_group =
2108
8
        IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity());
2109
8
    if (SooEnabled && was_soo_ && grow_single_group) {
2110
0
      InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity());
2111
0
      if (TransferUsesMemcpy && had_soo_slot_) {
2112
0
        TransferSlotAfterSoo(c, SizeOfSlot);
2113
0
      }
2114
      // SooEnabled implies that old_capacity_ != 0.
2115
8
    } else if ((SooEnabled || old_capacity_ != 0) && grow_single_group) {
2116
6
      if (TransferUsesMemcpy) {
2117
6
        GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot);
2118
6
        DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot);
2119
6
      } else {
2120
0
        GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity());
2121
0
      }
2122
6
    } else {
2123
2
      ResetCtrl(c, SizeOfSlot);
2124
2
    }
2125
2126
8
    c.set_has_infoz(has_infoz);
2127
8
    if (has_infoz) {
2128
0
      infoz.RecordStorageChanged(c.size(), layout.capacity());
2129
0
      if ((SooEnabled && was_soo_) || grow_single_group || old_capacity_ == 0) {
2130
0
        infoz.RecordRehash(0);
2131
0
      }
2132
0
      c.set_infoz(infoz);
2133
0
    }
2134
8
    return grow_single_group;
2135
8
  }
2136
2137
  // Relocates slots into new single group consistent with
2138
  // GrowIntoSingleGroupShuffleControlBytes.
2139
  //
2140
  // PRECONDITIONS:
2141
  // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
2142
  template <class PolicyTraits, class Alloc>
2143
0
  void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) {
2144
0
    assert(old_capacity_ < Group::kWidth / 2);
2145
0
    assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
2146
0
    using slot_type = typename PolicyTraits::slot_type;
2147
0
    assert(is_single_group(c.capacity()));
2148
0
2149
0
    auto* new_slots = static_cast<slot_type*>(c.slot_array());
2150
0
    auto* old_slots_ptr = static_cast<slot_type*>(old_slots());
2151
0
2152
0
    size_t shuffle_bit = old_capacity_ / 2 + 1;
2153
0
    for (size_t i = 0; i < old_capacity_; ++i) {
2154
0
      if (IsFull(old_ctrl()[i])) {
2155
0
        size_t new_i = i ^ shuffle_bit;
2156
0
        SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type));
2157
0
        PolicyTraits::transfer(&alloc_ref, new_slots + new_i,
2158
0
                               old_slots_ptr + i);
2159
0
      }
2160
0
    }
2161
0
    PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
2162
0
  }
2163
2164
  // Deallocates old backing array.
2165
  template <size_t AlignOfSlot, class CharAlloc>
2166
6
  void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) {
2167
6
    SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
2168
6
    auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_);
2169
6
    Deallocate<BackingArrayAlignment(AlignOfSlot)>(
2170
6
        &alloc_ref, old_ctrl() - layout.control_offset(),
2171
6
        layout.alloc_size(slot_size));
2172
6
  }
2173
2174
 private:
2175
  // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
2176
  static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
2177
22
                                                 size_t new_capacity) {
2178
    // NOTE that `old_capacity < new_capacity` in order to have
2179
    // `old_capacity < Group::kWidth / 2` to make faster copies of 8 bytes.
2180
22
    return is_single_group(new_capacity) && old_capacity < new_capacity;
2181
22
  }
2182
2183
  // Relocates control bytes and slots into new single group for
2184
  // transferable objects.
2185
  // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
2186
  void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size);
2187
2188
  // If there was an SOO slot and slots are transferable, transfers the SOO slot
2189
  // into the new heap allocation. Must be called only if
2190
  // IsGrowingIntoSingleGroupApplicable returned true.
2191
  void TransferSlotAfterSoo(CommonFields& c, size_t slot_size);
2192
2193
  // Shuffle control bits deterministically to the next capacity.
2194
  // Returns offset for newly added element with given hash.
2195
  //
2196
  // PRECONDITIONs:
2197
  // 1. new_ctrl is allocated for new_capacity,
2198
  //    but not initialized.
2199
  // 2. new_capacity is a single group.
2200
  //
2201
  // All elements are transferred into the first `old_capacity + 1` positions
2202
  // of the new_ctrl. Elements are rotated by `old_capacity_ / 2 + 1` positions
2203
  // in order to change the order and keep it non-deterministic.
2204
  // Although the rotation itself is deterministic, the position of the newly
2205
  // added element is based on `H1` and is not deterministic.
2206
  //
2207
  // Examples:
2208
  // S = kSentinel, E = kEmpty
2209
  //
2210
  // old_ctrl = SEEEEEEEE...
2211
  // new_ctrl = ESEEEEEEE...
2212
  //
2213
  // old_ctrl = 0SEEEEEEE...
2214
  // new_ctrl = E0ESE0EEE...
2215
  //
2216
  // old_ctrl = 012S012EEEEEEEEE...
2217
  // new_ctrl = 2E01EEES2E01EEE...
2218
  //
2219
  // old_ctrl = 0123456S0123456EEEEEEEEEEE...
2220
  // new_ctrl = 456E0123EEEEEES456E0123EEE...
2221
  void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
2222
                                              size_t new_capacity) const;
2223
2224
  // If the table was SOO, initializes new control bytes. `h2` is the control
2225
  // byte corresponding to the full slot. Must be called only if
2226
  // IsGrowingIntoSingleGroupApplicable returned true.
2227
  // Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`.
2228
  void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
2229
                                size_t new_capacity);
2230
2231
  // Shuffle trivially transferable slots in a way consistent with
2232
  // GrowIntoSingleGroupShuffleControlBytes.
2233
  //
2234
  // PRECONDITIONs:
2235
  // 1. old_capacity must be non-zero.
2236
  // 2. new_ctrl is fully initialized using
2237
  //    GrowIntoSingleGroupShuffleControlBytes.
2238
  // 3. new_slots is allocated and *not* poisoned.
2239
  //
2240
  // POSTCONDITIONS:
2241
  // 1. new_slots are transferred from old_slots_ consistent with
2242
  //    GrowIntoSingleGroupShuffleControlBytes.
2243
  // 2. Empty new_slots are *not* poisoned.
2244
  void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots,
2245
                                                   size_t slot_size) const;
2246
2247
  // Poisons the slots left empty after slots were transferred using the
2248
  // deterministic algorithm described above.
2249
  // PRECONDITIONs:
2250
  // 1. new_ctrl is fully initialized using
2251
  //    GrowIntoSingleGroupShuffleControlBytes.
2252
  // 2. new_slots is fully initialized consistent with
2253
  //    GrowIntoSingleGroupShuffleControlBytes.
2254
6
  void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
2255
    // Poison non-full slots.
2256
56
    for (size_t i = 0; i < c.capacity(); ++i) {
2257
50
      if (!IsFull(c.control()[i])) {
2258
28
        SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
2259
28
                                    slot_size);
2260
28
      }
2261
50
    }
2262
6
  }
2263
2264
  HeapOrSoo old_heap_or_soo_;
2265
  size_t old_capacity_;
2266
  bool had_infoz_;
2267
  bool was_soo_;
2268
  bool had_soo_slot_;
2269
  // Either null infoz or a pre-sampled forced infoz for SOO tables.
2270
  HashtablezInfoHandle forced_infoz_;
2271
};
2272
2273
16
inline void PrepareInsertCommon(CommonFields& common) {
2274
16
  common.increment_size();
2275
16
  common.maybe_increment_generation_on_insert();
2276
16
}
2277
2278
// Like prepare_insert, but for the case of inserting into a full SOO table.
2279
size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
2280
                             CommonFields& common);
2281
2282
// PolicyFunctions bundles together some information for a particular
2283
// raw_hash_set<T, ...> instantiation. This information is passed to
2284
// type-erased functions that want to do small amounts of type-specific
2285
// work.
2286
struct PolicyFunctions {
2287
  size_t slot_size;
2288
2289
  // Returns the pointer to the hash function stored in the set.
2290
  const void* (*hash_fn)(const CommonFields& common);
2291
2292
  // Returns the hash of the pointed-to slot.
2293
  size_t (*hash_slot)(const void* hash_fn, void* slot);
2294
2295
  // Transfers the contents of src_slot to dst_slot.
2296
  void (*transfer)(void* set, void* dst_slot, void* src_slot);
2297
2298
  // Deallocates the backing store from common.
2299
  void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
2300
2301
  // Resizes set to the new capacity.
2302
  // Arguments are used as in raw_hash_set::resize_impl.
2303
  void (*resize)(CommonFields& common, size_t new_capacity,
2304
                 HashtablezInfoHandle forced_infoz);
2305
};
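// Illustrative sketch (not part of the library): the same type-erasure idea in
// miniature. A small table of function pointers lets one shared, non-templated
// routine operate on slots without knowing their static type. The names below
// (MiniPolicy, RelocateBySize) are hypothetical.
//
//   struct MiniPolicy {
//     size_t slot_size;
//     void (*transfer)(void* dst, void* src);
//   };
//
//   template <size_t SizeOfSlot>
//   void RelocateBySize(void* dst, void* src) {
//     memcpy(dst, src, SizeOfSlot);  // shared by every type of this size
//   }
//
//   // All trivially relocatable 8-byte slot types can share one instance.
//   constexpr MiniPolicy kMini8 = {8, &RelocateBySize<8>};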
2306
2307
// ClearBackingArray clears the backing array, either modifying it in place,
2308
// or creating a new one based on the value of "reuse".
2309
// REQUIRES: c.capacity > 0
2310
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
2311
                       bool reuse, bool soo_enabled);
2312
2313
// Type-erased version of raw_hash_set::erase_meta_only.
2314
void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
2315
2316
// Function to place in PolicyFunctions::dealloc for raw_hash_sets
2317
// that are using std::allocator. This allows us to share the same
2318
// function body for raw_hash_set instantiations that have the
2319
// same slot alignment.
2320
template <size_t AlignOfSlot>
2321
ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
2322
0
                                                const PolicyFunctions& policy) {
2323
  // Unpoison before returning the memory to the allocator.
2324
0
  SanitizerUnpoisonMemoryRegion(common.slot_array(),
2325
0
                                policy.slot_size * common.capacity());
2326
2327
0
  std::allocator<char> alloc;
2328
0
  common.infoz().Unregister();
2329
0
  Deallocate<BackingArrayAlignment(AlignOfSlot)>(
2330
0
      &alloc, common.backing_array_start(),
2331
0
      common.alloc_size(policy.slot_size, AlignOfSlot));
2332
0
}
2333
2334
// For trivially relocatable types we use memcpy directly. This allows us to
2335
// share the same function body for raw_hash_set instantiations that have the
2336
// same slot size as long as they are relocatable.
2337
template <size_t SizeOfSlot>
2338
0
ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
2339
0
  memcpy(dst, src, SizeOfSlot);
2340
0
}
2341
2342
// Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case.
2343
const void* GetHashRefForEmptyHasher(const CommonFields& common);
2344
2345
// Given the hash of a value not currently in the table and the first empty
2346
// slot in the probe sequence, finds a viable slot index to insert it at.
2347
//
2348
// In case there's no space left, the table can be resized or rehashed
2349
// (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash).
2350
//
2351
// If there are no deleted slots and growth_left is positive, the element
2352
// can be inserted at the provided `target` position.
2353
//
2354
// When the table has deleted slots (according to GrowthInfo), the target
2355
// position will be searched one more time using `find_first_non_full`.
2356
//
2357
// REQUIRES: Table is not SOO.
2358
// REQUIRES: At least one non-full slot available.
2359
// REQUIRES: `target` is a valid empty position to insert.
2360
size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
2361
                           const PolicyFunctions& policy);
2362
2363
// A SwissTable.
2364
//
2365
// Policy: a policy defines how to perform different operations on
2366
// the slots of the hashtable (see hash_policy_traits.h for the full interface
2367
// of policy).
2368
//
2369
// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
2370
// functor should accept a key and return size_t as hash. For best performance
2371
// it is important that the hash function provides high entropy across all bits
2372
// of the hash.
2373
//
2374
// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
2375
// should accept two (of possibly different type) keys and return a bool: true
2376
// if they are equal, false if they are not. If two keys compare equal, then
2377
// their hash values as defined by Hash MUST be equal.
2378
//
2379
// Allocator: an Allocator
2380
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
2381
// the storage of the hashtable will be allocated and the elements will be
2382
// constructed and destroyed.
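//
// For illustration (a sketch, not part of the library): a user-provided
// Hash/Eq pair that maintains the required invariant for a simple key type.
// `PointKey`, `PointHash`, and `PointEq` are hypothetical names.
//
//   struct PointKey { int x, y; };
//   struct PointHash {
//     size_t operator()(const PointKey& p) const {
//       // Combines both fields, so keys that compare equal hash equally.
//       return std::hash<int>()(p.x) * 31u ^ std::hash<int>()(p.y);
//     }
//   };
//   struct PointEq {
//     bool operator()(const PointKey& a, const PointKey& b) const {
//       return a.x == b.x && a.y == b.y;
//     }
//   };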
2383
template <class Policy, class Hash, class Eq, class Alloc>
2384
class raw_hash_set {
2385
  using PolicyTraits = hash_policy_traits<Policy>;
2386
  using KeyArgImpl =
2387
      KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
2388
2389
 public:
2390
  using init_type = typename PolicyTraits::init_type;
2391
  using key_type = typename PolicyTraits::key_type;
2392
  // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
2393
  // code fixes!
2394
  using slot_type = typename PolicyTraits::slot_type;
2395
  using allocator_type = Alloc;
2396
  using size_type = size_t;
2397
  using difference_type = ptrdiff_t;
2398
  using hasher = Hash;
2399
  using key_equal = Eq;
2400
  using policy_type = Policy;
2401
  using value_type = typename PolicyTraits::value_type;
2402
  using reference = value_type&;
2403
  using const_reference = const value_type&;
2404
  using pointer = typename absl::allocator_traits<
2405
      allocator_type>::template rebind_traits<value_type>::pointer;
2406
  using const_pointer = typename absl::allocator_traits<
2407
      allocator_type>::template rebind_traits<value_type>::const_pointer;
2408
2409
  // Alias used for heterogeneous lookup functions.
2410
  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
2411
  // `key_type` otherwise. It permits template argument deduction on `K` for the
2412
  // transparent case.
2413
  template <class K>
2414
  using key_arg = typename KeyArgImpl::template type<K, key_type>;
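  // For illustration (a sketch, not part of the library): with transparent
  // functors, such as those used by absl::flat_hash_set<std::string>,
  // `key_arg<K>` lets lookups deduce `K`, so no temporary std::string is
  // materialized for the probe:
  //
  //   absl::flat_hash_set<std::string> s = {"abc"};
  //   absl::string_view needle = "abc";
  //   bool hit = s.contains(needle);  // heterogeneous lookup, no copy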
2415
2416
 private:
2417
  // TODO(b/289225379): we could add extra SOO space inside raw_hash_set
2418
  // after CommonFields to allow inlining larger slot_types (e.g. std::string),
2419
  // but it's a bit complicated if we want to support incomplete mapped_type in
2420
  // flat_hash_map. We could potentially do this for flat_hash_set and for an
2421
  // allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic
2422
  // types, strings, cords, and pairs/tuples of allowlisted types.
2423
0
  constexpr static bool SooEnabled() {
2424
0
    return PolicyTraits::soo_enabled() &&
2425
0
           sizeof(slot_type) <= sizeof(HeapOrSoo) &&
2426
0
           alignof(slot_type) <= alignof(HeapOrSoo);
2427
0
  }
2428
2429
0
  constexpr static size_t DefaultCapacity() {
2430
0
    return SooEnabled() ? SooCapacity() : 0;
2431
0
  }
2432
2433
  // Whether `size` fits in the SOO capacity of this table.
2434
281
  bool fits_in_soo(size_t size) const {
2435
281
    return SooEnabled() && size <= SooCapacity();
2436
281
  }
2437
  // Whether this table is in SOO mode or non-SOO mode.
2438
273
  bool is_soo() const { return fits_in_soo(capacity()); }
2439
0
  bool is_full_soo() const { return is_soo() && !empty(); }
2440
2441
  // Give an early error when key_type is not hashable/eq.
2442
  auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
2443
  auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
2444
2445
  using AllocTraits = absl::allocator_traits<allocator_type>;
2446
  using SlotAlloc = typename absl::allocator_traits<
2447
      allocator_type>::template rebind_alloc<slot_type>;
2448
  // People are often sloppy with the exact type of their allocator (sometimes
2449
  // it has an extra const or is missing the pair, but rebinds made it work
2450
  // anyway).
2451
  using CharAlloc =
2452
      typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
2453
  using SlotAllocTraits = typename absl::allocator_traits<
2454
      allocator_type>::template rebind_traits<slot_type>;
2455
2456
  static_assert(std::is_lvalue_reference<reference>::value,
2457
                "Policy::element() must return a reference");
2458
2459
  template <typename T>
2460
  struct SameAsElementReference
2461
      : std::is_same<typename std::remove_cv<
2462
                         typename std::remove_reference<reference>::type>::type,
2463
                     typename std::remove_cv<
2464
                         typename std::remove_reference<T>::type>::type> {};
2465
2466
  // An enabler for insert(T&&): T must be convertible to init_type or be the
2467
  // same as [cv] value_type [ref].
2468
  // Note: we separate SameAsElementReference into its own type to avoid using
2469
  // reference unless we need to. MSVC doesn't seem to like it in some
2470
  // cases.
2471
  template <class T>
2472
  using RequiresInsertable = typename std::enable_if<
2473
      absl::disjunction<std::is_convertible<T, init_type>,
2474
                        SameAsElementReference<T>>::value,
2475
      int>::type;
2476
2477
  // RequiresNotInit is a workaround for gcc prior to 7.1.
2478
  // See https://godbolt.org/g/Y4xsUh.
2479
  template <class T>
2480
  using RequiresNotInit =
2481
      typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
2482
2483
  template <class... Ts>
2484
  using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
2485
2486
 public:
2487
  static_assert(std::is_same<pointer, value_type*>::value,
2488
                "Allocators with custom pointer types are not supported");
2489
  static_assert(std::is_same<const_pointer, const value_type*>::value,
2490
                "Allocators with custom pointer types are not supported");
2491
2492
  class iterator : private HashSetIteratorGenerationInfo {
2493
    friend class raw_hash_set;
2494
    friend struct HashtableFreeFunctionsAccess;
2495
2496
   public:
2497
    using iterator_category = std::forward_iterator_tag;
2498
    using value_type = typename raw_hash_set::value_type;
2499
    using reference =
2500
        absl::conditional_t<PolicyTraits::constant_iterators::value,
2501
                            const value_type&, value_type&>;
2502
    using pointer = absl::remove_reference_t<reference>*;
2503
    using difference_type = typename raw_hash_set::difference_type;
2504
2505
    iterator() {}
2506
2507
    // PRECONDITION: not an end() iterator.
2508
16
    reference operator*() const {
2509
16
      AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
2510
16
      return unchecked_deref();
2511
16
    }
2512
2513
    // PRECONDITION: not an end() iterator.
2514
0
    pointer operator->() const {
2515
0
      AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
2516
0
      return &operator*();
2517
0
    }
2518
2519
    // PRECONDITION: not an end() iterator.
2520
0
    iterator& operator++() {
2521
0
      AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
2522
0
      ++ctrl_;
2523
0
      ++slot_;
2524
0
      skip_empty_or_deleted();
2525
0
      if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
2526
0
      return *this;
2527
0
    }
2528
    // PRECONDITION: not an end() iterator.
2529
    iterator operator++(int) {
2530
      auto tmp = *this;
2531
      ++*this;
2532
      return tmp;
2533
    }
2534
2535
16
    friend bool operator==(const iterator& a, const iterator& b) {
2536
16
      AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
2537
16
      AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
2538
16
      AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
2539
16
                          a.generation_ptr(), b.generation_ptr());
2540
16
      return a.ctrl_ == b.ctrl_;
2541
16
    }
2542
0
    friend bool operator!=(const iterator& a, const iterator& b) {
2543
0
      return !(a == b);
2544
0
    }
2545
2546
   private:
2547
    iterator(ctrl_t* ctrl, slot_type* slot,
2548
             const GenerationType* generation_ptr)
2549
        : HashSetIteratorGenerationInfo(generation_ptr),
2550
          ctrl_(ctrl),
2551
32
          slot_(slot) {
2552
      // This assumption helps the compiler know that any non-end iterator is
2553
      // not equal to any end iterator.
2554
32
      ABSL_ASSUME(ctrl != nullptr);
2555
32
    }
2556
    // This constructor is used in begin() to avoid an MSan
2557
    // use-of-uninitialized-value error. Delegating from this constructor to
2558
    // the previous one doesn't avoid the error.
2559
    iterator(ctrl_t* ctrl, MaybeInitializedPtr slot,
2560
             const GenerationType* generation_ptr)
2561
        : HashSetIteratorGenerationInfo(generation_ptr),
2562
          ctrl_(ctrl),
2563
0
          slot_(to_slot(slot.get())) {
2564
      // This assumption helps the compiler know that any non-end iterator is
2565
      // not equal to any end iterator.
2566
0
      ABSL_ASSUME(ctrl != nullptr);
2567
0
    }
2568
    // For end() iterators.
2569
    explicit iterator(const GenerationType* generation_ptr)
2570
0
        : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
2571
2572
    // Fixes up `ctrl_` to point to a full slot or the sentinel by advancing
2573
    // `ctrl_` and `slot_` until they reach one.
2574
0
    void skip_empty_or_deleted() {
2575
0
      while (IsEmptyOrDeleted(*ctrl_)) {
2576
0
        uint32_t shift =
2577
0
            GroupFullEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
2578
0
        ctrl_ += shift;
2579
0
        slot_ += shift;
2580
0
      }
2581
0
    }
2582
2583
0
    ctrl_t* control() const { return ctrl_; }
2584
16
    slot_type* slot() const { return slot_; }
2585
2586
    // We use EmptyGroup() for default-constructed iterators so that they can
2587
    // be distinguished from end iterators, which have nullptr ctrl_.
2588
    ctrl_t* ctrl_ = EmptyGroup();
2589
    // To avoid uninitialized member warnings, put slot_ in an anonymous union.
2590
    // The member is not initialized on singleton and end iterators.
2591
    union {
2592
      slot_type* slot_;
2593
    };
2594
2595
    // An equality check which skips ABSL Hardening iterator invalidation
2596
    // checks.
2597
    // Should be used when the lifetimes of the iterators are well-enough
2598
    // understood to prove that they cannot be invalid.
2599
    bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); }
2600
2601
    // Dereferences the iterator without ABSL Hardening iterator invalidation
2602
    // checks.
2603
16
    reference unchecked_deref() const { return PolicyTraits::element(slot_); }
2604
  };
2605
2606
  class const_iterator {
2607
    friend class raw_hash_set;
2608
    template <class Container, typename Enabler>
2609
    friend struct absl::container_internal::hashtable_debug_internal::
2610
        HashtableDebugAccess;
2611
2612
   public:
2613
    using iterator_category = typename iterator::iterator_category;
2614
    using value_type = typename raw_hash_set::value_type;
2615
    using reference = typename raw_hash_set::const_reference;
2616
    using pointer = typename raw_hash_set::const_pointer;
2617
    using difference_type = typename raw_hash_set::difference_type;
2618
2619
    const_iterator() = default;
2620
    // Implicit construction from iterator.
2621
32
    const_iterator(iterator i) : inner_(std::move(i)) {}  // NOLINT
2622
2623
    reference operator*() const { return *inner_; }
2624
    pointer operator->() const { return inner_.operator->(); }
2625
2626
    const_iterator& operator++() {
2627
      ++inner_;
2628
      return *this;
2629
    }
2630
    const_iterator operator++(int) { return inner_++; }
2631
2632
16
    friend bool operator==(const const_iterator& a, const const_iterator& b) {
2633
16
      return a.inner_ == b.inner_;
2634
16
    }
2635
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
2636
      return !(a == b);
2637
    }
2638
2639
   private:
2640
    const_iterator(const ctrl_t* ctrl, const slot_type* slot,
2641
                   const GenerationType* gen)
2642
        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
2643
    }
2644
    ctrl_t* control() const { return inner_.control(); }
2645
    slot_type* slot() const { return inner_.slot(); }
2646
2647
    iterator inner_;
2648
2649
    bool unchecked_equals(const const_iterator& b) {
2650
      return inner_.unchecked_equals(b.inner_);
2651
    }
2652
  };
2653
2654
  using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
2655
  using insert_return_type = InsertReturnType<iterator, node_type>;
2656
2657
  // Note: can't use `= default` due to non-default noexcept (causes
2658
  // problems for some compilers). NOLINTNEXTLINE
2659
  raw_hash_set() noexcept(
2660
      std::is_nothrow_default_constructible<hasher>::value &&
2661
      std::is_nothrow_default_constructible<key_equal>::value &&
2662
2
      std::is_nothrow_default_constructible<allocator_type>::value) {}
2663
2664
  ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
2665
      size_t bucket_count, const hasher& hash = hasher(),
2666
      const key_equal& eq = key_equal(),
2667
      const allocator_type& alloc = allocator_type())
2668
      : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
2669
                  alloc) {
2670
    if (bucket_count > DefaultCapacity()) {
2671
      resize(NormalizeCapacity(bucket_count));
2672
    }
2673
  }
2674
2675
  raw_hash_set(size_t bucket_count, const hasher& hash,
2676
               const allocator_type& alloc)
2677
      : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
2678
2679
  raw_hash_set(size_t bucket_count, const allocator_type& alloc)
2680
      : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
2681
2682
  explicit raw_hash_set(const allocator_type& alloc)
2683
      : raw_hash_set(0, hasher(), key_equal(), alloc) {}
2684
2685
  template <class InputIter>
2686
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
2687
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
2688
               const allocator_type& alloc = allocator_type())
2689
      : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
2690
                     hash, eq, alloc) {
2691
    insert(first, last);
2692
  }
2693
2694
  template <class InputIter>
2695
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
2696
               const hasher& hash, const allocator_type& alloc)
2697
      : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
2698
2699
  template <class InputIter>
2700
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
2701
               const allocator_type& alloc)
2702
      : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
2703
2704
  template <class InputIter>
2705
  raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
2706
      : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
2707
2708
  // Instead of accepting std::initializer_list<value_type> as the first
2709
  // argument like std::unordered_set<value_type> does, we have two overloads
2710
  // that accept std::initializer_list<T> and std::initializer_list<init_type>.
2711
  // This is advantageous for performance.
2712
  //
2713
  //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
2714
  //   // copies the strings into the set.
2715
  //   std::unordered_set<std::string> s = {"abc", "def"};
2716
  //
2717
  //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
2718
  //   // copies the strings into the set.
2719
  //   absl::flat_hash_set<std::string> s = {"abc", "def"};
2720
  //
2721
  // The same trick is used in insert().
2722
  //
2723
  // The enabler is necessary to prevent this constructor from triggering where
2724
  // the copy constructor is meant to be called.
2725
  //
2726
  //   absl::flat_hash_set<int> a, b{a};
2727
  //
2728
  // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
2729
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
2730
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
2731
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
2732
               const allocator_type& alloc = allocator_type())
2733
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
2734
2735
  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
2736
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
2737
               const allocator_type& alloc = allocator_type())
2738
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
2739
2740
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
2741
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
2742
               const hasher& hash, const allocator_type& alloc)
2743
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
2744
2745
  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
2746
               const hasher& hash, const allocator_type& alloc)
2747
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
2748
2749
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
2750
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
2751
               const allocator_type& alloc)
2752
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
2753
2754
  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
2755
               const allocator_type& alloc)
2756
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
2757
2758
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
2759
  raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
2760
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
2761
2762
  raw_hash_set(std::initializer_list<init_type> init,
2763
               const allocator_type& alloc)
2764
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
2765
2766
  raw_hash_set(const raw_hash_set& that)
2767
      : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
2768
                               that.alloc_ref())) {}
2769
2770
  raw_hash_set(const raw_hash_set& that, const allocator_type& a)
2771
      : raw_hash_set(GrowthToLowerboundCapacity(that.size()), that.hash_ref(),
2772
                     that.eq_ref(), a) {
2773
    that.AssertNotDebugCapacity();
2774
    const size_t size = that.size();
2775
    if (size == 0) {
2776
      return;
2777
    }
2778
    // We don't use `that.is_soo()` here because `that` can have non-SOO
2779
    // capacity but have a size that fits into SOO capacity.
2780
    if (fits_in_soo(size)) {
2781
      assert(size == 1);
2782
      common().set_full_soo();
2783
      emplace_at(soo_iterator(), *that.begin());
2784
      const HashtablezInfoHandle infoz = try_sample_soo();
2785
      if (infoz.IsSampled()) resize_with_soo_infoz(infoz);
2786
      return;
2787
    }
2788
    assert(!that.is_soo());
2789
    const size_t cap = capacity();
2790
    // Note about single group tables:
2791
    // 1. It is correct to have any order of elements.
2792
    // 2. The order has to be non-deterministic.
2793
    // 3. We are assigning elements with an arbitrary `shift`, starting from
2794
    //    position `capacity + shift`.
2795
    // 4. `shift` must be coprime with `capacity + 1` so that the modular
2796
    //     arithmetic traverses all positions instead of cycling through a
2797
    //     subset of them. Odd numbers are coprime with any `capacity + 1`
2798
    //     (2^N); see the sketch after this constructor.
2799
    size_t offset = cap;
2800
    const size_t shift =
2801
        is_single_group(cap) ? (PerTableSalt(control()) | 1) : 0;
2802
    IterateOverFullSlots(
2803
        that.common(), that.slot_array(),
2804
        [&](const ctrl_t* that_ctrl,
2805
            slot_type* that_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
2806
          if (shift == 0) {
2807
            // Big tables case. Position must be searched via probing.
2808
            // The table is guaranteed to be empty, so this can be done faster
2809
            // than a full `insert` would be.
2810
            const size_t hash = PolicyTraits::apply(
2811
                HashElement{hash_ref()}, PolicyTraits::element(that_slot));
2812
            FindInfo target = find_first_non_full_outofline(common(), hash);
2813
            infoz().RecordInsert(hash, target.probe_length);
2814
            offset = target.offset;
2815
          } else {
2816
            // Small tables case. Next position is computed via shift.
2817
            offset = (offset + shift) & cap;
2818
          }
2819
          const h2_t h2 = static_cast<h2_t>(*that_ctrl);
2820
          assert(  // We rely on the hash not changing for small tables.
2821
              H2(PolicyTraits::apply(HashElement{hash_ref()},
2822
                                     PolicyTraits::element(that_slot))) == h2 &&
2823
              "hash function value changed unexpectedly during the copy");
2824
          SetCtrl(common(), offset, h2, sizeof(slot_type));
2825
          emplace_at(iterator_at(offset), PolicyTraits::element(that_slot));
2826
          common().maybe_increment_generation_on_insert();
2827
        });
2828
    if (shift != 0) {
2829
      // On small table copy we do not record individual inserts.
2830
      // RecordInsert requires hash, but it is unknown for small tables.
2831
      infoz().RecordStorageChanged(size, cap);
2832
    }
2833
    common().set_size(size);
2834
    growth_info().OverwriteManyEmptyAsFull(size);
2835
  }
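  // Illustrative sketch (not part of the library): why an odd `shift` visits
  // every slot of a single-group table exactly once. `capacity + 1` is a power
  // of two, and any odd number is coprime with it, so stepping modulo
  // `capacity + 1` cycles through all positions before repeating:
  //
  //   const size_t cap = 7;               // capacity + 1 == 8 == 2^3
  //   const size_t shift = 5;             // any odd value works
  //   size_t offset = cap;
  //   for (int step = 0; step < 8; ++step) {
  //     offset = (offset + shift) & cap;  // 4, 1, 6, 3, 0, 5, 2, 7 -- all slots
  //   }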
2836
2837
  ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
2838
      std::is_nothrow_copy_constructible<hasher>::value &&
2839
      std::is_nothrow_copy_constructible<key_equal>::value &&
2840
      std::is_nothrow_copy_constructible<allocator_type>::value)
2841
      :  // Hash, equality and allocator are copied instead of moved because
2842
         // `that` must be left valid. If Hash is std::function<Key>, moving it
2843
         // would create a nullptr functor that cannot be called.
2844
         // Note: we avoid using exchange for better generated code.
2845
        settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
2846
                      ? std::move(that.common())
2847
                      : CommonFields{full_soo_tag_t{}},
2848
                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {
2849
    if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
2850
      transfer(soo_slot(), that.soo_slot());
2851
    }
2852
    that.common() = CommonFields::CreateMovedFrom<SooEnabled()>();
2853
    annotate_for_bug_detection_on_move(that);
2854
  }
2855
2856
  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
2857
      : settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
2858
                  that.eq_ref(), a) {
2859
    if (a == that.alloc_ref()) {
2860
      swap_common(that);
2861
      annotate_for_bug_detection_on_move(that);
2862
    } else {
2863
      move_elements_allocs_unequal(std::move(that));
2864
    }
2865
  }
2866
2867
  raw_hash_set& operator=(const raw_hash_set& that) {
2868
    that.AssertNotDebugCapacity();
2869
    if (ABSL_PREDICT_FALSE(this == &that)) return *this;
2870
    constexpr bool propagate_alloc =
2871
        AllocTraits::propagate_on_container_copy_assignment::value;
2872
    // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
2873
    // is an exact match for that.size(). If this->capacity() is too big, then
2874
    // it would make iteration very slow to reuse the allocation. Maybe we can
2875
    // do the same heuristic as clear() and reuse if it's small enough.
2876
    raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
2877
    // NOLINTNEXTLINE: not returning *this for performance.
2878
    return assign_impl<propagate_alloc>(std::move(tmp));
2879
  }
2880
2881
  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
2882
      absl::allocator_traits<allocator_type>::is_always_equal::value &&
2883
      std::is_nothrow_move_assignable<hasher>::value &&
2884
      std::is_nothrow_move_assignable<key_equal>::value) {
2885
    // TODO(sbenza): We should only use the operations from the noexcept clause
2886
    // to make sure we actually adhere to that contract.
2887
    // NOLINTNEXTLINE: not returning *this for performance.
2888
    return move_assign(
2889
        std::move(that),
2890
        typename AllocTraits::propagate_on_container_move_assignment());
2891
  }
2892
2893
0
  ~raw_hash_set() {
2894
0
    destructor_impl();
2895
0
#ifndef NDEBUG
2896
0
    common().set_capacity(InvalidCapacity::kDestroyed);
2897
0
#endif
2898
0
  }
2899
2900
0
  iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
2901
0
    if (ABSL_PREDICT_FALSE(empty())) return end();
2902
0
    if (is_soo()) return soo_iterator();
2903
0
    iterator it = {control(), common().slots_union(),
2904
0
                   common().generation_ptr()};
2905
0
    it.skip_empty_or_deleted();
2906
0
    assert(IsFull(*it.control()));
2907
0
    return it;
2908
0
  }
2909
0
  iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
2910
0
    AssertNotDebugCapacity();
2911
0
    return iterator(common().generation_ptr());
2912
0
  }
2913
2914
  const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2915
    return const_cast<raw_hash_set*>(this)->begin();
2916
  }
2917
  const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2918
    return const_cast<raw_hash_set*>(this)->end();
2919
  }
2920
  const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2921
    return begin();
2922
  }
2923
  const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
2924
2925
0
  bool empty() const { return !size(); }
2926
0
  size_t size() const {
2927
0
    AssertNotDebugCapacity();
2928
0
    return common().size();
2929
0
  }
2930
321
  size_t capacity() const {
2931
321
    const size_t cap = common().capacity();
2932
    // Compiler complains when using functions in ASSUME so use local variable.
2933
321
    ABSL_ATTRIBUTE_UNUSED static constexpr size_t kDefaultCapacity =
2934
321
        DefaultCapacity();
2935
321
    ABSL_ASSUME(cap >= kDefaultCapacity);
2936
321
    return cap;
2937
321
  }
2938
  size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
2939
2940
0
  ABSL_ATTRIBUTE_REINITIALIZES void clear() {
2941
0
    if (SwisstableGenerationsEnabled() &&
2942
0
        capacity() >= InvalidCapacity::kMovedFrom) {
2943
0
      common().set_capacity(DefaultCapacity());
2944
0
    }
2945
0
    AssertNotDebugCapacity();
2946
    // Iterating over this container is O(bucket_count()). When bucket_count()
2947
    // is much greater than size(), iteration becomes prohibitively expensive.
2948
    // For clear() it is more important to reuse the allocated array when the
2949
    // container is small, because allocating takes much longer than
2950
    // destroying the elements of the container. So we pick the
2951
    // largest bucket_count() threshold for which iteration is still fast and
2952
    // past that we simply deallocate the array.
2953
0
    const size_t cap = capacity();
2954
0
    if (cap == 0) {
2955
0
      common().reinitialize_moved_from_non_soo();
2956
0
    } else if (is_soo()) {
2957
0
      if (!empty()) destroy(soo_slot());
2958
0
      common().set_empty_soo();
2959
0
    } else {
2960
0
      destroy_slots();
2961
0
      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128,
2962
0
                        SooEnabled());
2963
0
    }
2964
0
    common().set_reserved_growth(0);
2965
0
    common().set_reservation_size(0);
2966
0
  }
2967
2968
  // This overload kicks in when the argument is an rvalue of insertable and
2969
  // decomposable type other than init_type.
2970
  //
2971
  //   flat_hash_map<std::string, int> m;
2972
  //   m.insert(std::make_pair("abc", 42));
2973
  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
2974
  // bug.
2975
  template <class T, RequiresInsertable<T> = 0, class T2 = T,
2976
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
2977
            T* = nullptr>
2978
16
  std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2979
16
    return emplace(std::forward<T>(value));
2980
16
  }
2981
2982
  // This overload kicks in when the argument is a bitfield or an lvalue of
2983
  // insertable and decomposable type.
2984
  //
2985
  //   union { int n : 1; };
2986
  //   flat_hash_set<int> s;
2987
  //   s.insert(n);
2988
  //
2989
  //   flat_hash_set<std::string> s;
2990
  //   const char* p = "hello";
2991
  //   s.insert(p);
2992
  //
2993
  template <
2994
      class T, RequiresInsertable<const T&> = 0,
2995
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
2996
  std::pair<iterator, bool> insert(const T& value)
2997
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2998
    return emplace(value);
2999
  }
3000
3001
  // This overload kicks in when the argument is an rvalue of init_type. Its
3002
  // purpose is to handle brace-init-list arguments.
3003
  //
3004
  //   flat_hash_map<std::string, int> s;
3005
  //   s.insert({"abc", 42});
3006
  std::pair<iterator, bool> insert(init_type&& value)
3007
0
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
3008
0
    return emplace(std::move(value));
3009
0
  }
3010
3011
  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
3012
  // bug.
3013
  template <class T, RequiresInsertable<T> = 0, class T2 = T,
3014
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
3015
            T* = nullptr>
3016
  iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3017
    return insert(std::forward<T>(value)).first;
3018
  }
3019
3020
  template <
3021
      class T, RequiresInsertable<const T&> = 0,
3022
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
3023
  iterator insert(const_iterator,
3024
                  const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3025
    return insert(value).first;
3026
  }
3027
3028
  iterator insert(const_iterator,
3029
                  init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3030
    return insert(std::move(value)).first;
3031
  }
3032
3033
  template <class InputIt>
3034
  void insert(InputIt first, InputIt last) {
3035
    for (; first != last; ++first) emplace(*first);
3036
  }
3037
3038
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
3039
  void insert(std::initializer_list<T> ilist) {
3040
    insert(ilist.begin(), ilist.end());
3041
  }
3042
3043
  void insert(std::initializer_list<init_type> ilist) {
3044
    insert(ilist.begin(), ilist.end());
3045
  }
3046
3047
  insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3048
    if (!node) return {end(), false, node_type()};
3049
    const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
3050
    auto res = PolicyTraits::apply(
3051
        InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
3052
        elem);
3053
    if (res.second) {
3054
      CommonAccess::Reset(&node);
3055
      return {res.first, true, node_type()};
3056
    } else {
3057
      return {res.first, false, std::move(node)};
3058
    }
3059
  }
3060
3061
  iterator insert(const_iterator,
3062
                  node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3063
    auto res = insert(std::move(node));
3064
    node = std::move(res.node);
3065
    return res.position;
3066
  }
3067
3068
  // This overload kicks in if we can deduce the key from args. This enables us
3069
  // to avoid constructing value_type if an entry with the same key already
3070
  // exists.
3071
  //
3072
  // For example:
3073
  //
3074
  //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
3075
  //   // Creates no std::string copies and makes no heap allocations.
3076
  //   m.emplace("abc", "xyz");
3077
  template <class... Args, typename std::enable_if<
3078
                               IsDecomposable<Args...>::value, int>::type = 0>
3079
  std::pair<iterator, bool> emplace(Args&&... args)
3080
16
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
3081
16
    return PolicyTraits::apply(EmplaceDecomposable{*this},
3082
16
                               std::forward<Args>(args)...);
3083
16
  }
3084
3085
  // This overload kicks in if we cannot deduce the key from args. It constructs
3086
  // value_type unconditionally and then either moves it into the table or
3087
  // destroys it.
3088
  template <class... Args, typename std::enable_if<
3089
                               !IsDecomposable<Args...>::value, int>::type = 0>
3090
  std::pair<iterator, bool> emplace(Args&&... args)
3091
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
3092
    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
3093
    slot_type* slot = to_slot(&raw);
3094
3095
    construct(slot, std::forward<Args>(args)...);
3096
    const auto& elem = PolicyTraits::element(slot);
3097
    return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
3098
  }
3099
3100
  template <class... Args>
3101
  iterator emplace_hint(const_iterator,
3102
                        Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3103
    return emplace(std::forward<Args>(args)...).first;
3104
  }
3105
3106
  // Extension API: support for lazy emplace.
3107
  //
3108
  // Looks up key in the table. If found, returns the iterator to the element.
3109
  // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
3110
  // and returns an iterator to the new element.
3111
  //
3112
  // `f` must abide by several restrictions:
3113
  //  - it MUST call `raw_hash_set::constructor` with arguments as if a
3114
  //    `raw_hash_set::value_type` is constructed,
3115
  //  - it MUST NOT access the container before the call to
3116
  //    `raw_hash_set::constructor`, and
3117
  //  - it MUST NOT erase the lazily emplaced element.
3118
  // Doing any of these is undefined behavior.
3119
  //
3120
  // For example:
3121
  //
3122
  //   std::unordered_set<ArenaString> s;
3123
  //   // Makes an ArenaString even if "abc" is already in the set.
3124
  //   s.insert(ArenaString(&arena, "abc"));
3125
  //
3126
  //   flat_hash_set<ArenaString> s;
3127
  //   // Makes an ArenaString only if "abc" is not yet in the set.
3128
  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
3129
  //     ctor(&arena, "abc");
3130
  //   });
3131
  //
3132
  // WARNING: This API is currently experimental. If there is a way to implement
3133
  // the same thing with the rest of the API, prefer that.
3134
  class constructor {
3135
    friend class raw_hash_set;
3136
3137
   public:
3138
    template <class... Args>
3139
    void operator()(Args&&... args) const {
3140
      assert(*slot_);
3141
      PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
3142
      *slot_ = nullptr;
3143
    }
3144
3145
   private:
3146
    constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
3147
3148
    allocator_type* alloc_;
3149
    slot_type** slot_;
3150
  };
3151
3152
  template <class K = key_type, class F>
3153
  iterator lazy_emplace(const key_arg<K>& key,
3154
                        F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3155
    auto res = find_or_prepare_insert(key);
3156
    if (res.second) {
3157
      slot_type* slot = res.first.slot();
3158
      std::forward<F>(f)(constructor(&alloc_ref(), &slot));
3159
      assert(!slot);
3160
    }
3161
    return res.first;
3162
  }
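  // Usage sketch (illustrative, not part of the library): construct the value
  // only on a miss. The container and key below are hypothetical.
  //
  //   absl::flat_hash_set<std::string> cache;
  //   cache.lazy_emplace("expensive-key", [](const auto& ctor) {
  //     ctor(std::string("expensive-key"));  // runs only if the key is absent
  //   });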
3163
3164
  // Extension API: support for heterogeneous keys.
3165
  //
3166
  //   std::unordered_set<std::string> s;
3167
  //   // Turns "abc" into std::string.
3168
  //   s.erase("abc");
3169
  //
3170
  //   flat_hash_set<std::string> s;
3171
  //   // Uses "abc" directly without copying it into std::string.
3172
  //   s.erase("abc");
3173
  template <class K = key_type>
3174
  size_type erase(const key_arg<K>& key) {
3175
    auto it = find(key);
3176
    if (it == end()) return 0;
3177
    erase(it);
3178
    return 1;
3179
  }
3180
3181
  // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
3182
  // this method returns void to reduce algorithmic complexity to O(1).  The
3183
  // iterator is invalidated, so any increment should be done before calling
3184
  // erase.  In order to erase while iterating across a map, use the following
3185
  // idiom (which also works for some standard containers):
3186
  //
3187
  // for (auto it = m.begin(), end = m.end(); it != end;) {
3188
  //   // `erase()` will invalidate `it`, so advance `it` first.
3189
  //   auto copy_it = it++;
3190
  //   if (<pred>) {
3191
  //     m.erase(copy_it);
3192
  //   }
3193
  // }
3194
  void erase(const_iterator cit) { erase(cit.inner_); }
3195
3196
  // This overload is necessary because otherwise erase<K>(const K&) would be
3197
  // a better match if non-const iterator is passed as an argument.
3198
  void erase(iterator it) {
3199
    AssertNotDebugCapacity();
3200
    AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
3201
    destroy(it.slot());
3202
    if (is_soo()) {
3203
      common().set_empty_soo();
3204
    } else {
3205
      erase_meta_only(it);
3206
    }
3207
  }
3208
3209
  iterator erase(const_iterator first,
3210
                 const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3211
    AssertNotDebugCapacity();
3212
    // We check for empty first because ClearBackingArray requires that
3213
    // capacity() > 0 as a precondition.
3214
    if (empty()) return end();
3215
    if (first == last) return last.inner_;
3216
    if (is_soo()) {
3217
      destroy(soo_slot());
3218
      common().set_empty_soo();
3219
      return end();
3220
    }
3221
    if (first == begin() && last == end()) {
3222
      // TODO(ezb): we access control bytes in destroy_slots so it could make
3223
      // sense to combine destroy_slots and ClearBackingArray to avoid cache
3224
      // misses when the table is large. Note that we also do this in clear().
3225
      destroy_slots();
3226
      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true,
3227
                        SooEnabled());
3228
      common().set_reserved_growth(common().reservation_size());
3229
      return end();
3230
    }
3231
    while (first != last) {
3232
      erase(first++);
3233
    }
3234
    return last.inner_;
3235
  }
3236
3237
  // Moves elements from `src` into `this`.
3238
  // If an element already exists in `this`, it is left unmodified in `src`.
3239
  template <typename H, typename E>
3240
  void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
3241
    AssertNotDebugCapacity();
3242
    src.AssertNotDebugCapacity();
3243
    assert(this != &src);
3244
    // Returns whether insertion took place.
3245
    const auto insert_slot = [this](slot_type* src_slot) {
3246
      return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)},
3247
                                 PolicyTraits::element(src_slot))
3248
          .second;
3249
    };
3250
3251
    if (src.is_soo()) {
3252
      if (src.empty()) return;
3253
      if (insert_slot(src.soo_slot())) src.common().set_empty_soo();
3254
      return;
3255
    }
3256
    for (auto it = src.begin(), e = src.end(); it != e;) {
3257
      auto next = std::next(it);
3258
      if (insert_slot(it.slot())) src.erase_meta_only(it);
3259
      it = next;
3260
    }
3261
  }
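  // Usage sketch (illustrative, not part of the library): elements whose keys
  // already exist in the destination stay behind in `src`.
  //
  //   absl::flat_hash_set<int> dst = {1, 2};
  //   absl::flat_hash_set<int> src = {2, 3};
  //   dst.merge(src);
  //   // dst == {1, 2, 3}; src == {2}, because 2 was already present in dst.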
3262
3263
  template <typename H, typename E>
3264
  void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
3265
    merge(src);
3266
  }
3267
3268
  node_type extract(const_iterator position) {
3269
    AssertNotDebugCapacity();
3270
    AssertIsFull(position.control(), position.inner_.generation(),
3271
                 position.inner_.generation_ptr(), "extract()");
3272
    auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
3273
    if (is_soo()) {
3274
      common().set_empty_soo();
3275
    } else {
3276
      erase_meta_only(position);
3277
    }
3278
    return node;
3279
  }
3280
3281
  template <
3282
      class K = key_type,
3283
      typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
3284
  node_type extract(const key_arg<K>& key) {
3285
    auto it = find(key);
3286
    return it == end() ? node_type() : extract(const_iterator{it});
3287
  }
3288
3289
  void swap(raw_hash_set& that) noexcept(
3290
      IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
3291
      IsNoThrowSwappable<allocator_type>(
3292
          typename AllocTraits::propagate_on_container_swap{})) {
3293
    AssertNotDebugCapacity();
3294
    that.AssertNotDebugCapacity();
3295
    using std::swap;
3296
    swap_common(that);
3297
    swap(hash_ref(), that.hash_ref());
3298
    swap(eq_ref(), that.eq_ref());
3299
    SwapAlloc(alloc_ref(), that.alloc_ref(),
3300
              typename AllocTraits::propagate_on_container_swap{});
3301
  }
3302
3303
  void rehash(size_t n) {
3304
    const size_t cap = capacity();
3305
    if (n == 0) {
3306
      if (cap == 0 || is_soo()) return;
3307
      if (empty()) {
3308
        ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
3309
                          SooEnabled());
3310
        return;
3311
      }
3312
      if (fits_in_soo(size())) {
3313
        // When the table is already sampled, we keep it sampled.
3314
        if (infoz().IsSampled()) {
3315
          const size_t kInitialSampledCapacity = NextCapacity(SooCapacity());
3316
          if (capacity() > kInitialSampledCapacity) {
3317
            resize(kInitialSampledCapacity);
3318
          }
3319
          // This asserts that we didn't lose sampling coverage in `resize`.
3320
          assert(infoz().IsSampled());
3321
          return;
3322
        }
3323
        alignas(slot_type) unsigned char slot_space[sizeof(slot_type)];
3324
        slot_type* tmp_slot = to_slot(slot_space);
3325
        transfer(tmp_slot, begin().slot());
3326
        ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
3327
                          SooEnabled());
3328
        transfer(soo_slot(), tmp_slot);
3329
        common().set_full_soo();
3330
        return;
3331
      }
3332
    }
3333
3334
    // bitor is a faster way of doing `max` here. We will round up to the next
3335
    // power-of-2-minus-1, so bitor is good enough.
3336
    auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
3337
    // n == 0 unconditionally rehashes as per the standard.
3338
    if (n == 0 || m > cap) {
3339
      resize(m);
3340
3341
      // This is after resize, to ensure that we have completed the allocation
3342
      // and have potentially sampled the hashtable.
3343
      infoz().RecordReservation(n);
3344
    }
3345
  }
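  // Illustrative sketch (not part of the library): the `bitor` above works
  // because `n | GrowthToLowerboundCapacity(size())` and the corresponding
  // `max` share the same most-significant bit, so both round up to the same
  // power-of-2-minus-1. With a toy normalize (an assumption, not the real
  // NormalizeCapacity):
  //
  //   size_t normalize(size_t v) {  // smallest 2^k - 1 that is >= v
  //     size_t c = 1;
  //     while (c - 1 < v) c <<= 1;
  //     return c - 1;
  //   }
  //   // normalize(5 | 3) == normalize(5) == 7
  //   // normalize(9 | 6) == normalize(9) == 15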
3346
3347
  void reserve(size_t n) {
3348
    const size_t max_size_before_growth =
3349
        is_soo() ? SooCapacity() : size() + growth_left();
3350
    if (n > max_size_before_growth) {
3351
      size_t m = GrowthToLowerboundCapacity(n);
3352
      resize(NormalizeCapacity(m));
3353
3354
      // This is after resize, to ensure that we have completed the allocation
3355
      // and have potentially sampled the hashtable.
3356
      infoz().RecordReservation(n);
3357
    }
3358
    common().reset_reserved_growth(n);
3359
    common().set_reservation_size(n);
3360
  }
3361
3362
  // Extension API: support for heterogeneous keys.
3363
  //
3364
  //   std::unordered_set<std::string> s;
3365
  //   // Turns "abc" into std::string.
3366
  //   s.count("abc");
3367
  //
3368
  //   ch_set<std::string> s;
3369
  //   // Uses "abc" directly without copying it into std::string.
3370
  //   s.count("abc");
3371
  template <class K = key_type>
3372
  size_t count(const key_arg<K>& key) const {
3373
    return find(key) == end() ? 0 : 1;
3374
  }
3375
3376
  // Issues CPU prefetch instructions for the memory needed to find or insert
3377
  // a key.  Like all lookup functions, this supports heterogeneous keys.
3378
  //
3379
  // NOTE: This is a very low level operation and should not be used without
3380
  // specific benchmarks indicating its importance.
3381
  template <class K = key_type>
3382
  void prefetch(const key_arg<K>& key) const {
3383
    if (capacity() == DefaultCapacity()) return;
3384
    (void)key;
3385
    // Avoid probing if we won't be able to prefetch the addresses received.
3386
#ifdef ABSL_HAVE_PREFETCH
3387
    prefetch_heap_block();
3388
    auto seq = probe(common(), hash_ref()(key));
3389
    PrefetchToLocalCache(control() + seq.offset());
3390
    PrefetchToLocalCache(slot_array() + seq.offset());
3391
#endif  // ABSL_HAVE_PREFETCH
3392
  }
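  // Usage sketch (illustrative, not part of the library): overlap memory
  // latency with useful work by prefetching the next key of a batch before
  // probing for the current one. `set`, `keys`, and `hits` are hypothetical.
  //
  //   size_t hits = 0;
  //   for (size_t i = 0; i < keys.size(); ++i) {
  //     if (i + 1 < keys.size()) set.prefetch(keys[i + 1]);
  //     hits += set.contains(keys[i]);
  //   }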
3393
3394
  // The API of find() has two extensions.
3395
  //
3396
  // 1. The hash can be passed by the user. It must be equal to the hash of the
3397
  // key.
3398
  //
3399
  // 2. The type of the key argument doesn't have to be key_type. This is the
3400
  // so-called heterogeneous key support.
3401
  template <class K = key_type>
3402
  iterator find(const key_arg<K>& key,
3403
                size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3404
    AssertOnFind(key);
3405
    if (is_soo()) return find_soo(key);
3406
    return find_non_soo(key, hash);
3407
  }
3408
  template <class K = key_type>
3409
16
  iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3410
16
    AssertOnFind(key);
3411
16
    if (is_soo()) return find_soo(key);
3412
16
    prefetch_heap_block();
3413
16
    return find_non_soo(key, hash_ref()(key));
3414
16
  }
3415
3416
  template <class K = key_type>
3417
  const_iterator find(const key_arg<K>& key,
3418
                      size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
3419
    return const_cast<raw_hash_set*>(this)->find(key, hash);
3420
  }
3421
  template <class K = key_type>
3422
  const_iterator find(const key_arg<K>& key) const
3423
16
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
3424
16
    return const_cast<raw_hash_set*>(this)->find(key);
3425
16
  }
3426
3427
  template <class K = key_type>
3428
  bool contains(const key_arg<K>& key) const {
3429
    // Here neither the iterator returned by `find()` nor `end()` can be invalid
3430
    // outside of potential thread-safety issues.
3431
    // `find()`'s return value is constructed, used, and then destructed
3432
    // all in this context.
3433
    return !find(key).unchecked_equals(end());
3434
  }
3435
3436
  template <class K = key_type>
3437
  std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
3438
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
3439
    auto it = find(key);
3440
    if (it != end()) return {it, std::next(it)};
3441
    return {it, it};
3442
  }
3443
  template <class K = key_type>
3444
  std::pair<const_iterator, const_iterator> equal_range(
3445
      const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
3446
    auto it = find(key);
3447
    if (it != end()) return {it, std::next(it)};
3448
    return {it, it};
3449
  }
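  // Note (editorial): keys are unique in this table, so equal_range() always
  // yields a range of zero or one elements. For example:
  //
  //   absl::flat_hash_set<int> s = {1};
  //   auto hit = s.equal_range(1);
  //   assert(std::distance(hit.first, hit.second) == 1);
  //   auto miss = s.equal_range(2);
  //   assert(miss.first == miss.second);   // empty range for a missing key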
3450
3451
  size_t bucket_count() const { return capacity(); }
3452
  float load_factor() const {
3453
    return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
3454
  }
3455
  float max_load_factor() const { return 1.0f; }
3456
  void max_load_factor(float) {
3457
    // Does nothing.
3458
  }
3459
3460
  hasher hash_function() const { return hash_ref(); }
3461
  key_equal key_eq() const { return eq_ref(); }
3462
  allocator_type get_allocator() const { return alloc_ref(); }
3463
3464
  friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
3465
    if (a.size() != b.size()) return false;
3466
    const raw_hash_set* outer = &a;
3467
    const raw_hash_set* inner = &b;
3468
    if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
3469
    for (const value_type& elem : *outer) {
3470
      auto it = PolicyTraits::apply(FindElement{*inner}, elem);
3471
      if (it == inner->end() || !(*it == elem)) return false;
3472
    }
3473
    return true;
3474
  }
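  // Note (editorial): equality is element-wise and order-independent. The loop
  // above iterates the smaller-capacity table and looks each element up in the
  // other, so layout and insertion order never affect the result.
  //
  //   absl::flat_hash_set<int> a = {1, 2, 3};
  //   absl::flat_hash_set<int> b = {3, 2, 1};
  //   assert(a == b);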
3475
3476
  friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
3477
    return !(a == b);
3478
  }
3479
3480
  template <typename H>
3481
  friend typename std::enable_if<H::template is_hashable<value_type>::value,
3482
                                 H>::type
3483
  AbslHashValue(H h, const raw_hash_set& s) {
3484
    return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
3485
                      s.size());
3486
  }
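  // Note (editorial): because combine_unordered() is used, two tables holding
  // the same elements hash identically under absl::Hash, regardless of
  // insertion order or capacity.
  //
  //   absl::flat_hash_set<int> a = {1, 2, 3};
  //   absl::flat_hash_set<int> b = {3, 1, 2};
  //   assert(absl::HashOf(a) == absl::HashOf(b));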
3487
3488
  friend void swap(raw_hash_set& a,
3489
                   raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
3490
    a.swap(b);
3491
  }
3492
3493
 private:
3494
  template <class Container, typename Enabler>
3495
  friend struct absl::container_internal::hashtable_debug_internal::
3496
      HashtableDebugAccess;
3497
3498
  friend struct absl::container_internal::HashtableFreeFunctionsAccess;
3499
3500
  struct FindElement {
3501
    template <class K, class... Args>
3502
16
    const_iterator operator()(const K& key, Args&&...) const {
3503
16
      return s.find(key);
3504
16
    }
3505
    const raw_hash_set& s;
3506
  };
3507
3508
  struct HashElement {
3509
    template <class K, class... Args>
3510
0
    size_t operator()(const K& key, Args&&...) const {
3511
0
      return h(key);
3512
0
    }
3513
    const hasher& h;
3514
  };
3515
3516
  template <class K1>
3517
  struct EqualElement {
3518
    template <class K2, class... Args>
3519
19
    bool operator()(const K2& lhs, Args&&...) const {
3520
19
      return eq(lhs, rhs);
3521
19
    }
3522
    const K1& rhs;
3523
    const key_equal& eq;
3524
  };
3525
3526
  struct EmplaceDecomposable {
3527
    template <class K, class... Args>
3528
16
    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
3529
16
      auto res = s.find_or_prepare_insert(key);
3530
16
      if (res.second) {
3531
16
        s.emplace_at(res.first, std::forward<Args>(args)...);
3532
16
      }
3533
16
      return res;
3534
16
    }
3535
    raw_hash_set& s;
3536
  };
3537
3538
  template <bool do_destroy>
3539
  struct InsertSlot {
3540
    template <class K, class... Args>
3541
    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
3542
      auto res = s.find_or_prepare_insert(key);
3543
      if (res.second) {
3544
        s.transfer(res.first.slot(), &slot);
3545
      } else if (do_destroy) {
3546
        s.destroy(&slot);
3547
      }
3548
      return res;
3549
    }
3550
    raw_hash_set& s;
3551
    // Constructed slot. Either moved into place or destroyed.
3552
    slot_type&& slot;
3553
  };
3554
3555
  template <typename... Args>
3556
16
  inline void construct(slot_type* slot, Args&&... args) {
3557
16
    common().RunWithReentrancyGuard([&] {
3558
16
      PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
3559
16
    });
3560
16
  }
3561
0
  inline void destroy(slot_type* slot) {
3562
0
    common().RunWithReentrancyGuard(
3563
0
        [&] { PolicyTraits::destroy(&alloc_ref(), slot); });
3564
0
  }
3565
0
  inline void transfer(slot_type* to, slot_type* from) {
3566
0
    common().RunWithReentrancyGuard(
3567
0
        [&] { PolicyTraits::transfer(&alloc_ref(), to, from); });
3568
0
  }
3569
3570
  // TODO(b/289225379): consider having a helper class that has the impls for
3571
  // SOO functionality.
3572
  template <class K = key_type>
3573
0
  iterator find_soo(const key_arg<K>& key) {
3574
0
    assert(is_soo());
3575
0
    return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
3576
0
                                           PolicyTraits::element(soo_slot()))
3577
0
               ? end()
3578
0
               : soo_iterator();
3579
0
  }
3580
3581
  template <class K = key_type>
3582
16
  iterator find_non_soo(const key_arg<K>& key, size_t hash) {
3583
16
    assert(!is_soo());
3584
16
    auto seq = probe(common(), hash);
3585
16
    const ctrl_t* ctrl = control();
3586
16
    while (true) {
3587
16
      Group g{ctrl + seq.offset()};
3588
17
      for (uint32_t i : g.Match(H2(hash))) {
3589
17
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
3590
17
                EqualElement<K>{key, eq_ref()},
3591
17
                PolicyTraits::element(slot_array() + seq.offset(i)))))
3592
16
          return iterator_at(seq.offset(i));
3593
17
      }
3594
0
      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
3595
0
      seq.next();
3596
0
      assert(seq.index() <= capacity() && "full table!");
3597
0
    }
3598
16
  }
3599
3600
  // Conditionally samples hashtablez for SOO tables. This should be called on
3601
  // insertion into an empty SOO table and in copy construction when the size
3602
  // can fit in SOO capacity.
3603
0
  inline HashtablezInfoHandle try_sample_soo() {
3604
0
    assert(is_soo());
3605
0
    if (!ShouldSampleHashtablezInfo<CharAlloc>()) return HashtablezInfoHandle{};
3606
0
    return Sample(sizeof(slot_type), sizeof(key_type), sizeof(value_type),
3607
0
                  SooCapacity());
3608
0
  }
3609
3610
0
  inline void destroy_slots() {
3611
0
    assert(!is_soo());
3612
0
    if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
3613
0
    IterateOverFullSlots(
3614
0
        common(), slot_array(),
3615
0
        [&](const ctrl_t*, slot_type* slot)
3616
0
            ABSL_ATTRIBUTE_ALWAYS_INLINE { this->destroy(slot); });
3617
0
  }
3618
3619
0
  inline void dealloc() {
3620
0
    assert(capacity() != 0);
3621
    // Unpoison before returning the memory to the allocator.
3622
0
    SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
3623
0
    infoz().Unregister();
3624
0
    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
3625
0
        &alloc_ref(), common().backing_array_start(),
3626
0
        common().alloc_size(sizeof(slot_type), alignof(slot_type)));
3627
0
  }
3628
3629
0
  inline void destructor_impl() {
3630
0
    if (SwisstableGenerationsEnabled() &&
3631
0
        capacity() >= InvalidCapacity::kMovedFrom) {
3632
0
      return;
3633
0
    }
3634
0
    if (capacity() == 0) return;
3635
0
    if (is_soo()) {
3636
0
      if (!empty()) {
3637
0
        ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(soo_slot()));
3638
0
      }
3639
0
      return;
3640
0
    }
3641
0
    destroy_slots();
3642
0
    dealloc();
3643
0
  }
3644
3645
  // Erases, but does not destroy, the value pointed to by `it`.
3646
  //
3647
  // This merely updates the pertinent control byte. This can be used in
3648
  // conjunction with Policy::transfer to move the object to another place.
3649
  void erase_meta_only(const_iterator it) {
3650
    assert(!is_soo());
3651
    EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
3652
                  sizeof(slot_type));
3653
  }
3654
3655
0
  size_t hash_of(slot_type* slot) const {
3656
0
    return PolicyTraits::apply(HashElement{hash_ref()},
3657
0
                               PolicyTraits::element(slot));
3658
0
  }
3659
3660
  // Resizes the table to the new capacity and moves all elements to their new
3661
  // positions accordingly.
3662
  //
3663
  // Note that for better performance instead of
3664
  // find_first_non_full(common(), hash),
3665
  // HashSetResizeHelper::FindFirstNonFullAfterResize(
3666
  //    common(), old_capacity, hash)
3667
  // can be called right after `resize`.
3668
0
  void resize(size_t new_capacity) {
3669
0
    raw_hash_set::resize_impl(common(), new_capacity, HashtablezInfoHandle{});
3670
0
  }
3671
3672
  // As above, except that we also accept a pre-sampled, forced infoz for
3673
  // SOO tables, since they need to switch from SOO to heap in order to
3674
  // store the infoz.
3675
0
  void resize_with_soo_infoz(HashtablezInfoHandle forced_infoz) {
3676
0
    assert(forced_infoz.IsSampled());
3677
0
    raw_hash_set::resize_impl(common(), NextCapacity(SooCapacity()),
3678
0
                              forced_infoz);
3679
0
  }
3680
3681
  // Resizes set to the new capacity.
3682
  // It is a static function in order to use its pointer in GetPolicyFunctions.
3683
  ABSL_ATTRIBUTE_NOINLINE static void resize_impl(
3684
      CommonFields& common, size_t new_capacity,
3685
8
      HashtablezInfoHandle forced_infoz) {
3686
8
    raw_hash_set* set = reinterpret_cast<raw_hash_set*>(&common);
3687
8
    assert(IsValidCapacity(new_capacity));
3688
8
    assert(!set->fits_in_soo(new_capacity));
3689
8
    const bool was_soo = set->is_soo();
3690
8
    const bool had_soo_slot = was_soo && !set->empty();
3691
8
    const ctrl_t soo_slot_h2 =
3692
8
        had_soo_slot ? static_cast<ctrl_t>(H2(set->hash_of(set->soo_slot())))
3693
8
                     : ctrl_t::kEmpty;
3694
8
    HashSetResizeHelper resize_helper(common, was_soo, had_soo_slot,
3695
8
                                      forced_infoz);
3696
    // Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in
3697
    // HashSetResizeHelper constructor because it can't transfer slots when
3698
    // transfer_uses_memcpy is false.
3699
    // TODO(b/289225379): try to handle more of the SOO cases inside
3700
    // InitializeSlots. See comment on cl/555990034 snapshot #63.
3701
8
    if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) {
3702
8
      resize_helper.old_heap_or_soo() = common.heap_or_soo();
3703
8
    } else {
3704
0
      set->transfer(set->to_slot(resize_helper.old_soo_data()),
3705
0
                    set->soo_slot());
3706
0
    }
3707
8
    common.set_capacity(new_capacity);
3708
    // Note that `InitializeSlots` does a different number of initialization steps
3709
    // depending on the values of `transfer_uses_memcpy` and capacities.
3710
    // Refer to the comment in `InitializeSlots` for more details.
3711
8
    const bool grow_single_group =
3712
8
        resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
3713
8
                                      PolicyTraits::transfer_uses_memcpy(),
3714
8
                                      SooEnabled(), alignof(slot_type)>(
3715
8
            common, CharAlloc(set->alloc_ref()), soo_slot_h2, sizeof(key_type),
3716
8
            sizeof(value_type));
3717
3718
    // In the SooEnabled() case, capacity is never 0 so we don't check.
3719
8
    if (!SooEnabled() && resize_helper.old_capacity() == 0) {
3720
      // InitializeSlots did all the work including infoz().RecordRehash().
3721
2
      return;
3722
2
    }
3723
6
    assert(resize_helper.old_capacity() > 0);
3724
    // Nothing more to do in this case.
3725
6
    if (was_soo && !had_soo_slot) return;
3726
3727
6
    slot_type* new_slots = set->slot_array();
3728
6
    if (grow_single_group) {
3729
6
      if (PolicyTraits::transfer_uses_memcpy()) {
3730
        // InitializeSlots did all the work.
3731
6
        return;
3732
6
      }
3733
0
      if (was_soo) {
3734
0
        set->transfer(new_slots + resize_helper.SooSlotIndex(),
3735
0
                      to_slot(resize_helper.old_soo_data()));
3736
0
        return;
3737
0
      } else {
3738
        // We want GrowSizeIntoSingleGroup to be called here in order to make
3739
        // InitializeSlots not depend on PolicyTraits.
3740
0
        resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common,
3741
0
                                                            set->alloc_ref());
3742
0
      }
3743
0
    } else {
3744
      // InitializeSlots prepares control bytes to correspond to empty table.
3745
0
      const auto insert_slot = [&](slot_type* slot) {
3746
0
        size_t hash = PolicyTraits::apply(HashElement{set->hash_ref()},
3747
0
                                          PolicyTraits::element(slot));
3748
0
        auto target = find_first_non_full(common, hash);
3749
0
        SetCtrl(common, target.offset, H2(hash), sizeof(slot_type));
3750
0
        set->transfer(new_slots + target.offset, slot);
3751
0
        return target.probe_length;
3752
0
      };
3753
0
      if (was_soo) {
3754
0
        insert_slot(to_slot(resize_helper.old_soo_data()));
3755
0
        return;
3756
0
      } else {
3757
0
        auto* old_slots = static_cast<slot_type*>(resize_helper.old_slots());
3758
0
        size_t total_probe_length = 0;
3759
0
        for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
3760
0
          if (IsFull(resize_helper.old_ctrl()[i])) {
3761
0
            total_probe_length += insert_slot(old_slots + i);
3762
0
          }
3763
0
        }
3764
0
        common.infoz().RecordRehash(total_probe_length);
3765
0
      }
3766
0
    }
3767
0
    resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(set->alloc_ref()),
3768
0
                                                    sizeof(slot_type));
3769
0
  }
3770
3771
  // Casting directly from e.g. char* to slot_type* can cause compilation errors
3772
  // on Objective-C. This function converts to void* first, avoiding the issue.
3773
0
  static slot_type* to_slot(void* buf) { return static_cast<slot_type*>(buf); }
3774
3775
  // Requires that lhs does not have a full SOO slot.
3776
  static void move_common(bool rhs_is_full_soo, allocator_type& rhs_alloc,
3777
                          CommonFields& lhs, CommonFields&& rhs) {
3778
    if (PolicyTraits::transfer_uses_memcpy() || !rhs_is_full_soo) {
3779
      lhs = std::move(rhs);
3780
    } else {
3781
      lhs.move_non_heap_or_soo_fields(rhs);
3782
      rhs.RunWithReentrancyGuard([&] {
3783
        lhs.RunWithReentrancyGuard([&] {
3784
          PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()),
3785
                                 to_slot(rhs.soo_data()));
3786
        });
3787
      });
3788
    }
3789
  }
3790
3791
  // Swaps common fields making sure to avoid memcpy'ing a full SOO slot if we
3792
  // aren't allowed to do so.
3793
  void swap_common(raw_hash_set& that) {
3794
    using std::swap;
3795
    if (PolicyTraits::transfer_uses_memcpy()) {
3796
      swap(common(), that.common());
3797
      return;
3798
    }
3799
    CommonFields tmp = CommonFields(uninitialized_tag_t{});
3800
    const bool that_is_full_soo = that.is_full_soo();
3801
    move_common(that_is_full_soo, that.alloc_ref(), tmp,
3802
                std::move(that.common()));
3803
    move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common()));
3804
    move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp));
3805
  }
3806
3807
  void annotate_for_bug_detection_on_move(
3808
0
      ABSL_ATTRIBUTE_UNUSED raw_hash_set& that) {
3809
0
    // We only enable moved-from validation when generations are enabled (rather
3810
0
    // than using NDEBUG) to avoid issues in which NDEBUG is enabled in some
3811
0
    // translation units but not in others.
3812
0
    if (SwisstableGenerationsEnabled()) {
3813
0
      that.common().set_capacity(this == &that ? InvalidCapacity::kSelfMovedFrom
3814
0
                                               : InvalidCapacity::kMovedFrom);
3815
0
    }
3816
0
    if (!SwisstableGenerationsEnabled() || capacity() == DefaultCapacity() ||
3817
0
        capacity() > kAboveMaxValidCapacity) {
3818
0
      return;
3819
0
    }
3820
0
    common().increment_generation();
3821
0
    if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
3822
0
      resize(capacity());
3823
0
    }
3824
0
  }
3825
3826
  template <bool propagate_alloc>
3827
  raw_hash_set& assign_impl(raw_hash_set&& that) {
3828
    // We don't bother checking for this/that aliasing. We just need to avoid
3829
    // breaking the invariants in that case.
3830
    destructor_impl();
3831
    move_common(that.is_full_soo(), that.alloc_ref(), common(),
3832
                std::move(that.common()));
3833
    hash_ref() = that.hash_ref();
3834
    eq_ref() = that.eq_ref();
3835
    CopyAlloc(alloc_ref(), that.alloc_ref(),
3836
              std::integral_constant<bool, propagate_alloc>());
3837
    that.common() = CommonFields::CreateMovedFrom<SooEnabled()>();
3838
    annotate_for_bug_detection_on_move(that);
3839
    return *this;
3840
  }
3841
3842
  raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
3843
    const size_t size = that.size();
3844
    if (size == 0) return *this;
3845
    reserve(size);
3846
    for (iterator it = that.begin(); it != that.end(); ++it) {
3847
      insert(std::move(PolicyTraits::element(it.slot())));
3848
      that.destroy(it.slot());
3849
    }
3850
    if (!that.is_soo()) that.dealloc();
3851
    that.common() = CommonFields::CreateMovedFrom<SooEnabled()>();
3852
    annotate_for_bug_detection_on_move(that);
3853
    return *this;
3854
  }
3855
3856
  raw_hash_set& move_assign(raw_hash_set&& that,
3857
                            std::true_type /*propagate_alloc*/) {
3858
    return assign_impl<true>(std::move(that));
3859
  }
3860
  raw_hash_set& move_assign(raw_hash_set&& that,
3861
                            std::false_type /*propagate_alloc*/) {
3862
    if (alloc_ref() == that.alloc_ref()) {
3863
      return assign_impl<false>(std::move(that));
3864
    }
3865
    // Aliasing can't happen here because allocs would compare equal above.
3866
    assert(this != &that);
3867
    destructor_impl();
3868
    // We can't take over `that`'s memory, so we need to move each element.
3869
    // While moving elements, `this` should have `that`'s hash/eq, so copy
3870
    // hash/eq before moving the elements.
3871
    hash_ref() = that.hash_ref();
3872
    eq_ref() = that.eq_ref();
3873
    return move_elements_allocs_unequal(std::move(that));
3874
  }
3875
3876
  template <class K>
3877
0
  std::pair<iterator, bool> find_or_prepare_insert_soo(const K& key) {
3878
0
    if (empty()) {
3879
0
      const HashtablezInfoHandle infoz = try_sample_soo();
3880
0
      if (infoz.IsSampled()) {
3881
0
        resize_with_soo_infoz(infoz);
3882
0
      } else {
3883
0
        common().set_full_soo();
3884
0
        return {soo_iterator(), true};
3885
0
      }
3886
0
    } else if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
3887
0
                                   PolicyTraits::element(soo_slot()))) {
3888
0
      return {soo_iterator(), false};
3889
0
    } else {
3890
0
      resize(NextCapacity(SooCapacity()));
3891
0
    }
3892
0
    const size_t index =
3893
0
        PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common());
3894
0
    return {iterator_at(index), true};
3895
0
  }
3896
3897
  template <class K>
3898
16
  std::pair<iterator, bool> find_or_prepare_insert_non_soo(const K& key) {
3899
16
    assert(!is_soo());
3900
16
    prefetch_heap_block();
3901
16
    auto hash = hash_ref()(key);
3902
16
    auto seq = probe(common(), hash);
3903
16
    const ctrl_t* ctrl = control();
3904
16
    while (true) {
3905
16
      Group g{ctrl + seq.offset()};
3906
16
      for (uint32_t i : g.Match(H2(hash))) {
3907
2
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
3908
2
                EqualElement<K>{key, eq_ref()},
3909
2
                PolicyTraits::element(slot_array() + seq.offset(i)))))
3910
0
          return {iterator_at(seq.offset(i)), false};
3911
2
      }
3912
16
      auto mask_empty = g.MaskEmpty();
3913
16
      if (ABSL_PREDICT_TRUE(mask_empty)) {
3914
16
        size_t target = seq.offset(
3915
16
            GetInsertionOffset(mask_empty, capacity(), hash, control()));
3916
16
        return {iterator_at(PrepareInsertNonSoo(common(), hash,
3917
16
                                                FindInfo{target, seq.index()},
3918
16
                                                GetPolicyFunctions())),
3919
16
                true};
3920
16
      }
3921
0
      seq.next();
3922
0
      assert(seq.index() <= capacity() && "full table!");
3923
0
    }
3924
16
  }
3925
3926
 protected:
3927
  // Asserts for correctness that we run on find/find_or_prepare_insert.
3928
  template <class K>
3929
32
  void AssertOnFind(ABSL_ATTRIBUTE_UNUSED const K& key) {
3930
32
    AssertHashEqConsistent(key);
3931
32
    AssertNotDebugCapacity();
3932
32
  }
3933
3934
  // Asserts that the capacity is not a sentinel invalid value.
3935
32
  void AssertNotDebugCapacity() const {
3936
32
    if (ABSL_PREDICT_TRUE(capacity() <
3937
32
                          InvalidCapacity::kAboveMaxValidCapacity)) {
3938
32
      return;
3939
32
    }
3940
0
    assert(capacity() != InvalidCapacity::kReentrance &&
3941
0
           "Reentrant container access during element construction/destruction "
3942
0
           "is not allowed.");
3943
0
    assert(capacity() != InvalidCapacity::kDestroyed &&
3944
0
           "Use of destroyed hash table.");
3945
0
    if (SwisstableGenerationsEnabled() &&
3946
0
        ABSL_PREDICT_FALSE(capacity() >= InvalidCapacity::kMovedFrom)) {
3947
0
      if (capacity() == InvalidCapacity::kSelfMovedFrom) {
3948
        // If this log triggers, then a hash table was move-assigned to itself
3949
        // and then used again later without being reinitialized.
3950
0
        ABSL_RAW_LOG(FATAL, "Use of self-move-assigned hash table.");
3951
0
      }
3952
0
      ABSL_RAW_LOG(FATAL, "Use of moved-from hash table.");
3953
0
    }
3954
0
  }
3955
3956
  // Asserts that hash and equal functors provided by the user are consistent,
3957
  // meaning that `eq(k1, k2)` implies `hash(k1)==hash(k2)`.
3958
  template <class K>
3959
32
  void AssertHashEqConsistent(const K& key) {
3960
#ifdef NDEBUG
3961
    return;
3962
#endif
3963
    // If the hash/eq functors are known to be consistent, then skip validation.
3964
32
    if (std::is_same<hasher, absl::container_internal::StringHash>::value &&
3965
32
        std::is_same<key_equal, absl::container_internal::StringEq>::value) {
3966
32
      return;
3967
32
    }
3968
0
    if (std::is_scalar<key_type>::value &&
3969
0
        std::is_same<hasher, absl::Hash<key_type>>::value &&
3970
0
        std::is_same<key_equal, std::equal_to<key_type>>::value) {
3971
0
      return;
3972
0
    }
3973
0
    if (empty()) return;
3974
3975
0
    const size_t hash_of_arg = hash_ref()(key);
3976
0
    const auto assert_consistent = [&](const ctrl_t*, slot_type* slot) {
3977
0
      const value_type& element = PolicyTraits::element(slot);
3978
0
      const bool is_key_equal =
3979
0
          PolicyTraits::apply(EqualElement<K>{key, eq_ref()}, element);
3980
0
      if (!is_key_equal) return;
3981
3982
0
      const size_t hash_of_slot =
3983
0
          PolicyTraits::apply(HashElement{hash_ref()}, element);
3984
0
      ABSL_ATTRIBUTE_UNUSED const bool is_hash_equal =
3985
0
          hash_of_arg == hash_of_slot;
3986
0
      assert((!is_key_equal || is_hash_equal) &&
3987
0
             "eq(k1, k2) must imply that hash(k1) == hash(k2). "
3988
0
             "hash/eq functors are inconsistent.");
3989
0
    };
3990
3991
0
    if (is_soo()) {
3992
0
      assert_consistent(/*unused*/ nullptr, soo_slot());
3993
0
      return;
3994
0
    }
3995
    // We only do validation for small tables so that it's constant time.
3996
0
    if (capacity() > 16) return;
3997
0
    IterateOverFullSlots(common(), slot_array(), assert_consistent);
3998
0
  }
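  // Sketch of what "consistent" means here, using hypothetical functors
  // (illustrative, not part of this header): if the transparent equality treats
  // two keys as equal, the transparent hash must produce the same value for
  // both. A case-insensitive table must therefore pair a case-insensitive hash
  // with its case-insensitive equality:
  //
  //   struct CaseInsensitiveHash {
  //     using is_transparent = void;
  //     size_t operator()(absl::string_view s) const {
  //       return absl::Hash<std::string>{}(absl::AsciiStrToLower(s));
  //     }
  //   };
  //   struct CaseInsensitiveEq {
  //     using is_transparent = void;
  //     bool operator()(absl::string_view a, absl::string_view b) const {
  //       return absl::EqualsIgnoreCase(a, b);
  //     }
  //   };
  //
  // Pairing CaseInsensitiveEq with a case-sensitive hash would trip this
  // assertion: "ABC" and "abc" compare equal but would hash differently.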
3999
4000
  // Attempts to find `key` in the table; if it isn't found, returns an iterator
4001
  // into which the value can be inserted, with the control byte already set to
4002
  // `key`'s H2. Returns a bool indicating whether an insertion can take place.
4003
  template <class K>
4004
16
  std::pair<iterator, bool> find_or_prepare_insert(const K& key) {
4005
16
    AssertOnFind(key);
4006
16
    if (is_soo()) return find_or_prepare_insert_soo(key);
4007
16
    return find_or_prepare_insert_non_soo(key);
4008
16
  }
4009
4010
  // Constructs the value in the space pointed by the iterator. This only works
4011
  // after an unsuccessful find_or_prepare_insert() and before any other
4012
  // modifications happen in the raw_hash_set.
4013
  //
4014
  // PRECONDITION: iter was returned from find_or_prepare_insert(k), where k is
4015
  // the key decomposed from `forward<Args>(args)...`, and the bool returned by
4016
  // find_or_prepare_insert(k) was true.
4017
  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
4018
  template <class... Args>
4019
16
  void emplace_at(iterator iter, Args&&... args) {
4020
16
    construct(iter.slot(), std::forward<Args>(args)...);
4021
4022
16
    assert(PolicyTraits::apply(FindElement{*this}, *iter) == iter &&
4023
16
           "constructed value does not match the lookup key");
4024
16
  }
4025
4026
32
  iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
4027
32
    return {control() + i, slot_array() + i, common().generation_ptr()};
4028
32
  }
4029
  const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
4030
    return const_cast<raw_hash_set*>(this)->iterator_at(i);
4031
  }
4032
4033
  reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
4034
4035
 private:
4036
  friend struct RawHashSetTestOnlyAccess;
4037
4038
  // The number of slots we can still fill without needing to rehash.
4039
  //
4040
  // This is stored separately due to tombstones: we do not include tombstones
4041
  // in the growth capacity, because we'd like to rehash when the table is
4042
  // otherwise filled with tombstones: otherwise, probe sequences might get
4043
  // unacceptably long without triggering a rehash. Callers can also force a
4044
  // rehash via the standard `rehash(0)`, which will recompute this value as a
4045
  // side-effect.
4046
  //
4047
  // See `CapacityToGrowth()`.
4048
  size_t growth_left() const {
4049
    assert(!is_soo());
4050
    return common().growth_left();
4051
  }
4052
4053
  GrowthInfo& growth_info() {
4054
    assert(!is_soo());
4055
    return common().growth_info();
4056
  }
4057
  GrowthInfo growth_info() const {
4058
    assert(!is_soo());
4059
    return common().growth_info();
4060
  }
4061
4062
  // Prefetch the heap-allocated memory region to resolve potential TLB and
4063
  // cache misses. This is intended to overlap with execution of calculating the
4064
  // hash for a key.
4065
32
  void prefetch_heap_block() const {
4066
32
    assert(!is_soo());
4067
32
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
4068
32
    __builtin_prefetch(control(), 0, 1);
4069
32
#endif
4070
32
  }
4071
4072
96
  CommonFields& common() { return settings_.template get<0>(); }
4073
490
  const CommonFields& common() const { return settings_.template get<0>(); }
4074
4075
112
  ctrl_t* control() const {
4076
112
    assert(!is_soo());
4077
112
    return common().control();
4078
112
  }
4079
57
  slot_type* slot_array() const {
4080
57
    assert(!is_soo());
4081
57
    return static_cast<slot_type*>(common().slot_array());
4082
57
  }
4083
0
  slot_type* soo_slot() {
4084
0
    assert(is_soo());
4085
0
    return static_cast<slot_type*>(common().soo_data());
4086
0
  }
4087
  const slot_type* soo_slot() const {
4088
    return const_cast<raw_hash_set*>(this)->soo_slot();
4089
  }
4090
0
  iterator soo_iterator() {
4091
0
    return {SooControl(), soo_slot(), common().generation_ptr()};
4092
0
  }
4093
  const_iterator soo_iterator() const {
4094
    return const_cast<raw_hash_set*>(this)->soo_iterator();
4095
  }
4096
0
  HashtablezInfoHandle infoz() {
4097
0
    assert(!is_soo());
4098
0
    return common().infoz();
4099
0
  }
4100
4101
32
  hasher& hash_ref() { return settings_.template get<1>(); }
4102
0
  const hasher& hash_ref() const { return settings_.template get<1>(); }
4103
19
  key_equal& eq_ref() { return settings_.template get<2>(); }
4104
  const key_equal& eq_ref() const { return settings_.template get<2>(); }
4105
24
  allocator_type& alloc_ref() { return settings_.template get<3>(); }
4106
  const allocator_type& alloc_ref() const {
4107
    return settings_.template get<3>();
4108
  }
4109
4110
0
  static const void* get_hash_ref_fn(const CommonFields& common) {
4111
0
    auto* h = reinterpret_cast<const raw_hash_set*>(&common);
4112
0
    return &h->hash_ref();
4113
0
  }
4114
0
  static void transfer_slot_fn(void* set, void* dst, void* src) {
4115
0
    auto* h = static_cast<raw_hash_set*>(set);
4116
0
    h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
4117
0
  }
4118
  // Note: dealloc_fn will only be used if we have a non-standard allocator.
4119
0
  static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
4120
0
    auto* set = reinterpret_cast<raw_hash_set*>(&common);
4121
0
4122
0
    // Unpoison before returning the memory to the allocator.
4123
0
    SanitizerUnpoisonMemoryRegion(common.slot_array(),
4124
0
                                  sizeof(slot_type) * common.capacity());
4125
0
4126
0
    common.infoz().Unregister();
4127
0
    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
4128
0
        &set->alloc_ref(), common.backing_array_start(),
4129
0
        common.alloc_size(sizeof(slot_type), alignof(slot_type)));
4130
0
  }
4131
4132
16
  static const PolicyFunctions& GetPolicyFunctions() {
4133
16
    static constexpr PolicyFunctions value = {
4134
16
        sizeof(slot_type),
4135
        // TODO(b/328722020): try to type erase
4136
        // for standard layout and alignof(Hash) <= alignof(CommonFields).
4137
16
        std::is_empty<hasher>::value ? &GetHashRefForEmptyHasher
4138
16
                                     : &raw_hash_set::get_hash_ref_fn,
4139
16
        PolicyTraits::template get_hash_slot_fn<hasher>(),
4140
16
        PolicyTraits::transfer_uses_memcpy()
4141
16
            ? TransferRelocatable<sizeof(slot_type)>
4142
16
            : &raw_hash_set::transfer_slot_fn,
4143
16
        (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
4144
16
             ? &DeallocateStandard<alignof(slot_type)>
4145
16
             : &raw_hash_set::dealloc_fn),
4146
16
        &raw_hash_set::resize_impl,
4147
16
    };
4148
16
    return value;
4149
16
  }
4150
4151
  // Bundle together CommonFields plus other objects which might be empty.
4152
  // CompressedTuple will ensure that sizeof is not affected by any of the empty
4153
  // fields that occur after CommonFields.
4154
  absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
4155
                                            allocator_type>
4156
      settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
4157
                key_equal{}, allocator_type{}};
4158
};
4159
4160
// Friend access for free functions in raw_hash_set.h.
4161
struct HashtableFreeFunctionsAccess {
4162
  template <class Predicate, typename Set>
4163
  static typename Set::size_type EraseIf(Predicate& pred, Set* c) {
4164
    if (c->empty()) {
4165
      return 0;
4166
    }
4167
    if (c->is_soo()) {
4168
      auto it = c->soo_iterator();
4169
      if (!pred(*it)) {
4170
        assert(c->size() == 1 && "hash table was modified unexpectedly");
4171
        return 0;
4172
      }
4173
      c->destroy(it.slot());
4174
      c->common().set_empty_soo();
4175
      return 1;
4176
    }
4177
    ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
4178
    size_t num_deleted = 0;
4179
    IterateOverFullSlots(
4180
        c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) {
4181
          if (pred(Set::PolicyTraits::element(slot))) {
4182
            c->destroy(slot);
4183
            EraseMetaOnly(c->common(), static_cast<size_t>(ctrl - c->control()),
4184
                          sizeof(*slot));
4185
            ++num_deleted;
4186
          }
4187
        });
4188
    // NOTE: IterateOverFullSlots allows removal of the current element, so we
4189
    // verify the size additionally here.
4190
    assert(original_size_for_assert - num_deleted == c->size() &&
4191
           "hash table was modified unexpectedly");
4192
    return num_deleted;
4193
  }
4194
4195
  template <class Callback, typename Set>
4196
  static void ForEach(Callback& cb, Set* c) {
4197
    if (c->empty()) {
4198
      return;
4199
    }
4200
    if (c->is_soo()) {
4201
      cb(*c->soo_iterator());
4202
      return;
4203
    }
4204
    using ElementTypeWithConstness = decltype(*c->begin());
4205
    IterateOverFullSlots(
4206
        c->common(), c->slot_array(), [&cb](const ctrl_t*, auto* slot) {
4207
          ElementTypeWithConstness& element = Set::PolicyTraits::element(slot);
4208
          cb(element);
4209
        });
4210
  }
4211
};
4212
4213
// Erases all elements that satisfy the predicate `pred` from the container `c`.
4214
template <typename P, typename H, typename E, typename A, typename Predicate>
4215
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
4216
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
4217
  return HashtableFreeFunctionsAccess::EraseIf(pred, c);
4218
}
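// For illustration (editorial sketch): this is the implementation behind the
// public absl::erase_if() overloads declared in the container headers, which
// in recent Abseil releases return the number of erased elements.
//
//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   size_t removed = absl::erase_if(s, [](int v) { return v % 2 == 0; });
//   // removed == 2; s now holds {1, 3}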
4219
4220
// Calls `cb` for all elements in the container `c`.
4221
template <typename P, typename H, typename E, typename A, typename Callback>
4222
void ForEach(Callback& cb, raw_hash_set<P, H, E, A>* c) {
4223
  return HashtableFreeFunctionsAccess::ForEach(cb, c);
4224
}
4225
template <typename P, typename H, typename E, typename A, typename Callback>
4226
void ForEach(Callback& cb, const raw_hash_set<P, H, E, A>* c) {
4227
  return HashtableFreeFunctionsAccess::ForEach(cb, c);
4228
}
4229
4230
namespace hashtable_debug_internal {
4231
template <typename Set>
4232
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
4233
  using Traits = typename Set::PolicyTraits;
4234
  using Slot = typename Traits::slot_type;
4235
4236
  static size_t GetNumProbes(const Set& set,
4237
                             const typename Set::key_type& key) {
4238
    if (set.is_soo()) return 0;
4239
    size_t num_probes = 0;
4240
    size_t hash = set.hash_ref()(key);
4241
    auto seq = probe(set.common(), hash);
4242
    const ctrl_t* ctrl = set.control();
4243
    while (true) {
4244
      container_internal::Group g{ctrl + seq.offset()};
4245
      for (uint32_t i : g.Match(container_internal::H2(hash))) {
4246
        if (Traits::apply(
4247
                typename Set::template EqualElement<typename Set::key_type>{
4248
                    key, set.eq_ref()},
4249
                Traits::element(set.slot_array() + seq.offset(i))))
4250
          return num_probes;
4251
        ++num_probes;
4252
      }
4253
      if (g.MaskEmpty()) return num_probes;
4254
      seq.next();
4255
      ++num_probes;
4256
    }
4257
  }
4258
4259
  static size_t AllocatedByteSize(const Set& c) {
4260
    size_t capacity = c.capacity();
4261
    if (capacity == 0) return 0;
4262
    size_t m =
4263
        c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot));
4264
4265
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
4266
    if (per_slot != ~size_t{}) {
4267
      m += per_slot * c.size();
4268
    } else {
4269
      for (auto it = c.begin(); it != c.end(); ++it) {
4270
        m += Traits::space_used(it.slot());
4271
      }
4272
    }
4273
    return m;
4274
  }
4275
};
4276
4277
}  // namespace hashtable_debug_internal
4278
}  // namespace container_internal
4279
ABSL_NAMESPACE_END
4280
}  // namespace absl
4281
4282
#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
4283
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
4284
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN
4285
4286
#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_