Coverage Report

Created: 2023-09-25 06:27

/src/abseil-cpp/absl/container/internal/raw_hash_set.h
Line
Count
Source
1
// Copyright 2018 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// An open-addressing
16
// hashtable with quadratic probing.
17
//
18
// This is a low level hashtable on top of which different interfaces can be
19
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
20
//
21
// The table interface is similar to that of std::unordered_set. Notable
22
// differences are that most member functions support heterogeneous keys when
23
// BOTH the hash and eq functions are marked as transparent. They do so by
24
// providing a typedef called `is_transparent`.
25
//
26
// When heterogeneous lookup is enabled, functions that take key_type act as if
27
// they have an overload set like:
28
//
29
//   iterator find(const key_type& key);
30
//   template <class K>
31
//   iterator find(const K& key);
32
//
33
//   size_type erase(const key_type& key);
34
//   template <class K>
35
//   size_type erase(const K& key);
36
//
37
//   std::pair<iterator, iterator> equal_range(const key_type& key);
38
//   template <class K>
39
//   std::pair<iterator, iterator> equal_range(const K& key);
40
//
41
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
42
// exist.
43
//
44
// find() also supports passing the hash explicitly:
45
//
46
//   iterator find(const key_type& key, size_t hash);
47
//   template <class U>
48
//   iterator find(const U& key, size_t hash);
49
//
50
// In addition the pointer to element and iterator stability guarantees are
51
// weaker: all iterators and pointers are invalidated after a new element is
52
// inserted.
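
As an illustration of the heterogeneous lookup described above (this example is not part of the header), a minimal sketch using absl::flat_hash_set, which is built on this table; the StringHash/StringEq functor names are invented for the example. Because both functors define `is_transparent`, `find` accepts a std::string_view without materializing a temporary std::string.

#include <cstddef>
#include <functional>
#include <string>
#include <string_view>
#include "absl/container/flat_hash_set.h"

// Transparent hash/eq: the `is_transparent` typedef is what enables the
// heterogeneous overload set (find/erase/equal_range taking any K).
struct StringHash {
  using is_transparent = void;
  size_t operator()(std::string_view v) const {
    return std::hash<std::string_view>{}(v);
  }
};
struct StringEq {
  using is_transparent = void;
  bool operator()(std::string_view a, std::string_view b) const { return a == b; }
};

int main() {
  absl::flat_hash_set<std::string, StringHash, StringEq> s = {"alpha", "beta"};
  // Heterogeneous find: no temporary std::string is built for the probe key.
  auto it = s.find(std::string_view("alpha"));
  return it != s.end() ? 0 : 1;
}
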
53
//
54
// IMPLEMENTATION DETAILS
55
//
56
// # Table Layout
57
//
58
// A raw_hash_set's backing array consists of control bytes followed by slots
59
// that may or may not contain objects.
60
//
61
// The layout of the backing array, for `capacity` slots, is thus, as a
62
// pseudo-struct:
63
//
64
//   struct BackingArray {
65
//     // Sampling handler. This field isn't present when sampling is
66
//     // disabled or this allocation hasn't been selected for sampling.
67
//     HashtablezInfoHandle infoz_;
68
//     // The number of elements we can insert before growing the capacity.
69
//     size_t growth_left;
70
//     // Control bytes for the "real" slots.
71
//     ctrl_t ctrl[capacity];
72
//     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
73
//     // stop and serves no other purpose.
74
//     ctrl_t sentinel;
75
//     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
76
//     // that if a probe sequence picks a value near the end of `ctrl`,
77
//     // `Group` will have valid control bytes to look at.
78
//     ctrl_t clones[kWidth - 1];
79
//     // The actual slot data.
80
//     slot_type slots[capacity];
81
//   };
82
//
83
// The length of this array is computed by `AllocSize()` below.
84
//
85
// Control bytes (`ctrl_t`) are bytes (collected into groups of a
86
// platform-specific size) that define the state of the corresponding slot in
87
// the slot array. Group manipulation is tightly optimized to be as efficient
88
// as possible: SSE and friends on x86, clever bit operations on other arches.
89
//
90
//      Group 1         Group 2        Group 3
91
// +---------------+---------------+---------------+
92
// | | | | | | | | | | | | | | | | | | | | | | | | |
93
// +---------------+---------------+---------------+
94
//
95
// Each control byte is either a special value for empty slots, deleted slots
96
// (sometimes called *tombstones*), and a special end-of-table marker used by
97
// iterators, or, if occupied, seven bits (H2) from the hash of the value in the
98
// corresponding slot.
99
//
100
// Storing control bytes in a separate array also has beneficial cache effects,
101
// since more logical slots will fit into a cache line.
102
//
103
// # Hashing
104
//
105
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
106
// `H1(hash(x))` is an index into `slots`, and essentially the starting point
107
// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
108
// objects that cannot possibly be the one we are looking for.
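
A toy numerical sketch (not part of the header) of the H1/H2 split just described; the per-table salt that the real H1 mixes in is omitted here, and the hash value and capacity are arbitrary.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t hash = 0x9E3779B97F4A7C15ull;  // some 64-bit hash value
  const uint64_t h1 = hash >> 7;                // high 57 bits: probe starting point
  const uint8_t  h2 = hash & 0x7F;              // low 7 bits: control-byte tag
  const uint64_t capacity = 31;                 // 2^5 - 1, so it doubles as a mask
  std::printf("start slot = %llu, control tag = 0x%02x\n",
              (unsigned long long)(h1 & capacity), static_cast<unsigned>(h2));
  return 0;
}
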
109
//
110
// # Table operations.
111
//
112
// The key operations are `insert`, `find`, and `erase`.
113
//
114
// Since `insert` and `erase` are implemented in terms of `find`, we describe
115
// `find` first. To `find` a value `x`, we compute `hash(x)`. From
116
// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
117
// group of slots in some interesting order.
118
//
119
// We now walk through these indices. At each index, we select the entire group
120
// starting with that index and extract potential candidates: occupied slots
121
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
122
// group, we stop: the value is not in the table. Each candidate slot `y` is compared with
123
// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
124
// next probe index. Tombstones effectively behave like full slots that never
125
// match the value we're looking for.
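
A simplified, scalar sketch of the `find` walk described above, under assumptions that differ from the real table: one control byte is checked at a time instead of a whole `Group`, the slot count is a power of two rather than `2^m - 1` with a sentinel, and deleted slots are ignored.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

constexpr int8_t kEmptyByte = -128;  // mirrors ctrl_t::kEmpty

// Returns the slot index of `key`, or -1 if absent. ctrl.size() is a power of
// two here (the real table uses 2^m - 1 slots plus a sentinel byte).
long FindSlot(const std::vector<int8_t>& ctrl, const std::vector<int>& slots,
              int key) {
  const size_t mask = ctrl.size() - 1;
  const size_t hash = std::hash<int>{}(key);
  const int8_t tag = static_cast<int8_t>(hash & 0x7F);  // H2
  size_t offset = (hash >> 7) & mask;                   // H1 picks the start
  for (size_t step = 1; step <= ctrl.size(); ++step) {
    if (ctrl[offset] == tag && slots[offset] == key) return (long)offset;
    if (ctrl[offset] == kEmptyByte) return -1;  // probe chain ends at an empty
    offset = (offset + step) & mask;            // triangular probing, wraps
  }
  return -1;
}

int main() {
  const size_t num_slots = 16;
  std::vector<int8_t> ctrl(num_slots, kEmptyByte);
  std::vector<int> slots(num_slots, 0);
  // Place 42 where its probe sequence starts, as an insert would.
  const size_t h = std::hash<int>{}(42);
  const size_t pos = (h >> 7) & (num_slots - 1);
  ctrl[pos] = static_cast<int8_t>(h & 0x7F);
  slots[pos] = 42;
  return FindSlot(ctrl, slots, 42) == (long)pos ? 0 : 1;
}
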
126
//
127
// The `H2` bits ensure when we compare a slot to an object with `==`, we are
128
// likely to have actually found the object.  That is, the chance is low that
129
// `==` is called and returns `false`.  Thus, when we search for an object, we
130
// are unlikely to call `==` many times.  This likelihood can be analyzed as
131
// follows (assuming that H2 is a random enough hash function).
132
//
133
// Let's assume that there are `k` "wrong" objects that must be examined in a
134
// probe sequence.  For example, when doing a `find` on an object that is in the
135
// table, `k` is the number of objects between the start of the probe sequence
136
// and the final found object (not including the final found object).  The
137
// expected number of objects with an H2 match is then `k/128`.  Measurements
138
// and analysis indicate that even at high load factors, `k` is less than 32,
139
// meaning that the number of "false positive" comparisons we must perform is
140
// less than 32/128, i.e. about 1/4, per `find`.
141
142
// `insert` is implemented in terms of `unchecked_insert`, which inserts a
143
// value presumed to not be in the table (violating this requirement will cause
144
// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
145
// it, we construct a `probe_seq` once again, and use it to find the first
146
// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
147
// first such slot in the group and mark it as full with `x`'s H2.
148
//
149
// To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)` and
150
// perform a `find` to see if it's already present; if it is, we're done. If
151
// it's not, we may decide the table is getting overcrowded (i.e. the load
152
// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
153
// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
154
// each element of the table into the new array (we know that no insertion here
155
// will insert an already-present value), and discard the old backing array. At
156
// this point, we may `unchecked_insert` the value `x`.
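
Continuing the same simplified scalar model (power-of-two slot count, no groups, no growth policy), a rough sketch of the `unchecked_insert` step: probe until an empty or deleted byte is found, then claim that slot with the value's H2 tag.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

constexpr int8_t kEmptyByte = -128;   // mirrors ctrl_t::kEmpty
constexpr int8_t kDeletedByte = -2;   // mirrors ctrl_t::kDeleted

// Places `key`, assumed absent, into the first unoccupied slot on its probe
// sequence and returns that index. The real table always keeps at least one
// empty slot, which is what guarantees this loop terminates.
size_t UncheckedInsert(std::vector<int8_t>& ctrl, std::vector<int>& slots,
                       int key) {
  const size_t mask = ctrl.size() - 1;
  const size_t hash = std::hash<int>{}(key);
  size_t offset = (hash >> 7) & mask;
  for (size_t step = 1;; ++step) {
    if (ctrl[offset] == kEmptyByte || ctrl[offset] == kDeletedByte) {
      ctrl[offset] = static_cast<int8_t>(hash & 0x7F);  // mark full with H2
      slots[offset] = key;
      return offset;
    }
    offset = (offset + step) & mask;  // same triangular probe as find
  }
}

int main() {
  std::vector<int8_t> ctrl(16, kEmptyByte);
  std::vector<int> slots(16, 0);
  const size_t a = UncheckedInsert(ctrl, slots, 1);
  const size_t b = UncheckedInsert(ctrl, slots, 2);
  return (ctrl[a] >= 0 && ctrl[b] >= 0) ? 0 : 1;  // both bytes now marked full
}
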
157
//
158
// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
159
// presents a viable, initialized slot pointee to the caller.
160
//
161
// `erase` is implemented in terms of `erase_at`, which takes an index to a
162
// slot. Given an offset, we simply create a tombstone and destroy its contents.
163
// If we can prove that the slot would not appear in a probe sequence, we can
164
// mark the slot as empty instead. We can prove this by observing that if a
165
// group has any empty slots, it has never been full (assuming we never create
166
// an empty slot in a group with no empties, which this heuristic guarantees we
167
// never do) and find would stop at this group anyway (since it does not probe
168
// beyond groups with empties).
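
A simplified scalar sketch of the tombstone-vs-empty decision described above, again in the toy model (power-of-two slot count, group width 8): if no window of `kGroupWidth` consecutive non-empty bytes can cover the erased slot, the slot's group was never full and the byte can go back to empty; otherwise it becomes a tombstone.

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr int8_t kEmptyByte = -128;
constexpr int8_t kDeletedByte = -2;
constexpr size_t kGroupWidth = 8;

// Erases the control byte at `index`: counts the runs of non-empty bytes
// immediately before and after it; if the runs plus the slot itself are
// shorter than a group, no probe window ever saw this group as full.
void EraseAt(std::vector<int8_t>& ctrl, size_t index) {
  const size_t mask = ctrl.size() - 1;
  size_t nonempty_before = 0, nonempty_after = 0;
  for (size_t i = (index + mask) & mask;  // i.e. index - 1, wrapping around
       ctrl[i] != kEmptyByte && nonempty_before < kGroupWidth;
       i = (i + mask) & mask) {
    ++nonempty_before;
  }
  for (size_t i = (index + 1) & mask;
       ctrl[i] != kEmptyByte && nonempty_after < kGroupWidth;
       i = (i + 1) & mask) {
    ++nonempty_after;
  }
  const bool was_never_full =
      nonempty_before + nonempty_after + 1 < kGroupWidth;
  ctrl[index] = was_never_full ? kEmptyByte : kDeletedByte;
}

int main() {
  std::vector<int8_t> ctrl(16, kEmptyByte);
  ctrl[3] = 0x11;  // a lone full slot surrounded by empties
  EraseAt(ctrl, 3);
  return ctrl[3] == kEmptyByte ? 0 : 1;  // isolated slot goes back to empty
}
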
169
//
170
// `erase` is `erase_at` composed with `find`: if we
171
// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
172
// slot.
173
//
174
// To iterate, we simply traverse the array, skipping empty and deleted slots
175
// and stopping when we hit a `kSentinel`.
176
177
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
178
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
179
180
#include <algorithm>
181
#include <cassert>
182
#include <cmath>
183
#include <cstddef>
184
#include <cstdint>
185
#include <cstring>
186
#include <initializer_list>
187
#include <iterator>
188
#include <limits>
189
#include <memory>
190
#include <string>
191
#include <tuple>
192
#include <type_traits>
193
#include <utility>
194
195
#include "absl/base/attributes.h"
196
#include "absl/base/config.h"
197
#include "absl/base/internal/endian.h"
198
#include "absl/base/internal/raw_logging.h"
199
#include "absl/base/macros.h"
200
#include "absl/base/optimization.h"
201
#include "absl/base/options.h"
202
#include "absl/base/port.h"
203
#include "absl/base/prefetch.h"
204
#include "absl/container/internal/common.h"
205
#include "absl/container/internal/compressed_tuple.h"
206
#include "absl/container/internal/container_memory.h"
207
#include "absl/container/internal/hash_policy_traits.h"
208
#include "absl/container/internal/hashtable_debug_hooks.h"
209
#include "absl/container/internal/hashtablez_sampler.h"
210
#include "absl/memory/memory.h"
211
#include "absl/meta/type_traits.h"
212
#include "absl/numeric/bits.h"
213
#include "absl/utility/utility.h"
214
215
#ifdef ABSL_INTERNAL_HAVE_SSE2
216
#include <emmintrin.h>
217
#endif
218
219
#ifdef ABSL_INTERNAL_HAVE_SSSE3
220
#include <tmmintrin.h>
221
#endif
222
223
#ifdef _MSC_VER
224
#include <intrin.h>
225
#endif
226
227
#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
228
#include <arm_neon.h>
229
#endif
230
231
namespace absl {
232
ABSL_NAMESPACE_BEGIN
233
namespace container_internal {
234
235
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
236
#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
237
#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
238
    defined(ABSL_HAVE_MEMORY_SANITIZER)
239
// When compiled in sanitizer mode, we add generation integers to the backing
240
// array and iterators. In the backing array, we store the generation between
241
// the control bytes and the slots. When iterators are dereferenced, we assert
242
// that the container has not been mutated in a way that could cause iterator
243
// invalidation since the iterator was initialized.
244
#define ABSL_SWISSTABLE_ENABLE_GENERATIONS
245
#endif
246
247
// We use uint8_t so we don't need to worry about padding.
248
using GenerationType = uint8_t;
249
250
// A sentinel value for empty generations. Using 0 makes it easy to constexpr
251
// initialize an array of this value.
252
8
constexpr GenerationType SentinelEmptyGeneration() { return 0; }
253
254
8
constexpr GenerationType NextGeneration(GenerationType generation) {
255
8
  return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
256
8
}
257
258
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
259
constexpr bool SwisstableGenerationsEnabled() { return true; }
260
constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
261
#else
262
0
constexpr bool SwisstableGenerationsEnabled() { return false; }
263
22
constexpr size_t NumGenerationBytes() { return 0; }
264
#endif
265
266
template <typename AllocType>
267
void SwapAlloc(AllocType& lhs, AllocType& rhs,
268
               std::true_type /* propagate_on_container_swap */) {
269
  using std::swap;
270
  swap(lhs, rhs);
271
}
272
template <typename AllocType>
273
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
274
               std::false_type /* propagate_on_container_swap */) {}
275
276
// The state for a probe sequence.
277
//
278
// Currently, the sequence is a triangular progression of the form
279
//
280
//   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
281
//
282
// The use of `Width` ensures that each probe step does not overlap groups;
283
// the sequence effectively outputs the addresses of *groups* (although not
284
// necessarily aligned to any boundary). The `Group` machinery allows us
285
// to check an entire group with minimal branching.
286
//
287
// Wrapping around at `mask + 1` is important, but not for the obvious reason.
288
// As described above, the first few entries of the control byte array
289
// are mirrored at the end of the array, which `Group` will find and use
290
// for selecting candidates. However, when those candidates' slots are
291
// actually inspected, there are no corresponding slots for the cloned bytes,
292
// so we need to make sure we've treated those offsets as "wrapping around".
293
//
294
// It turns out that this probe sequence visits every group exactly once if the
295
// number of groups is a power of two, since (i^2+i)/2 is a bijection in
296
// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
297
template <size_t Width>
298
class probe_seq {
299
 public:
300
  // Creates a new probe sequence using `hash` as the initial value of the
301
  // sequence and `mask` (usually the capacity of the table) as the mask to
302
  // apply to each value in the progression.
303
90
  probe_seq(size_t hash, size_t mask) {
304
90
    assert(((mask + 1) & mask) == 0 && "not a mask");
305
0
    mask_ = mask;
306
90
    offset_ = hash & mask_;
307
90
  }
308
309
  // The offset within the table, i.e., the value `p(i)` above.
310
90
  size_t offset() const { return offset_; }
311
90
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }
312
313
0
  void next() {
314
0
    index_ += Width;
315
0
    offset_ += index_;
316
0
    offset_ &= mask_;
317
0
  }
318
  // 0-based probe index, a multiple of `Width`.
319
50
  size_t index() const { return index_; }
320
321
 private:
322
  size_t mask_;
323
  size_t offset_;
324
  size_t index_ = 0;
325
};
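
A standalone check (not using the class above) of the coverage property claimed in the comment: for a power-of-two number of positions, the triangular progression visits every position exactly once before repeating. Width is taken as 1 to keep the arithmetic obvious.

#include <cstddef>
#include <vector>

int main() {
  const size_t size = 64;        // mask + 1
  const size_t mask = size - 1;
  std::vector<bool> seen(size, false);
  size_t offset = 13 & mask;     // arbitrary starting hash
  size_t index = 0;
  for (size_t i = 0; i < size; ++i) {
    if (seen[offset]) return 1;  // revisited before full coverage: would be a bug
    seen[offset] = true;
    index += 1;                  // Width == 1
    offset = (offset + index) & mask;  // p(i) = hash + (i^2 + i)/2 (mod size)
  }
  return 0;                      // every position visited exactly once
}
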
326
327
template <class ContainerKey, class Hash, class Eq>
328
struct RequireUsableKey {
329
  template <class PassedKey, class... Args>
330
  std::pair<
331
      decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
332
      decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
333
                                         std::declval<const PassedKey&>()))>*
334
  operator()(const PassedKey&, const Args&...) const;
335
};
336
337
template <class E, class Policy, class Hash, class Eq, class... Ts>
338
struct IsDecomposable : std::false_type {};
339
340
template <class Policy, class Hash, class Eq, class... Ts>
341
struct IsDecomposable<
342
    absl::void_t<decltype(Policy::apply(
343
        RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
344
        std::declval<Ts>()...))>,
345
    Policy, Hash, Eq, Ts...> : std::true_type {};
346
347
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
348
template <class T>
349
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
350
  using std::swap;
351
  return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
352
}
353
template <class T>
354
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
355
  return false;
356
}
357
358
template <typename T>
359
60
uint32_t TrailingZeros(T x) {
360
60
  ABSL_ASSUME(x != 0);
361
0
  return static_cast<uint32_t>(countr_zero(x));
362
60
}
363
364
// An abstract bitmask, such as that emitted by a SIMD instruction.
365
//
366
// Specifically, this type implements a simple bitset whose representation is
367
// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
368
// of abstract bits in the bitset, while `Shift` is the log-base-two of the
369
// width of an abstract bit in the representation.
370
// This mask provides operations for any number of real bits set in an abstract
371
// bit. To add iteration on top of that, the implementation must guarantee that no more
372
// than the most significant real bit is set in a set abstract bit.
373
template <class T, int SignificantBits, int Shift = 0>
374
class NonIterableBitMask {
375
 public:
376
150
  explicit NonIterableBitMask(T mask) : mask_(mask) {}
377
378
70
  explicit operator bool() const { return this->mask_ != 0; }
379
380
  // Returns the index of the lowest *abstract* bit set in `self`.
381
60
  uint32_t LowestBitSet() const {
382
60
    return container_internal::TrailingZeros(mask_) >> Shift;
383
60
  }
384
385
  // Returns the index of the highest *abstract* bit set in `self`.
386
10
  uint32_t HighestBitSet() const {
387
10
    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
388
10
  }
389
390
  // Returns the number of trailing zero *abstract* bits.
391
0
  uint32_t TrailingZeros() const {
392
0
    return container_internal::TrailingZeros(mask_) >> Shift;
393
0
  }
394
395
  // Returns the number of leading zero *abstract* bits.
396
0
  uint32_t LeadingZeros() const {
397
0
    constexpr int total_significant_bits = SignificantBits << Shift;
398
0
    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
399
0
    return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
400
0
  }
401
402
  T mask_;
403
};
404
405
// A mask that can be iterated over.
406
//
407
// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
408
// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
409
// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
410
// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
411
//
412
// For example:
413
//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
414
//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
415
template <class T, int SignificantBits, int Shift = 0>
416
class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
417
  using Base = NonIterableBitMask<T, SignificantBits, Shift>;
418
  static_assert(std::is_unsigned<T>::value, "");
419
  static_assert(Shift == 0 || Shift == 3, "");
420
421
 public:
422
80
  explicit BitMask(T mask) : Base(mask) {}
423
  // BitMask is an iterator over the indices of its abstract bits.
424
  using value_type = int;
425
  using iterator = BitMask;
426
  using const_iterator = BitMask;
427
428
0
  BitMask& operator++() {
429
0
    if (Shift == 3) {
430
0
      constexpr uint64_t msbs = 0x8080808080808080ULL;
431
0
      this->mask_ &= msbs;
432
0
    }
433
0
    this->mask_ &= (this->mask_ - 1);
434
0
    return *this;
435
0
  }
436
437
20
  uint32_t operator*() const { return Base::LowestBitSet(); }
438
439
40
  BitMask begin() const { return *this; }
440
40
  BitMask end() const { return BitMask(0); }
441
442
 private:
443
  friend bool operator==(const BitMask& a, const BitMask& b) {
444
    return a.mask_ == b.mask_;
445
  }
446
40
  friend bool operator!=(const BitMask& a, const BitMask& b) {
447
40
    return a.mask_ != b.mask_;
448
40
  }
449
};
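
A standalone sketch (not using the internal class) of the same iteration idea for the `Shift == 0` case: take the lowest set bit as the next candidate index, then clear it, exactly as `operator*` and `operator++` above do. `std::countr_zero` requires C++20.

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t mask = 0b0000001000000101u;  // bits 0, 2, 9 set
  while (mask != 0) {
    const int i = std::countr_zero(mask);  // LowestBitSet(): lowest set bit index
    std::printf("candidate slot %d\n", i);
    mask &= mask - 1;                      // operator++: clear the lowest set bit
  }
  return 0;
}
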
450
451
using h2_t = uint8_t;
452
453
// The values here are selected for maximum performance. See the static asserts
454
// below for details.
455
456
// A `ctrl_t` is a single control byte, which can have one of four
457
// states: empty, deleted, full (which has an associated seven-bit h2_t value)
458
// and the sentinel. They have the following bit patterns:
459
//
460
//      empty: 1 0 0 0 0 0 0 0
461
//    deleted: 1 1 1 1 1 1 1 0
462
//       full: 0 h h h h h h h  // h represents the hash bits.
463
//   sentinel: 1 1 1 1 1 1 1 1
464
//
465
// These values are specifically tuned for SSE-flavored SIMD.
466
// The static_asserts below detail the source of these choices.
467
//
468
// We use an enum class so that when strict aliasing is enabled, the compiler
469
// knows ctrl_t doesn't alias other types.
470
enum class ctrl_t : int8_t {
471
  kEmpty = -128,   // 0b10000000
472
  kDeleted = -2,   // 0b11111110
473
  kSentinel = -1,  // 0b11111111
474
};
475
static_assert(
476
    (static_cast<int8_t>(ctrl_t::kEmpty) &
477
     static_cast<int8_t>(ctrl_t::kDeleted) &
478
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
479
    "Special markers need to have the MSB to make checking for them efficient");
480
static_assert(
481
    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
482
    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
483
    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
484
static_assert(
485
    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
486
    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
487
    "registers (pcmpeqd xmm, xmm)");
488
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
489
              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
490
              "existence efficient (psignb xmm, xmm)");
491
static_assert(
492
    (~static_cast<int8_t>(ctrl_t::kEmpty) &
493
     ~static_cast<int8_t>(ctrl_t::kDeleted) &
494
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
495
    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
496
    "shared by ctrl_t::kSentinel to make the scalar test for "
497
    "MaskEmptyOrDeleted() efficient");
498
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
499
              "ctrl_t::kDeleted must be -2 to make the implementation of "
500
              "ConvertSpecialToEmptyAndFullToDeleted efficient");
501
502
// See definition comment for why this is size 32.
503
ABSL_DLL extern const ctrl_t kEmptyGroup[32];
504
505
// Returns a pointer to a control byte group that can be used by empty tables.
506
104
inline ctrl_t* EmptyGroup() {
507
  // Const must be cast away here; no uses of this function will actually write
508
  // to it, because it is only used for empty tables.
509
104
  return const_cast<ctrl_t*>(kEmptyGroup + 16);
510
104
}
511
512
// Returns a pointer to a generation to use for an empty hashtable.
513
GenerationType* EmptyGeneration();
514
515
// Returns whether `generation` is a generation for an empty hashtable that
516
// could be returned by EmptyGeneration().
517
0
inline bool IsEmptyGeneration(const GenerationType* generation) {
518
0
  return *generation == SentinelEmptyGeneration();
519
0
}
520
521
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
522
// randomize insertion order within groups.
523
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
524
525
// Returns a per-table hash salt, which changes on resize. This gets mixed into
526
// H1 to randomize iteration order per-table.
527
//
528
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
529
// non-determinism of iteration order in most cases.
530
110
inline size_t PerTableSalt(const ctrl_t* ctrl) {
531
  // The low bits of the pointer have little or no entropy because of
532
  // alignment. We shift the pointer to try to use higher entropy bits. A
533
  // good number seems to be 12 bits, because that aligns with page size.
534
110
  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
535
110
}
536
// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
537
110
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
538
110
  return (hash >> 7) ^ PerTableSalt(ctrl);
539
110
}
540
541
// Extracts the H2 portion of a hash: the 7 bits not used for H1.
542
//
543
// These are used as an occupied control byte.
544
82
inline h2_t H2(size_t hash) { return hash & 0x7F; }
545
546
// Helpers for checking the state of a control byte.
547
20
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
548
124
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
549
8
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
550
0
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
551
552
#ifdef ABSL_INTERNAL_HAVE_SSE2
553
// Quick reference guide for intrinsics used below:
554
//
555
// * __m128i: An XMM (128-bit) word.
556
//
557
// * _mm_setzero_si128: Returns a zero vector.
558
// * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
559
//
560
// * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
561
// * _mm_and_si128:    Ands two i128s together.
562
// * _mm_or_si128:     Ors two i128s together.
563
// * _mm_andnot_si128: And-nots two i128s together.
564
//
565
// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
566
//                   filling each lane with 0x00 or 0xff.
567
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
568
//
569
// * _mm_loadu_si128:  Performs an unaligned load of an i128.
570
// * _mm_storeu_si128: Performs an unaligned store of an i128.
571
//
572
// * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
573
//                      argument if the corresponding lane of the second
574
//                      argument is positive, negative, or zero, respectively.
575
// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
576
//                      bitmask consisting of those bits.
577
// * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
578
//                      four bits of each i8 lane in the second argument as
579
//                      indices.
580
581
// https://github.com/abseil/abseil-cpp/issues/209
582
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
583
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
584
// Work around this by using the portable implementation of Group
585
// when using -funsigned-char under GCC.
586
50
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
587
#if defined(__GNUC__) && !defined(__clang__)
588
  if (std::is_unsigned<char>::value) {
589
    const __m128i mask = _mm_set1_epi8(0x80);
590
    const __m128i diff = _mm_subs_epi8(b, a);
591
    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
592
  }
593
#endif
594
50
  return _mm_cmpgt_epi8(a, b);
595
50
}
596
597
struct GroupSse2Impl {
598
  static constexpr size_t kWidth = 16;  // the number of slots per group
599
600
90
  explicit GroupSse2Impl(const ctrl_t* pos) {
601
90
    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
602
90
  }
603
604
  // Returns a bitmask representing the positions of slots that match hash.
605
40
  BitMask<uint32_t, kWidth> Match(h2_t hash) const {
606
40
    auto match = _mm_set1_epi8(static_cast<char>(hash));
607
40
    return BitMask<uint32_t, kWidth>(
608
40
        static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
609
40
  }
610
611
  // Returns a bitmask representing the positions of empty slots.
612
20
  NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
613
#ifdef ABSL_INTERNAL_HAVE_SSSE3
614
    // This only works because ctrl_t::kEmpty is -128.
615
    return NonIterableBitMask<uint32_t, kWidth>(
616
        static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
617
#else
618
20
    auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
619
20
    return NonIterableBitMask<uint32_t, kWidth>(
620
20
        static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
621
20
#endif
622
20
  }
623
624
  // Returns a bitmask representing the positions of empty or deleted slots.
625
50
  NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
626
50
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
627
50
    return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
628
50
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
629
50
  }
630
631
  // Returns the number of leading empty or deleted elements in the group.
632
0
  uint32_t CountLeadingEmptyOrDeleted() const {
633
0
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
634
0
    return TrailingZeros(static_cast<uint32_t>(
635
0
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
636
0
  }
637
638
0
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
639
0
    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
640
0
    auto x126 = _mm_set1_epi8(126);
641
#ifdef ABSL_INTERNAL_HAVE_SSSE3
642
    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
643
#else
644
0
    auto zero = _mm_setzero_si128();
645
0
    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
646
0
    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
647
0
#endif
648
0
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
649
0
  }
650
651
  __m128i ctrl;
652
};
653
#endif  // ABSL_INTERNAL_HAVE_SSE2
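
For the SSE2 path above, a self-contained sketch (assuming an x86 target with SSE2) of the cmpeq + movemask pattern that `GroupSse2Impl::Match()` uses to test a 7-bit tag against 16 control bytes at once.

#include <cstdint>
#include <cstdio>
#include <emmintrin.h>

int main() {
  int8_t ctrl[16];
  for (int i = 0; i < 16; ++i) ctrl[i] = -128;  // all kEmpty
  ctrl[5] = 0x2A;                               // one full slot, H2 tag 0x2A
  const __m128i group = _mm_loadu_si128(reinterpret_cast<const __m128i*>(ctrl));
  const __m128i tag = _mm_set1_epi8(0x2A);
  // Equal lanes become 0xff; movemask collects their sign bits into a bitmask.
  const uint32_t mask =
      static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(tag, group)));
  std::printf("match mask = 0x%04x\n", mask);   // prints 0x0020: only bit 5 set
  return mask == (1u << 5) ? 0 : 1;
}
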
654
655
#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
656
struct GroupAArch64Impl {
657
  static constexpr size_t kWidth = 8;
658
659
  explicit GroupAArch64Impl(const ctrl_t* pos) {
660
    ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
661
  }
662
663
  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
664
    uint8x8_t dup = vdup_n_u8(hash);
665
    auto mask = vceq_u8(ctrl, dup);
666
    return BitMask<uint64_t, kWidth, 3>(
667
        vget_lane_u64(vreinterpret_u64_u8(mask), 0));
668
  }
669
670
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
671
    uint64_t mask =
672
        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
673
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
674
                          vreinterpret_s8_u8(ctrl))),
675
                      0);
676
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
677
  }
678
679
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
680
    uint64_t mask =
681
        vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
682
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
683
                          vreinterpret_s8_u8(ctrl))),
684
                      0);
685
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
686
  }
687
688
  uint32_t CountLeadingEmptyOrDeleted() const {
689
    uint64_t mask =
690
        vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
691
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
692
                          vreinterpret_s8_u8(ctrl))),
693
                      0);
694
    // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
695
    // produced bitfield. We then count the number of trailing zeros.
696
    // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
697
    // so we should be fine.
698
    return static_cast<uint32_t>(countr_zero(mask)) >> 3;
699
  }
700
701
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
702
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
703
    constexpr uint64_t msbs = 0x8080808080808080ULL;
704
    constexpr uint64_t slsbs = 0x0202020202020202ULL;
705
    constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
706
    auto x = slsbs & (mask >> 6);
707
    auto res = (x + midbs) | msbs;
708
    little_endian::Store64(dst, res);
709
  }
710
711
  uint8x8_t ctrl;
712
};
713
#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
714
715
struct GroupPortableImpl {
716
  static constexpr size_t kWidth = 8;
717
718
  explicit GroupPortableImpl(const ctrl_t* pos)
719
0
      : ctrl(little_endian::Load64(pos)) {}
720
721
0
  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
722
0
    // For the technique, see:
723
0
    // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
724
0
    // (Determine if a word has a byte equal to n).
725
0
    //
726
0
    // Caveat: there are false positives but:
727
0
    // - they only occur if there is a real match
728
0
    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
729
0
    // - they will be handled gracefully by subsequent checks in code
730
0
    //
731
0
    // Example:
732
0
    //   v = 0x1716151413121110
733
0
    //   hash = 0x12
734
0
    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
735
0
    constexpr uint64_t msbs = 0x8080808080808080ULL;
736
0
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
737
0
    auto x = ctrl ^ (lsbs * hash);
738
0
    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
739
0
  }
740
741
0
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
742
0
    constexpr uint64_t msbs = 0x8080808080808080ULL;
743
0
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
744
0
                                                   msbs);
745
0
  }
746
747
0
  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
748
0
    constexpr uint64_t msbs = 0x8080808080808080ULL;
749
0
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
750
0
                                                   msbs);
751
0
  }
752
753
0
  uint32_t CountLeadingEmptyOrDeleted() const {
754
0
    // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
755
0
    // kDeleted. We lower all other bits and count the number of trailing zeros.
756
0
    constexpr uint64_t bits = 0x0101010101010101ULL;
757
0
    return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
758
0
                                 3);
759
0
  }
760
761
0
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
762
0
    constexpr uint64_t msbs = 0x8080808080808080ULL;
763
0
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
764
0
    auto x = ctrl & msbs;
765
0
    auto res = (~x + (x >> 7)) & ~lsbs;
766
0
    little_endian::Store64(dst, res);
767
0
  }
768
769
  uint64_t ctrl;
770
};
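
A standalone check of the byte-match bit trick used by `GroupPortableImpl::Match()` above, run on the exact example from its comment (v = 0x1716151413121110, hash = 0x12); the expected mask includes the documented false positive.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t msbs = 0x8080808080808080ULL;
  const uint64_t lsbs = 0x0101010101010101ULL;
  const uint64_t v = 0x1716151413121110ULL;  // the example group from the comment
  const uint64_t hash = 0x12;                // the tag being matched
  const uint64_t x = v ^ (lsbs * hash);      // a byte becomes 0 where it equals hash
  const uint64_t match = (x - lsbs) & ~x & msbs;
  std::printf("match = 0x%016llx\n", (unsigned long long)match);
  // Bit 23 marks the true match (byte 2 == 0x12); bit 31 is the documented
  // false positive on the neighboring byte 0x13, filtered out later by `==`.
  return match == 0x0000000080800000ULL ? 0 : 1;
}
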
771
772
#ifdef ABSL_INTERNAL_HAVE_SSE2
773
using Group = GroupSse2Impl;
774
using GroupEmptyOrDeleted = GroupSse2Impl;
775
#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
776
using Group = GroupAArch64Impl;
777
// For Aarch64, we use the portable implementation for counting and masking
778
// empty or deleted group elements. This is to avoid the latency of moving
779
// between data GPRs and Neon registers when it does not provide a benefit.
780
// Using Neon is profitable when we call Match(), but is not when we don't,
781
// which is the case when we do *EmptyOrDeleted operations. It is difficult to
782
// make a similar approach beneficial on other architectures such as x86 since
783
// they have much lower GPR <-> vector register transfer latency and 16-wide
784
// Groups.
785
using GroupEmptyOrDeleted = GroupPortableImpl;
786
#else
787
using Group = GroupPortableImpl;
788
using GroupEmptyOrDeleted = GroupPortableImpl;
789
#endif
790
791
// When there is an insertion with no reserved growth, we rehash with
792
// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
793
// constant divided by capacity ensures that inserting N elements is still O(N)
794
// in the average case. Using the constant 16 means that we expect to rehash ~8
795
// times more often than when generations are disabled. We are adding expected
796
// rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
797
// 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
798
0
inline size_t RehashProbabilityConstant() { return 16; }
799
800
class CommonFieldsGenerationInfoEnabled {
801
  // A sentinel value for reserved_growth_ indicating that we just ran out of
802
  // reserved growth on the last insertion. When reserve is called and then
803
  // insertions take place, reserved_growth_'s state machine is N, ..., 1,
804
  // kReservedGrowthJustRanOut, 0.
805
  static constexpr size_t kReservedGrowthJustRanOut =
806
      (std::numeric_limits<size_t>::max)();
807
808
 public:
809
  CommonFieldsGenerationInfoEnabled() = default;
810
  CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
811
      : reserved_growth_(that.reserved_growth_),
812
        reservation_size_(that.reservation_size_),
813
0
        generation_(that.generation_) {
814
0
    that.reserved_growth_ = 0;
815
0
    that.reservation_size_ = 0;
816
0
    that.generation_ = EmptyGeneration();
817
0
  }
818
  CommonFieldsGenerationInfoEnabled& operator=(
819
      CommonFieldsGenerationInfoEnabled&&) = default;
820
821
  // Whether we should rehash on insert in order to detect bugs of using invalid
822
  // references. We rehash on the first insertion after reserved_growth_ reaches
823
  // 0 after a call to reserve. We also do a rehash with low probability
824
  // whenever reserved_growth_ is zero.
825
  bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
826
                                                 size_t capacity) const;
827
0
  void maybe_increment_generation_on_insert() {
828
0
    if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
829
0
830
0
    if (reserved_growth_ > 0) {
831
0
      if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
832
0
    } else {
833
0
      *generation_ = NextGeneration(*generation_);
834
0
    }
835
0
  }
836
0
  void reset_reserved_growth(size_t reservation, size_t size) {
837
0
    reserved_growth_ = reservation - size;
838
0
  }
839
0
  size_t reserved_growth() const { return reserved_growth_; }
840
0
  void set_reserved_growth(size_t r) { reserved_growth_ = r; }
841
0
  size_t reservation_size() const { return reservation_size_; }
842
0
  void set_reservation_size(size_t r) { reservation_size_ = r; }
843
0
  GenerationType generation() const { return *generation_; }
844
0
  void set_generation(GenerationType g) { *generation_ = g; }
845
0
  GenerationType* generation_ptr() const { return generation_; }
846
0
  void set_generation_ptr(GenerationType* g) { generation_ = g; }
847
848
 private:
849
  // The number of insertions remaining that are guaranteed to not rehash due to
850
  // a prior call to reserve. Note: we store reserved growth in addition to
851
  // reservation size because calls to erase() decrease size_ but don't decrease
852
  // reserved growth.
853
  size_t reserved_growth_ = 0;
854
  // The maximum argument to reserve() since the container was cleared. We need
855
  // to keep track of this, in addition to reserved growth, because we reset
856
  // reserved growth to this when erase(begin(), end()) is called.
857
  size_t reservation_size_ = 0;
858
  // Pointer to the generation counter, which is used to validate iterators and
859
  // is stored in the backing array between the control bytes and the slots.
860
  // Note that we can't store the generation inside the container itself and
861
  // keep a pointer to the container in the iterators because iterators must
862
  // remain valid when the container is moved.
863
  // Note: we could derive this pointer from the control pointer, but it makes
864
  // the code more complicated, and there's a benefit in having the sizes of
865
  // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
866
  // which is that tests are less likely to rely on the size remaining the same.
867
  GenerationType* generation_ = EmptyGeneration();
868
};
869
870
class CommonFieldsGenerationInfoDisabled {
871
 public:
872
  CommonFieldsGenerationInfoDisabled() = default;
873
  CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
874
      default;
875
  CommonFieldsGenerationInfoDisabled& operator=(
876
      CommonFieldsGenerationInfoDisabled&&) = default;
877
878
20
  bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
879
20
    return false;
880
20
  }
881
20
  void maybe_increment_generation_on_insert() {}
882
0
  void reset_reserved_growth(size_t, size_t) {}
883
0
  size_t reserved_growth() const { return 0; }
884
0
  void set_reserved_growth(size_t) {}
885
0
  size_t reservation_size() const { return 0; }
886
0
  void set_reservation_size(size_t) {}
887
8
  GenerationType generation() const { return 0; }
888
8
  void set_generation(GenerationType) {}
889
80
  GenerationType* generation_ptr() const { return nullptr; }
890
8
  void set_generation_ptr(GenerationType*) {}
891
};
892
893
class HashSetIteratorGenerationInfoEnabled {
894
 public:
895
  HashSetIteratorGenerationInfoEnabled() = default;
896
  explicit HashSetIteratorGenerationInfoEnabled(
897
      const GenerationType* generation_ptr)
898
0
      : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
899
900
0
  GenerationType generation() const { return generation_; }
901
0
  void reset_generation() { generation_ = *generation_ptr_; }
902
0
  const GenerationType* generation_ptr() const { return generation_ptr_; }
903
0
  void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
904
905
 private:
906
  const GenerationType* generation_ptr_ = EmptyGeneration();
907
  GenerationType generation_ = *generation_ptr_;
908
};
909
910
class HashSetIteratorGenerationInfoDisabled {
911
 public:
912
  HashSetIteratorGenerationInfoDisabled() = default;
913
80
  explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
914
915
60
  GenerationType generation() const { return 0; }
916
0
  void reset_generation() {}
917
100
  const GenerationType* generation_ptr() const { return nullptr; }
918
0
  void set_generation_ptr(const GenerationType*) {}
919
};
920
921
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
922
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
923
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
924
#else
925
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
926
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
927
#endif
928
929
// Returns whether `n` is a valid capacity (i.e., number of slots).
930
//
931
// A valid capacity is a non-zero integer `2^m - 1`.
932
84
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
933
934
// Computes the offset of the control bytes from the start of the backing allocation.
935
// infoz and growth_left are stored at the beginning of the backing array.
936
44
inline size_t ControlOffset(bool has_infoz) {
937
44
  return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t);
938
44
}
939
940
// Returns the number of "cloned control bytes".
941
//
942
// This is the number of control bytes that are present both at the beginning
943
// of the control byte array and at the end, such that we can create a
944
// `Group::kWidth`-width probe window starting from any control byte.
945
122
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
946
947
// Given the capacity of a table, computes the offset (from the start of the
948
// backing allocation) of the generation counter (if it exists).
949
30
inline size_t GenerationOffset(size_t capacity, bool has_infoz) {
950
30
  assert(IsValidCapacity(capacity));
951
0
  const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
952
30
  return ControlOffset(has_infoz) + num_control_bytes;
953
30
}
954
955
// Given the capacity of a table, computes the offset (from the start of the
956
// backing allocation) at which the slots begin.
957
22
inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) {
958
22
  assert(IsValidCapacity(capacity));
959
0
  return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() +
960
22
          slot_align - 1) &
961
22
         (~slot_align + 1);
962
22
}
963
964
// Given the capacity of a table, computes the total size of the backing
965
// array.
966
inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align,
967
14
                        bool has_infoz) {
968
14
  return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
969
14
}
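
As a rough cross-check of the layout arithmetic above (not part of the header), the offsets can be computed by hand for a small table. The numbers below assume 16-wide groups, 8-byte slots and alignment, `has_infoz == false`, and generations disabled, so the only field before the control bytes is `growth_left`.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Hand-rolled mirror of ControlOffset/GenerationOffset/SlotOffset/AllocSize
// under the stated assumptions; not the functions from this header.
int main() {
  const size_t capacity = 15;  // 2^4 - 1, a valid capacity
  const size_t slot_size = 8, slot_align = 8, kWidth = 16;
  const size_t control_offset = sizeof(size_t);            // growth_left only
  const size_t num_control = capacity + 1 + (kWidth - 1);  // ctrl + sentinel + clones
  const size_t generation_offset = control_offset + num_control;
  const size_t slot_offset =
      (generation_offset + slot_align - 1) & ~(slot_align - 1);
  const size_t alloc_size = slot_offset + capacity * slot_size;
  std::printf("ctrl at %zu, slots at %zu, total %zu bytes\n",
              control_offset, slot_offset, alloc_size);
  assert(alloc_size == 160);  // 8 + 31 = 39, rounded up to 40, plus 15 * 8
  return 0;
}
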
970
971
// CommonFields holds the fields in raw_hash_set that do not depend
972
// on template parameters. This allows us to conveniently pass all
973
// of this state to helper functions as a single argument.
974
class CommonFields : public CommonFieldsGenerationInfo {
975
 public:
976
2
  CommonFields() = default;
977
978
  // Not copyable
979
  CommonFields(const CommonFields&) = delete;
980
  CommonFields& operator=(const CommonFields&) = delete;
981
982
  // Movable
983
  CommonFields(CommonFields&& that)
984
      : CommonFieldsGenerationInfo(
985
            std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
986
        // Explicitly copying fields into "this" and then resetting "that"
987
        // fields generates less code than calling absl::exchange per field.
988
        control_(that.control()),
989
        slots_(that.slot_array()),
990
        capacity_(that.capacity()),
991
2
        size_(that.size_) {
992
2
    that.set_control(EmptyGroup());
993
2
    that.set_slots(nullptr);
994
2
    that.set_capacity(0);
995
2
    that.size_ = 0;
996
2
  }
997
  CommonFields& operator=(CommonFields&&) = default;
998
999
476
  ctrl_t* control() const { return control_; }
1000
10
  void set_control(ctrl_t* c) { control_ = c; }
1001
0
  void* backing_array_start() const {
1002
    // growth_left (and maybe infoz) is stored before control bytes.
1003
0
    assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
1004
0
    return control() - ControlOffset(has_infoz());
1005
0
  }
1006
1007
  // Note: we can't use slots() because Qt defines "slots" as a macro.
1008
196
  void* slot_array() const { return slots_; }
1009
10
  void set_slots(void* s) { slots_ = s; }
1010
1011
  // The number of filled slots.
1012
36
  size_t size() const { return size_ >> HasInfozShift(); }
1013
0
  void set_size(size_t s) {
1014
0
    size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
1015
0
  }
1016
20
  void increment_size() {
1017
20
    assert(size() < capacity());
1018
0
    size_ += size_t{1} << HasInfozShift();
1019
20
  }
1020
0
  void decrement_size() {
1021
0
    assert(size() > 0);
1022
0
    size_ -= size_t{1} << HasInfozShift();
1023
0
  }
1024
1025
  // The total number of available slots.
1026
272
  size_t capacity() const { return capacity_; }
1027
10
  void set_capacity(size_t c) {
1028
10
    assert(c == 0 || IsValidCapacity(c));
1029
0
    capacity_ = c;
1030
10
  }
1031
1032
  // The number of slots we can still fill without needing to rehash.
1033
  // This is stored in the heap allocation before the control bytes.
1034
40
  size_t growth_left() const {
1035
40
    const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
1036
40
    assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
1037
0
    return *gl_ptr;
1038
40
  }
1039
28
  void set_growth_left(size_t gl) {
1040
28
    size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
1041
28
    assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
1042
0
    *gl_ptr = gl;
1043
28
  }
1044
1045
42
  bool has_infoz() const {
1046
42
    return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
1047
42
  }
1048
8
  void set_has_infoz(bool has_infoz) {
1049
8
    size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
1050
8
  }
1051
1052
34
  HashtablezInfoHandle infoz() {
1053
34
    return has_infoz()
1054
34
               ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
1055
34
               : HashtablezInfoHandle();
1056
34
  }
1057
0
  void set_infoz(HashtablezInfoHandle infoz) {
1058
0
    assert(has_infoz());
1059
0
    *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
1060
0
  }
1061
1062
20
  bool should_rehash_for_bug_detection_on_insert() const {
1063
20
    return CommonFieldsGenerationInfo::
1064
20
        should_rehash_for_bug_detection_on_insert(control(), capacity());
1065
20
  }
1066
0
  void reset_reserved_growth(size_t reservation) {
1067
0
    CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
1068
0
  }
1069
1070
  // The size of the backing array allocation.
1071
0
  size_t alloc_size(size_t slot_size, size_t slot_align) const {
1072
0
    return AllocSize(capacity(), slot_size, slot_align, has_infoz());
1073
0
  }
1074
1075
  // Returns the number of control bytes set to kDeleted. For testing only.
1076
0
  size_t TombstonesCount() const {
1077
0
    return static_cast<size_t>(
1078
0
        std::count(control(), control() + capacity(), ctrl_t::kDeleted));
1079
0
  }
1080
1081
 private:
1082
  // We store the has_infoz bit in the lowest bit of size_.
1083
106
  static constexpr size_t HasInfozShift() { return 1; }
1084
42
  static constexpr size_t HasInfozMask() {
1085
42
    return (size_t{1} << HasInfozShift()) - 1;
1086
42
  }
1087
1088
  // TODO(b/182800944): Investigate removing some of these fields:
1089
  // - control/slots can be derived from each other
1090
1091
  // The control bytes (and, also, a pointer near to the base of the backing
1092
  // array).
1093
  //
1094
  // This contains `capacity + 1 + NumClonedBytes()` entries, even
1095
  // when the table is empty (hence EmptyGroup).
1096
  //
1097
  // Note that growth_left is stored immediately before this pointer.
1098
  ctrl_t* control_ = EmptyGroup();
1099
1100
  // The beginning of the slots, located at `SlotOffset()` bytes after
1101
  // `control`. May be null for empty tables.
1102
  void* slots_ = nullptr;
1103
1104
  // The number of slots in the backing array. This is always 2^N-1 for an
1105
  // integer N. NOTE: we tried experimenting with compressing the capacity and
1106
  // storing it together with size_: (a) using 6 bits to store the corresponding
1107
  // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
1108
  // size_ and storing size in the low bits. Both of these experiments were
1109
  // regressions, presumably because we need capacity to do find operations.
1110
  size_t capacity_ = 0;
1111
1112
  // The size and also has one bit that stores whether we have infoz.
1113
  size_t size_ = 0;
1114
};
1115
1116
template <class Policy, class Hash, class Eq, class Alloc>
1117
class raw_hash_set;
1118
1119
// Returns the next valid capacity after `n`.
1120
8
inline size_t NextCapacity(size_t n) {
1121
8
  assert(IsValidCapacity(n) || n == 0);
1122
0
  return n * 2 + 1;
1123
8
}
1124
1125
// Applies the following mapping to every byte in the control array:
1126
//   * kDeleted -> kEmpty
1127
//   * kEmpty -> kEmpty
1128
//   * _ -> kDeleted
1129
// PRECONDITION:
1130
//   IsValidCapacity(capacity)
1131
//   ctrl[capacity] == ctrl_t::kSentinel
1132
//   ctrl[i] != ctrl_t::kSentinel for all i < capacity
1133
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
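
A standalone check (not calling the function declared above) that the mapping listed in the comment is what the bit-twiddling form used by `GroupPortableImpl::ConvertSpecialToEmptyAndFullToDeleted()` produces, applied here to one byte at a time.

#include <cstdint>

// Applies res = (~x + (x >> 7)) & ~lsbs with x = ctrl & msbs, per byte, and
// checks: kDeleted (0xFE) -> kEmpty (0x80), kEmpty -> kEmpty, full -> kDeleted.
int main() {
  auto convert = [](uint8_t c) -> uint8_t {
    const uint8_t x = c & 0x80;
    return static_cast<uint8_t>((~x + (x >> 7)) & 0xFE);
  };
  if (convert(0xFE) != 0x80) return 1;  // deleted -> empty
  if (convert(0x80) != 0x80) return 1;  // empty   -> empty
  if (convert(0x2A) != 0xFE) return 1;  // full    -> deleted
  return 0;
}
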
1134
1135
// Converts `n` into the next valid capacity, per `IsValidCapacity`.
1136
0
inline size_t NormalizeCapacity(size_t n) {
1137
0
  return n ? ~size_t{} >> countl_zero(n) : 1;
1138
0
}
1139
1140
// General notes on capacity/growth methods below:
1141
// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
1142
//   average of two empty slots per group.
1143
// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
1144
// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
1145
//   never need to probe (the whole table fits in one group) so we don't need a
1146
//   load factor less than 1.
1147
1148
// Given `capacity`, applies the load factor; i.e., it returns the maximum
1149
// number of values we should put into the table before a resizing rehash.
1150
8
inline size_t CapacityToGrowth(size_t capacity) {
1151
8
  assert(IsValidCapacity(capacity));
1152
  // `capacity*7/8`
1153
8
  if (Group::kWidth == 8 && capacity == 7) {
1154
    // x-x/8 does not work when x==7.
1155
0
    return 6;
1156
0
  }
1157
8
  return capacity - capacity / 8;
1158
8
}
1159
1160
// Given `growth`, "unapplies" the load factor to find how large the capacity
1161
// should be to stay within the load factor.
1162
//
1163
// This might not be a valid capacity and `NormalizeCapacity()` should be
1164
// called on this.
1165
0
inline size_t GrowthToLowerboundCapacity(size_t growth) {
1166
0
  // `growth*8/7`
1167
0
  if (Group::kWidth == 8 && growth == 7) {
1168
0
    // x+(x-1)/7 does not work when x==7.
1169
0
    return 8;
1170
0
  }
1171
0
  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
1172
0
}
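
A small standalone sanity check of the 7/8 load-factor math above. The three helpers are re-implemented here in toy form for a 16-wide group (they are not the functions from this header) just to show how growth and capacity round-trip.

#include <cassert>
#include <cstddef>

// Toy mirrors of CapacityToGrowth / GrowthToLowerboundCapacity /
// NormalizeCapacity, valid for Group::kWidth == 16 and growth > 0.
size_t ToGrowth(size_t capacity) { return capacity - capacity / 8; }
size_t ToLowerboundCapacity(size_t growth) { return growth + (growth - 1) / 7; }
size_t Normalize(size_t n) {
  size_t c = 1;
  while (c < n) c = c * 2 + 1;  // next value of the form 2^m - 1
  return c;
}

int main() {
  assert(ToGrowth(15) == 14);  // a 15-slot table holds 14 values before growing
  assert(ToGrowth(63) == 56);  // 7/8 of 63, rounded the same way as the header
  // Asking for room for 14 elements lands back on capacity 15.
  assert(Normalize(ToLowerboundCapacity(14)) == 15);
  return 0;
}
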
1173
1174
template <class InputIter>
1175
size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
1176
                                     size_t bucket_count) {
1177
  if (bucket_count != 0) {
1178
    return bucket_count;
1179
  }
1180
  using InputIterCategory =
1181
      typename std::iterator_traits<InputIter>::iterator_category;
1182
  if (std::is_base_of<std::random_access_iterator_tag,
1183
                      InputIterCategory>::value) {
1184
    return GrowthToLowerboundCapacity(
1185
        static_cast<size_t>(std::distance(first, last)));
1186
  }
1187
  return 0;
1188
}
1189
1190
0
constexpr bool SwisstableDebugEnabled() {
1191
0
#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
1192
0
    ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
1193
0
  return true;
1194
0
#else
1195
0
  return false;
1196
0
#endif
1197
0
}
1198
1199
inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
1200
                         const GenerationType* generation_ptr,
1201
20
                         const char* operation) {
1202
20
  if (!SwisstableDebugEnabled()) return;
1203
  // `SwisstableDebugEnabled()` is also true for release builds with hardening
1204
  // enabled. To minimize their impact in those builds:
1205
  // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1206
  // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1207
  //   the chances that the hot paths will be inlined.
1208
20
  if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
1209
0
    ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
1210
0
  }
1211
20
  if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
1212
0
    ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
1213
0
                 operation);
1214
0
  }
1215
20
  if (SwisstableGenerationsEnabled()) {
1216
0
    if (generation != *generation_ptr) {
1217
0
      ABSL_INTERNAL_LOG(FATAL,
1218
0
                        std::string(operation) +
1219
0
                            " called on invalid iterator. The table could have "
1220
0
                            "rehashed since this iterator was initialized.");
1221
0
    }
1222
0
    if (!IsFull(*ctrl)) {
1223
0
      ABSL_INTERNAL_LOG(
1224
0
          FATAL,
1225
0
          std::string(operation) +
1226
0
              " called on invalid iterator. The element was likely erased.");
1227
0
    }
1228
20
  } else {
1229
20
    if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
1230
0
      ABSL_RAW_LOG(
1231
0
          FATAL,
1232
0
          "%s called on invalid iterator. The element might have been erased "
1233
0
          "or the table might have rehashed. Consider running with "
1234
0
          "--config=asan to diagnose rehashing issues.",
1235
0
          operation);
1236
0
    }
1237
20
  }
1238
20
}
1239
1240
// Note that for comparisons, null/end iterators are valid.
1241
inline void AssertIsValidForComparison(const ctrl_t* ctrl,
1242
                                       GenerationType generation,
1243
40
                                       const GenerationType* generation_ptr) {
1244
40
  if (!SwisstableDebugEnabled()) return;
1245
40
  const bool ctrl_is_valid_for_comparison =
1246
40
      ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
1247
40
  if (SwisstableGenerationsEnabled()) {
1248
0
    if (generation != *generation_ptr) {
1249
0
      ABSL_INTERNAL_LOG(FATAL,
1250
0
                        "Invalid iterator comparison. The table could have "
1251
0
                        "rehashed since this iterator was initialized.");
1252
0
    }
1253
0
    if (!ctrl_is_valid_for_comparison) {
1254
0
      ABSL_INTERNAL_LOG(
1255
0
          FATAL, "Invalid iterator comparison. The element was likely erased.");
1256
0
    }
1257
40
  } else {
1258
40
    ABSL_HARDENING_ASSERT(
1259
40
        ctrl_is_valid_for_comparison &&
1260
40
        "Invalid iterator comparison. The element might have been erased or "
1261
40
        "the table might have rehashed. Consider running with --config=asan to "
1262
40
        "diagnose rehashing issues.");
1263
40
  }
1264
40
}
1265
1266
// If the two iterators come from the same container, then their pointers will
1267
// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
1268
// Note: we take slots by reference so that it's not UB if they're uninitialized
1269
// as long as we don't read them (when ctrl is null).
1270
inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
1271
                                      const ctrl_t* ctrl_b,
1272
                                      const void* const& slot_a,
1273
20
                                      const void* const& slot_b) {
1274
  // If either control byte is null, then we can't tell.
1275
20
  if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
1276
20
  const void* low_slot = slot_a;
1277
20
  const void* hi_slot = slot_b;
1278
20
  if (ctrl_a > ctrl_b) {
1279
0
    std::swap(ctrl_a, ctrl_b);
1280
0
    std::swap(low_slot, hi_slot);
1281
0
  }
1282
20
  return ctrl_b < low_slot && low_slot <= hi_slot;
1283
20
}
1284
1285
// Asserts that two iterators come from the same container.
1286
// Note: we take slots by reference so that it's not UB if they're uninitialized
1287
// as long as we don't read them (when ctrl is null).
1288
inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
1289
                                const void* const& slot_a,
1290
                                const void* const& slot_b,
1291
                                const GenerationType* generation_ptr_a,
1292
20
                                const GenerationType* generation_ptr_b) {
1293
20
  if (!SwisstableDebugEnabled()) return;
1294
  // `SwisstableDebugEnabled()` is also true for release builds with hardening
1295
  // enabled. To minimize their impact in those builds:
1296
  // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1297
  // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1298
  //   the chances that the hot paths will be inlined.
1299
20
  const bool a_is_default = ctrl_a == EmptyGroup();
1300
20
  const bool b_is_default = ctrl_b == EmptyGroup();
1301
20
  if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) {
1302
0
    ABSL_RAW_LOG(
1303
0
        FATAL,
1304
0
        "Invalid iterator comparison. Comparing default-constructed iterator "
1305
0
        "with non-default-constructed iterator.");
1306
0
  }
1307
20
  if (a_is_default && b_is_default) return;
1308
1309
20
  if (SwisstableGenerationsEnabled()) {
1310
0
    if (generation_ptr_a == generation_ptr_b) return;
1311
0
    const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
1312
0
    const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
1313
0
    if (a_is_empty != b_is_empty) {
1314
0
      ABSL_INTERNAL_LOG(FATAL,
1315
0
                        "Invalid iterator comparison. Comparing iterator from "
1316
0
                        "a non-empty hashtable with an iterator from an empty "
1317
0
                        "hashtable.");
1318
0
    }
1319
0
    if (a_is_empty && b_is_empty) {
1320
0
      ABSL_INTERNAL_LOG(FATAL,
1321
0
                        "Invalid iterator comparison. Comparing iterators from "
1322
0
                        "different empty hashtables.");
1323
0
    }
1324
0
    const bool a_is_end = ctrl_a == nullptr;
1325
0
    const bool b_is_end = ctrl_b == nullptr;
1326
0
    if (a_is_end || b_is_end) {
1327
0
      ABSL_INTERNAL_LOG(FATAL,
1328
0
                        "Invalid iterator comparison. Comparing iterator with "
1329
0
                        "an end() iterator from a different hashtable.");
1330
0
    }
1331
0
    ABSL_INTERNAL_LOG(FATAL,
1332
0
                      "Invalid iterator comparison. Comparing non-end() "
1333
0
                      "iterators from different hashtables.");
1334
20
  } else {
1335
20
    ABSL_HARDENING_ASSERT(
1336
20
        AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
1337
20
        "Invalid iterator comparison. The iterators may be from different "
1338
20
        "containers or the container might have rehashed. Consider running "
1339
20
        "with --config=asan to diagnose rehashing issues.");
1340
20
  }
1341
20
}
1342
1343
struct FindInfo {
1344
  size_t offset;
1345
  size_t probe_length;
1346
};
1347
1348
// Whether a table is "small". A small table fits entirely into a probing
1349
// group, i.e., has a capacity < `Group::kWidth`.
1350
//
1351
// In small mode we are able to use the whole capacity. The extra control
1352
// bytes give us at least one "empty" control byte to stop the iteration.
1353
// This is important to make 1 a valid capacity.
1354
//
1355
// In small mode only the first `capacity` control bytes after the sentinel
1356
// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
1357
// represent a real slot. This is important to take into account on
1358
// `find_first_non_full()`, where we never try
1359
// `ShouldInsertBackwards()` for small tables.
1360
50
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
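// Worked example (assuming SSE2 groups, i.e. Group::kWidth == 16): since
// normalized capacities always have the form 2^n - 1, the capacities that
// satisfy `capacity < 15` are 1, 3 and 7, so only those tables run in small
// mode:
//
//   is_small(1)  -> true     is_small(7)  -> true
//   is_small(15) -> false    is_small(31) -> false
//
// With 8-wide portable groups the small capacities are just 1 and 3.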
1361
1362
// Begins a probing operation on `common.control`, using `hash`.
1363
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
1364
90
                                      size_t hash) {
1365
90
  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
1366
90
}
1367
90
inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
1368
90
  return probe(common.control(), common.capacity(), hash);
1369
90
}
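// Illustrative note (assuming `probe_seq` advances by one additional
// Group::kWidth-sized step per iteration, i.e. triangular probing): with
// Group::kWidth == 16 and capacity == 63, a hash whose H1 maps to offset
// `o` inspects 16-byte windows starting at
//
//   o, o + 16, o + 48, o + 96   (all modulo 64)
//
// which together cover every control byte once before the pattern repeats.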
1370
1371
// Probes an array of control bits using a probe sequence derived from `hash`,
1372
// and returns the offset corresponding to the first deleted or empty slot.
1373
//
1374
// Behavior when the entire table is full is undefined.
1375
//
1376
// NOTE: this function must work with tables having both empty and deleted
1377
// slots in the same group. Such tables appear during `erase()`.
1378
template <typename = void>
1379
50
inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
1380
50
  auto seq = probe(common, hash);
1381
50
  const ctrl_t* ctrl = common.control();
1382
50
  while (true) {
1383
50
    GroupEmptyOrDeleted g{ctrl + seq.offset()};
1384
50
    auto mask = g.MaskEmptyOrDeleted();
1385
50
    if (mask) {
1386
50
#if !defined(NDEBUG)
1387
      // We want to add entropy even when ASLR is not enabled.
1388
      // In debug builds we will randomly insert in either the front or back of
1389
      // the group.
1390
      // TODO(kfm,sbenza): revisit after we do unconditional mixing
1391
50
      if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
1392
10
        return {seq.offset(mask.HighestBitSet()), seq.index()};
1393
10
      }
1394
40
#endif
1395
40
      return {seq.offset(mask.LowestBitSet()), seq.index()};
1396
50
    }
1397
0
    seq.next();
1398
0
    assert(seq.index() <= common.capacity() && "full table!");
1399
0
  }
1400
50
}
1401
1402
// Extern template for the inline function keeps the possibility of inlining.
1403
// When the compiler decides not to inline, no symbols will be added to the
1404
// corresponding translation unit.
1405
extern template FindInfo find_first_non_full(const CommonFields&, size_t);
1406
1407
// Non-inlined version of find_first_non_full for use in less
1408
// performance critical routines.
1409
FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
1410
1411
8
inline void ResetGrowthLeft(CommonFields& common) {
1412
8
  common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
1413
8
}
1414
1415
// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
1416
// array as empty.
1417
8
inline void ResetCtrl(CommonFields& common, size_t slot_size) {
1418
8
  const size_t capacity = common.capacity();
1419
8
  ctrl_t* ctrl = common.control();
1420
8
  std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
1421
8
              capacity + 1 + NumClonedBytes());
1422
8
  ctrl[capacity] = ctrl_t::kSentinel;
1423
8
  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
1424
8
  ResetGrowthLeft(common);
1425
8
}
1426
1427
// Sets `ctrl[i]` to `h`.
1428
//
1429
// Unlike setting it directly, this function will perform bounds checks and
1430
// mirror the value to the cloned tail if necessary.
1431
inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
1432
42
                    size_t slot_size) {
1433
42
  const size_t capacity = common.capacity();
1434
42
  assert(i < capacity);
1435
1436
0
  auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
1437
42
  if (IsFull(h)) {
1438
42
    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
1439
42
  } else {
1440
0
    SanitizerPoisonMemoryRegion(slot_i, slot_size);
1441
0
  }
1442
1443
42
  ctrl_t* ctrl = common.control();
1444
42
  ctrl[i] = h;
1445
42
  ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
1446
42
}
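// Worked example for the mirroring expression above (assuming
// Group::kWidth == 16, so NumClonedBytes() == 15), with capacity == 31:
//
//   i == 3:  ((3 - 15) & 31) + (15 & 31) == 20 + 15 == 35 == capacity + 1 + 3,
//            so the clone of slot 3 in the cloned tail is updated as well.
//   i == 20: ((20 - 15) & 31) + (15 & 31) == 5 + 15 == 20,
//            so the second store rewrites the same byte and is harmless.
//
// A single branch-free expression thus covers both the mirrored prefix and
// the rest of the table.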
1447
1448
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
1449
inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
1450
42
                    size_t slot_size) {
1451
42
  SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
1452
42
}
1453
1454
// growth_left (which is a size_t) is stored with the backing array.
1455
0
constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
1456
0
  return (std::max)(align_of_slot, alignof(size_t));
1457
0
}
1458
1459
template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
1460
8
ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
1461
8
  assert(c.capacity());
1462
  // Folks with custom allocators often make unwarranted assumptions about the
1463
  // behavior of their classes vis-a-vis trivial destructability and what
1464
  // calls they will or won't make.  Avoid sampling for people with custom
1465
  // allocators to get us out of this mess.  This is not a hard guarantee but
1466
  // a workaround while we plan the exact guarantee we want to provide.
1467
0
  const size_t sample_size =
1468
8
      (std::is_same<Alloc, std::allocator<char>>::value &&
1469
8
       c.slot_array() == nullptr)
1470
8
          ? SizeOfSlot
1471
8
          : 0;
1472
8
  HashtablezInfoHandle infoz =
1473
8
      sample_size > 0 ? Sample(sample_size) : c.infoz();
1474
1475
8
  const bool has_infoz = infoz.IsSampled();
1476
8
  const size_t cap = c.capacity();
1477
8
  const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz);
1478
8
  char* mem = static_cast<char*>(
1479
8
      Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
1480
8
  const GenerationType old_generation = c.generation();
1481
8
  c.set_generation_ptr(reinterpret_cast<GenerationType*>(
1482
8
      mem + GenerationOffset(cap, has_infoz)));
1483
8
  c.set_generation(NextGeneration(old_generation));
1484
8
  c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset(has_infoz)));
1485
8
  c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz));
1486
8
  ResetCtrl(c, SizeOfSlot);
1487
8
  c.set_has_infoz(has_infoz);
1488
8
  if (has_infoz) {
1489
0
    infoz.RecordStorageChanged(c.size(), cap);
1490
0
    c.set_infoz(infoz);
1491
0
  }
1492
8
}
1493
1494
// PolicyFunctions bundles together some information for a particular
1495
// raw_hash_set<T, ...> instantiation. This information is passed to
1496
// type-erased functions that want to do small amounts of type-specific
1497
// work.
1498
struct PolicyFunctions {
1499
  size_t slot_size;
1500
1501
  // Returns the hash of the pointed-to slot.
1502
  size_t (*hash_slot)(void* set, void* slot);
1503
1504
  // Transfer the contents of src_slot to dst_slot.
1505
  void (*transfer)(void* set, void* dst_slot, void* src_slot);
1506
1507
  // Deallocate the backing store from common.
1508
  void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
1509
};
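// Illustrative sketch (with hypothetical member names) of how an
// instantiation could populate PolicyFunctions for the type-erased helpers
// declared below, e.g. for a trivially relocatable slot type that uses
// std::allocator:
//
//   static const PolicyFunctions kPolicy = {
//       sizeof(slot_type),
//       &HashSlotFn,                              // hypothetical hash adapter
//       &TransferRelocatable<sizeof(slot_type)>,  // declared below
//       &DeallocateStandard<alignof(slot_type)>,  // declared below
//   };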
1510
1511
// ClearBackingArray clears the backing array, either modifying it in place,
1512
// or creating a new one based on the value of "reuse".
1513
// REQUIRES: c.capacity > 0
1514
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
1515
                       bool reuse);
1516
1517
// Type-erased version of raw_hash_set::erase_meta_only.
1518
void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
1519
1520
// Function to place in PolicyFunctions::dealloc for raw_hash_sets
1521
// that are using std::allocator. This allows us to share the same
1522
// function body for raw_hash_set instantiations that have the
1523
// same slot alignment.
1524
template <size_t AlignOfSlot>
1525
ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
1526
0
                                                const PolicyFunctions& policy) {
1527
  // Unpoison before returning the memory to the allocator.
1528
0
  SanitizerUnpoisonMemoryRegion(common.slot_array(),
1529
0
                                policy.slot_size * common.capacity());
1530
1531
0
  std::allocator<char> alloc;
1532
0
  common.infoz().Unregister();
1533
0
  Deallocate<BackingArrayAlignment(AlignOfSlot)>(
1534
0
      &alloc, common.backing_array_start(),
1535
0
      common.alloc_size(policy.slot_size, AlignOfSlot));
1536
0
}
1537
1538
// For trivially relocatable types we use memcpy directly. This allows us to
1539
// share the same function body for raw_hash_set instantiations that have the
1540
// same slot size as long as they are relocatable.
1541
template <size_t SizeOfSlot>
1542
0
ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
1543
0
  memcpy(dst, src, SizeOfSlot);
1544
0
}
1545
1546
// Type-erased version of raw_hash_set::drop_deletes_without_resize.
1547
void DropDeletesWithoutResize(CommonFields& common,
1548
                              const PolicyFunctions& policy, void* tmp_space);
1549
1550
// A SwissTable.
1551
//
1552
// Policy: a policy defines how to perform different operations on
1553
// the slots of the hashtable (see hash_policy_traits.h for the full interface
1554
// of policy).
1555
//
1556
// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
1557
// functor should accept a key and return size_t as hash. For best performance
1558
// it is important that the hash function provides high entropy across all bits
1559
// of the hash.
1560
//
1561
// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
1562
// should accept two (of possibly different type) keys and return a bool: true
1563
// if they are equal, false if they are not. If two keys compare equal, then
1564
// their hash values as defined by Hash MUST be equal.
1565
//
1566
// Allocator: an Allocator
1567
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
1568
// the storage of the hashtable will be allocated and the elements will be
1569
// constructed and destroyed.
1570
template <class Policy, class Hash, class Eq, class Alloc>
1571
class raw_hash_set {
1572
  using PolicyTraits = hash_policy_traits<Policy>;
1573
  using KeyArgImpl =
1574
      KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
1575
1576
 public:
1577
  using init_type = typename PolicyTraits::init_type;
1578
  using key_type = typename PolicyTraits::key_type;
1579
  // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
1580
  // code fixes!
1581
  using slot_type = typename PolicyTraits::slot_type;
1582
  using allocator_type = Alloc;
1583
  using size_type = size_t;
1584
  using difference_type = ptrdiff_t;
1585
  using hasher = Hash;
1586
  using key_equal = Eq;
1587
  using policy_type = Policy;
1588
  using value_type = typename PolicyTraits::value_type;
1589
  using reference = value_type&;
1590
  using const_reference = const value_type&;
1591
  using pointer = typename absl::allocator_traits<
1592
      allocator_type>::template rebind_traits<value_type>::pointer;
1593
  using const_pointer = typename absl::allocator_traits<
1594
      allocator_type>::template rebind_traits<value_type>::const_pointer;
1595
1596
  // Alias used for heterogeneous lookup functions.
1597
  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
1598
  // `key_type` otherwise. It permits template argument deduction on `K` for the
1599
  // transparent case.
1600
  template <class K>
1601
  using key_arg = typename KeyArgImpl::template type<K, key_type>;
1602
1603
 private:
1604
  // Give an early error when key_type is not hashable/eq.
1605
  auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
1606
  auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
1607
1608
  using AllocTraits = absl::allocator_traits<allocator_type>;
1609
  using SlotAlloc = typename absl::allocator_traits<
1610
      allocator_type>::template rebind_alloc<slot_type>;
1611
  using SlotAllocTraits = typename absl::allocator_traits<
1612
      allocator_type>::template rebind_traits<slot_type>;
1613
1614
  static_assert(std::is_lvalue_reference<reference>::value,
1615
                "Policy::element() must return a reference");
1616
1617
  template <typename T>
1618
  struct SameAsElementReference
1619
      : std::is_same<typename std::remove_cv<
1620
                         typename std::remove_reference<reference>::type>::type,
1621
                     typename std::remove_cv<
1622
                         typename std::remove_reference<T>::type>::type> {};
1623
1624
  // An enabler for insert(T&&): T must be convertible to init_type or be the
1625
  // same as [cv] value_type [ref].
1626
  // Note: we separate SameAsElementReference into its own type to avoid using
1627
  // reference unless we need to. MSVC doesn't seem to like it in some
1628
  // cases.
1629
  template <class T>
1630
  using RequiresInsertable = typename std::enable_if<
1631
      absl::disjunction<std::is_convertible<T, init_type>,
1632
                        SameAsElementReference<T>>::value,
1633
      int>::type;
1634
1635
  // RequiresNotInit is a workaround for gcc prior to 7.1.
1636
  // See https://godbolt.org/g/Y4xsUh.
1637
  template <class T>
1638
  using RequiresNotInit =
1639
      typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
1640
1641
  template <class... Ts>
1642
  using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
1643
1644
 public:
1645
  static_assert(std::is_same<pointer, value_type*>::value,
1646
                "Allocators with custom pointer types are not supported");
1647
  static_assert(std::is_same<const_pointer, const value_type*>::value,
1648
                "Allocators with custom pointer types are not supported");
1649
1650
  class iterator : private HashSetIteratorGenerationInfo {
1651
    friend class raw_hash_set;
1652
1653
   public:
1654
    using iterator_category = std::forward_iterator_tag;
1655
    using value_type = typename raw_hash_set::value_type;
1656
    using reference =
1657
        absl::conditional_t<PolicyTraits::constant_iterators::value,
1658
                            const value_type&, value_type&>;
1659
    using pointer = absl::remove_reference_t<reference>*;
1660
    using difference_type = typename raw_hash_set::difference_type;
1661
1662
    iterator() {}
1663
1664
    // PRECONDITION: not an end() iterator.
1665
20
    reference operator*() const {
1666
20
      AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
1667
20
      return PolicyTraits::element(slot_);
1668
20
    }
1669
1670
    // PRECONDITION: not an end() iterator.
1671
0
    pointer operator->() const {
1672
0
      AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
1673
0
      return &operator*();
1674
0
    }
1675
1676
    // PRECONDITION: not an end() iterator.
1677
0
    iterator& operator++() {
1678
0
      AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
1679
0
      ++ctrl_;
1680
0
      ++slot_;
1681
0
      skip_empty_or_deleted();
1682
0
      return *this;
1683
0
    }
1684
    // PRECONDITION: not an end() iterator.
1685
    iterator operator++(int) {
1686
      auto tmp = *this;
1687
      ++*this;
1688
      return tmp;
1689
    }
1690
1691
20
    friend bool operator==(const iterator& a, const iterator& b) {
1692
20
      AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
1693
20
      AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
1694
20
      AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
1695
20
                          a.generation_ptr(), b.generation_ptr());
1696
20
      return a.ctrl_ == b.ctrl_;
1697
20
    }
1698
0
    friend bool operator!=(const iterator& a, const iterator& b) {
1699
0
      return !(a == b);
1700
0
    }
1701
1702
   private:
1703
    iterator(ctrl_t* ctrl, slot_type* slot,
1704
             const GenerationType* generation_ptr)
1705
        : HashSetIteratorGenerationInfo(generation_ptr),
1706
          ctrl_(ctrl),
1707
80
          slot_(slot) {
1708
      // This assumption helps the compiler know that any non-end iterator is
1709
      // not equal to any end iterator.
1710
80
      ABSL_ASSUME(ctrl != nullptr);
1711
80
    }
1712
    // For end() iterators.
1713
    explicit iterator(const GenerationType* generation_ptr)
1714
0
        : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
1715
1716
    // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_` until
1717
    // they reach one.
1718
    //
1719
    // If a sentinel is reached, we null `ctrl_` out instead.
1720
0
    void skip_empty_or_deleted() {
1721
0
      while (IsEmptyOrDeleted(*ctrl_)) {
1722
0
        uint32_t shift =
1723
0
            GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
1724
0
        ctrl_ += shift;
1725
0
        slot_ += shift;
1726
0
      }
1727
0
      if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
1728
0
    }
1729
1730
    // We use EmptyGroup() for default-constructed iterators so that they can
1731
    // be distinguished from end iterators, which have nullptr ctrl_.
1732
    ctrl_t* ctrl_ = EmptyGroup();
1733
    // To avoid uninitialized member warnings, put slot_ in an anonymous union.
1734
    // The member is not initialized on singleton and end iterators.
1735
    union {
1736
      slot_type* slot_;
1737
    };
1738
  };
1739
1740
  class const_iterator {
1741
    friend class raw_hash_set;
1742
1743
   public:
1744
    using iterator_category = typename iterator::iterator_category;
1745
    using value_type = typename raw_hash_set::value_type;
1746
    using reference = typename raw_hash_set::const_reference;
1747
    using pointer = typename raw_hash_set::const_pointer;
1748
    using difference_type = typename raw_hash_set::difference_type;
1749
1750
    const_iterator() = default;
1751
    // Implicit construction from iterator.
1752
40
    const_iterator(iterator i) : inner_(std::move(i)) {}  // NOLINT
1753
1754
    reference operator*() const { return *inner_; }
1755
    pointer operator->() const { return inner_.operator->(); }
1756
1757
    const_iterator& operator++() {
1758
      ++inner_;
1759
      return *this;
1760
    }
1761
    const_iterator operator++(int) { return inner_++; }
1762
1763
20
    friend bool operator==(const const_iterator& a, const const_iterator& b) {
1764
20
      return a.inner_ == b.inner_;
1765
20
    }
1766
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
1767
      return !(a == b);
1768
    }
1769
1770
   private:
1771
    const_iterator(const ctrl_t* ctrl, const slot_type* slot,
1772
                   const GenerationType* gen)
1773
        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
1774
    }
1775
1776
    iterator inner_;
1777
  };
1778
1779
  using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
1780
  using insert_return_type = InsertReturnType<iterator, node_type>;
1781
1782
  // Note: can't use `= default` due to non-default noexcept (causes
1783
  // problems for some compilers). NOLINTNEXTLINE
1784
  raw_hash_set() noexcept(
1785
      std::is_nothrow_default_constructible<hasher>::value &&
1786
      std::is_nothrow_default_constructible<key_equal>::value &&
1787
2
      std::is_nothrow_default_constructible<allocator_type>::value) {}
1788
1789
  ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
1790
      size_t bucket_count, const hasher& hash = hasher(),
1791
      const key_equal& eq = key_equal(),
1792
      const allocator_type& alloc = allocator_type())
1793
      : settings_(CommonFields{}, hash, eq, alloc) {
1794
    if (bucket_count) {
1795
      common().set_capacity(NormalizeCapacity(bucket_count));
1796
      initialize_slots();
1797
    }
1798
  }
1799
1800
  raw_hash_set(size_t bucket_count, const hasher& hash,
1801
               const allocator_type& alloc)
1802
      : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
1803
1804
  raw_hash_set(size_t bucket_count, const allocator_type& alloc)
1805
      : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
1806
1807
  explicit raw_hash_set(const allocator_type& alloc)
1808
      : raw_hash_set(0, hasher(), key_equal(), alloc) {}
1809
1810
  template <class InputIter>
1811
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
1812
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
1813
               const allocator_type& alloc = allocator_type())
1814
      : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
1815
                     hash, eq, alloc) {
1816
    insert(first, last);
1817
  }
1818
1819
  template <class InputIter>
1820
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
1821
               const hasher& hash, const allocator_type& alloc)
1822
      : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
1823
1824
  template <class InputIter>
1825
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
1826
               const allocator_type& alloc)
1827
      : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
1828
1829
  template <class InputIter>
1830
  raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
1831
      : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
1832
1833
  // Instead of accepting std::initializer_list<value_type> as the first
1834
  // argument like std::unordered_set<value_type> does, we have two overloads
1835
  // that accept std::initializer_list<T> and std::initializer_list<init_type>.
1836
  // This is advantageous for performance.
1837
  //
1838
  //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
1839
  //   // copies the strings into the set.
1840
  //   std::unordered_set<std::string> s = {"abc", "def"};
1841
  //
1842
  //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
1843
  //   // copies the strings into the set.
1844
  //   absl::flat_hash_set<std::string> s = {"abc", "def"};
1845
  //
1846
  // The same trick is used in insert().
1847
  //
1848
  // The enabler is necessary to prevent this constructor from triggering where
1849
  // the copy constructor is meant to be called.
1850
  //
1851
  //   absl::flat_hash_set<int> a, b{a};
1852
  //
1853
  // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
1854
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1855
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
1856
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
1857
               const allocator_type& alloc = allocator_type())
1858
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
1859
1860
  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
1861
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
1862
               const allocator_type& alloc = allocator_type())
1863
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
1864
1865
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1866
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
1867
               const hasher& hash, const allocator_type& alloc)
1868
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
1869
1870
  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
1871
               const hasher& hash, const allocator_type& alloc)
1872
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
1873
1874
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1875
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
1876
               const allocator_type& alloc)
1877
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
1878
1879
  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
1880
               const allocator_type& alloc)
1881
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
1882
1883
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1884
  raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
1885
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
1886
1887
  raw_hash_set(std::initializer_list<init_type> init,
1888
               const allocator_type& alloc)
1889
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
1890
1891
  raw_hash_set(const raw_hash_set& that)
1892
      : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
1893
                               that.alloc_ref())) {}
1894
1895
  raw_hash_set(const raw_hash_set& that, const allocator_type& a)
1896
      : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
1897
    const size_t size = that.size();
1898
    if (size == 0) return;
1899
    reserve(size);
1900
    // Because the table is guaranteed to be empty, we can do something faster
1901
    // than a full `insert`.
1902
    for (const auto& v : that) {
1903
      const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
1904
      auto target = find_first_non_full_outofline(common(), hash);
1905
      SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
1906
      emplace_at(target.offset, v);
1907
      common().maybe_increment_generation_on_insert();
1908
      infoz().RecordInsert(hash, target.probe_length);
1909
    }
1910
    common().set_size(size);
1911
    set_growth_left(growth_left() - size);
1912
  }
1913
1914
  ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
1915
      std::is_nothrow_copy_constructible<hasher>::value &&
1916
      std::is_nothrow_copy_constructible<key_equal>::value &&
1917
      std::is_nothrow_copy_constructible<allocator_type>::value)
1918
      :  // Hash, equality and allocator are copied instead of moved because
1919
         // `that` must be left valid. If Hash is std::function<Key>, moving it
1920
         // would create a nullptr functor that cannot be called.
1921
        settings_(absl::exchange(that.common(), CommonFields{}),
1922
                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
1923
1924
  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
1925
      : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
1926
    if (a == that.alloc_ref()) {
1927
      std::swap(common(), that.common());
1928
    } else {
1929
      reserve(that.size());
1930
      // Note: this will copy keys instead of moving them. This can be fixed if
1931
      // it ever becomes an issue.
1932
      for (auto& elem : that) insert(std::move(elem));
1933
    }
1934
  }
1935
1936
  raw_hash_set& operator=(const raw_hash_set& that) {
1937
    raw_hash_set tmp(that,
1938
                     AllocTraits::propagate_on_container_copy_assignment::value
1939
                         ? that.alloc_ref()
1940
                         : alloc_ref());
1941
    swap(tmp);
1942
    return *this;
1943
  }
1944
1945
  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
1946
      absl::allocator_traits<allocator_type>::is_always_equal::value &&
1947
      std::is_nothrow_move_assignable<hasher>::value &&
1948
      std::is_nothrow_move_assignable<key_equal>::value) {
1949
    // TODO(sbenza): We should only use the operations from the noexcept clause
1950
    // to make sure we actually adhere to that contract.
1951
    // NOLINTNEXTLINE: not returning *this for performance.
1952
    return move_assign(
1953
        std::move(that),
1954
        typename AllocTraits::propagate_on_container_move_assignment());
1955
  }
1956
1957
0
  ~raw_hash_set() {
1958
0
    const size_t cap = capacity();
1959
0
    if (!cap) return;
1960
0
    destroy_slots();
1961
1962
    // Unpoison before returning the memory to the allocator.
1963
0
    SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
1964
0
    infoz().Unregister();
1965
0
    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
1966
0
        &alloc_ref(), common().backing_array_start(),
1967
0
        common().alloc_size(sizeof(slot_type), alignof(slot_type)));
1968
0
  }
1969
1970
0
  iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
1971
0
    auto it = iterator_at(0);
1972
0
    it.skip_empty_or_deleted();
1973
0
    return it;
1974
0
  }
1975
0
  iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
1976
0
    return iterator(common().generation_ptr());
1977
0
  }
1978
1979
  const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1980
    return const_cast<raw_hash_set*>(this)->begin();
1981
  }
1982
  const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1983
    return iterator(common().generation_ptr());
1984
  }
1985
  const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1986
    return begin();
1987
  }
1988
  const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
1989
1990
  bool empty() const { return !size(); }
1991
0
  size_t size() const { return common().size(); }
1992
8
  size_t capacity() const { return common().capacity(); }
1993
  size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
1994
1995
0
  ABSL_ATTRIBUTE_REINITIALIZES void clear() {
1996
    // Iterating over this container is O(bucket_count()). When bucket_count()
1997
    // is much greater than size(), iteration becomes prohibitively expensive.
1998
    // For clear() it is more important to reuse the allocated array when the
1999
    // container is small because allocation takes comparatively long time
2000
    // compared to destruction of the elements of the container. So we pick the
2001
    // largest bucket_count() threshold for which iteration is still fast and
2002
    // past that we simply deallocate the array.
2003
0
    const size_t cap = capacity();
2004
0
    if (cap == 0) {
2005
      // Already guaranteed to be empty; so nothing to do.
2006
0
    } else {
2007
0
      destroy_slots();
2008
0
      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
2009
0
    }
2010
0
    common().set_reserved_growth(0);
2011
0
    common().set_reservation_size(0);
2012
0
  }
2013
2014
  // This overload kicks in when the argument is an rvalue of insertable and
2015
  // decomposable type other than init_type.
2016
  //
2017
  //   flat_hash_map<std::string, int> m;
2018
  //   m.insert(std::make_pair("abc", 42));
2019
  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
2020
  // bug.
2021
  template <class T, RequiresInsertable<T> = 0, class T2 = T,
2022
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
2023
            T* = nullptr>
2024
20
  std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2025
20
    return emplace(std::forward<T>(value));
2026
20
  }
2027
2028
  // This overload kicks in when the argument is a bitfield or an lvalue of
2029
  // insertable and decomposable type.
2030
  //
2031
  //   union { int n : 1; };
2032
  //   flat_hash_set<int> s;
2033
  //   s.insert(n);
2034
  //
2035
  //   flat_hash_set<std::string> s;
2036
  //   const char* p = "hello";
2037
  //   s.insert(p);
2038
  //
2039
  template <
2040
      class T, RequiresInsertable<const T&> = 0,
2041
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
2042
  std::pair<iterator, bool> insert(const T& value)
2043
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2044
    return emplace(value);
2045
  }
2046
2047
  // This overload kicks in when the argument is an rvalue of init_type. Its
2048
  // purpose is to handle brace-init-list arguments.
2049
  //
2050
  //   flat_hash_map<std::string, int> s;
2051
  //   s.insert({"abc", 42});
2052
  std::pair<iterator, bool> insert(init_type&& value)
2053
0
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2054
0
    return emplace(std::move(value));
2055
0
  }
2056
2057
  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
2058
  // bug.
2059
  template <class T, RequiresInsertable<T> = 0, class T2 = T,
2060
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
2061
            T* = nullptr>
2062
  iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2063
    return insert(std::forward<T>(value)).first;
2064
  }
2065
2066
  template <
2067
      class T, RequiresInsertable<const T&> = 0,
2068
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
2069
  iterator insert(const_iterator,
2070
                  const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2071
    return insert(value).first;
2072
  }
2073
2074
  iterator insert(const_iterator,
2075
                  init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2076
    return insert(std::move(value)).first;
2077
  }
2078
2079
  template <class InputIt>
2080
  void insert(InputIt first, InputIt last) {
2081
    for (; first != last; ++first) emplace(*first);
2082
  }
2083
2084
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
2085
  void insert(std::initializer_list<T> ilist) {
2086
    insert(ilist.begin(), ilist.end());
2087
  }
2088
2089
  void insert(std::initializer_list<init_type> ilist) {
2090
    insert(ilist.begin(), ilist.end());
2091
  }
2092
2093
  insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2094
    if (!node) return {end(), false, node_type()};
2095
    const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
2096
    auto res = PolicyTraits::apply(
2097
        InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
2098
        elem);
2099
    if (res.second) {
2100
      CommonAccess::Reset(&node);
2101
      return {res.first, true, node_type()};
2102
    } else {
2103
      return {res.first, false, std::move(node)};
2104
    }
2105
  }
2106
2107
  iterator insert(const_iterator,
2108
                  node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2109
    auto res = insert(std::move(node));
2110
    node = std::move(res.node);
2111
    return res.position;
2112
  }
2113
2114
  // This overload kicks in if we can deduce the key from args. This enables us
2115
  // to avoid constructing value_type if an entry with the same key already
2116
  // exists.
2117
  //
2118
  // For example:
2119
  //
2120
  //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
2121
  //   // Creates no std::string copies and makes no heap allocations.
2122
  //   m.emplace("abc", "xyz");
2123
  template <class... Args, typename std::enable_if<
2124
                               IsDecomposable<Args...>::value, int>::type = 0>
2125
  std::pair<iterator, bool> emplace(Args&&... args)
2126
20
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2127
20
    return PolicyTraits::apply(EmplaceDecomposable{*this},
2128
20
                               std::forward<Args>(args)...);
2129
20
  }
std::__1::pair<absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<absl::string_view, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<absl::string_view const, absl::CommandLineFlag*> > >::iterator, bool> absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<absl::string_view, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<absl::string_view const, absl::CommandLineFlag*> > >::emplace<std::__1::pair<absl::string_view const, absl::CommandLineFlag*>, 0>(std::__1::pair<absl::string_view const, absl::CommandLineFlag*>&&)
Line
Count
Source
2126
20
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2127
20
    return PolicyTraits::apply(EmplaceDecomposable{*this},
2128
20
                               std::forward<Args>(args)...);
2129
20
  }
Unexecuted instantiation: std::__1::pair<absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<absl::string_view, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<absl::string_view const, absl::CommandLineFlag*> > >::iterator, bool> absl::container_internal::raw_hash_set<absl::container_internal::FlatHashMapPolicy<absl::string_view, absl::CommandLineFlag*>, absl::container_internal::StringHash, absl::container_internal::StringEq, std::__1::allocator<std::__1::pair<absl::string_view const, absl::CommandLineFlag*> > >::emplace<std::__1::pair<absl::string_view, absl::CommandLineFlag*>, 0>(std::__1::pair<absl::string_view, absl::CommandLineFlag*>&&)
2130
2131
  // This overload kicks in if we cannot deduce the key from args. It constructs
2132
  // value_type unconditionally and then either moves it into the table or
2133
  // destroys it.
2134
  template <class... Args, typename std::enable_if<
2135
                               !IsDecomposable<Args...>::value, int>::type = 0>
2136
  std::pair<iterator, bool> emplace(Args&&... args)
2137
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2138
    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
2139
    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
2140
2141
    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
2142
    const auto& elem = PolicyTraits::element(slot);
2143
    return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
2144
  }
2145
2146
  template <class... Args>
2147
  iterator emplace_hint(const_iterator,
2148
                        Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2149
    return emplace(std::forward<Args>(args)...).first;
2150
  }
2151
2152
  // Extension API: support for lazy emplace.
2153
  //
2154
  // Looks up key in the table. If found, returns the iterator to the element.
2155
  // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
2156
  // and returns an iterator to the new element.
2157
  //
2158
  // `f` must abide by several restrictions:
2159
  //  - it MUST call `raw_hash_set::constructor` with arguments as if a
2160
  //    `raw_hash_set::value_type` is constructed,
2161
  //  - it MUST NOT access the container before the call to
2162
  //    `raw_hash_set::constructor`, and
2163
  //  - it MUST NOT erase the lazily emplaced element.
2164
  // Doing any of these is undefined behavior.
2165
  //
2166
  // For example:
2167
  //
2168
  //   std::unordered_set<ArenaString> s;
2169
  //   // Makes ArenaString even if "abc" is in the set.
2170
  //   s.insert(ArenaString(&arena, "abc"));
2171
  //
2172
  //   flat_hash_set<ArenaString> s;
2173
  //   // Makes ArenaString only if "abc" is not in the set.
2174
  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
2175
  //     ctor(&arena, "abc");
2176
  //   });
2177
  //
2178
  // WARNING: This API is currently experimental. If there is a way to implement
2179
  // the same thing with the rest of the API, prefer that.
2180
  class constructor {
2181
    friend class raw_hash_set;
2182
2183
   public:
2184
    template <class... Args>
2185
    void operator()(Args&&... args) const {
2186
      assert(*slot_);
2187
      PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
2188
      *slot_ = nullptr;
2189
    }
2190
2191
   private:
2192
    constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
2193
2194
    allocator_type* alloc_;
2195
    slot_type** slot_;
2196
  };
2197
2198
  template <class K = key_type, class F>
2199
  iterator lazy_emplace(const key_arg<K>& key,
2200
                        F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2201
    auto res = find_or_prepare_insert(key);
2202
    if (res.second) {
2203
      slot_type* slot = slot_array() + res.first;
2204
      std::forward<F>(f)(constructor(&alloc_ref(), &slot));
2205
      assert(!slot);
2206
    }
2207
    return iterator_at(res.first);
2208
  }
2209
2210
  // Extension API: support for heterogeneous keys.
2211
  //
2212
  //   std::unordered_set<std::string> s;
2213
  //   // Turns "abc" into std::string.
2214
  //   s.erase("abc");
2215
  //
2216
  //   flat_hash_set<std::string> s;
2217
  //   // Uses "abc" directly without copying it into std::string.
2218
  //   s.erase("abc");
2219
  template <class K = key_type>
2220
  size_type erase(const key_arg<K>& key) {
2221
    auto it = find(key);
2222
    if (it == end()) return 0;
2223
    erase(it);
2224
    return 1;
2225
  }
2226
2227
  // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
2228
  // this method returns void to reduce algorithmic complexity to O(1).  The
2229
  // iterator is invalidated, so any increment should be done before calling
2230
  // erase.  In order to erase while iterating across a map, use the following
2231
  // idiom (which also works for standard containers):
2232
  //
2233
  // for (auto it = m.begin(), end = m.end(); it != end;) {
2234
  //   // `erase()` will invalidate `it`, so advance `it` first.
2235
  //   auto copy_it = it++;
2236
  //   if (<pred>) {
2237
  //     m.erase(copy_it);
2238
  //   }
2239
  // }
2240
  void erase(const_iterator cit) { erase(cit.inner_); }
2241
2242
  // This overload is necessary because otherwise erase<K>(const K&) would be
2243
  // a better match if non-const iterator is passed as an argument.
2244
  void erase(iterator it) {
2245
    AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
2246
    PolicyTraits::destroy(&alloc_ref(), it.slot_);
2247
    erase_meta_only(it);
2248
  }
2249
2250
  iterator erase(const_iterator first,
2251
                 const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2252
    // We check for empty first because ClearBackingArray requires that
2253
    // capacity() > 0 as a precondition.
2254
    if (empty()) return end();
2255
    if (first == begin() && last == end()) {
2256
      // TODO(ezb): we access control bytes in destroy_slots so it could make
2257
      // sense to combine destroy_slots and ClearBackingArray to avoid cache
2258
      // misses when the table is large. Note that we also do this in clear().
2259
      destroy_slots();
2260
      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
2261
      common().set_reserved_growth(common().reservation_size());
2262
      return end();
2263
    }
2264
    while (first != last) {
2265
      erase(first++);
2266
    }
2267
    return last.inner_;
2268
  }
2269
2270
  // Moves elements from `src` into `this`.
2271
  // If the element already exists in `this`, it is left unmodified in `src`.
2272
  template <typename H, typename E>
2273
  void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
2274
    assert(this != &src);
2275
    for (auto it = src.begin(), e = src.end(); it != e;) {
2276
      auto next = std::next(it);
2277
      if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
2278
                              PolicyTraits::element(it.slot_))
2279
              .second) {
2280
        src.erase_meta_only(it);
2281
      }
2282
      it = next;
2283
    }
2284
  }
2285
2286
  template <typename H, typename E>
2287
  void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
2288
    merge(src);
2289
  }
2290
2291
  node_type extract(const_iterator position) {
2292
    AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
2293
                 position.inner_.generation_ptr(), "extract()");
2294
    auto node =
2295
        CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
2296
    erase_meta_only(position);
2297
    return node;
2298
  }
2299
2300
  template <
2301
      class K = key_type,
2302
      typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
2303
  node_type extract(const key_arg<K>& key) {
2304
    auto it = find(key);
2305
    return it == end() ? node_type() : extract(const_iterator{it});
2306
  }
2307
2308
  void swap(raw_hash_set& that) noexcept(
2309
      IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
2310
      IsNoThrowSwappable<allocator_type>(
2311
          typename AllocTraits::propagate_on_container_swap{})) {
2312
    using std::swap;
2313
    swap(common(), that.common());
2314
    swap(hash_ref(), that.hash_ref());
2315
    swap(eq_ref(), that.eq_ref());
2316
    SwapAlloc(alloc_ref(), that.alloc_ref(),
2317
              typename AllocTraits::propagate_on_container_swap{});
2318
  }
2319
2320
  void rehash(size_t n) {
2321
    if (n == 0 && capacity() == 0) return;
2322
    if (n == 0 && size() == 0) {
2323
      ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
2324
      return;
2325
    }
2326
2327
    // bitor is a faster way of doing `max` here. We will round up to the next
2328
    // power-of-2-minus-1, so bitor is good enough.
2329
    auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
2330
    // n == 0 unconditionally rehashes as per the standard.
2331
    if (n == 0 || m > capacity()) {
2332
      resize(m);
2333
2334
      // This is after resize, to ensure that we have completed the allocation
2335
      // and have potentially sampled the hashtable.
2336
      infoz().RecordReservation(n);
2337
    }
2338
  }
2339
2340
  void reserve(size_t n) {
2341
    if (n > size() + growth_left()) {
2342
      size_t m = GrowthToLowerboundCapacity(n);
2343
      resize(NormalizeCapacity(m));
2344
2345
      // This is after resize, to ensure that we have completed the allocation
2346
      // and have potentially sampled the hashtable.
2347
      infoz().RecordReservation(n);
2348
    }
2349
    common().reset_reserved_growth(n);
2350
    common().set_reservation_size(n);
2351
  }
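  // Illustrative usage of reserve(): after reserving room for n elements,
  // inserting up to n distinct elements triggers no further allocation or
  // rehash (sketch, using the flat_hash_set wrapper):
  //
  //   absl::flat_hash_set<int> s;
  //   s.reserve(100);
  //   for (int i = 0; i < 100; ++i) s.insert(i);  // no rehashing here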
2352
2353
  // Extension API: support for heterogeneous keys.
2354
  //
2355
  //   std::unordered_set<std::string> s;
2356
  //   // Turns "abc" into std::string.
2357
  //   s.count("abc");
2358
  //
2359
  //   ch_set<std::string> s;
2360
  //   // Uses "abc" directly without copying it into std::string.
2361
  //   s.count("abc");
2362
  template <class K = key_type>
2363
  size_t count(const key_arg<K>& key) const {
2364
    return find(key) == end() ? 0 : 1;
2365
  }
2366
2367
  // Issues CPU prefetch instructions for the memory needed to find or insert
2368
  // a key.  Like all lookup functions, this supports heterogeneous keys.
2369
  //
2370
  // NOTE: This is a very low level operation and should not be used without
2371
  // specific benchmarks indicating its importance.
2372
  template <class K = key_type>
2373
  void prefetch(const key_arg<K>& key) const {
2374
    (void)key;
2375
    // Avoid probing if we won't be able to prefetch the addresses received.
2376
#ifdef ABSL_HAVE_PREFETCH
2377
    prefetch_heap_block();
2378
    auto seq = probe(common(), hash_ref()(key));
2379
    PrefetchToLocalCache(control() + seq.offset());
2380
    PrefetchToLocalCache(slot_array() + seq.offset());
2381
#endif  // ABSL_HAVE_PREFETCH
2382
  }
2383
2384
  // The API of find() has two extensions.
2385
  //
2386
  // 1. The hash can be passed by the user. It must be equal to the hash of the
2387
  // key.
2388
  //
2389
  // 2. The type of the key argument doesn't have to be key_type. This is the
2390
  // so-called heterogeneous key support.
2391
  template <class K = key_type>
2392
  iterator find(const key_arg<K>& key,
2393
20
                size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2394
20
    auto seq = probe(common(), hash);
2395
20
    slot_type* slot_ptr = slot_array();
2396
20
    const ctrl_t* ctrl = control();
2397
20
    while (true) {
2398
20
      Group g{ctrl + seq.offset()};
2399
20
      for (uint32_t i : g.Match(H2(hash))) {
2400
20
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
2401
20
                EqualElement<K>{key, eq_ref()},
2402
20
                PolicyTraits::element(slot_ptr + seq.offset(i)))))
2403
20
          return iterator_at(seq.offset(i));
2404
20
      }
2405
0
      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
2406
0
      seq.next();
2407
0
      assert(seq.index() <= capacity() && "full table!");
2408
0
    }
2409
20
  }
2410
  template <class K = key_type>
2411
0
  iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2412
0
    prefetch_heap_block();
2413
0
    return find(key, hash_ref()(key));
2414
0
  }
2415
2416
  template <class K = key_type>
2417
  const_iterator find(const key_arg<K>& key,
2418
20
                      size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2419
20
    return const_cast<raw_hash_set*>(this)->find(key, hash);
2420
20
  }
2421
  template <class K = key_type>
2422
  const_iterator find(const key_arg<K>& key) const
2423
20
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2424
20
    prefetch_heap_block();
2425
20
    return find(key, hash_ref()(key));
2426
20
  }
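  // Illustrative usage of the hash-passing extension above (the supplied
  // hash must equal what the table's hasher returns for the same key):
  //
  //   absl::flat_hash_set<std::string> s = {"abc"};
  //   const size_t h = s.hash_function()("abc");
  //   auto it = s.find("abc", h);  // same result as s.find("abc")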
2427
2428
  template <class K = key_type>
2429
  bool contains(const key_arg<K>& key) const {
2430
    return find(key) != end();
2431
  }
2432
2433
  template <class K = key_type>
2434
  std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
2435
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
2436
    auto it = find(key);
2437
    if (it != end()) return {it, std::next(it)};
2438
    return {it, it};
2439
  }
2440
  template <class K = key_type>
2441
  std::pair<const_iterator, const_iterator> equal_range(
2442
      const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2443
    auto it = find(key);
2444
    if (it != end()) return {it, std::next(it)};
2445
    return {it, it};
2446
  }
2447
2448
  size_t bucket_count() const { return capacity(); }
2449
  float load_factor() const {
2450
    return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
2451
  }
2452
  float max_load_factor() const { return 1.0f; }
2453
  void max_load_factor(float) {
2454
    // Does nothing.
2455
  }
2456
2457
  hasher hash_function() const { return hash_ref(); }
2458
  key_equal key_eq() const { return eq_ref(); }
2459
  allocator_type get_allocator() const { return alloc_ref(); }
2460
2461
  friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
2462
    if (a.size() != b.size()) return false;
2463
    const raw_hash_set* outer = &a;
2464
    const raw_hash_set* inner = &b;
2465
    if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
2466
    for (const value_type& elem : *outer) {
2467
      auto it = PolicyTraits::apply(FindElement{*inner}, elem);
2468
      if (it == inner->end() || !(*it == elem)) return false;
2469
    }
2470
    return true;
2471
  }
2472
2473
  friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
2474
    return !(a == b);
2475
  }
2476
2477
  template <typename H>
2478
  friend typename std::enable_if<H::template is_hashable<value_type>::value,
2479
                                 H>::type
2480
  AbslHashValue(H h, const raw_hash_set& s) {
2481
    return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
2482
                      s.size());
2483
  }
2484
2485
  friend void swap(raw_hash_set& a,
2486
                   raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
2487
    a.swap(b);
2488
  }
2489
2490
 private:
2491
  template <class Container, typename Enabler>
2492
  friend struct absl::container_internal::hashtable_debug_internal::
2493
      HashtableDebugAccess;
2494
2495
  struct FindElement {
2496
    template <class K, class... Args>
2497
20
    const_iterator operator()(const K& key, Args&&...) const {
2498
20
      return s.find(key);
2499
20
    }
2500
    const raw_hash_set& s;
2501
  };
2502
2503
  struct HashElement {
2504
    template <class K, class... Args>
2505
22
    size_t operator()(const K& key, Args&&...) const {
2506
22
      return h(key);
2507
22
    }
2508
    const hasher& h;
2509
  };
2510
2511
  template <class K1>
2512
  struct EqualElement {
2513
    template <class K2, class... Args>
2514
20
    bool operator()(const K2& lhs, Args&&...) const {
2515
20
      return eq(lhs, rhs);
2516
20
    }
2517
    const K1& rhs;
2518
    const key_equal& eq;
2519
  };
2520
2521
  struct EmplaceDecomposable {
2522
    template <class K, class... Args>
2523
20
    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
2524
20
      auto res = s.find_or_prepare_insert(key);
2525
20
      if (res.second) {
2526
20
        s.emplace_at(res.first, std::forward<Args>(args)...);
2527
20
      }
2528
20
      return {s.iterator_at(res.first), res.second};
2529
20
    }
2530
    raw_hash_set& s;
2531
  };
2532
2533
  template <bool do_destroy>
2534
  struct InsertSlot {
2535
    template <class K, class... Args>
2536
    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
2537
      auto res = s.find_or_prepare_insert(key);
2538
      if (res.second) {
2539
        PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
2540
                               &slot);
2541
      } else if (do_destroy) {
2542
        PolicyTraits::destroy(&s.alloc_ref(), &slot);
2543
      }
2544
      return {s.iterator_at(res.first), res.second};
2545
    }
2546
    raw_hash_set& s;
2547
    // Constructed slot. Either moved into place or destroyed.
2548
    slot_type&& slot;
2549
  };
2550
2551
0
  inline void destroy_slots() {
2552
0
    const size_t cap = capacity();
2553
0
    const ctrl_t* ctrl = control();
2554
0
    slot_type* slot = slot_array();
2555
0
    for (size_t i = 0; i != cap; ++i) {
2556
0
      if (IsFull(ctrl[i])) {
2557
0
        PolicyTraits::destroy(&alloc_ref(), slot + i);
2558
0
      }
2559
0
    }
2560
0
  }
2561
2562
  // Erases, but does not destroy, the value pointed to by `it`.
2563
  //
2564
  // This merely updates the pertinent control byte. This can be used in
2565
  // conjunction with Policy::transfer to move the object to another place.
2566
  void erase_meta_only(const_iterator it) {
2567
    EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
2568
  }
2569
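As a rough, Abseil-free illustration of the extraction pattern this comment describes (move the element out, destroy the source exactly once, and let the table only flip its control byte), a minimal sketch that assumes nothing about the header beyond the comment above:

#include <memory>
#include <new>
#include <string>
#include <utility>

int main() {
  // A raw slot, roughly as the table would hold it.
  alignas(std::string) unsigned char slot[sizeof(std::string)];
  auto* src = ::new (static_cast<void*>(slot)) std::string("extracted");
  // "Transfer": move the value to its new home, then run the source's
  // destructor exactly once. The table itself would only update the slot's
  // control byte afterwards, which is all erase_meta_only does.
  std::string dst(std::move(*src));
  std::destroy_at(src);
  return dst == "extracted" ? 0 : 1;
}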
2570
  // Allocates a backing array for `self` and initializes its control bytes.
2571
  // This reads `capacity` and updates all other fields based on the result of
2572
  // the allocation.
2573
  //
2574
  // This does not free the currently held array; `capacity` must be nonzero.
2575
8
  inline void initialize_slots() {
2576
    // People are often sloppy with the exact type of their allocator (sometimes
2577
    // it has an extra const or is missing the pair, but rebinding makes it work
2578
    // anyway).
2579
8
    using CharAlloc =
2580
8
        typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
2581
8
    InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
2582
8
        common(), CharAlloc(alloc_ref()));
2583
8
  }
2584
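A minimal sketch of the rebind trick the comment above alludes to; `AllocateBackingBytes` and `UserAlloc` are illustrative names, not helpers from this header:

#include <cstddef>
#include <memory>

// Whatever allocator the caller supplied, rebind it to a byte allocator so a
// single allocation can back both control bytes and slots. Deallocation must
// go through the same rebound allocator.
template <class UserAlloc>
void* AllocateBackingBytes(UserAlloc& a, std::size_t n) {
  using CharAlloc =
      typename std::allocator_traits<UserAlloc>::template rebind_alloc<char>;
  CharAlloc ca(a);  // works even if UserAlloc's value_type is slightly "wrong"
  return std::allocator_traits<CharAlloc>::allocate(ca, n);
}

int main() {
  std::allocator<int> a;  // the caller's allocator, whatever its value_type
  void* p = AllocateBackingBytes(a, 64);
  using CharAlloc = std::allocator_traits<std::allocator<int>>::rebind_alloc<char>;
  CharAlloc ca(a);
  std::allocator_traits<CharAlloc>::deallocate(ca, static_cast<char*>(p), 64);
}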
2585
8
  ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
2586
8
    assert(IsValidCapacity(new_capacity));
2587
0
    auto* old_ctrl = control();
2588
8
    auto* old_slots = slot_array();
2589
8
    const bool had_infoz = common().has_infoz();
2590
8
    const size_t old_capacity = common().capacity();
2591
8
    common().set_capacity(new_capacity);
2592
8
    initialize_slots();
2593
2594
8
    auto* new_slots = slot_array();
2595
8
    size_t total_probe_length = 0;
2596
30
    for (size_t i = 0; i != old_capacity; ++i) {
2597
22
      if (IsFull(old_ctrl[i])) {
2598
22
        size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
2599
22
                                          PolicyTraits::element(old_slots + i));
2600
22
        auto target = find_first_non_full(common(), hash);
2601
22
        size_t new_i = target.offset;
2602
22
        total_probe_length += target.probe_length;
2603
22
        SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
2604
22
        PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
2605
22
      }
2606
22
    }
2607
8
    if (old_capacity) {
2608
6
      SanitizerUnpoisonMemoryRegion(old_slots,
2609
6
                                    sizeof(slot_type) * old_capacity);
2610
6
      Deallocate<BackingArrayAlignment(alignof(slot_type))>(
2611
6
          &alloc_ref(), old_ctrl - ControlOffset(had_infoz),
2612
6
          AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type),
2613
6
                    had_infoz));
2614
6
    }
2615
8
    infoz().RecordRehash(total_probe_length);
2616
8
  }
2617
2618
  // Prunes control bytes to remove as many tombstones as possible.
2619
  //
2620
  // See the comment on `rehash_and_grow_if_necessary()`.
2621
0
  inline void drop_deletes_without_resize() {
2622
    // Stack-allocate space for swapping elements.
2623
0
    alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
2624
0
    DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
2625
0
  }
2626
2627
  // Called whenever the table *might* need to conditionally grow.
2628
  //
2629
  // This function is an optimization opportunity to perform a rehash even when
2630
  // growth is unnecessary, because vacating tombstones is beneficial for
2631
  // performance in the long-run.
2632
8
  void rehash_and_grow_if_necessary() {
2633
8
    const size_t cap = capacity();
2634
8
    if (cap > Group::kWidth &&
2635
        // Do these calculations in 64-bit to avoid overflow.
2636
8
        size() * uint64_t{32} <= cap * uint64_t{25}) {
2637
      // Squash DELETED without growing if there is enough capacity.
2638
      //
2639
      // Rehash in place if the current size is <= 25/32 of capacity.
2640
      // Rationale for such a high factor: 1) drop_deletes_without_resize() is
2641
      // faster than resize, and 2) it takes quite a bit of work to add
2642
      // tombstones.  In the worst case, it seems to take approximately 4
2643
      // insert/erase pairs to create a single tombstone and so if we are
2644
      // rehashing because of tombstones, we can afford to rehash-in-place as
2645
      // long as we are reclaiming at least 1/8 the capacity without doing more
2646
      // than 2X the work.  (Where "work" is defined to be size() for rehashing
2647
      // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
2648
      // place is faster per operation than inserting or even doubling the size
2649
      // of the table, so we actually afford to reclaim even less space from a
2650
      // resize-in-place.  The decision is to rehash in place if we can reclaim
2651
      // about 1/8th of the usable capacity (specifically 3/28 of the
2652
      // capacity) which means that the total cost of rehashing will be a small
2653
      // fraction of the total work.
2654
      //
2655
      // Here is output of an experiment using the BM_CacheInSteadyState
2656
      // benchmark running the old case (where we rehash-in-place only if we can
2657
      // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
2658
      // if we can recover 3/32*capacity).
2659
      //
2660
      // Note that although the worst-case number of rehashes jumped up from
2661
      // 15 to 190, the number of operations per second is almost the same.
2662
      //
2663
      // Abridged output of running BM_CacheInSteadyState benchmark from
2664
      // raw_hash_set_benchmark.   N is the number of insert/erase operations.
2665
      //
2666
      //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
2667
      // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
2668
      //  448 | 145284       0.44        18 | 140118       0.44        19
2669
      //  493 | 152546       0.24        11 | 151417       0.48        28
2670
      //  538 | 151439       0.26        11 | 151152       0.53        38
2671
      //  583 | 151765       0.28        11 | 150572       0.57        50
2672
      //  628 | 150241       0.31        11 | 150853       0.61        66
2673
      //  672 | 149602       0.33        12 | 150110       0.66        90
2674
      //  717 | 149998       0.35        12 | 149531       0.70       129
2675
      //  762 | 149836       0.37        13 | 148559       0.74       190
2676
      //  807 | 149736       0.39        14 | 151107       0.39        14
2677
      //  852 | 150204       0.42        15 | 151019       0.42        15
2678
0
      drop_deletes_without_resize();
2679
8
    } else {
2680
      // Otherwise grow the container.
2681
8
      resize(NextCapacity(cap));
2682
8
    }
2683
8
  }
2684
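A concrete instance of the 25/32 threshold above, with a capacity chosen purely for illustration (the real check also requires cap > Group::kWidth):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t capacity = 511;  // hypothetical capacity, well above kWidth
  // Same comparison as above, done in 64-bit: tombstones are dropped in place
  // while size() * 32 <= capacity * 25, i.e. for up to 511 * 25 / 32 = 399
  // elements; beyond that the table grows to the next capacity instead.
  const uint64_t in_place_limit = capacity * 25 / 32;
  std::printf("rehash in place while size() <= %llu of %llu slots\n",
              static_cast<unsigned long long>(in_place_limit),
              static_cast<unsigned long long>(capacity));
}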
2685
  // TODO(alkis): Optimize this assuming *this and that don't overlap.
2686
  raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
2687
    raw_hash_set tmp(std::move(that));
2688
    swap(tmp);
2689
    return *this;
2690
  }
2691
  raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
2692
    raw_hash_set tmp(std::move(that), alloc_ref());
2693
    swap(tmp);
2694
    return *this;
2695
  }
2696
2697
 protected:
2698
  // Attempts to find `key` in the table; if it isn't found, returns a slot that
2699
  // the value can be inserted into, with the control byte already set to
2700
  // `key`'s H2.
2701
  template <class K>
2702
20
  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
2703
20
    prefetch_heap_block();
2704
20
    auto hash = hash_ref()(key);
2705
20
    auto seq = probe(common(), hash);
2706
20
    const ctrl_t* ctrl = control();
2707
20
    while (true) {
2708
20
      Group g{ctrl + seq.offset()};
2709
20
      for (uint32_t i : g.Match(H2(hash))) {
2710
0
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
2711
0
                EqualElement<K>{key, eq_ref()},
2712
0
                PolicyTraits::element(slot_array() + seq.offset(i)))))
2713
0
          return {seq.offset(i), false};
2714
0
      }
2715
20
      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
2716
0
      seq.next();
2717
0
      assert(seq.index() <= capacity() && "full table!");
2718
0
    }
2719
20
    return {prepare_insert(hash), true};
2720
20
  }
2721
2722
  // Given the hash of a value not currently in the table, finds the next
2723
  // viable slot index to insert it at.
2724
  //
2725
  // REQUIRES: At least one non-full slot available.
2726
20
  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
2727
20
    const bool rehash_for_bug_detection =
2728
20
        common().should_rehash_for_bug_detection_on_insert();
2729
20
    if (rehash_for_bug_detection) {
2730
      // Move to a different heap allocation in order to detect bugs.
2731
0
      const size_t cap = capacity();
2732
0
      resize(growth_left() > 0 ? cap : NextCapacity(cap));
2733
0
    }
2734
20
    auto target = find_first_non_full(common(), hash);
2735
20
    if (!rehash_for_bug_detection &&
2736
20
        ABSL_PREDICT_FALSE(growth_left() == 0 &&
2737
20
                           !IsDeleted(control()[target.offset]))) {
2738
8
      rehash_and_grow_if_necessary();
2739
8
      target = find_first_non_full(common(), hash);
2740
8
    }
2741
20
    common().increment_size();
2742
20
    set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
2743
20
    SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
2744
20
    common().maybe_increment_generation_on_insert();
2745
20
    infoz().RecordInsert(hash, target.probe_length);
2746
20
    return target.offset;
2747
20
  }
2748
2749
  // Constructs the value in the space pointed to by the iterator. This only works
2750
  // after an unsuccessful find_or_prepare_insert() and before any other
2751
  // modifications happen in the raw_hash_set.
2752
  //
2753
  // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
2754
  // k is the key decomposed from `forward<Args>(args)...`, and the bool
2755
  // returned by find_or_prepare_insert(k) was true.
2756
  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
2757
  template <class... Args>
2758
20
  void emplace_at(size_t i, Args&&... args) {
2759
20
    PolicyTraits::construct(&alloc_ref(), slot_array() + i,
2760
20
                            std::forward<Args>(args)...);
2761
2762
20
    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
2763
20
               iterator_at(i) &&
2764
20
           "constructed value does not match the lookup key");
2765
20
  }
2766
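From the outside, the public maps built on this class expose the same one-lookup contract; a small usage sketch (roughly, try_emplace is find_or_prepare_insert followed by emplace_at only when the key was absent):

#include <cstdio>
#include <string>
#include "absl/container/flat_hash_map.h"

int main() {
  absl::flat_hash_map<std::string, int> m;
  auto [it, inserted] = m.try_emplace("answer", 42);  // absent: constructs in place
  auto [it2, again] = m.try_emplace("answer", 7);     // present: constructs nothing
  std::printf("%d %d %d %d\n", it->second, inserted, it2->second, again);
  // prints: 42 1 42 0
}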
2767
80
  iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2768
80
    return {control() + i, slot_array() + i, common().generation_ptr()};
2769
80
  }
2770
  const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2771
    return {control() + i, slot_array() + i, common().generation_ptr()};
2772
  }
2773
2774
 private:
2775
  friend struct RawHashSetTestOnlyAccess;
2776
2777
  // The number of slots we can still fill without needing to rehash.
2778
  //
2779
  // This is stored separately due to tombstones: we do not include tombstones
2780
  // in the growth capacity, because we'd like to rehash when the table is
2781
  // otherwise filled with tombstones: otherwise, probe sequences might get
2782
  // unacceptably long without triggering a rehash. Callers can also force a
2783
  // rehash via the standard `rehash(0)`, which will recompute this value as a
2784
  // side-effect.
2785
  //
2786
  // See `CapacityToGrowth()`.
2787
40
  size_t growth_left() const { return common().growth_left(); }
2788
20
  void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
2789
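For a sense of scale, `CapacityToGrowth()` keeps this bound at roughly 7/8 of the capacity (stated here as an assumption; see that helper for the exact small-capacity cases):

#include <cstddef>

// Illustrative arithmetic only: a capacity-63 table admits 63 - 63/8 = 56
// insertions before it must rehash, no matter how many tombstones it holds.
constexpr std::size_t ApproxCapacityToGrowth(std::size_t capacity) {
  return capacity - capacity / 8;
}
static_assert(ApproxCapacityToGrowth(63) == 56, "roughly 7/8 of the slots");

int main() {}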
2790
  // Prefetch the heap-allocated memory region to resolve potential TLB and
2791
  // cache misses. This is intended to overlap with execution of calculating the
2792
  // hash for a key.
2793
40
  void prefetch_heap_block() const {
2794
40
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
2795
40
    __builtin_prefetch(control(), 0, 1);
2796
40
#endif
2797
40
  }
2798
2799
352
  CommonFields& common() { return settings_.template get<0>(); }
2800
380
  const CommonFields& common() const { return settings_.template get<0>(); }
2801
2802
196
  ctrl_t* control() const { return common().control(); }
2803
136
  slot_type* slot_array() const {
2804
136
    return static_cast<slot_type*>(common().slot_array());
2805
136
  }
2806
28
  HashtablezInfoHandle infoz() { return common().infoz(); }
2807
2808
42
  hasher& hash_ref() { return settings_.template get<1>(); }
2809
20
  const hasher& hash_ref() const { return settings_.template get<1>(); }
2810
20
  key_equal& eq_ref() { return settings_.template get<2>(); }
2811
  const key_equal& eq_ref() const { return settings_.template get<2>(); }
2812
56
  allocator_type& alloc_ref() { return settings_.template get<3>(); }
2813
  const allocator_type& alloc_ref() const {
2814
    return settings_.template get<3>();
2815
  }
2816
2817
  // Make type-specific functions for this type's PolicyFunctions struct.
2818
0
  static size_t hash_slot_fn(void* set, void* slot) {
2819
0
    auto* h = static_cast<raw_hash_set*>(set);
2820
0
    return PolicyTraits::apply(
2821
0
        HashElement{h->hash_ref()},
2822
0
        PolicyTraits::element(static_cast<slot_type*>(slot)));
2823
0
  }
2824
0
  static void transfer_slot_fn(void* set, void* dst, void* src) {
2825
0
    auto* h = static_cast<raw_hash_set*>(set);
2826
0
    PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
2827
0
                           static_cast<slot_type*>(src));
2828
0
  }
2829
  // Note: dealloc_fn will only be used if we have a non-standard allocator.
2830
0
  static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
2831
0
    auto* set = reinterpret_cast<raw_hash_set*>(&common);
2832
0
2833
0
    // Unpoison before returning the memory to the allocator.
2834
0
    SanitizerUnpoisonMemoryRegion(common.slot_array(),
2835
0
                                  sizeof(slot_type) * common.capacity());
2836
0
2837
0
    common.infoz().Unregister();
2838
0
    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
2839
0
        &set->alloc_ref(), common.backing_array_start(),
2840
0
        common.alloc_size(sizeof(slot_type), alignof(slot_type)));
2841
0
  }
2842
2843
0
  static const PolicyFunctions& GetPolicyFunctions() {
2844
0
    static constexpr PolicyFunctions value = {
2845
0
        sizeof(slot_type),
2846
0
        &raw_hash_set::hash_slot_fn,
2847
0
        PolicyTraits::transfer_uses_memcpy()
2848
0
            ? TransferRelocatable<sizeof(slot_type)>
2849
0
            : &raw_hash_set::transfer_slot_fn,
2850
0
        (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
2851
0
             ? &DeallocateStandard<alignof(slot_type)>
2852
0
             : &raw_hash_set::dealloc_fn),
2853
0
    };
2854
0
    return value;
2855
0
  }
2856
2857
  // Bundle together CommonFields plus other objects which might be empty.
2858
  // CompressedTuple will ensure that sizeof is not affected by any of the empty
2859
  // fields that occur after CommonFields.
2860
  absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
2861
                                            allocator_type>
2862
      settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
2863
};
2864
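The CompressedTuple member above relies on the empty-base idea; a plain-EBO sketch of why empty hashers and comparators cost no space (this is not Abseil's CompressedTuple, just the underlying trick):

#include <cstddef>
#include <cstdio>

struct Fields { std::size_t capacity = 0; std::size_t size = 0; };
struct EmptyHash {
  std::size_t operator()(int v) const { return static_cast<std::size_t>(v); }
};

template <class F, class H>
struct Compressed : H {  // the empty hasher becomes a zero-size base
  F fields;
};

int main() {
  // On mainstream ABIs both lines print the same size: the empty base adds
  // nothing, which is what CompressedTuple guarantees for its empty fields.
  std::printf("Fields: %zu bytes\n", sizeof(Fields));
  std::printf("Compressed: %zu bytes\n", sizeof(Compressed<Fields, EmptyHash>));
}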
2865
// Erases all elements that satisfy the predicate `pred` from the container `c`.
2866
template <typename P, typename H, typename E, typename A, typename Predicate>
2867
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
2868
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
2869
  const auto initial_size = c->size();
2870
  for (auto it = c->begin(), last = c->end(); it != last;) {
2871
    if (pred(*it)) {
2872
      c->erase(it++);
2873
    } else {
2874
      ++it;
2875
    }
2876
  }
2877
  return initial_size - c->size();
2878
}
2879
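The public containers surface this helper as absl::erase_if; a small usage sketch (assuming the erase_if overloads shipped alongside the Abseil containers, which return the number of erased elements):

#include <cstdio>
#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int> s = {1, 2, 3, 4, 5, 6};
  // Drop the odd keys in a single pass over the table.
  auto erased = absl::erase_if(s, [](int v) { return v % 2 != 0; });
  std::printf("erased=%llu remaining=%llu\n",
              static_cast<unsigned long long>(erased),
              static_cast<unsigned long long>(s.size()));
}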
2880
namespace hashtable_debug_internal {
2881
template <typename Set>
2882
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
2883
  using Traits = typename Set::PolicyTraits;
2884
  using Slot = typename Traits::slot_type;
2885
2886
  static size_t GetNumProbes(const Set& set,
2887
                             const typename Set::key_type& key) {
2888
    size_t num_probes = 0;
2889
    size_t hash = set.hash_ref()(key);
2890
    auto seq = probe(set.common(), hash);
2891
    const ctrl_t* ctrl = set.control();
2892
    while (true) {
2893
      container_internal::Group g{ctrl + seq.offset()};
2894
      for (uint32_t i : g.Match(container_internal::H2(hash))) {
2895
        if (Traits::apply(
2896
                typename Set::template EqualElement<typename Set::key_type>{
2897
                    key, set.eq_ref()},
2898
                Traits::element(set.slot_array() + seq.offset(i))))
2899
          return num_probes;
2900
        ++num_probes;
2901
      }
2902
      if (g.MaskEmpty()) return num_probes;
2903
      seq.next();
2904
      ++num_probes;
2905
    }
2906
  }
2907
2908
  static size_t AllocatedByteSize(const Set& c) {
2909
    size_t capacity = c.capacity();
2910
    if (capacity == 0) return 0;
2911
    size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));
2912
2913
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
2914
    if (per_slot != ~size_t{}) {
2915
      m += per_slot * c.size();
2916
    } else {
2917
      const ctrl_t* ctrl = c.control();
2918
      for (size_t i = 0; i != capacity; ++i) {
2919
        if (container_internal::IsFull(ctrl[i])) {
2920
          m += Traits::space_used(c.slot_array() + i);
2921
        }
2922
      }
2923
    }
2924
    return m;
2925
  }
2926
};
2927
2928
}  // namespace hashtable_debug_internal
2929
}  // namespace container_internal
2930
ABSL_NAMESPACE_END
2931
}  // namespace absl
2932
2933
#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
2934
2935
#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_