Coverage Report

Created: 2025-11-15 06:18

/src/abseil-cpp/absl/container/internal/raw_hash_set.cc
Line
Count
Source
1
// Copyright 2018 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
15
#include "absl/container/internal/raw_hash_set.h"
16
17
#include <atomic>
18
#include <cassert>
19
#include <cstddef>
20
#include <cstdint>
21
#include <cstring>
22
#include <tuple>
23
#include <utility>
24
25
#include "absl/base/attributes.h"
26
#include "absl/base/config.h"
27
#include "absl/base/dynamic_annotations.h"
28
#include "absl/base/internal/endian.h"
29
#include "absl/base/internal/raw_logging.h"
30
#include "absl/base/optimization.h"
31
#include "absl/container/internal/container_memory.h"
32
#include "absl/container/internal/hashtable_control_bytes.h"
33
#include "absl/container/internal/hashtablez_sampler.h"
34
#include "absl/container/internal/raw_hash_set_resize_impl.h"
35
#include "absl/functional/function_ref.h"
36
#include "absl/hash/hash.h"
37
38
namespace absl {
39
ABSL_NAMESPACE_BEGIN
40
namespace container_internal {
41
42
// Represents a control byte corresponding to a full slot with arbitrary hash.
43
0
constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
44
45
// A single control byte for default-constructed iterators. We leave it
46
// uninitialized because reading this memory is a bug.
47
ABSL_DLL ctrl_t kDefaultIterControl;
48
49
// We need one full byte followed by a sentinel byte for iterator::operator++.
50
ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[2] = {ZeroCtrlT(),
51
                                                        ctrl_t::kSentinel};
52
53
namespace {
54
55
#ifdef ABSL_SWISSTABLE_ASSERT
56
#error ABSL_SWISSTABLE_ASSERT cannot be directly set
57
#else
58
// We use this macro for assertions that users may see when the table is in an
59
// invalid state that sanitizers may help diagnose.
60
#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
61
112
  assert((CONDITION) && "Try enabling sanitizers.")
62
#endif
63
64
0
[[noreturn]] ABSL_ATTRIBUTE_NOINLINE void HashTableSizeOverflow() {
65
0
  ABSL_RAW_LOG(FATAL, "Hash table size overflow");
66
0
}
67
68
0
void ValidateMaxSize(size_t size, size_t slot_size) {
69
0
  if (IsAboveValidSize(size, slot_size)) {
70
0
    HashTableSizeOverflow();
71
0
  }
72
0
}
73
74
// Returns "random" seed.
75
0
inline size_t RandomSeed() {
76
0
#ifdef ABSL_HAVE_THREAD_LOCAL
77
0
  static thread_local size_t counter = 0;
78
0
  size_t value = ++counter;
79
#else   // ABSL_HAVE_THREAD_LOCAL
80
  static std::atomic<size_t> counter(0);
81
  size_t value = counter.fetch_add(1, std::memory_order_relaxed);
82
#endif  // ABSL_HAVE_THREAD_LOCAL
83
0
  return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
84
0
}
85
86
0
bool ShouldRehashForBugDetection(size_t capacity) {
87
  // Note: we can't use the abseil-random library because abseil-random
88
  // depends on swisstable. We want to return true with probability
89
  // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
90
  // we probe based on a random hash and see if the offset is less than
91
  // RehashProbabilityConstant().
92
0
  return probe(capacity, absl::HashOf(RandomSeed())).offset() <
93
0
         RehashProbabilityConstant();
94
0
}
95
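The comment in ShouldRehashForBugDetection above relies on the initial probe offset of a random hash being (approximately) uniform over the table, so checking `offset() < RehashProbabilityConstant()` fires with probability about min(1, RehashProbabilityConstant() / capacity()). A minimal standalone sketch of that argument; the constant's value and the uniform-offset model are illustrative assumptions, not the real abseil internals.

#include <cstddef>
#include <cstdio>
#include <random>

// Simplified model: the probe for a random hash starts at a uniformly random
// offset in [0, capacity], so returning true iff that offset is below a fixed
// constant yields probability ~ constant / (capacity + 1).
int main() {
  constexpr std::size_t kRehashProbabilityConstant = 16;  // illustrative value
  constexpr std::size_t kCapacity = 1023;                 // 2^10 - 1
  std::mt19937_64 rng(42);
  std::uniform_int_distribution<std::size_t> dist(0, kCapacity);
  constexpr std::size_t kTrials = 1000000;
  std::size_t hits = 0;
  for (std::size_t i = 0; i < kTrials; ++i) {
    hits += dist(rng) < kRehashProbabilityConstant ? 1 : 0;
  }
  std::printf("observed p = %.5f, expected ~ %.5f\n",
              static_cast<double>(hits) / kTrials,
              static_cast<double>(kRehashProbabilityConstant) / (kCapacity + 1));
  return 0;
}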
96
// Finds a non-deterministic hash for a single-group table.
97
// Last two bits are used to find a position for a newly inserted element after
98
// resize.
99
// This function essentially reuses the last bits of H2 to save a shift operation.
100
6
size_t SingleGroupTableH1(size_t hash, PerTableSeed seed) {
101
6
  return hash ^ seed.seed();
102
6
}
103
104
// Returns the offset of the new element after resize from capacity 1 to 3.
105
2
size_t Resize1To3NewOffset(size_t hash, PerTableSeed seed) {
106
  // After resize from capacity 1 to 3, we always have exactly the slot with
107
  // index 1 occupied, so we need to insert either at index 0 or index 2.
108
2
  static_assert(SooSlotIndex() == 1);
109
2
  return SingleGroupTableH1(hash, seed) & 2;
110
2
}
111
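Resize1To3NewOffset above exploits that masking with `& 2` can only yield 0 or 2, which are exactly the two free slots after growing from capacity 1 to 3 (index 1 holds the old SOO element). A tiny self-contained illustration; the xor seed mixing is a stand-in for PerTableSeed, used only so the sketch compiles on its own.

#include <cassert>
#include <cstddef>

// Model of the index choice after growing from capacity 1 to capacity 3:
// slot 1 already holds the old SOO element, so the new element must land on
// index 0 or index 2. Masking the (seed-mixed) hash with 2 never yields 1.
std::size_t Resize1To3NewOffsetModel(std::size_t hash, std::size_t seed) {
  return (hash ^ seed) & 2;
}

int main() {
  for (std::size_t hash = 0; hash < 64; ++hash) {
    const std::size_t offset = Resize1To3NewOffsetModel(hash, /*seed=*/0x5a);
    assert(offset == 0 || offset == 2);  // never the occupied index 1
  }
  return 0;
}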
112
// Returns the address of the ith slot in `slot_array`, where each slot
113
// occupies `slot_size` bytes.
114
2
inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
115
2
  return static_cast<void*>(static_cast<char*>(slot_array) +
116
2
                            (slot * slot_size));
117
2
}
118
119
// Returns the address of the slot `i` iterations after `slot` assuming each
120
// slot has the specified size.
121
6
inline void* NextSlot(void* slot, size_t slot_size, size_t i = 1) {
122
6
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) +
123
6
                                 slot_size * i);
124
6
}
125
126
// Returns the address of the slot just before `slot` assuming each slot has the
127
// specified size.
128
0
inline void* PrevSlot(void* slot, size_t slot_size) {
129
0
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
130
0
}
131
132
}  // namespace
133
134
// Must be defined out-of-line to avoid MSVC error C2482 on some platforms,
135
// which is caused by non-constexpr initialization.
136
2
uint16_t HashtableSize::NextSeed() {
137
2
  static_assert(PerTableSeed::kBitCount == 16);
138
2
  thread_local uint16_t seed =
139
2
      static_cast<uint16_t>(reinterpret_cast<uintptr_t>(&seed));
140
2
  seed += uint16_t{0xad53};
141
2
  return seed;
142
2
}
143
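HashtableSize::NextSeed above advances a thread-local 16-bit seed by 0xad53. Because that step is odd, and therefore coprime with 2^16, successive calls on a single thread cycle through all 65536 seed values before repeating. A quick standalone check of that full-period property:

#include <bitset>
#include <cstdint>
#include <cstdio>

int main() {
  std::bitset<65536> seen;
  uint16_t seed = 0;
  for (int i = 0; i < 65536; ++i) {
    seed += uint16_t{0xad53};  // odd step => full period modulo 2^16
    seen.set(seed);
  }
  std::printf("distinct seeds: %zu\n", seen.count());  // prints 65536
  return 0;
}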
144
0
GenerationType* EmptyGeneration() {
145
0
  if (SwisstableGenerationsEnabled()) {
146
0
    constexpr size_t kNumEmptyGenerations = 1024;
147
0
    static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{};
148
0
    return const_cast<GenerationType*>(
149
0
        &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);
150
0
  }
151
0
  return nullptr;
152
0
}
153
154
bool CommonFieldsGenerationInfoEnabled::
155
0
    should_rehash_for_bug_detection_on_insert(size_t capacity) const {
156
0
  if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
157
0
  if (reserved_growth_ > 0) return false;
158
0
  return ShouldRehashForBugDetection(capacity);
159
0
}
160
161
bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
162
0
    size_t capacity) const {
163
0
  return ShouldRehashForBugDetection(capacity);
164
0
}
165
166
namespace {
167
168
// Probes an array of control bits using a probe sequence,
169
// and returns the mask corresponding to the first group with a deleted or empty
170
// slot.
171
inline Group::NonIterableBitMaskType probe_till_first_non_full_group(
172
    const ctrl_t* ctrl, probe_seq<Group::kWidth>& seq,
173
0
    [[maybe_unused]] size_t capacity) {
174
0
  while (true) {
175
0
    GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
176
0
    auto mask = g.MaskEmptyOrDeleted();
177
0
    if (mask) {
178
0
      return mask;
179
0
    }
180
0
    seq.next();
181
0
    ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity && "full table!");
182
0
  }
183
0
}
184
185
FindInfo find_first_non_full_from_h1(const ctrl_t* ctrl, size_t h1,
186
0
                                     size_t capacity) {
187
0
  auto seq = probe_h1(capacity, h1);
188
0
  if (IsEmptyOrDeleted(ctrl[seq.offset()])) {
189
0
    return {seq.offset(), /*probe_length=*/0};
190
0
  }
191
0
  auto mask = probe_till_first_non_full_group(ctrl, seq, capacity);
192
0
  return {seq.offset(mask.LowestBitSet()), seq.index()};
193
0
}
194
195
// Probes an array of control bits using a probe sequence derived from `hash`,
196
// and returns the offset corresponding to the first deleted or empty slot.
197
//
198
// Behavior when the entire table is full is undefined.
199
//
200
// NOTE: this function must work with tables having both empty and deleted
201
// slots in the same group. Such tables appear during `erase()`.
202
0
FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
203
0
  return find_first_non_full_from_h1(common.control(), H1(hash),
204
0
                                     common.capacity());
205
0
}
206
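find_first_non_full walks the table group by group using a triangular probe sequence over a power-of-two-minus-one capacity. A simplified standalone model of that sequence (fixed width, plain loop instead of probe_seq) verifying the property the assertion in probe_till_first_non_full_group depends on: the first capacity/width probes all start at distinct offsets, so a table with a free slot is always found.

#include <cassert>
#include <cstddef>
#include <set>

// Simplified model of the swisstable probe sequence: start at h1 & capacity,
// then advance by width, 2*width, 3*width, ... (a triangular progression).
// For capacities of the form 2^k - 1 the first (capacity + 1) / width probes
// all land on distinct offsets, so every group is examined exactly once.
int main() {
  constexpr std::size_t kWidth = 16;
  constexpr std::size_t kCapacity = 255;  // 2^8 - 1
  const std::size_t kNumGroups = (kCapacity + 1) / kWidth;
  for (std::size_t h1 = 0; h1 <= kCapacity; ++h1) {
    std::size_t offset = h1 & kCapacity;
    std::size_t index = 0;
    std::set<std::size_t> visited;
    for (std::size_t g = 0; g < kNumGroups; ++g) {
      visited.insert(offset);
      index += kWidth;
      offset = (offset + index) & kCapacity;
    }
    assert(visited.size() == kNumGroups);  // no group offset repeats
  }
  return 0;
}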
207
// Same as `find_first_non_full`, but returns the mask corresponding to the
208
// first group with a deleted or empty slot.
209
std::pair<FindInfo, Group::NonIterableBitMaskType> find_first_non_full_group(
210
0
    const CommonFields& common, size_t hash) {
211
0
  auto seq = probe(common, hash);
212
0
  auto mask =
213
0
      probe_till_first_non_full_group(common.control(), seq, common.capacity());
214
0
  return {{seq.offset(), seq.index()}, mask};
215
0
}
216
217
// Whether a table fits in half a group. A half-group table fits entirely into a
218
// probing group, i.e., has a capacity < `Group::kWidth`.
219
//
220
// In half-group mode we are able to use the whole capacity. The extra control
221
// bytes give us at least one "empty" control byte to stop the iteration.
222
// This is important to make 1 a valid capacity.
223
//
224
// In half-group mode only the first `capacity` control bytes after the sentinel
225
// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
226
// represent a real slot.
227
0
constexpr bool is_half_group(size_t capacity) {
228
0
  return capacity < Group::kWidth - 1;
229
0
}
230
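A short compile-time restatement of is_half_group for the common 16-byte group width (an assumption here; on targets without SIMD the width is 8): among the valid capacities of the form 2^n - 1, exactly 1, 3 and 7 are half-group.

#include <cstddef>

// Model of is_half_group for Group::kWidth == 16.
constexpr bool IsHalfGroupModel(std::size_t capacity, std::size_t width) {
  return capacity < width - 1;
}

static_assert(IsHalfGroupModel(1, 16), "capacity 1 is half-group");
static_assert(IsHalfGroupModel(3, 16), "capacity 3 is half-group");
static_assert(IsHalfGroupModel(7, 16), "capacity 7 is half-group");
static_assert(!IsHalfGroupModel(15, 16), "capacity 15 fills a whole group");

int main() { return 0; }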
231
template <class Fn>
232
0
void IterateOverFullSlotsImpl(const CommonFields& c, size_t slot_size, Fn cb) {
233
0
  const size_t cap = c.capacity();
234
0
  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(cap));
235
0
  const ctrl_t* ctrl = c.control();
236
0
  void* slot = c.slot_array();
237
0
  if (is_half_group(cap)) {
238
    // Mirrored/cloned control bytes in a half-group table are also located in
239
    // the first group (starting from position 0). We take the group starting
240
    // at position `capacity` in order to avoid duplicates.
241
242
    // A half-group table's capacity fits into a portable group, and
243
    // GroupPortableImpl::MaskFull is more efficient when
244
    // capacity <= GroupPortableImpl::kWidth.
245
0
    ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&
246
0
                           "unexpectedly large half-group capacity");
247
0
    static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
248
0
                  "unexpected group width");
249
    // Group starts from kSentinel slot, so indices in the mask will
250
    // be increased by 1.
251
0
    const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
252
0
    --ctrl;
253
0
    slot = PrevSlot(slot, slot_size);
254
0
    for (uint32_t i : mask) {
255
0
      cb(ctrl + i, SlotAddress(slot, i, slot_size));
256
0
    }
257
0
    return;
258
0
  }
259
0
  size_t remaining = c.size();
260
0
  ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
261
0
  while (remaining != 0) {
262
0
    for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
263
0
      ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&
264
0
                             "hash table was modified unexpectedly");
265
0
      cb(ctrl + i, SlotAddress(slot, i, slot_size));
266
0
      --remaining;
267
0
    }
268
0
    ctrl += Group::kWidth;
269
0
    slot = NextSlot(slot, slot_size, Group::kWidth);
270
0
    ABSL_SWISSTABLE_ASSERT(
271
0
        (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
272
0
        "hash table was modified unexpectedly");
273
0
  }
274
  // NOTE: erasure of the current element is allowed in the callback for the
275
  // absl::erase_if specialization. So we use `>=`.
276
0
  ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&
277
0
                         "hash table was modified unexpectedly");
278
0
}
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)> >(absl::container_internal::CommonFields const&, unsigned long, absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)>)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0>(absl::container_internal::CommonFields const&, unsigned long, absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0)
279
280
}  // namespace
281
282
0
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
283
0
  ABSL_SWISSTABLE_ASSERT(ctrl[capacity] == ctrl_t::kSentinel);
284
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
285
0
  for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
286
0
    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
287
0
  }
288
  // Copy the cloned ctrl bytes.
289
0
  std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
290
0
  ctrl[capacity] = ctrl_t::kSentinel;
291
0
}
292
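ConvertDeletedToEmptyAndFullToDeleted is the first phase of the in-place rehash used by DropDeletesWithoutResizeAndPrepareInsert below: tombstones become reusable and full slots are tagged for reinsertion. A standalone model over a plain byte array; the ctrl values mirror the usual swisstable encoding (full slots hold a non-negative h2, specials are negative), and the sentinel and cloned-byte handling of the real function is omitted.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative ctrl encoding: full slots hold a non-negative 7-bit hash,
// special slots are negative.
constexpr std::int8_t kEmptyByte = -128;
constexpr std::int8_t kDeletedByte = -2;
inline bool IsFullByte(std::int8_t c) { return c >= 0; }

// Model: kDeleted -> kEmpty, full -> kDeleted, kEmpty stays kEmpty.
void ConvertSpecialToEmptyAndFullToDeletedModel(std::vector<std::int8_t>& ctrl) {
  for (std::int8_t& c : ctrl) c = IsFullByte(c) ? kDeletedByte : kEmptyByte;
}

int main() {
  std::vector<std::int8_t> ctrl = {kEmptyByte, 0x35, kDeletedByte, 0x12};
  ConvertSpecialToEmptyAndFullToDeletedModel(ctrl);
  assert(ctrl[0] == kEmptyByte && ctrl[1] == kDeletedByte);
  assert(ctrl[2] == kEmptyByte && ctrl[3] == kDeletedByte);
  return 0;
}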
293
void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
294
0
                          absl::FunctionRef<void(const ctrl_t*, void*)> cb) {
295
0
  IterateOverFullSlotsImpl(c, slot_size, cb);
296
0
}
297
298
namespace {
299
300
4
void ResetGrowthLeft(GrowthInfo& growth_info, size_t capacity, size_t size) {
301
4
  growth_info.InitGrowthLeftNoDeleted(CapacityToGrowth(capacity) - size);
302
4
}
303
304
0
void ResetGrowthLeft(CommonFields& common) {
305
0
  ResetGrowthLeft(common.growth_info(), common.capacity(), common.size());
306
0
}
307
308
// Finds an empty slot, which is guaranteed to exist, starting from the given position.
309
// NOTE: this function is almost never triggered inside
310
// DropDeletesWithoutResize, so we keep it simple.
311
// The table is rather sparse, so an empty slot will be found very quickly.
312
0
size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) {
313
0
  for (size_t i = start; i < end; ++i) {
314
0
    if (IsEmpty(ctrl[i])) {
315
0
      return i;
316
0
    }
317
0
  }
318
0
  ABSL_UNREACHABLE();
319
0
}
320
321
// Finds a full slot, which is guaranteed to exist, starting from the given position.
322
// NOTE: this function is only triggered for rehash(0), when we need to
323
// go back to SOO state, so we keep it simple.
324
0
size_t FindFirstFullSlot(size_t start, size_t end, const ctrl_t* ctrl) {
325
0
  for (size_t i = start; i < end; ++i) {
326
0
    if (IsFull(ctrl[i])) {
327
0
      return i;
328
0
    }
329
0
  }
330
0
  ABSL_UNREACHABLE();
331
0
}
332
333
16
void PrepareInsertCommon(CommonFields& common) {
334
16
  common.increment_size();
335
16
  common.maybe_increment_generation_on_insert();
336
16
}
337
338
// Sets sanitizer poisoning for slot corresponding to control byte being set.
339
inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
340
12
                                size_t slot_size) {
341
12
  ABSL_SWISSTABLE_ASSERT(i < c.capacity());
342
12
  auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
343
12
  if (IsFull(h)) {
344
12
    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
345
12
  } else {
346
0
    SanitizerPoisonMemoryRegion(slot_i, slot_size);
347
0
  }
348
12
}
349
350
// Sets `ctrl[i]` to `h`.
351
//
352
// Unlike setting it directly, this function will perform bounds checks and
353
// mirror the value to the cloned tail if necessary.
354
inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
355
8
                    size_t slot_size) {
356
8
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
357
8
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
358
8
  ctrl_t* ctrl = c.control();
359
8
  ctrl[i] = h;
360
8
  ctrl[((i - NumClonedBytes()) & c.capacity()) +
361
8
       (NumClonedBytes() & c.capacity())] = h;
362
8
}
363
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
364
8
inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
365
8
  SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
366
8
}
367
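The index expression in SetCtrl above folds two cases into one branch-free store: the first NumClonedBytes() positions are mirrored into the cloned tail that follows the sentinel, and every other position simply stores to itself a second time. A small standalone check of that mapping, assuming NumClonedBytes() == Group::kWidth - 1 == 15 and power-of-two-minus-one capacities:

#include <cassert>
#include <cstddef>

// Mirror index used by SetCtrl, written out with a fixed cloned-byte count.
// For i < kNumCloned the result is capacity + 1 + i (inside the cloned tail
// after the sentinel); otherwise it is simply i, a harmless duplicate store.
std::size_t MirrorIndex(std::size_t i, std::size_t capacity,
                        std::size_t num_cloned) {
  return ((i - num_cloned) & capacity) + (num_cloned & capacity);
}

int main() {
  constexpr std::size_t kNumCloned = 15;  // Group::kWidth - 1 for 16-byte groups
  for (std::size_t capacity : {7u, 15u, 31u, 63u}) {
    for (std::size_t i = 0; i < capacity; ++i) {
      const std::size_t m = MirrorIndex(i, capacity, kNumCloned);
      if (i < kNumCloned) {
        assert(m == capacity + 1 + i);  // mirrored into the cloned tail
      } else {
        assert(m == i);  // rewrites the same byte
      }
    }
  }
  return 0;
}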
368
// Like SetCtrl, but in a single group table, we can save some operations when
369
// setting the cloned control byte.
370
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
371
4
                                      size_t slot_size) {
372
4
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
373
4
  ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity()));
374
4
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
375
4
  ctrl_t* ctrl = c.control();
376
4
  ctrl[i] = h;
377
4
  ctrl[i + c.capacity() + 1] = h;
378
4
}
379
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
380
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
381
4
                                      size_t slot_size) {
382
4
  SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
383
4
}
384
385
// Like SetCtrl, but in a table with capacity >= Group::kWidth - 1,
386
// we can save some operations when setting the cloned control byte.
387
inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, ctrl_t h,
388
0
                                size_t slot_size) {
389
0
  ABSL_SWISSTABLE_ASSERT(c.capacity() >= Group::kWidth - 1);
390
0
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
391
0
  ctrl_t* ctrl = c.control();
392
0
  ctrl[i] = h;
393
0
  ctrl[((i - NumClonedBytes()) & c.capacity()) + NumClonedBytes()] = h;
394
0
}
395
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
396
inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, h2_t h,
397
0
                                size_t slot_size) {
398
0
  SetCtrlInLargeTable(c, i, static_cast<ctrl_t>(h), slot_size);
399
0
}
400
401
size_t DropDeletesWithoutResizeAndPrepareInsert(
402
    CommonFields& common, const PolicyFunctions& __restrict policy,
403
0
    size_t new_hash) {
404
0
  void* set = &common;
405
0
  void* slot_array = common.slot_array();
406
0
  const size_t capacity = common.capacity();
407
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
408
0
  ABSL_SWISSTABLE_ASSERT(!is_single_group(capacity));
409
  // Algorithm:
410
  // - mark all DELETED slots as EMPTY
411
  // - mark all FULL slots as DELETED
412
  // - for each slot marked as DELETED
413
  //     hash = Hash(element)
414
  //     target = find_first_non_full(hash)
415
  //     if target is in the same group
416
  //       mark slot as FULL
417
  //     else if target is EMPTY
418
  //       transfer element to target
419
  //       mark slot as EMPTY
420
  //       mark target as FULL
421
  //     else if target is DELETED
422
  //       swap current element with target element
423
  //       mark target as FULL
424
  //       repeat procedure for current slot with moved from element (target)
425
0
  ctrl_t* ctrl = common.control();
426
0
  ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
427
0
  const void* hash_fn = policy.hash_fn(common);
428
0
  auto hasher = policy.hash_slot;
429
0
  auto transfer_n = policy.transfer_n;
430
0
  const size_t slot_size = policy.slot_size;
431
432
0
  size_t total_probe_length = 0;
433
0
  void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
434
435
  // The index of an empty slot that can be used as temporary memory for
436
  // the swap operation.
437
0
  constexpr size_t kUnknownId = ~size_t{};
438
0
  size_t tmp_space_id = kUnknownId;
439
440
0
  for (size_t i = 0; i != capacity;
441
0
       ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
442
0
    ABSL_SWISSTABLE_ASSERT(slot_ptr == SlotAddress(slot_array, i, slot_size));
443
0
    if (IsEmpty(ctrl[i])) {
444
0
      tmp_space_id = i;
445
0
      continue;
446
0
    }
447
0
    if (!IsDeleted(ctrl[i])) continue;
448
0
    const size_t hash = (*hasher)(hash_fn, slot_ptr, common.seed().seed());
449
0
    const FindInfo target = find_first_non_full(common, hash);
450
0
    const size_t new_i = target.offset;
451
0
    total_probe_length += target.probe_length;
452
453
    // Check whether the old and new i fall within the same group wrt the hash.
454
    // If they do, we don't need to move the object, as it already falls in the
455
    // best probe position we can get.
456
0
    const size_t probe_offset = probe(common, hash).offset();
457
0
    const h2_t h2 = H2(hash);
458
0
    const auto probe_index = [probe_offset, capacity](size_t pos) {
459
0
      return ((pos - probe_offset) & capacity) / Group::kWidth;
460
0
    };
461
462
    // Element doesn't move.
463
0
    if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
464
0
      SetCtrlInLargeTable(common, i, h2, slot_size);
465
0
      continue;
466
0
    }
467
468
0
    void* new_slot_ptr = SlotAddress(slot_array, new_i, slot_size);
469
0
    if (IsEmpty(ctrl[new_i])) {
470
      // Transfer element to the empty spot.
471
      // SetCtrl poisons/unpoisons the slots so we have to call it at the
472
      // right time.
473
0
      SetCtrlInLargeTable(common, new_i, h2, slot_size);
474
0
      (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
475
0
      SetCtrlInLargeTable(common, i, ctrl_t::kEmpty, slot_size);
476
      // Initialize or change empty space id.
477
0
      tmp_space_id = i;
478
0
    } else {
479
0
      ABSL_SWISSTABLE_ASSERT(IsDeleted(ctrl[new_i]));
480
0
      SetCtrlInLargeTable(common, new_i, h2, slot_size);
481
      // Until we are done rehashing, DELETED marks previously FULL slots.
482
483
0
      if (tmp_space_id == kUnknownId) {
484
0
        tmp_space_id = FindEmptySlot(i + 1, capacity, ctrl);
485
0
      }
486
0
      void* tmp_space = SlotAddress(slot_array, tmp_space_id, slot_size);
487
0
      SanitizerUnpoisonMemoryRegion(tmp_space, slot_size);
488
489
      // Swap i and new_i elements.
490
0
      (*transfer_n)(set, tmp_space, new_slot_ptr, 1);
491
0
      (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
492
0
      (*transfer_n)(set, slot_ptr, tmp_space, 1);
493
494
0
      SanitizerPoisonMemoryRegion(tmp_space, slot_size);
495
496
      // repeat the processing of the ith slot
497
0
      --i;
498
0
      slot_ptr = PrevSlot(slot_ptr, slot_size);
499
0
    }
500
0
  }
501
  // Prepare insert for the new element.
502
0
  PrepareInsertCommon(common);
503
0
  ResetGrowthLeft(common);
504
0
  FindInfo find_info = find_first_non_full(common, new_hash);
505
0
  SetCtrlInLargeTable(common, find_info.offset, H2(new_hash), slot_size);
506
0
  common.infoz().RecordInsertMiss(new_hash, find_info.probe_length);
507
0
  common.infoz().RecordRehash(total_probe_length);
508
0
  return find_info.offset;
509
0
}
510
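The "element doesn't move" check inside DropDeletesWithoutResizeAndPrepareInsert compares which probing group the old and new positions fall into, measured from the element's own probe start. A tiny worked model of that probe_index computation (fixed width 16, capacity 63, illustrative probe offset):

#include <cassert>
#include <cstddef>

// probe_index maps a table position to the index of the 16-wide probing
// group it belongs to, counted from the element's probe start. Two positions
// with the same probe_index are equally good, so the element can stay put.
int main() {
  constexpr std::size_t kWidth = 16;
  constexpr std::size_t kCapacity = 63;
  const std::size_t probe_offset = 40;  // illustrative probe start
  const auto probe_index = [&](std::size_t pos) {
    return ((pos - probe_offset) & kCapacity) / kWidth;
  };
  assert(probe_index(40) == 0);  // the probe start itself
  assert(probe_index(55) == 0);  // still inside the first probing group
  assert(probe_index(56) == 1);  // the next probing group
  assert(probe_index(7) == 1);   // wraps around: (7 - 40) mod 64 == 31
  return 0;
}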
511
0
bool WasNeverFull(CommonFields& c, size_t index) {
512
0
  if (is_single_group(c.capacity())) {
513
0
    return true;
514
0
  }
515
0
  const size_t index_before = (index - Group::kWidth) & c.capacity();
516
0
  const auto empty_after = Group(c.control() + index).MaskEmpty();
517
0
  const auto empty_before = Group(c.control() + index_before).MaskEmpty();
518
519
  // We count how many consecutive non-empty slots we have to the right and to the
520
  // left of `it`. If the sum is >= kWidth then there is at least one probe
521
  // window that might have seen a full group.
522
0
  return empty_before && empty_after &&
523
0
         static_cast<size_t>(empty_after.TrailingZeros()) +
524
0
                 empty_before.LeadingZeros() <
525
0
             Group::kWidth;
526
0
}
527
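WasNeverFull above decides whether an erased slot can be marked kEmpty instead of kDeleted: if every probe window covering the slot still contains an empty byte, no probe sequence can ever have skipped past that slot, so no tombstone is needed. A simplified model over a plain boolean array (linear scans instead of the SIMD masks; values are illustrative):

#include <cassert>
#include <cstddef>
#include <vector>

// Model: count the non-empty run to the left of `index` and the run starting
// at `index` itself (the slot being erased is still full). If the combined
// run is shorter than the group width, every probe window covering `index`
// still has an empty byte, so the slot can be re-marked kEmpty.
bool WasNeverFullModel(const std::vector<bool>& empty, std::size_t index,
                       std::size_t width) {
  const std::size_t cap = empty.size() - 1;  // capacity of the form 2^k - 1
  std::size_t before = 0;
  for (std::size_t i = (index + cap) & cap; !empty[i] && before < width;
       i = (i + cap) & cap) {
    ++before;
  }
  std::size_t after = 1;  // the erased slot itself is currently full
  for (std::size_t i = (index + 1) & cap; !empty[i] && after < width;
       i = (i + 1) & cap) {
    ++after;
  }
  return before + after < width;
}

int main() {
  constexpr std::size_t kWidth = 16;
  std::vector<bool> empty(64, true);          // capacity 63, all empty
  empty[10] = empty[11] = empty[12] = false;  // a short full run
  assert(WasNeverFullModel(empty, 11, kWidth));   // safe to mark kEmpty
  for (std::size_t i = 20; i < 40; ++i) empty[i] = false;  // a long full run
  assert(!WasNeverFullModel(empty, 30, kWidth));  // must leave a tombstone
  return 0;
}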
528
// Updates the control bytes to indicate a completely empty table such that all
529
// control bytes are kEmpty except for the kSentinel byte.
530
0
void ResetCtrl(CommonFields& common, size_t slot_size) {
531
0
  const size_t capacity = common.capacity();
532
0
  ctrl_t* ctrl = common.control();
533
0
  static constexpr size_t kTwoGroupCapacity = 2 * Group::kWidth - 1;
534
0
  if (ABSL_PREDICT_TRUE(capacity <= kTwoGroupCapacity)) {
535
0
    if (IsSmallCapacity(capacity)) return;
536
0
    std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
537
0
    std::memset(ctrl + capacity, static_cast<int8_t>(ctrl_t::kEmpty),
538
0
                Group::kWidth);
539
0
    if (capacity == kTwoGroupCapacity) {
540
0
      std::memset(ctrl + Group::kWidth, static_cast<int8_t>(ctrl_t::kEmpty),
541
0
                  Group::kWidth);
542
0
    }
543
0
  } else {
544
0
    std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
545
0
                capacity + 1 + NumClonedBytes());
546
0
  }
547
0
  ctrl[capacity] = ctrl_t::kSentinel;
548
0
  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
549
0
}
550
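ResetCtrl above covers the whole control array, capacity + 1 sentinel + NumClonedBytes() cloned bytes, with at most three group-sized memsets when the capacity is at most two groups. A quick standalone check that those writes really touch every byte for each valid capacity in that range (assuming a 16-byte group and 15 cloned bytes; capacity 1 is excluded because the small-capacity path returns before any memset):

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  constexpr std::size_t kWidth = 16;
  constexpr std::size_t kNumCloned = kWidth - 1;
  // Valid capacities of the form 2^n - 1 that take the <= two-groups branch.
  for (std::size_t capacity : {3u, 7u, 15u, 31u}) {
    std::vector<int> ctrl(capacity + 1 + kNumCloned, 0);
    const auto fill = [&](std::size_t start, std::size_t len) {
      for (std::size_t i = 0; i < len; ++i) ctrl[start + i] = 1;
    };
    fill(0, kWidth);         // first memset
    fill(capacity, kWidth);  // second memset, anchored at the sentinel slot
    if (capacity == 2 * kWidth - 1) fill(kWidth, kWidth);  // third memset
    for (int byte : ctrl) assert(byte == 1);  // every control byte written
  }
  return 0;
}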
551
// Initializes control bytes for growing from capacity 1 to 3.
552
// `orig_h2` is placed at position `SooSlotIndex()`.
553
// `new_h2` is placed at position `new_offset`.
554
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void InitializeThreeElementsControlBytes(
555
2
    h2_t orig_h2, h2_t new_h2, size_t new_offset, ctrl_t* new_ctrl) {
556
2
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
557
2
  static_assert(kNewCapacity == 3);
558
2
  static_assert(is_single_group(kNewCapacity));
559
2
  static_assert(SooSlotIndex() == 1);
560
2
  ABSL_SWISSTABLE_ASSERT(new_offset == 0 || new_offset == 2);
561
562
2
  static constexpr uint64_t kEmptyXorSentinel =
563
2
      static_cast<uint8_t>(ctrl_t::kEmpty) ^
564
2
      static_cast<uint8_t>(ctrl_t::kSentinel);
565
2
  static constexpr uint64_t kEmpty64 = static_cast<uint8_t>(ctrl_t::kEmpty);
566
2
  static constexpr size_t kMirroredSooSlotIndex =
567
2
      SooSlotIndex() + kNewCapacity + 1;
568
  // The first 8 bytes, where SOO slot original and mirrored positions are
569
  // replaced with 0.
570
  // Result will look like: E0ESE0EE
571
2
  static constexpr uint64_t kFirstCtrlBytesWithZeroes =
572
2
      k8EmptyBytes ^ (kEmpty64 << (8 * SooSlotIndex())) ^
573
2
      (kEmptyXorSentinel << (8 * kNewCapacity)) ^
574
2
      (kEmpty64 << (8 * kMirroredSooSlotIndex));
575
576
2
  const uint64_t soo_h2 = static_cast<uint64_t>(orig_h2);
577
2
  const uint64_t new_h2_xor_empty =
578
2
      static_cast<uint64_t>(new_h2 ^ static_cast<uint8_t>(ctrl_t::kEmpty));
579
  // Fill the original and mirrored bytes for SOO slot.
580
  // Result will look like:
581
  // EHESEHEE
582
  // Where H = soo_h2, E = kEmpty, S = kSentinel.
583
2
  uint64_t first_ctrl_bytes =
584
2
      ((soo_h2 << (8 * SooSlotIndex())) | kFirstCtrlBytesWithZeroes) |
585
2
      (soo_h2 << (8 * kMirroredSooSlotIndex));
586
  // Replace original and mirrored empty bytes for the new position.
587
  // Result for new_offset 0 will look like:
588
  // NHESNHEE
589
  // Where H = soo_h2, N = H2(new_hash), E = kEmpty, S = kSentinel.
590
  // Result for new_offset 2 will look like:
591
  // EHNSEHNE
592
2
  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_offset));
593
2
  size_t new_mirrored_offset = new_offset + kNewCapacity + 1;
594
2
  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_mirrored_offset));
595
596
  // Fill last bytes with kEmpty.
597
2
  std::memset(new_ctrl + kNewCapacity, static_cast<int8_t>(ctrl_t::kEmpty),
598
2
              Group::kWidth);
599
  // Overwrite the first 8 bytes with first_ctrl_bytes.
600
2
  absl::little_endian::Store64(new_ctrl, first_ctrl_bytes);
601
602
  // Example for group size 16:
603
  // new_ctrl after 1st memset =      ???EEEEEEEEEEEEEEEE
604
  // new_offset 0:
605
  // new_ctrl after 2nd store  =      NHESNHEEEEEEEEEEEEE
606
  // new_offset 2:
607
  // new_ctrl after 2nd store  =      EHNSEHNEEEEEEEEEEEE
608
609
  // Example for group size 8:
610
  // new_ctrl after 1st memset =      ???EEEEEEEE
611
  // new_offset 0:
612
  // new_ctrl after 2nd store  =      NHESNHEEEEE
613
  // new_offset 2:
614
  // new_ctrl after 2nd store  =      EHNSEHNEEEE
615
2
}
616
617
}  // namespace
618
619
0
void EraseMetaOnlySmall(CommonFields& c, bool soo_enabled, size_t slot_size) {
620
0
  ABSL_SWISSTABLE_ASSERT(c.is_small());
621
0
  if (soo_enabled) {
622
0
    c.set_empty_soo();
623
0
    return;
624
0
  }
625
0
  c.decrement_size();
626
0
  c.infoz().RecordErase();
627
0
  SanitizerPoisonMemoryRegion(c.slot_array(), slot_size);
628
0
}
629
630
0
void EraseMetaOnlyLarge(CommonFields& c, const ctrl_t* ctrl, size_t slot_size) {
631
0
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
632
0
  ABSL_SWISSTABLE_ASSERT(IsFull(*ctrl) && "erasing a dangling iterator");
633
0
  c.decrement_size();
634
0
  c.infoz().RecordErase();
635
636
0
  size_t index = static_cast<size_t>(ctrl - c.control());
637
638
0
  if (WasNeverFull(c, index)) {
639
0
    SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
640
0
    c.growth_info().OverwriteFullAsEmpty();
641
0
    return;
642
0
  }
643
644
0
  c.growth_info().OverwriteFullAsDeleted();
645
0
  SetCtrlInLargeTable(c, index, ctrl_t::kDeleted, slot_size);
646
0
}
647
648
void ClearBackingArray(CommonFields& c,
649
                       const PolicyFunctions& __restrict policy, void* alloc,
650
0
                       bool reuse, bool soo_enabled) {
651
0
  if (reuse) {
652
0
    c.set_size_to_zero();
653
0
    ABSL_SWISSTABLE_ASSERT(!soo_enabled || c.capacity() > SooCapacity());
654
0
    ResetCtrl(c, policy.slot_size);
655
0
    ResetGrowthLeft(c);
656
0
    c.infoz().RecordStorageChanged(0, c.capacity());
657
0
  } else {
658
    // We need to record infoz before calling dealloc, which will unregister
659
    // infoz.
660
0
    c.infoz().RecordClearedReservation();
661
0
    c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);
662
0
    c.infoz().Unregister();
663
0
    (*policy.dealloc)(alloc, c.capacity(), c.control(), policy.slot_size,
664
0
                      policy.slot_align, c.has_infoz());
665
0
    c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{non_soo_tag_t{}};
666
0
  }
667
0
}
668
669
namespace {
670
671
enum class ResizeNonSooMode {
672
  kGuaranteedEmpty,
673
  kGuaranteedAllocated,
674
};
675
676
// Iterates over full slots in the old table, finds new positions for them and
677
// transfers the slots.
678
// This function is used for reserving or rehashing non-empty tables.
679
// This use case is rare so the function is type erased.
680
// Returns the total probe length.
681
size_t FindNewPositionsAndTransferSlots(
682
    CommonFields& common, const PolicyFunctions& __restrict policy,
683
0
    ctrl_t* old_ctrl, void* old_slots, size_t old_capacity) {
684
0
  void* new_slots = common.slot_array();
685
0
  const void* hash_fn = policy.hash_fn(common);
686
0
  const size_t slot_size = policy.slot_size;
687
0
  const size_t seed = common.seed().seed();
688
689
0
  const auto insert_slot = [&](void* slot) {
690
0
    size_t hash = policy.hash_slot(hash_fn, slot, seed);
691
0
    FindInfo target;
692
0
    if (common.is_small()) {
693
0
      target = FindInfo{0, 0};
694
0
    } else {
695
0
      target = find_first_non_full(common, hash);
696
0
      SetCtrl(common, target.offset, H2(hash), slot_size);
697
0
    }
698
0
    policy.transfer_n(&common, SlotAddress(new_slots, target.offset, slot_size),
699
0
                      slot, 1);
700
0
    return target.probe_length;
701
0
  };
702
0
  if (IsSmallCapacity(old_capacity)) {
703
0
    if (common.size() == 1) insert_slot(old_slots);
704
0
    return 0;
705
0
  }
706
0
  size_t total_probe_length = 0;
707
0
  for (size_t i = 0; i < old_capacity; ++i) {
708
0
    if (IsFull(old_ctrl[i])) {
709
0
      total_probe_length += insert_slot(old_slots);
710
0
    }
711
0
    old_slots = NextSlot(old_slots, slot_size);
712
0
  }
713
0
  return total_probe_length;
714
0
}
715
716
void ReportGrowthToInfozImpl(CommonFields& common, HashtablezInfoHandle infoz,
717
                             size_t hash, size_t total_probe_length,
718
0
                             size_t distance_from_desired) {
719
0
  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
720
0
  infoz.RecordStorageChanged(common.size() - 1, common.capacity());
721
0
  infoz.RecordRehash(total_probe_length);
722
0
  infoz.RecordInsertMiss(hash, distance_from_desired);
723
0
  common.set_has_infoz();
724
  // TODO(b/413062340): we could potentially store infoz in place of the
725
  // control pointer for the capacity 1 case.
726
0
  common.set_infoz(infoz);
727
0
}
728
729
// Specialization to avoid passing two 0s from a hot function.
730
ABSL_ATTRIBUTE_NOINLINE void ReportSingleGroupTableGrowthToInfoz(
731
0
    CommonFields& common, HashtablezInfoHandle infoz, size_t hash) {
732
0
  ReportGrowthToInfozImpl(common, infoz, hash, /*total_probe_length=*/0,
733
0
                          /*distance_from_desired=*/0);
734
0
}
735
736
ABSL_ATTRIBUTE_NOINLINE void ReportGrowthToInfoz(CommonFields& common,
737
                                                 HashtablezInfoHandle infoz,
738
                                                 size_t hash,
739
                                                 size_t total_probe_length,
740
0
                                                 size_t distance_from_desired) {
741
0
  ReportGrowthToInfozImpl(common, infoz, hash, total_probe_length,
742
0
                          distance_from_desired);
743
0
}
744
745
ABSL_ATTRIBUTE_NOINLINE void ReportResizeToInfoz(CommonFields& common,
746
                                                 HashtablezInfoHandle infoz,
747
0
                                                 size_t total_probe_length) {
748
0
  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
749
0
  infoz.RecordStorageChanged(common.size(), common.capacity());
750
0
  infoz.RecordRehash(total_probe_length);
751
0
  common.set_has_infoz();
752
0
  common.set_infoz(infoz);
753
0
}
754
755
struct BackingArrayPtrs {
756
  ctrl_t* ctrl;
757
  void* slots;
758
};
759
760
BackingArrayPtrs AllocBackingArray(CommonFields& common,
761
                                   const PolicyFunctions& __restrict policy,
762
                                   size_t new_capacity, bool has_infoz,
763
8
                                   void* alloc) {
764
8
  RawHashSetLayout layout(new_capacity, policy.slot_size, policy.slot_align,
765
8
                          has_infoz);
766
8
  char* mem = static_cast<char*>(policy.alloc(alloc, layout.alloc_size()));
767
8
  const GenerationType old_generation = common.generation();
768
8
  common.set_generation_ptr(
769
8
      reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
770
8
  common.set_generation(NextGeneration(old_generation));
771
772
8
  return {reinterpret_cast<ctrl_t*>(mem + layout.control_offset()),
773
8
          mem + layout.slot_offset()};
774
8
}
775
776
template <ResizeNonSooMode kMode>
777
void ResizeNonSooImpl(CommonFields& common,
778
                      const PolicyFunctions& __restrict policy,
779
0
                      size_t new_capacity, HashtablezInfoHandle infoz) {
780
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
781
0
  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
782
783
0
  [[maybe_unused]] const size_t old_capacity = common.capacity();
784
0
  [[maybe_unused]] ctrl_t* old_ctrl;
785
0
  [[maybe_unused]] void* old_slots;
786
0
  if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
787
0
    old_ctrl = common.control();
788
0
    old_slots = common.slot_array();
789
0
  }
790
791
0
  const size_t slot_size = policy.slot_size;
792
0
  [[maybe_unused]] const size_t slot_align = policy.slot_align;
793
0
  const bool has_infoz = infoz.IsSampled();
794
0
  void* alloc = policy.get_char_alloc(common);
795
796
0
  common.set_capacity(new_capacity);
797
0
  const auto [new_ctrl, new_slots] =
798
0
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
799
0
  common.set_control(new_ctrl);
800
0
  common.set_slots(new_slots);
801
0
  common.generate_new_seed(has_infoz);
802
803
0
  size_t total_probe_length = 0;
804
0
  ResetCtrl(common, slot_size);
805
0
  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedEmpty ||
806
0
                         old_capacity == policy.soo_capacity());
807
0
  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedAllocated ||
808
0
                         old_capacity > 0);
809
0
  if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
810
0
    total_probe_length = FindNewPositionsAndTransferSlots(
811
0
        common, policy, old_ctrl, old_slots, old_capacity);
812
0
    (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
813
0
                      has_infoz);
814
0
    ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
815
0
                    common.size());
816
0
  } else {
817
0
    GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(
818
0
        CapacityToGrowth(new_capacity));
819
0
  }
820
821
0
  if (ABSL_PREDICT_FALSE(has_infoz)) {
822
0
    ReportResizeToInfoz(common, infoz, total_probe_length);
823
0
  }
824
0
}
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)0>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)1>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
825
826
void ResizeEmptyNonAllocatedTableImpl(CommonFields& common,
827
                                      const PolicyFunctions& __restrict policy,
828
0
                                      size_t new_capacity, bool force_infoz) {
829
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
830
0
  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
831
0
  ABSL_SWISSTABLE_ASSERT(!force_infoz || policy.soo_enabled);
832
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= policy.soo_capacity());
833
0
  ABSL_SWISSTABLE_ASSERT(common.empty());
834
0
  const size_t slot_size = policy.slot_size;
835
0
  HashtablezInfoHandle infoz;
836
0
  const bool should_sample =
837
0
      policy.is_hashtablez_eligible && (force_infoz || ShouldSampleNextTable());
838
0
  if (ABSL_PREDICT_FALSE(should_sample)) {
839
0
    infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
840
0
                            policy.soo_capacity());
841
0
  }
842
0
  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedEmpty>(common, policy,
843
0
                                                       new_capacity, infoz);
844
0
}
845
846
// If the table was SOO, initializes new control bytes and transfers slot.
847
// After transferring the slot, sets control and slots in CommonFields.
848
// It is rare to resize an SOO table with one element to a large size.
849
// Requires: `c` contains SOO data.
850
void InsertOldSooSlotAndInitializeControlBytes(
851
    CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* new_ctrl,
852
0
    void* new_slots, bool has_infoz) {
853
0
  ABSL_SWISSTABLE_ASSERT(c.size() == policy.soo_capacity());
854
0
  ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
855
0
  size_t new_capacity = c.capacity();
856
857
0
  c.generate_new_seed(has_infoz);
858
859
0
  const size_t soo_slot_hash =
860
0
      policy.hash_slot(policy.hash_fn(c), c.soo_data(), c.seed().seed());
861
0
  size_t offset = probe(new_capacity, soo_slot_hash).offset();
862
0
  offset = offset == new_capacity ? 0 : offset;
863
0
  SanitizerPoisonMemoryRegion(new_slots, policy.slot_size * new_capacity);
864
0
  void* target_slot = SlotAddress(new_slots, offset, policy.slot_size);
865
0
  SanitizerUnpoisonMemoryRegion(target_slot, policy.slot_size);
866
0
  policy.transfer_n(&c, target_slot, c.soo_data(), 1);
867
0
  c.set_control(new_ctrl);
868
0
  c.set_slots(new_slots);
869
0
  ResetCtrl(c, policy.slot_size);
870
0
  SetCtrl(c, offset, H2(soo_slot_hash), policy.slot_size);
871
0
}
872
873
enum class ResizeFullSooTableSamplingMode {
874
  kNoSampling,
875
  // Force sampling. If the table was still not sampled, do not resize.
876
  kForceSampleNoResizeIfUnsampled,
877
};
878
879
void AssertSoo([[maybe_unused]] CommonFields& common,
880
0
               [[maybe_unused]] const PolicyFunctions& policy) {
881
0
  ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
882
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() == policy.soo_capacity());
883
0
}
884
void AssertFullSoo([[maybe_unused]] CommonFields& common,
885
0
                   [[maybe_unused]] const PolicyFunctions& policy) {
886
0
  AssertSoo(common, policy);
887
0
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
888
0
}
889
890
void ResizeFullSooTable(CommonFields& common,
891
                        const PolicyFunctions& __restrict policy,
892
                        size_t new_capacity,
893
0
                        ResizeFullSooTableSamplingMode sampling_mode) {
894
0
  AssertFullSoo(common, policy);
895
0
  const size_t slot_size = policy.slot_size;
896
0
  void* alloc = policy.get_char_alloc(common);
897
898
0
  HashtablezInfoHandle infoz;
899
0
  bool has_infoz = false;
900
0
  if (sampling_mode ==
901
0
      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled) {
902
0
    if (ABSL_PREDICT_FALSE(policy.is_hashtablez_eligible)) {
903
0
      infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
904
0
                              policy.soo_capacity());
905
0
    }
906
907
0
    if (!infoz.IsSampled()) return;
908
0
    has_infoz = true;
909
0
  }
910
911
0
  common.set_capacity(new_capacity);
912
913
  // We do not set control and slots in CommonFields yet to avoid overriding
914
  // SOO data.
915
0
  const auto [new_ctrl, new_slots] =
916
0
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
917
918
0
  InsertOldSooSlotAndInitializeControlBytes(common, policy, new_ctrl, new_slots,
919
0
                                            has_infoz);
920
0
  ResetGrowthLeft(common);
921
0
  if (has_infoz) {
922
0
    common.set_has_infoz();
923
0
    common.set_infoz(infoz);
924
0
    infoz.RecordStorageChanged(common.size(), new_capacity);
925
0
  }
926
0
}
927
928
void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* __restrict old_ctrl,
929
                                            size_t old_capacity,
930
                                            ctrl_t* __restrict new_ctrl,
931
4
                                            size_t new_capacity) {
932
4
  ABSL_SWISSTABLE_ASSERT(is_single_group(new_capacity));
933
4
  constexpr size_t kHalfWidth = Group::kWidth / 2;
934
4
  ABSL_ASSUME(old_capacity < kHalfWidth);
935
4
  ABSL_ASSUME(old_capacity > 0);
936
4
  static_assert(Group::kWidth == 8 || Group::kWidth == 16,
937
4
                "Group size is not supported.");
938
939
  // NOTE: operations are done with compile time known size = 8.
940
  // The compiler optimizes that into a single ASM operation.
941
942
  // Load the bytes from old_capacity. This contains
943
  // - the sentinel byte
944
  // - all the old control bytes
945
  // - the rest is filled with kEmpty bytes
946
  // Example:
947
  // old_ctrl =     012S012EEEEEEEEE...
948
  // copied_bytes = S012EEEE
949
4
  uint64_t copied_bytes = absl::little_endian::Load64(old_ctrl + old_capacity);
950
951
  // We change the sentinel byte to kEmpty before storing to both the start of
952
  // the new_ctrl, and past the end of the new_ctrl later for the new cloned
953
  // bytes. Note that this is faster than setting the sentinel byte to kEmpty
954
  // after the copy directly in new_ctrl because we are limited on store
955
  // bandwidth.
956
4
  static constexpr uint64_t kEmptyXorSentinel =
957
4
      static_cast<uint8_t>(ctrl_t::kEmpty) ^
958
4
      static_cast<uint8_t>(ctrl_t::kSentinel);
959
960
  // Replace the first byte kSentinel with kEmpty.
961
  // The resulting bytes are the old control bytes shifted by one position.
962
  // Example:
963
  // old_ctrl = 012S012EEEEEEEEE...
964
  // before =   S012EEEE
965
  // after  =   E012EEEE
966
4
  copied_bytes ^= kEmptyXorSentinel;
967
968
4
  if (Group::kWidth == 8) {
969
    // With group size 8, we can grow with two write operations.
970
0
    ABSL_SWISSTABLE_ASSERT(old_capacity < 8 &&
971
0
                           "old_capacity is too large for group size 8");
972
0
    absl::little_endian::Store64(new_ctrl, copied_bytes);
973
974
0
    static constexpr uint64_t kSentinal64 =
975
0
        static_cast<uint8_t>(ctrl_t::kSentinel);
976
977
    // Prepend kSentinel byte to the beginning of copied_bytes.
978
    // We have maximum 3 non-empty bytes at the beginning of copied_bytes for
979
    // group size 8.
980
    // Example:
981
    // old_ctrl = 012S012EEEE
982
    // before =   E012EEEE
983
    // after  =   SE012EEE
984
0
    copied_bytes = (copied_bytes << 8) ^ kSentinal64;
985
0
    absl::little_endian::Store64(new_ctrl + new_capacity, copied_bytes);
986
    // Example for capacity 3:
987
    // old_ctrl = 012S012EEEE
988
    // After the first store:
989
    //           >!
990
    // new_ctrl = E012EEEE???????
991
    // After the second store:
992
    //                  >!
993
    // new_ctrl = E012EEESE012EEE
994
0
    return;
995
0
  }
996
997
4
  ABSL_SWISSTABLE_ASSERT(Group::kWidth == 16);  // NOLINT(misc-static-assert)
998
999
  // Fill the second half of the main control bytes with kEmpty.
1000
  // For small capacity that may write into mirrored control bytes.
1001
  // It is fine as we will overwrite all the bytes later.
1002
4
  std::memset(new_ctrl + kHalfWidth, static_cast<int8_t>(ctrl_t::kEmpty),
1003
4
              kHalfWidth);
1004
  // Fill the second half of the mirrored control bytes with kEmpty.
1005
4
  std::memset(new_ctrl + new_capacity + kHalfWidth,
1006
4
              static_cast<int8_t>(ctrl_t::kEmpty), kHalfWidth);
1007
  // Copy the first half of the non-mirrored control bytes.
1008
4
  absl::little_endian::Store64(new_ctrl, copied_bytes);
1009
4
  new_ctrl[new_capacity] = ctrl_t::kSentinel;
1010
  // Copy the first half of the mirrored control bytes.
1011
4
  absl::little_endian::Store64(new_ctrl + new_capacity + 1, copied_bytes);
1012
1013
  // Example for growth capacity 1->3:
1014
  // old_ctrl =                  0S0EEEEEEEEEEEEEE
1015
  // new_ctrl at the end =       E0ESE0EEEEEEEEEEEEE
1016
  //                                    >!
1017
  // new_ctrl after 1st memset = ????????EEEEEEEE???
1018
  //                                       >!
1019
  // new_ctrl after 2nd memset = ????????EEEEEEEEEEE
1020
  //                            >!
1021
  // new_ctrl after 1st store =  E0EEEEEEEEEEEEEEEEE
1022
  // new_ctrl after kSentinel =  E0ESEEEEEEEEEEEEEEE
1023
  //                                >!
1024
  // new_ctrl after 2nd store =  E0ESE0EEEEEEEEEEEEE
1025
1026
  // Example for growth capacity 3->7:
1027
  // old_ctrl =                  012S012EEEEEEEEEEEE
1028
  // new_ctrl at the end =       E012EEESE012EEEEEEEEEEE
1029
  //                                    >!
1030
  // new_ctrl after 1st memset = ????????EEEEEEEE???????
1031
  //                                           >!
1032
  // new_ctrl after 2nd memset = ????????EEEEEEEEEEEEEEE
1033
  //                            >!
1034
  // new_ctrl after 1st store =  E012EEEEEEEEEEEEEEEEEEE
1035
  // new_ctrl after kSentinel =  E012EEESEEEEEEEEEEEEEEE
1036
  //                                >!
1037
  // new_ctrl after 2nd store =  E012EEESE012EEEEEEEEEEE
1038
1039
  // Example for growth capacity 7->15:
1040
  // old_ctrl =                  0123456S0123456EEEEEEEE
1041
  // new_ctrl at the end =       E0123456EEEEEEESE0123456EEEEEEE
1042
  //                                    >!
1043
  // new_ctrl after 1st memset = ????????EEEEEEEE???????????????
1044
  //                                                   >!
1045
  // new_ctrl after 2nd memset = ????????EEEEEEEE???????EEEEEEEE
1046
  //                            >!
1047
  // new_ctrl after 1st store =  E0123456EEEEEEEE???????EEEEEEEE
1048
  // new_ctrl after kSentinel =  E0123456EEEEEEES???????EEEEEEEE
1049
  //                                            >!
1050
  // new_ctrl after 2nd store =  E0123456EEEEEEESE0123456EEEEEEE
1051
4
}
1052
1053
// Size of the buffer we allocate on the stack for storing probed elements in
1054
// the GrowToNextCapacity algorithm.
1055
constexpr size_t kProbedElementsBufferSize = 512;
1056
1057
// Decodes information about probed elements from contiguous memory.
1058
// Finds new position for each element and transfers it to the new slots.
1059
// Returns the total probe length.
1060
template <typename ProbedItem>
1061
ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertImpl(
1062
    CommonFields& c, const PolicyFunctions& __restrict policy,
1063
0
    const ProbedItem* start, const ProbedItem* end, void* old_slots) {
1064
0
  const size_t new_capacity = c.capacity();
1065
1066
0
  void* new_slots = c.slot_array();
1067
0
  ctrl_t* new_ctrl = c.control();
1068
0
  size_t total_probe_length = 0;
1069
1070
0
  const size_t slot_size = policy.slot_size;
1071
0
  auto transfer_n = policy.transfer_n;
1072
1073
0
  for (; start < end; ++start) {
1074
0
    const FindInfo target = find_first_non_full_from_h1(
1075
0
        new_ctrl, static_cast<size_t>(start->h1), new_capacity);
1076
0
    total_probe_length += target.probe_length;
1077
0
    const size_t old_index = static_cast<size_t>(start->source_offset);
1078
0
    const size_t new_i = target.offset;
1079
0
    ABSL_SWISSTABLE_ASSERT(old_index < new_capacity / 2);
1080
0
    ABSL_SWISSTABLE_ASSERT(new_i < new_capacity);
1081
0
    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_i]));
1082
0
    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
1083
0
    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
1084
0
    SanitizerUnpoisonMemoryRegion(dst_slot, slot_size);
1085
0
    transfer_n(&c, dst_slot, src_slot, 1);
1086
0
    SetCtrlInLargeTable(c, new_i, static_cast<h2_t>(start->h2), slot_size);
1087
0
  }
1088
0
  return total_probe_length;
1089
0
}
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned int, 32ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned int, 32ul> const*, absl::container_internal::ProbedItemImpl<unsigned int, 32ul> const*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned long, 64ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned long, 64ul> const*, absl::container_internal::ProbedItemImpl<unsigned long, 64ul> const*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned long, 122ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned long, 122ul> const*, absl::container_internal::ProbedItemImpl<unsigned long, 122ul> const*, void*)
1090
1091
// Sentinel value for the start of marked elements.
1092
// Signals that there are no marked elements.
1093
constexpr size_t kNoMarkedElementsSentinel = ~size_t{};
1094
1095
// Process probed elements that did not fit into available buffers.
1096
// We marked them in control bytes as kSentinel.
1097
// Hash recomputation and full probing is done here.
1098
// This use case should be extremely rare.
1099
ABSL_ATTRIBUTE_NOINLINE size_t ProcessProbedMarkedElements(
1100
    CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl,
1101
0
    void* old_slots, size_t start) {
1102
0
  size_t old_capacity = PreviousCapacity(c.capacity());
1103
0
  const size_t slot_size = policy.slot_size;
1104
0
  void* new_slots = c.slot_array();
1105
0
  size_t total_probe_length = 0;
1106
0
  const void* hash_fn = policy.hash_fn(c);
1107
0
  auto hash_slot = policy.hash_slot;
1108
0
  auto transfer_n = policy.transfer_n;
1109
0
  const size_t seed = c.seed().seed();
1110
0
  for (size_t old_index = start; old_index < old_capacity; ++old_index) {
1111
0
    if (old_ctrl[old_index] != ctrl_t::kSentinel) {
1112
0
      continue;
1113
0
    }
1114
0
    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
1115
0
    const size_t hash = hash_slot(hash_fn, src_slot, seed);
1116
0
    const FindInfo target = find_first_non_full(c, hash);
1117
0
    total_probe_length += target.probe_length;
1118
0
    const size_t new_i = target.offset;
1119
0
    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
1120
0
    SetCtrlInLargeTable(c, new_i, H2(hash), slot_size);
1121
0
    transfer_n(&c, dst_slot, src_slot, 1);
1122
0
  }
1123
0
  return total_probe_length;
1124
0
}
1125
1126
// The largest old capacity for which it is guaranteed that all probed elements
1127
// fit in ProbedItemEncoder's local buffer.
1128
// For such tables, `encode_probed_element` is trivial.
1129
constexpr size_t kMaxLocalBufferOldCapacity =
1130
    kProbedElementsBufferSize / sizeof(ProbedItem4Bytes) - 1;
1131
static_assert(IsValidCapacity(kMaxLocalBufferOldCapacity));
1132
constexpr size_t kMaxLocalBufferNewCapacity =
1133
    NextCapacity(kMaxLocalBufferOldCapacity);
1134
static_assert(kMaxLocalBufferNewCapacity <= ProbedItem4Bytes::kMaxNewCapacity);
1135
static_assert(NextCapacity(kMaxLocalBufferNewCapacity) <=
1136
              ProbedItem4Bytes::kMaxNewCapacity);
1137
1138
// Initializes mirrored control bytes after
1139
// transfer_unprobed_elements_to_next_capacity.
1140
0
void InitializeMirroredControlBytes(ctrl_t* new_ctrl, size_t new_capacity) {
1141
0
  std::memcpy(new_ctrl + new_capacity,
1142
              // We own GrowthInfo just before control bytes. So it is ok
1143
              // to read one byte from it.
1144
0
              new_ctrl - 1, Group::kWidth);
1145
0
  new_ctrl[new_capacity] = ctrl_t::kSentinel;
1146
0
}
1147
1148
// Encodes probed elements into available memory.
1149
// At first, a local (on stack) buffer is used. The size of the buffer is
1150
// kProbedElementsBufferSize bytes.
1151
// When the local buffer is full, we switch to the `control_` buffer. We are
1152
// allowed to overwrite the `control_` buffer up to the `source_offset` byte. If
1153
// we run out of space in the `control_` buffer, we fall back to a naive algorithm for all the
1154
// rest of the probed elements. We mark such elements as kSentinel in the control bytes
1155
// and later process them fully. See ProcessProbedMarkedElements for details. It
1156
// should be extremely rare.
1157
template <typename ProbedItemType,
1158
          // If true, we only use the local buffer and never switch to the
1159
          // control buffer.
1160
          bool kGuaranteedFitToBuffer = false>
1161
class ProbedItemEncoder {
1162
 public:
1163
  using ProbedItem = ProbedItemType;
1164
0
  explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
1165
1166
  // Encode item into the best available location.
1167
0
  void EncodeItem(ProbedItem item) {
1168
0
    if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
1169
0
      return ProcessEncodeWithOverflow(item);
1170
0
    }
1171
0
    ABSL_SWISSTABLE_ASSERT(pos_ < end_);
1172
0
    *pos_ = item;
1173
0
    ++pos_;
1174
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned long, 64ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned long, 122ul>)
1175
1176
  // Decodes information about probed elements from all available sources.
1177
  // Finds new position for each element and transfers it to the new slots.
1178
  // Returns the total probe length.
1179
  size_t DecodeAndInsertToTable(CommonFields& common,
1180
                                const PolicyFunctions& __restrict policy,
1181
0
                                void* old_slots) const {
1182
0
    if (pos_ == buffer_) {
1183
0
      return 0;
1184
0
    }
1185
0
    if constexpr (kGuaranteedFitToBuffer) {
1186
0
      return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
1187
0
    }
1188
0
    size_t total_probe_length = DecodeAndInsertImpl(
1189
0
        common, policy, buffer_,
1190
0
        local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
1191
0
    if (!local_buffer_full_) {
1192
0
      return total_probe_length;
1193
0
    }
1194
0
    total_probe_length +=
1195
0
        DecodeAndInsertToTableOverflow(common, policy, old_slots);
1196
0
    return total_probe_length;
1197
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
1198
1199
 private:
1200
0
  static ProbedItem* AlignToNextItem(void* ptr) {
1201
0
    return reinterpret_cast<ProbedItem*>(AlignUpTo(
1202
0
        reinterpret_cast<uintptr_t>(ptr), alignof(ProbedItem)));
1203
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::AlignToNextItem(void*)
1204
1205
0
  ProbedItem* OverflowBufferStart() const {
1206
    // We reuse GrowthInfo memory as well.
1207
0
    return AlignToNextItem(control_ - ControlOffset(/*has_infoz=*/false));
1208
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::OverflowBufferStart() const
1209
1210
  // Encodes item when previously allocated buffer is full.
1211
// The first time that happens is when the local buffer is full; we then switch
1212
// from the local buffer to the control buffer.
1213
// Each time this function is called, the available buffer is extended up to
1214
// the `item.source_offset` byte in the control buffer.
1215
// Once the buffer has been extended, this function won't be called again until
1216
// the buffer is exhausted.
1217
  //
1218
// If there's no space in the control buffer, we fall back to the naive
1219
// algorithm and mark probed elements as kSentinel in the control buffer. In
1220
// that case, this function is called for every subsequent probed element.
1221
0
  ABSL_ATTRIBUTE_NOINLINE void ProcessEncodeWithOverflow(ProbedItem item) {
1222
0
    if (!local_buffer_full_) {
1223
0
      local_buffer_full_ = true;
1224
0
      pos_ = OverflowBufferStart();
1225
0
    }
1226
0
    const size_t source_offset = static_cast<size_t>(item.source_offset);
1227
    // We are in fallback mode so we can't reuse control buffer anymore.
1228
    // Probed elements are marked as kSentinel in the control buffer.
1229
0
    if (ABSL_PREDICT_FALSE(marked_elements_starting_position_ !=
1230
0
                           kNoMarkedElementsSentinel)) {
1231
0
      control_[source_offset] = ctrl_t::kSentinel;
1232
0
      return;
1233
0
    }
1234
    // Refresh the end pointer to the new available position.
1235
    // Invariant: if pos < end, then we have at least sizeof(ProbedItem) bytes
1236
    // to write.
1237
0
    end_ = control_ + source_offset + 1 - sizeof(ProbedItem);
1238
0
    if (ABSL_PREDICT_TRUE(pos_ < end_)) {
1239
0
      *pos_ = item;
1240
0
      ++pos_;
1241
0
      return;
1242
0
    }
1243
0
    control_[source_offset] = ctrl_t::kSentinel;
1244
0
    marked_elements_starting_position_ = source_offset;
1245
    // From now on we will always fall through to `ProcessEncodeWithOverflow`.
1246
0
    ABSL_SWISSTABLE_ASSERT(pos_ >= end_);
1247
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned long, 64ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned long, 122ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
1248
1249
  // Decodes information about probed elements from control buffer and processes
1250
  // marked elements.
1251
  // Finds new position for each element and transfers it to the new slots.
1252
  // Returns the total probe length.
1253
  ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertToTableOverflow(
1254
      CommonFields& common, const PolicyFunctions& __restrict policy,
1255
0
      void* old_slots) const {
1256
0
    ABSL_SWISSTABLE_ASSERT(local_buffer_full_ &&
1257
0
                           "must not be called when local buffer is not full");
1258
0
    size_t total_probe_length = DecodeAndInsertImpl(
1259
0
        common, policy, OverflowBufferStart(), pos_, old_slots);
1260
0
    if (ABSL_PREDICT_TRUE(marked_elements_starting_position_ ==
1261
0
                          kNoMarkedElementsSentinel)) {
1262
0
      return total_probe_length;
1263
0
    }
1264
0
    total_probe_length +=
1265
0
        ProcessProbedMarkedElements(common, policy, control_, old_slots,
1266
0
                                    marked_elements_starting_position_);
1267
0
    return total_probe_length;
1268
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
1269
1270
  static constexpr size_t kBufferSize =
1271
      kProbedElementsBufferSize / sizeof(ProbedItem);
1272
  ProbedItem buffer_[kBufferSize];
1273
  // If local_buffer_full_ is false, then pos_/end_ are in the local buffer,
1274
  // otherwise, they're in the overflow buffer.
1275
  ProbedItem* pos_ = buffer_;
1276
  const void* end_ = buffer_ + kBufferSize;
1277
  ctrl_t* const control_;
1278
  size_t marked_elements_starting_position_ = kNoMarkedElementsSentinel;
1279
  bool local_buffer_full_ = false;
1280
};
1281
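The class above layers three tiers of storage for probed items: the fixed stack buffer, then the already-consumed prefix of the old control array, then a mark-with-kSentinel fallback that defers the work. A much-simplified sketch of the same buffering idea (the control-buffer reuse and the real ProbedItem types are omitted; all names here are hypothetical):

// Simplified sketch: not the real encoder, just the "buffer, then mark for
// later" strategy.
#include <cstddef>
#include <vector>

struct ProbedItemSketch {
  std::size_t source_offset;
  std::size_t hash;
};

template <std::size_t kBufferSize>
class TieredEncoderSketch {
 public:
  // Returns true if the item was buffered, false if the caller must re-derive
  // it later (the analogue of marking kSentinel in the control bytes).
  bool Encode(const ProbedItemSketch& item) {
    if (size_ < kBufferSize) {
      buffer_[size_++] = item;
      return true;
    }
    marked_.push_back(item.source_offset);  // remember only the offset
    return false;
  }

  template <typename Fn>
  void ForEachBuffered(Fn fn) const {
    for (std::size_t i = 0; i < size_; ++i) fn(buffer_[i]);
  }
  const std::vector<std::size_t>& marked() const { return marked_; }

 private:
  ProbedItemSketch buffer_[kBufferSize];
  std::size_t size_ = 0;
  std::vector<std::size_t> marked_;  // the real code allocates no extra memory
};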
1282
// Grows to next capacity with specified encoder type.
1283
// The encoder is used to store probed elements that are processed later.
1284
// A different encoder is used depending on the capacity of the table.
1285
// Returns total probe length.
1286
template <typename Encoder>
1287
size_t GrowToNextCapacity(CommonFields& common,
1288
                          const PolicyFunctions& __restrict policy,
1289
0
                          ctrl_t* old_ctrl, void* old_slots) {
1290
0
  using ProbedItem = typename Encoder::ProbedItem;
1291
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
1292
0
  Encoder encoder(old_ctrl);
1293
0
  policy.transfer_unprobed_elements_to_next_capacity(
1294
0
      common, old_ctrl, old_slots, &encoder,
1295
0
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1296
0
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1297
0
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1298
0
      });
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
1299
0
  InitializeMirroredControlBytes(common.control(), common.capacity());
1300
0
  return encoder.DecodeAndInsertToTable(common, policy, old_slots);
1301
0
}
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
1302
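GrowToNextCapacity above is a two-pass resize: elements that land without probing are moved straight into the new array, probed ones are only recorded by the encoder, and a second pass inserts the recorded items. A toy version of that control flow, using a power-of-two bucket count and linear probing purely to keep the sketch short (unlike the real 2^k - 1 capacities and group probing):

// Toy two-pass resize over plain hashes; sketch only.
#include <cstddef>
#include <cstdint>
#include <vector>

inline std::vector<std::size_t> TwoPassGrowSketch(
    const std::vector<std::size_t>& old_hashes, std::size_t new_buckets) {
  const std::size_t mask = new_buckets - 1;  // new_buckets is a power of two
  std::vector<std::size_t> placed(new_buckets, SIZE_MAX);
  std::vector<std::size_t> probed;  // plays the role of the ProbedItemEncoder
  for (std::size_t h : old_hashes) {
    const std::size_t home = h & mask;
    if (placed[home] == SIZE_MAX) {
      placed[home] = h;     // pass one: unprobed element, transferred directly
    } else {
      probed.push_back(h);  // recorded for the second pass
    }
  }
  for (std::size_t h : probed) {
    std::size_t i = h & mask;
    while (placed[i] != SIZE_MAX) i = (i + 1) & mask;
    placed[i] = h;          // pass two: decode and insert the probed elements
  }
  return placed;
}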
1303
// Grows to next capacity for relatively small tables so that even if all
1304
// elements are probed, we don't need to overflow the local buffer.
1305
// Returns total probe length.
1306
size_t GrowToNextCapacityThatFitsInLocalBuffer(
1307
    CommonFields& common, const PolicyFunctions& __restrict policy,
1308
0
    ctrl_t* old_ctrl, void* old_slots) {
1309
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= kMaxLocalBufferNewCapacity);
1310
0
  return GrowToNextCapacity<
1311
0
      ProbedItemEncoder<ProbedItem4Bytes, /*kGuaranteedFitToBuffer=*/true>>(
1312
0
      common, policy, old_ctrl, old_slots);
1313
0
}
1314
1315
// Grows to next capacity with different encodings. Returns total probe length.
1316
// These functions are useful to simplify profile analysis.
1317
size_t GrowToNextCapacity4BytesEncoder(CommonFields& common,
1318
                                       const PolicyFunctions& __restrict policy,
1319
0
                                       ctrl_t* old_ctrl, void* old_slots) {
1320
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem4Bytes>>(
1321
0
      common, policy, old_ctrl, old_slots);
1322
0
}
1323
size_t GrowToNextCapacity8BytesEncoder(CommonFields& common,
1324
                                       const PolicyFunctions& __restrict policy,
1325
0
                                       ctrl_t* old_ctrl, void* old_slots) {
1326
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem8Bytes>>(
1327
0
      common, policy, old_ctrl, old_slots);
1328
0
}
1329
size_t GrowToNextCapacity16BytesEncoder(
1330
    CommonFields& common, const PolicyFunctions& __restrict policy,
1331
0
    ctrl_t* old_ctrl, void* old_slots) {
1332
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem16Bytes>>(
1333
0
      common, policy, old_ctrl, old_slots);
1334
0
}
1335
1336
// Grows to next capacity for tables with relatively large capacity, for which
1337
// we can't guarantee that all probed elements fit in the local buffer. Returns
1338
// total probe length.
1339
size_t GrowToNextCapacityOverflowLocalBuffer(
1340
    CommonFields& common, const PolicyFunctions& __restrict policy,
1341
0
    ctrl_t* old_ctrl, void* old_slots) {
1342
0
  const size_t new_capacity = common.capacity();
1343
0
  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem4Bytes::kMaxNewCapacity)) {
1344
0
    return GrowToNextCapacity4BytesEncoder(common, policy, old_ctrl, old_slots);
1345
0
  }
1346
0
  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem8Bytes::kMaxNewCapacity)) {
1347
0
    return GrowToNextCapacity8BytesEncoder(common, policy, old_ctrl, old_slots);
1348
0
  }
1349
  // 16 bytes encoding supports the maximum swisstable capacity.
1350
0
  return GrowToNextCapacity16BytesEncoder(common, policy, old_ctrl, old_slots);
1351
0
}
1352
1353
// Dispatches to the appropriate `GrowToNextCapacity*` function based on the
1354
// capacity of the table. Returns total probe length.
1355
ABSL_ATTRIBUTE_NOINLINE
1356
size_t GrowToNextCapacityDispatch(CommonFields& common,
1357
                                  const PolicyFunctions& __restrict policy,
1358
0
                                  ctrl_t* old_ctrl, void* old_slots) {
1359
0
  const size_t new_capacity = common.capacity();
1360
0
  if (ABSL_PREDICT_TRUE(new_capacity <= kMaxLocalBufferNewCapacity)) {
1361
0
    return GrowToNextCapacityThatFitsInLocalBuffer(common, policy, old_ctrl,
1362
0
                                                   old_slots);
1363
0
  } else {
1364
0
    return GrowToNextCapacityOverflowLocalBuffer(common, policy, old_ctrl,
1365
0
                                                 old_slots);
1366
0
  }
1367
0
}
1368
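The dispatch above picks the smallest probed-item encoding whose capacity limit still covers the new table. A sketch of that selection, with the thresholds written as explicit assumptions rather than the real ProbedItem{4,8,16}Bytes::kMaxNewCapacity values:

// Sketch: choose the narrowest per-item encoding that can address new_capacity.
#include <cstdint>

enum class ProbedItemWidthSketch { k4Bytes, k8Bytes, k16Bytes };

constexpr std::uint64_t kAssumed4ByteMax = (std::uint64_t{1} << 20) - 1;  // assumption
constexpr std::uint64_t kAssumed8ByteMax = (std::uint64_t{1} << 40) - 1;  // assumption

constexpr ProbedItemWidthSketch ChooseEncodingSketch(std::uint64_t new_capacity) {
  if (new_capacity <= kAssumed4ByteMax) return ProbedItemWidthSketch::k4Bytes;
  if (new_capacity <= kAssumed8ByteMax) return ProbedItemWidthSketch::k8Bytes;
  return ProbedItemWidthSketch::k16Bytes;  // widest encoding covers everything
}

static_assert(ChooseEncodingSketch(1023) == ProbedItemWidthSketch::k4Bytes);
static_assert(ChooseEncodingSketch(std::uint64_t{1} << 30) ==
              ProbedItemWidthSketch::k8Bytes);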
1369
void IncrementSmallSizeNonSoo(CommonFields& common,
1370
0
                              const PolicyFunctions& __restrict policy) {
1371
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1372
0
  common.increment_size();
1373
0
  SanitizerUnpoisonMemoryRegion(common.slot_array(), policy.slot_size);
1374
0
}
1375
1376
void IncrementSmallSize(CommonFields& common,
1377
0
                        const PolicyFunctions& __restrict policy) {
1378
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1379
0
  if (policy.soo_enabled) {
1380
0
    common.set_full_soo();
1381
0
  } else {
1382
0
    IncrementSmallSizeNonSoo(common, policy);
1383
0
  }
1384
0
}
1385
1386
std::pair<ctrl_t*, void*> Grow1To3AndPrepareInsert(
1387
    CommonFields& common, const PolicyFunctions& __restrict policy,
1388
2
    absl::FunctionRef<size_t(size_t)> get_hash) {
1389
  // TODO(b/413062340): Refactor to reuse more code with
1390
  // GrowSooTableToNextCapacityAndPrepareInsert.
1391
2
  ABSL_SWISSTABLE_ASSERT(common.capacity() == 1);
1392
2
  ABSL_SWISSTABLE_ASSERT(!common.empty());
1393
2
  ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
1394
2
  constexpr size_t kOldCapacity = 1;
1395
2
  constexpr size_t kNewCapacity = NextCapacity(kOldCapacity);
1396
2
  ctrl_t* old_ctrl = common.control();
1397
2
  void* old_slots = common.slot_array();
1398
1399
2
  const size_t slot_size = policy.slot_size;
1400
2
  const size_t slot_align = policy.slot_align;
1401
2
  void* alloc = policy.get_char_alloc(common);
1402
2
  HashtablezInfoHandle infoz = common.infoz();
1403
2
  const bool has_infoz = infoz.IsSampled();
1404
2
  common.set_capacity(kNewCapacity);
1405
1406
2
  const auto [new_ctrl, new_slots] =
1407
2
      AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
1408
2
  common.set_control(new_ctrl);
1409
2
  common.set_slots(new_slots);
1410
2
  SanitizerPoisonMemoryRegion(new_slots, kNewCapacity * slot_size);
1411
1412
2
  if (ABSL_PREDICT_TRUE(!has_infoz)) {
1413
    // When we're sampled, we already have a seed.
1414
2
    common.generate_new_seed(/*has_infoz=*/false);
1415
2
  }
1416
2
  const size_t new_hash = get_hash(common.seed().seed());
1417
2
  h2_t new_h2 = H2(new_hash);
1418
2
  size_t orig_hash =
1419
2
      policy.hash_slot(policy.hash_fn(common), old_slots, common.seed().seed());
1420
2
  size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1421
2
  InitializeThreeElementsControlBytes(H2(orig_hash), new_h2, offset, new_ctrl);
1422
1423
2
  void* old_element_target = NextSlot(new_slots, slot_size);
1424
2
  SanitizerUnpoisonMemoryRegion(old_element_target, slot_size);
1425
2
  policy.transfer_n(&common, old_element_target, old_slots, 1);
1426
1427
2
  void* new_element_target_slot = SlotAddress(new_slots, offset, slot_size);
1428
2
  SanitizerUnpoisonMemoryRegion(new_element_target_slot, slot_size);
1429
1430
2
  policy.dealloc(alloc, kOldCapacity, old_ctrl, slot_size, slot_align,
1431
2
                 has_infoz);
1432
2
  PrepareInsertCommon(common);
1433
2
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1434
2
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1435
1436
2
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1437
0
    ReportSingleGroupTableGrowthToInfoz(common, infoz, new_hash);
1438
0
  }
1439
2
  return {new_ctrl + offset, new_element_target_slot};
1440
2
}
1441
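When a capacity-1 table grows, Grow1To3AndPrepareInsert moves the existing element into slot 1 of the new 3-slot array and places the incoming element at one end of the array. A sketch of the resulting layout, where the end-slot choice from one hash bit is a stand-in for Resize1To3NewOffset (which also mixes in the per-table seed):

// Sketch of the 1 -> 3 growth layout; index choice is a simplification.
#include <cstddef>

struct OneToThreeLayoutSketch {
  std::size_t old_element_index;  // always the middle slot in this scheme
  std::size_t new_element_index;  // one of the two end slots
};

constexpr OneToThreeLayoutSketch PlanGrow1To3Sketch(std::size_t new_hash) {
  return {1, (new_hash & 1) ? std::size_t{0} : std::size_t{2}};
}

static_assert(PlanGrow1To3Sketch(0x5).new_element_index == 0);
static_assert(PlanGrow1To3Sketch(0x4).new_element_index == 2);
static_assert(PlanGrow1To3Sketch(0x4).old_element_index == 1);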
1442
// Grows to next capacity and prepares insert for the given new_hash.
1443
// Returns the offset of the new element.
1444
size_t GrowToNextCapacityAndPrepareInsert(
1445
    CommonFields& common, const PolicyFunctions& __restrict policy,
1446
4
    size_t new_hash) {
1447
4
  ABSL_SWISSTABLE_ASSERT(common.growth_left() == 0);
1448
4
  const size_t old_capacity = common.capacity();
1449
4
  ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
1450
4
  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(old_capacity));
1451
1452
4
  const size_t new_capacity = NextCapacity(old_capacity);
1453
4
  ctrl_t* old_ctrl = common.control();
1454
4
  void* old_slots = common.slot_array();
1455
1456
4
  common.set_capacity(new_capacity);
1457
4
  const size_t slot_size = policy.slot_size;
1458
4
  const size_t slot_align = policy.slot_align;
1459
4
  void* alloc = policy.get_char_alloc(common);
1460
4
  HashtablezInfoHandle infoz = common.infoz();
1461
4
  const bool has_infoz = infoz.IsSampled();
1462
1463
4
  const auto [new_ctrl, new_slots] =
1464
4
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
1465
4
  common.set_control(new_ctrl);
1466
4
  common.set_slots(new_slots);
1467
4
  SanitizerPoisonMemoryRegion(new_slots, new_capacity * slot_size);
1468
1469
4
  h2_t new_h2 = H2(new_hash);
1470
4
  size_t total_probe_length = 0;
1471
4
  FindInfo find_info;
1472
4
  if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) {
1473
4
    size_t offset;
1474
4
    GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl,
1475
4
                                           new_capacity);
1476
    // We put the new element either at the beginning or at the end of the
1477
    // table with approximately equal probability.
1478
4
    offset =
1479
4
        SingleGroupTableH1(new_hash, common.seed()) & 1 ? 0 : new_capacity - 1;
1480
1481
4
    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset]));
1482
4
    SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);
1483
4
    find_info = FindInfo{offset, 0};
1484
    // Single group tables have all slots full on resize. So we can transfer
1485
    // all slots without checking the control bytes.
1486
4
    ABSL_SWISSTABLE_ASSERT(common.size() == old_capacity);
1487
4
    void* target = NextSlot(new_slots, slot_size);
1488
4
    SanitizerUnpoisonMemoryRegion(target, old_capacity * slot_size);
1489
4
    policy.transfer_n(&common, target, old_slots, old_capacity);
1490
4
  } else {
1491
0
    total_probe_length =
1492
0
        GrowToNextCapacityDispatch(common, policy, old_ctrl, old_slots);
1493
0
    find_info = find_first_non_full(common, new_hash);
1494
0
    SetCtrlInLargeTable(common, find_info.offset, new_h2, policy.slot_size);
1495
0
  }
1496
4
  ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
1497
4
  (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
1498
4
                    has_infoz);
1499
4
  PrepareInsertCommon(common);
1500
4
  ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
1501
4
                  common.size());
1502
1503
4
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1504
0
    ReportGrowthToInfoz(common, infoz, new_hash, total_probe_length,
1505
0
                        find_info.probe_length);
1506
0
  }
1507
4
  return find_info.offset;
1508
4
}
1509
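For the single-group branch above, the new element goes to index 0 or to new_capacity - 1 depending on one bit of H1, so it lands at either end of the group with roughly equal probability. A minimal sketch of that choice (SingleGroupTableH1 is replaced by taking the hash directly):

// Sketch mirroring the single-group placement above.
#include <cstddef>

constexpr std::size_t SingleGroupInsertOffsetSketch(std::size_t h1,
                                                    std::size_t new_capacity) {
  // Beginning or end of the table with ~equal probability.
  return (h1 & 1) ? 0 : new_capacity - 1;
}

static_assert(SingleGroupInsertOffsetSketch(/*h1=*/3, /*new_capacity=*/7) == 0);
static_assert(SingleGroupInsertOffsetSketch(/*h1=*/2, /*new_capacity=*/7) == 6);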
1510
}  // namespace
1511
1512
std::pair<ctrl_t*, void*> PrepareInsertSmallNonSoo(
1513
    CommonFields& common, const PolicyFunctions& __restrict policy,
1514
4
    absl::FunctionRef<size_t(size_t)> get_hash) {
1515
4
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1516
4
  ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
1517
4
  if (common.capacity() == 1) {
1518
2
    if (common.empty()) {
1519
0
      IncrementSmallSizeNonSoo(common, policy);
1520
0
      return {SooControl(), common.slot_array()};
1521
2
    } else {
1522
2
      return Grow1To3AndPrepareInsert(common, policy, get_hash);
1523
2
    }
1524
2
  }
1525
1526
  // Growing from 0 to 1 capacity.
1527
2
  ABSL_SWISSTABLE_ASSERT(common.capacity() == 0);
1528
2
  constexpr size_t kNewCapacity = 1;
1529
1530
2
  common.set_capacity(kNewCapacity);
1531
2
  HashtablezInfoHandle infoz;
1532
2
  const bool should_sample =
1533
2
      policy.is_hashtablez_eligible && ShouldSampleNextTable();
1534
2
  if (ABSL_PREDICT_FALSE(should_sample)) {
1535
0
    infoz = ForcedTrySample(policy.slot_size, policy.key_size,
1536
0
                            policy.value_size, policy.soo_capacity());
1537
0
  }
1538
2
  const bool has_infoz = infoz.IsSampled();
1539
2
  void* alloc = policy.get_char_alloc(common);
1540
1541
2
  const auto [new_ctrl, new_slots] =
1542
2
      AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
1543
2
  common.set_control(new_ctrl);
1544
2
  common.set_slots(new_slots);
1545
1546
2
  static_assert(NextCapacity(0) == 1);
1547
2
  PrepareInsertCommon(common);
1548
1549
2
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1550
0
    common.generate_new_seed(/*has_infoz=*/true);
1551
0
    ReportSingleGroupTableGrowthToInfoz(common, infoz,
1552
0
                                        get_hash(common.seed().seed()));
1553
0
  }
1554
2
  return {SooControl(), new_slots};
1555
2
}
1556
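PrepareInsertSmallNonSoo walks the small-table capacities 0 -> 1 -> 3 before the general growth path takes over. A small sketch of that progression, assuming a double-plus-one NextCapacity, which matches the 0 -> 1 and 1 -> 3 steps seen here and keeps every capacity of the form 2^k - 1:

// Capacity progression sketch; NextCapacitySketch is an assumption that
// matches the steps used above.
#include <cstddef>

constexpr std::size_t NextCapacitySketch(std::size_t c) { return c * 2 + 1; }

static_assert(NextCapacitySketch(0) == 1);  // first heap allocation
static_assert(NextCapacitySketch(1) == 3);  // Grow1To3AndPrepareInsert
static_assert(NextCapacitySketch(3) == 7);  // general growth path from here on
static_assert(((NextCapacitySketch(7) + 1) & NextCapacitySketch(7)) == 0);  // still 2^k - 1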
1557
namespace {
1558
1559
// Called whenever the table needs to vacate empty slots either by removing
1560
// tombstones via rehash or growth to next capacity.
1561
ABSL_ATTRIBUTE_NOINLINE
1562
size_t RehashOrGrowToNextCapacityAndPrepareInsert(
1563
    CommonFields& common, const PolicyFunctions& __restrict policy,
1564
0
    size_t new_hash) {
1565
0
  const size_t cap = common.capacity();
1566
0
  ABSL_ASSUME(cap > 0);
1567
0
  if (cap > Group::kWidth &&
1568
      // Do these calculations in 64-bit to avoid overflow.
1569
0
      common.size() * uint64_t{32} <= cap * uint64_t{25}) {
1570
    // Squash DELETED without growing if there is enough capacity.
1571
    //
1572
    // Rehash in place if the current size is <= 25/32 of capacity.
1573
    // Rationale for such a high factor: 1) DropDeletesWithoutResize() is
1574
    // faster than resize, and 2) it takes quite a bit of work to add
1575
    // tombstones.  In the worst case, it seems to take approximately 4
1576
    // insert/erase pairs to create a single tombstone and so if we are
1577
    // rehashing because of tombstones, we can afford to rehash-in-place as
1578
    // long as we are reclaiming at least 1/8 the capacity without doing more
1579
    // than 2X the work.  (Where "work" is defined to be size() for rehashing
1580
    // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
1581
    // place is faster per operation than inserting or even doubling the size
1582
    // of the table, so we actually afford to reclaim even less space from a
1583
    // resize-in-place.  The decision is to rehash in place if we can reclaim
1584
    // at about 1/8th of the usable capacity (specifically 3/28 of the
1585
    // capacity) which means that the total cost of rehashing will be a small
1586
    // fraction of the total work.
1587
    //
1588
    // Here is output of an experiment using the BM_CacheInSteadyState
1589
    // benchmark running the old case (where we rehash-in-place only if we can
1590
    // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
1591
    // if we can recover 3/32*capacity).
1592
    //
1593
    // Note that although the worst-case number of rehashes jumped up from
1594
    // 15 to 190, the number of operations per second is almost the same.
1595
    //
1596
    // Abridged output of running BM_CacheInSteadyState benchmark from
1597
    // raw_hash_set_benchmark.   N is the number of insert/erase operations.
1598
    //
1599
    //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
1600
    // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
1601
    //  448 | 145284       0.44        18 | 140118       0.44        19
1602
    //  493 | 152546       0.24        11 | 151417       0.48        28
1603
    //  538 | 151439       0.26        11 | 151152       0.53        38
1604
    //  583 | 151765       0.28        11 | 150572       0.57        50
1605
    //  628 | 150241       0.31        11 | 150853       0.61        66
1606
    //  672 | 149602       0.33        12 | 150110       0.66        90
1607
    //  717 | 149998       0.35        12 | 149531       0.70       129
1608
    //  762 | 149836       0.37        13 | 148559       0.74       190
1609
    //  807 | 149736       0.39        14 | 151107       0.39        14
1610
    //  852 | 150204       0.42        15 | 151019       0.42        15
1611
0
    return DropDeletesWithoutResizeAndPrepareInsert(common, policy, new_hash);
1612
0
  } else {
1613
    // Otherwise grow the container.
1614
0
    return GrowToNextCapacityAndPrepareInsert(common, policy, new_hash);
1615
0
  }
1616
0
}
1617
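The rehash-versus-grow decision above can be isolated into a single predicate: rehash in place only when the table spans more than one group and the size is at most 25/32 of the capacity; otherwise grow. A sketch of the predicate with a worked example:

// Sketch of the drop-deletes-without-resize predicate used above.
#include <cstddef>
#include <cstdint>

constexpr bool ShouldDropDeletesWithoutResizeSketch(std::size_t size,
                                                    std::size_t capacity,
                                                    std::size_t group_width) {
  // 64-bit arithmetic mirrors the overflow-avoidance note in the source.
  return capacity > group_width &&
         std::uint64_t{size} * 32 <= std::uint64_t{capacity} * 25;
}

// capacity 127, size 99: 99 * 32 = 3168 <= 127 * 25 = 3175, so rehash in place.
static_assert(ShouldDropDeletesWithoutResizeSketch(99, 127, 16));
// capacity 127, size 100: 3200 > 3175, so grow to the next capacity instead.
static_assert(!ShouldDropDeletesWithoutResizeSketch(100, 127, 16));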
1618
// Slow path for PrepareInsertLarge that is called when the table has deleted
1619
// slots or needs to be resized or rehashed.
1620
size_t PrepareInsertLargeSlow(CommonFields& common,
1621
                              const PolicyFunctions& __restrict policy,
1622
4
                              size_t hash) {
1623
4
  const GrowthInfo growth_info = common.growth_info();
1624
4
  ABSL_SWISSTABLE_ASSERT(!growth_info.HasNoDeletedAndGrowthLeft());
1625
4
  if (ABSL_PREDICT_TRUE(growth_info.HasNoGrowthLeftAndNoDeleted())) {
1626
    // Table without deleted slots (>95% cases) that needs to be resized.
1627
4
    ABSL_SWISSTABLE_ASSERT(growth_info.HasNoDeleted() &&
1628
4
                           growth_info.GetGrowthLeft() == 0);
1629
4
    return GrowToNextCapacityAndPrepareInsert(common, policy, hash);
1630
4
  }
1631
0
  if (ABSL_PREDICT_FALSE(growth_info.HasNoGrowthLeftAssumingMayHaveDeleted())) {
1632
    // Table with deleted slots that needs to be rehashed or resized.
1633
0
    return RehashOrGrowToNextCapacityAndPrepareInsert(common, policy, hash);
1634
0
  }
1635
  // Table with deleted slots that has space for the inserting element.
1636
0
  FindInfo target = find_first_non_full(common, hash);
1637
0
  PrepareInsertCommon(common);
1638
0
  common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);
1639
0
  SetCtrlInLargeTable(common, target.offset, H2(hash), policy.slot_size);
1640
0
  common.infoz().RecordInsertMiss(hash, target.probe_length);
1641
0
  return target.offset;
1642
0
}
1643
1644
// Resizes empty non-allocated SOO table to NextCapacity(SooCapacity()),
1645
// forces the table to be sampled and prepares the insert.
1646
// SOO tables need to switch from SOO to heap in order to store the infoz.
1647
// Requires:
1648
//   1. `c.capacity() == SooCapacity()`.
1649
//   2. `c.empty()`.
1650
ABSL_ATTRIBUTE_NOINLINE size_t
1651
GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1652
    CommonFields& common, const PolicyFunctions& __restrict policy,
1653
0
    absl::FunctionRef<size_t(size_t)> get_hash) {
1654
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, NextCapacity(SooCapacity()),
1655
0
                                   /*force_infoz=*/true);
1656
0
  PrepareInsertCommon(common);
1657
0
  common.growth_info().OverwriteEmptyAsFull();
1658
0
  const size_t new_hash = get_hash(common.seed().seed());
1659
0
  SetCtrlInSingleGroupTable(common, SooSlotIndex(), H2(new_hash),
1660
0
                            policy.slot_size);
1661
0
  common.infoz().RecordInsertMiss(new_hash, /*distance_from_desired=*/0);
1662
0
  return SooSlotIndex();
1663
0
}
1664
1665
// Resizes empty non-allocated table to the capacity to fit new_size elements.
1666
// Requires:
1667
//   1. `c.capacity() == policy.soo_capacity()`.
1668
//   2. `c.empty()`.
1669
//   3. `new_size > policy.soo_capacity()`.
1670
// An attempt will be made to sample the table.
1671
void ReserveEmptyNonAllocatedTableToFitNewSize(
1672
    CommonFields& common, const PolicyFunctions& __restrict policy,
1673
0
    size_t new_size) {
1674
0
  ValidateMaxSize(new_size, policy.slot_size);
1675
0
  ABSL_ASSUME(new_size > 0);
1676
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, SizeToCapacity(new_size),
1677
0
                                   /*force_infoz=*/false);
1678
  // This is after resize, to ensure that we have completed the allocation
1679
  // and have potentially sampled the hashtable.
1680
0
  common.infoz().RecordReservation(new_size);
1681
0
}
1682
1683
// Type erased version of raw_hash_set::reserve for tables that have an
1684
// allocated backing array.
1685
//
1686
// Requires:
1687
//   1. `c.capacity() > policy.soo_capacity()` OR `!c.empty()`.
1688
// Reserving already allocated tables is considered to be a rare case.
1689
ABSL_ATTRIBUTE_NOINLINE void ReserveAllocatedTable(
1690
    CommonFields& common, const PolicyFunctions& __restrict policy,
1691
0
    size_t new_size) {
1692
0
  const size_t cap = common.capacity();
1693
0
  ValidateMaxSize(new_size, policy.slot_size);
1694
0
  ABSL_ASSUME(new_size > 0);
1695
0
  const size_t new_capacity = SizeToCapacity(new_size);
1696
0
  if (cap == policy.soo_capacity()) {
1697
0
    ABSL_SWISSTABLE_ASSERT(!common.empty());
1698
0
    ResizeFullSooTable(common, policy, new_capacity,
1699
0
                       ResizeFullSooTableSamplingMode::kNoSampling);
1700
0
  } else {
1701
0
    ABSL_SWISSTABLE_ASSERT(cap > policy.soo_capacity());
1702
    // TODO(b/382423690): consider using GrowToNextCapacity, when applicable.
1703
0
    ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
1704
0
  }
1705
0
  common.infoz().RecordReservation(new_size);
1706
0
}
1707
1708
// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO
1709
// table to be sampled. SOO tables need to switch from SOO to heap in order to
1710
// store the infoz. No-op if sampling is disabled or not possible.
1711
void GrowFullSooTableToNextCapacityForceSampling(
1712
0
    CommonFields& common, const PolicyFunctions& __restrict policy) {
1713
0
  AssertFullSoo(common, policy);
1714
0
  ResizeFullSooTable(
1715
0
      common, policy, NextCapacity(SooCapacity()),
1716
0
      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled);
1717
0
}
1718
1719
}  // namespace
1720
1721
10
void* GetRefForEmptyClass(CommonFields& common) {
1722
  // The empty base optimization typically makes the empty base class address
1723
  // the same as the first address of the derived class object.
1724
  // But we generally assume that for empty classes we can return any valid
1725
  // pointer.
1726
10
  return &common;
1727
10
}
1728
1729
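GetRefForEmptyClass relies on the fact that a stateless, empty class has no per-object storage, so any valid address can stand in for it. A toy illustration of the empty base optimization the comment refers to (the names below are hypothetical, not raw_hash_set types):

// Toy: an empty hasher folded into a derived object adds no bytes, and its
// address coincides with the start of the derived object.
#include <cstddef>
#include <functional>
#include <type_traits>

struct EmptyHasherSketch {
  std::size_t operator()(int v) const { return std::hash<int>{}(v); }
};

struct TableFieldsSketch : EmptyHasherSketch {  // EBO: the base adds no storage
  std::size_t size = 0;
  std::size_t capacity = 0;
};

static_assert(std::is_empty_v<EmptyHasherSketch>);
static_assert(sizeof(TableFieldsSketch) == 2 * sizeof(std::size_t));

int main() {
  TableFieldsSketch fields;
  // The empty base lives at the same address as the derived object, which is
  // why handing out "any valid pointer" for an empty class is good enough.
  EmptyHasherSketch* hasher = &fields;
  return static_cast<void*>(hasher) == static_cast<void*>(&fields) ? 0 : 1;
}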
void ResizeAllocatedTableWithSeedChange(
1730
    CommonFields& common, const PolicyFunctions& __restrict policy,
1731
0
    size_t new_capacity) {
1732
0
  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedAllocated>(
1733
0
      common, policy, new_capacity, common.infoz());
1734
0
}
1735
1736
void ReserveEmptyNonAllocatedTableToFitBucketCount(
1737
    CommonFields& common, const PolicyFunctions& __restrict policy,
1738
0
    size_t bucket_count) {
1739
0
  size_t new_capacity = NormalizeCapacity(bucket_count);
1740
0
  ValidateMaxSize(CapacityToGrowth(new_capacity), policy.slot_size);
1741
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
1742
0
                                   /*force_infoz=*/false);
1743
0
}
1744
1745
// Resizes a full SOO table to the NextCapacity(SooCapacity()).
1746
template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
1747
size_t GrowSooTableToNextCapacityAndPrepareInsert(
1748
    CommonFields& common, const PolicyFunctions& __restrict policy,
1749
0
    absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling) {
1750
0
  AssertSoo(common, policy);
1751
0
  if (ABSL_PREDICT_FALSE(force_sampling)) {
1752
    // The table is empty; it is only used for forced sampling of SOO tables.
1753
0
    return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1754
0
        common, policy, get_hash);
1755
0
  }
1756
0
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
1757
0
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
1758
0
  const size_t slot_size = policy.slot_size;
1759
0
  void* alloc = policy.get_char_alloc(common);
1760
0
  common.set_capacity(kNewCapacity);
1761
1762
  // Since the table is not empty, it will not be sampled.
1763
  // The decision to sample was already made during the first insertion.
1764
  //
1765
  // We do not set control and slots in CommonFields yet to avoid overriding
1766
  // SOO data.
1767
0
  const auto [new_ctrl, new_slots] = AllocBackingArray(
1768
0
      common, policy, kNewCapacity, /*has_infoz=*/false, alloc);
1769
1770
0
  PrepareInsertCommon(common);
1771
0
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1772
0
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1773
0
  common.generate_new_seed(/*has_infoz=*/false);
1774
0
  const h2_t soo_slot_h2 = H2(policy.hash_slot(
1775
0
      policy.hash_fn(common), common.soo_data(), common.seed().seed()));
1776
0
  const size_t new_hash = get_hash(common.seed().seed());
1777
1778
0
  const size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1779
0
  InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset,
1780
0
                                      new_ctrl);
1781
1782
0
  SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
1783
0
  void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
1784
0
  SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
1785
0
  if constexpr (TransferUsesMemcpy) {
1786
    // Target slot is placed at index 1, but capacity is at
1787
    // minimum 3. So we are allowed to copy at least twice as much
1788
    // memory.
1789
0
    static_assert(SooSlotIndex() == 1);
1790
0
    static_assert(SooSlotMemcpySize > 0);
1791
0
    static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
1792
0
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
1793
0
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
1794
0
    void* next_slot = SlotAddress(target_slot, 1, slot_size);
1795
0
    SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1796
0
    std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
1797
0
    SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1798
0
  } else {
1799
0
    static_assert(SooSlotMemcpySize == 0);
1800
0
    policy.transfer_n(&common, target_slot, common.soo_data(), 1);
1801
0
  }
1802
0
  common.set_control(new_ctrl);
1803
0
  common.set_slots(new_slots);
1804
1805
  // A full SOO table can't be sampled: if the SOO table had been sampled, it
1806
  // would have been resized to the next capacity already.
1807
0
  ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());
1808
0
  SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
1809
0
                                slot_size);
1810
0
  return offset;
1811
0
}
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<0ul, false>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<1ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<4ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<8ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<16ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
1812
1813
void Rehash(CommonFields& common, const PolicyFunctions& __restrict policy,
1814
0
            size_t n) {
1815
0
  const size_t cap = common.capacity();
1816
1817
0
  auto clear_backing_array = [&]() {
1818
0
    ClearBackingArray(common, policy, policy.get_char_alloc(common),
1819
0
                      /*reuse=*/false, policy.soo_enabled);
1820
0
  };
1821
1822
0
  const size_t slot_size = policy.slot_size;
1823
1824
0
  if (n == 0) {
1825
0
    if (cap <= policy.soo_capacity()) return;
1826
0
    if (common.empty()) {
1827
0
      clear_backing_array();
1828
0
      return;
1829
0
    }
1830
0
    if (common.size() <= policy.soo_capacity()) {
1831
      // When the table is already sampled, we keep it sampled.
1832
0
      if (common.infoz().IsSampled()) {
1833
0
        static constexpr size_t kInitialSampledCapacity =
1834
0
            NextCapacity(SooCapacity());
1835
0
        if (cap > kInitialSampledCapacity) {
1836
0
          ResizeAllocatedTableWithSeedChange(common, policy,
1837
0
                                             kInitialSampledCapacity);
1838
0
        }
1839
        // This asserts that we didn't lose sampling coverage in `resize`.
1840
0
        ABSL_SWISSTABLE_ASSERT(common.infoz().IsSampled());
1841
0
        return;
1842
0
      }
1843
0
      ABSL_SWISSTABLE_ASSERT(slot_size <= sizeof(HeapOrSoo));
1844
0
      ABSL_SWISSTABLE_ASSERT(policy.slot_align <= alignof(HeapOrSoo));
1845
0
      HeapOrSoo tmp_slot;
1846
0
      size_t begin_offset = FindFirstFullSlot(0, cap, common.control());
1847
0
      policy.transfer_n(
1848
0
          &common, &tmp_slot,
1849
0
          SlotAddress(common.slot_array(), begin_offset, slot_size), 1);
1850
0
      clear_backing_array();
1851
0
      policy.transfer_n(&common, common.soo_data(), &tmp_slot, 1);
1852
0
      common.set_full_soo();
1853
0
      return;
1854
0
    }
1855
0
  }
1856
1857
0
  ValidateMaxSize(n, policy.slot_size);
1858
  // bitor is a faster way of doing `max` here. We will round up to the next
1859
  // power-of-2-minus-1, so bitor is good enough.
1860
0
  const size_t new_capacity =
1861
0
      NormalizeCapacity(n | SizeToCapacity(common.size()));
1862
  // n == 0 unconditionally rehashes as per the standard.
1863
0
  if (n == 0 || new_capacity > cap) {
1864
0
    if (cap == policy.soo_capacity()) {
1865
0
      if (common.empty()) {
1866
0
        ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
1867
0
                                         /*force_infoz=*/false);
1868
0
      } else {
1869
0
        ResizeFullSooTable(common, policy, new_capacity,
1870
0
                           ResizeFullSooTableSamplingMode::kNoSampling);
1871
0
      }
1872
0
    } else {
1873
0
      ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
1874
0
    }
1875
    // This is after resize, to ensure that we have completed the allocation
1876
    // and have potentially sampled the hashtable.
1877
0
    common.infoz().RecordReservation(n);
1878
0
  }
1879
0
}
1880
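The `n | SizeToCapacity(common.size())` trick in Rehash works because capacities are normalized up to the next 2^k - 1: the bitwise-or has the same highest set bit as the larger operand, so normalizing it gives the same result as normalizing max(a, b), without a branch. A sketch with a stand-in NormalizeCapacity:

// Sketch: bitwise-or is an adequate max() before rounding up to 2^k - 1.
// NormalizeCapacitySketch fills every bit below the highest set bit.
#include <bit>
#include <cstddef>

constexpr std::size_t NormalizeCapacitySketch(std::size_t n) {
  return n ? ~std::size_t{} >> std::countl_zero(n) : 1;
}

// a | b keeps the same highest set bit as max(a, b), so normalization agrees.
static_assert(NormalizeCapacitySketch(5 | 9) == NormalizeCapacitySketch(9));
static_assert(NormalizeCapacitySketch(12 | 100) == NormalizeCapacitySketch(100));
static_assert(NormalizeCapacitySketch(100) == 127);
static_assert(NormalizeCapacitySketch(0) == 1);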
1881
void Copy(CommonFields& common, const PolicyFunctions& __restrict policy,
1882
          const CommonFields& other,
1883
0
          absl::FunctionRef<void(void*, const void*)> copy_fn) {
1884
0
  const size_t size = other.size();
1885
0
  ABSL_SWISSTABLE_ASSERT(size > 0);
1886
0
  const size_t soo_capacity = policy.soo_capacity();
1887
0
  const size_t slot_size = policy.slot_size;
1888
0
  const bool soo_enabled = policy.soo_enabled;
1889
0
  if (size == 1) {
1890
0
    if (!soo_enabled) ReserveTableToFitNewSize(common, policy, 1);
1891
0
    IncrementSmallSize(common, policy);
1892
0
    const size_t other_capacity = other.capacity();
1893
0
    const void* other_slot =
1894
0
        other_capacity <= soo_capacity ? other.soo_data()
1895
0
        : other.is_small()
1896
0
            ? other.slot_array()
1897
0
            : SlotAddress(other.slot_array(),
1898
0
                          FindFirstFullSlot(0, other_capacity, other.control()),
1899
0
                          slot_size);
1900
0
    copy_fn(soo_enabled ? common.soo_data() : common.slot_array(), other_slot);
1901
1902
0
    if (soo_enabled && policy.is_hashtablez_eligible &&
1903
0
        ShouldSampleNextTable()) {
1904
0
      GrowFullSooTableToNextCapacityForceSampling(common, policy);
1905
0
    }
1906
0
    return;
1907
0
  }
1908
1909
0
  ReserveTableToFitNewSize(common, policy, size);
1910
0
  auto infoz = common.infoz();
1911
0
  ABSL_SWISSTABLE_ASSERT(other.capacity() > soo_capacity);
1912
0
  const size_t cap = common.capacity();
1913
0
  ABSL_SWISSTABLE_ASSERT(cap > soo_capacity);
1914
0
  size_t offset = cap;
1915
0
  const void* hash_fn = policy.hash_fn(common);
1916
0
  auto hasher = policy.hash_slot;
1917
0
  const size_t seed = common.seed().seed();
1918
0
  IterateOverFullSlotsImpl(
1919
0
      other, slot_size, [&](const ctrl_t*, void* that_slot) {
1920
        // The table is guaranteed to be empty, so we can do this faster than
1921
        // a full `insert`.
1922
0
        const size_t hash = (*hasher)(hash_fn, that_slot, seed);
1923
0
        FindInfo target = find_first_non_full(common, hash);
1924
0
        infoz.RecordInsertMiss(hash, target.probe_length);
1925
0
        offset = target.offset;
1926
0
        SetCtrl(common, offset, H2(hash), slot_size);
1927
0
        copy_fn(SlotAddress(common.slot_array(), offset, slot_size), that_slot);
1928
0
        common.maybe_increment_generation_on_insert();
1929
0
      });
1930
0
  common.increment_size(size);
1931
0
  common.growth_info().OverwriteManyEmptyAsFull(size);
1932
0
}
1933
1934
void ReserveTableToFitNewSize(CommonFields& common,
1935
                              const PolicyFunctions& __restrict policy,
1936
0
                              size_t new_size) {
1937
0
  common.reset_reserved_growth(new_size);
1938
0
  common.set_reservation_size(new_size);
1939
0
  ABSL_SWISSTABLE_ASSERT(new_size > policy.soo_capacity());
1940
0
  const size_t cap = common.capacity();
1941
0
  if (ABSL_PREDICT_TRUE(common.empty() && cap <= policy.soo_capacity())) {
1942
0
    return ReserveEmptyNonAllocatedTableToFitNewSize(common, policy, new_size);
1943
0
  }
1944
1945
0
  ABSL_SWISSTABLE_ASSERT(!common.empty() || cap > policy.soo_capacity());
1946
0
  ABSL_SWISSTABLE_ASSERT(cap > 0);
1947
0
  const size_t max_size_before_growth =
1948
0
      IsSmallCapacity(cap) ? cap : common.size() + common.growth_left();
1949
0
  if (new_size <= max_size_before_growth) {
1950
0
    return;
1951
0
  }
1952
0
  ReserveAllocatedTable(common, policy, new_size);
1953
0
}
1954
1955
namespace {
1956
size_t PrepareInsertLargeImpl(CommonFields& common,
1957
                              const PolicyFunctions& __restrict policy,
1958
                              size_t hash,
1959
                              Group::NonIterableBitMaskType mask_empty,
1960
12
                              FindInfo target_group) {
1961
12
  ABSL_SWISSTABLE_ASSERT(!common.is_small());
1962
12
  const GrowthInfo growth_info = common.growth_info();
1963
  // When there are no deleted slots in the table
1964
  // and growth_left is positive, we can insert at the first
1965
  // empty slot in the probe sequence (target).
1966
12
  if (ABSL_PREDICT_FALSE(!growth_info.HasNoDeletedAndGrowthLeft())) {
1967
4
    return PrepareInsertLargeSlow(common, policy, hash);
1968
4
  }
1969
8
  PrepareInsertCommon(common);
1970
8
  common.growth_info().OverwriteEmptyAsFull();
1971
8
  target_group.offset += mask_empty.LowestBitSet();
1972
8
  target_group.offset &= common.capacity();
1973
8
  SetCtrl(common, target_group.offset, H2(hash), policy.slot_size);
1974
8
  common.infoz().RecordInsertMiss(hash, target_group.probe_length);
1975
8
  return target_group.offset;
1976
12
}
1977
}  // namespace
1978
1979
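PrepareInsertLargeImpl advances the insert offset with `offset &= capacity()`; because non-small capacities are of the form 2^k - 1, the mask is equivalent to taking the position modulo capacity + 1, which folds a match found in the mirrored control bytes back to its real slot. A short sketch:

// Sketch: with capacity of the form 2^k - 1, masking by capacity wraps an
// offset the same way "% (capacity + 1)" would, without a division.
#include <cstddef>

constexpr std::size_t WrapOffsetSketch(std::size_t group_start,
                                       std::size_t bit_in_group,
                                       std::size_t capacity) {
  return (group_start + bit_in_group) & capacity;
}

// capacity 15: a group load starting at slot 12 whose first empty byte is 6
// positions further sits in the mirrored bytes past the sentinel; the mask
// folds it back to slot 2.
static_assert(WrapOffsetSketch(12, 6, 15) == 2);
static_assert(WrapOffsetSketch(12, 6, 15) == (12 + 6) % (15 + 1));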
size_t PrepareInsertLarge(CommonFields& common,
1980
                          const PolicyFunctions& __restrict policy, size_t hash,
1981
                          Group::NonIterableBitMaskType mask_empty,
1982
12
                          FindInfo target_group) {
1983
  // NOLINTNEXTLINE(misc-static-assert)
1984
12
  ABSL_SWISSTABLE_ASSERT(!SwisstableGenerationsEnabled());
1985
12
  return PrepareInsertLargeImpl(common, policy, hash, mask_empty, target_group);
1986
12
}
1987
1988
size_t PrepareInsertLargeGenerationsEnabled(
1989
    CommonFields& common, const PolicyFunctions& policy, size_t hash,
1990
    Group::NonIterableBitMaskType mask_empty, FindInfo target_group,
1991
0
    absl::FunctionRef<size_t(size_t)> recompute_hash) {
1992
  // NOLINTNEXTLINE(misc-static-assert)
1993
0
  ABSL_SWISSTABLE_ASSERT(SwisstableGenerationsEnabled());
1994
0
  if (common.should_rehash_for_bug_detection_on_insert()) {
1995
    // Move to a different heap allocation in order to detect bugs.
1996
0
    const size_t cap = common.capacity();
1997
0
    ResizeAllocatedTableWithSeedChange(
1998
0
        common, policy, common.growth_left() > 0 ? cap : NextCapacity(cap));
1999
0
    hash = recompute_hash(common.seed().seed());
2000
0
    std::tie(target_group, mask_empty) =
2001
0
        find_first_non_full_group(common, hash);
2002
0
  }
2003
0
  return PrepareInsertLargeImpl(common, policy, hash, mask_empty, target_group);
2004
0
}
2005
2006
namespace {
2007
// Returns true if both of the following hold:
2008
// 1. OptimalMemcpySizeForSooSlotTransfer(left) >
2009
//    OptimalMemcpySizeForSooSlotTransfer(left - 1)
2010
// 2. OptimalMemcpySizeForSooSlotTransfer(i) is the same for all i in [left,
2011
// right].
2012
// This function is used to verify that we have all the possible template
2013
// instantiations for GrowFullSooTableToNextCapacity.
2014
// With this verification the problem may be detected at compile time instead of
2015
// link time.
2016
constexpr bool VerifyOptimalMemcpySizeForSooSlotTransferRange(size_t left,
2017
0
                                                              size_t right) {
2018
0
  size_t optimal_size_for_range = OptimalMemcpySizeForSooSlotTransfer(left);
2019
0
  if (optimal_size_for_range <= OptimalMemcpySizeForSooSlotTransfer(left - 1)) {
2020
0
    return false;
2021
0
  }
2022
0
  for (size_t i = left + 1; i <= right; ++i) {
2023
0
    if (OptimalMemcpySizeForSooSlotTransfer(i) != optimal_size_for_range) {
2024
0
      return false;
2025
0
    }
2026
0
  }
2027
0
  return true;
2028
0
}
2029
}  // namespace
2030
2031
// Extern template instantiation for inline function.
2032
template size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
2033
                                              size_t old_capacity,
2034
                                              ctrl_t* new_ctrl,
2035
                                              size_t new_capacity);
2036
2037
// We need to instantiate ALL possible template combinations because we define
2038
// the function in the cc file.
2039
template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
2040
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2041
    bool);
2042
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
2043
    OptimalMemcpySizeForSooSlotTransfer(1), true>(
2044
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2045
    bool);
2046
2047
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(2, 3));
2048
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
2049
    OptimalMemcpySizeForSooSlotTransfer(3), true>(
2050
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2051
    bool);
2052
2053
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(4, 8));
2054
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
2055
    OptimalMemcpySizeForSooSlotTransfer(8), true>(
2056
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2057
    bool);
2058
2059
#if UINTPTR_MAX == UINT32_MAX
2060
static_assert(MaxSooSlotSize() == 8);
2061
#else
2062
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(9, 16));
2063
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
2064
    OptimalMemcpySizeForSooSlotTransfer(16), true>(
2065
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2066
    bool);
2067
static_assert(MaxSooSlotSize() == 16);
2068
#endif
2069
2070
template void* AllocateBackingArray<BackingArrayAlignment(alignof(size_t)),
2071
                                    std::allocator<char>>(void* alloc,
2072
                                                          size_t n);
2073
template void DeallocateBackingArray<BackingArrayAlignment(alignof(size_t)),
2074
                                     std::allocator<char>>(
2075
    void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
2076
    size_t slot_align, bool had_infoz);
2077
2078
}  // namespace container_internal
2079
ABSL_NAMESPACE_END
2080
}  // namespace absl