Coverage Report

Created: 2025-11-24 06:23

/src/abseil-cpp/absl/container/internal/raw_hash_set.cc
Line | Count | Source
1
// Copyright 2018 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
15
#include "absl/container/internal/raw_hash_set.h"
16
17
#include <atomic>
18
#include <cassert>
19
#include <cstddef>
20
#include <cstdint>
21
#include <cstring>
22
#include <memory>
23
#include <tuple>
24
#include <utility>
25
26
#include "absl/base/attributes.h"
27
#include "absl/base/config.h"
28
#include "absl/base/dynamic_annotations.h"
29
#include "absl/base/internal/endian.h"
30
#include "absl/base/internal/raw_logging.h"
31
#include "absl/base/optimization.h"
32
#include "absl/container/internal/container_memory.h"
33
#include "absl/container/internal/hashtable_control_bytes.h"
34
#include "absl/container/internal/hashtablez_sampler.h"
35
#include "absl/container/internal/raw_hash_set_resize_impl.h"
36
#include "absl/functional/function_ref.h"
37
#include "absl/hash/hash.h"
38
39
namespace absl {
40
ABSL_NAMESPACE_BEGIN
41
namespace container_internal {
42
43
// Represents a control byte corresponding to a full slot with arbitrary hash.
44
0
constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
45
46
// A single control byte for default-constructed iterators. We leave it
47
// uninitialized because reading this memory is a bug.
48
ABSL_DLL ctrl_t kDefaultIterControl;
49
50
// We need one full byte followed by a sentinel byte for iterator::operator++.
51
ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[2] = {ZeroCtrlT(),
52
                                                        ctrl_t::kSentinel};
53
54
namespace {
55
56
#ifdef ABSL_SWISSTABLE_ASSERT
57
#error ABSL_SWISSTABLE_ASSERT cannot be directly set
58
#else
59
// We use this macro for assertions that users may see when the table is in an
60
// invalid state that sanitizers may help diagnose.
61
#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
62
112
  assert((CONDITION) && "Try enabling sanitizers.")
63
#endif
64
65
0
[[noreturn]] ABSL_ATTRIBUTE_NOINLINE void HashTableSizeOverflow() {
66
0
  ABSL_RAW_LOG(FATAL, "Hash table size overflow");
67
0
}
68
69
0
void ValidateMaxSize(size_t size, size_t slot_size) {
70
0
  if (IsAboveValidSize(size, slot_size)) {
71
0
    HashTableSizeOverflow();
72
0
  }
73
0
}
74
75
// Returns "random" seed.
76
0
inline size_t RandomSeed() {
77
0
#ifdef ABSL_HAVE_THREAD_LOCAL
78
0
  static thread_local size_t counter = 0;
79
0
  size_t value = ++counter;
80
#else   // ABSL_HAVE_THREAD_LOCAL
81
  static std::atomic<size_t> counter(0);
82
  size_t value = counter.fetch_add(1, std::memory_order_relaxed);
83
#endif  // ABSL_HAVE_THREAD_LOCAL
84
0
  return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
85
0
}
86
87
0
bool ShouldRehashForBugDetection(size_t capacity) {
88
  // Note: we can't use the abseil-random library because abseil-random
89
  // depends on swisstable. We want to return true with probability
90
  // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
91
  // we probe based on a random hash and see if the offset is less than
92
  // RehashProbabilityConstant().
93
0
  return probe(capacity, absl::HashOf(RandomSeed())).offset() <
94
0
         RehashProbabilityConstant();
95
0
}
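
The probability claim above can be sanity-checked directly: for a mask-based table the first probe offset of a random hash is uniform over the slot positions, so the chance that it lands below a fixed constant k is about k/capacity. A minimal self-contained sketch of that argument (hypothetical constant and RNG, not this file's types):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins: a fixed rehash constant and a 2^k - 1 capacity,
// so the first probe offset is just `hash & capacity`.
constexpr size_t kRehashProbabilityConstant = 16;

bool ShouldRehashSketch(size_t capacity, size_t random_hash) {
  size_t offset = random_hash & capacity;  // uniform in [0, capacity]
  return offset < kRehashProbabilityConstant;
}

int main() {
  size_t hits = 0, capacity = 255;
  const size_t trials = 1 << 20;
  uint64_t state = 1;  // cheap LCG standing in for absl::HashOf(RandomSeed())
  for (size_t i = 0; i < trials; ++i) {
    state = state * 6364136223846793005u + 1442695040888963407u;
    hits += ShouldRehashSketch(capacity, static_cast<size_t>(state >> 32));
  }
  std::printf("empirical p = %.4f (expect ~%.4f)\n",
              static_cast<double>(hits) / trials, 16.0 / 256.0);
}
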
96
97
// Finds a non-deterministic hash for a single-group table.
98
// The last two bits are used to find a position for a newly inserted element
99
// after resize.
100
// This function basically uses the last bits of H2 to save on a shift operation.
101
6
size_t SingleGroupTableH1(size_t hash, PerTableSeed seed) {
102
6
  return hash ^ seed.seed();
103
6
}
104
105
// Returns the offset of the new element after resize from capacity 1 to 3.
106
2
size_t Resize1To3NewOffset(size_t hash, PerTableSeed seed) {
107
  // After resize from capacity 1 to 3, we always have exactly the slot with
108
  // index 1 occupied, so we need to insert either at index 0 or index 2.
109
2
  static_assert(SooSlotIndex() == 1);
110
2
  return SingleGroupTableH1(hash, seed) & 2;
111
2
}
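
The `& 2` trick is easiest to see in isolation: after the 1-to-3 growth the surviving element occupies index 1, so the only legal targets are 0 and 2, and masking one pseudo-random bit of the mixed hash picks between exactly those two. A hedged standalone illustration:

#include <cassert>
#include <cstddef>

// Sketch: whatever (hash ^ seed) is, masking with 2 yields offset 0 or 2,
// never 1 -- and index 1 is exactly the slot that is already occupied.
size_t Resize1To3NewOffsetSketch(size_t hash, size_t seed) {
  return (hash ^ seed) & 2;
}

int main() {
  for (size_t h = 0; h < 16; ++h) {
    size_t off = Resize1To3NewOffsetSketch(h, /*seed=*/0x5a);
    assert(off == 0 || off == 2);
  }
}
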
112
113
// Returns the address of the i-th slot in `slot_array`, where each slot
114
// occupies `slot_size` bytes.
115
2
inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
116
2
  return static_cast<void*>(static_cast<char*>(slot_array) +
117
2
                            (slot * slot_size));
118
2
}
119
120
// Returns the address of the slot `i` iterations after `slot` assuming each
121
// slot has the specified size.
122
6
inline void* NextSlot(void* slot, size_t slot_size, size_t i = 1) {
123
6
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) +
124
6
                                 slot_size * i);
125
6
}
126
127
// Returns the address of the slot just before `slot` assuming each slot has the
128
// specified size.
129
0
inline void* PrevSlot(void* slot, size_t slot_size) {
130
0
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
131
0
}
132
133
}  // namespace
134
135
// Must be defined out-of-line to avoid MSVC error C2482 on some platforms,
136
// which is caused by non-constexpr initialization.
137
2
uint16_t HashtableSize::NextSeed() {
138
2
  static_assert(PerTableSeed::kBitCount == 16);
139
2
  thread_local uint16_t seed =
140
2
      static_cast<uint16_t>(reinterpret_cast<uintptr_t>(&seed));
141
2
  seed += uint16_t{0xad53};
142
2
  return seed;
143
2
}
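
Because 0xad53 is odd it is coprime with 2^16, so the per-thread seed walks through all 65536 values before repeating; the address of the thread_local adds per-thread and per-load variation. A quick standalone check of the full-period property:

#include <bitset>
#include <cstdint>
#include <cstdio>

int main() {
  std::bitset<65536> seen;
  uint16_t seed = 0;
  for (int i = 0; i < 65536; ++i) {
    seed += uint16_t{0xad53};  // odd step => full period modulo 2^16
    seen.set(seed);
  }
  std::printf("distinct seeds: %zu\n", seen.count());  // prints 65536
}
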
144
145
0
GenerationType* EmptyGeneration() {
146
0
  if (SwisstableGenerationsEnabled()) {
147
0
    constexpr size_t kNumEmptyGenerations = 1024;
148
0
    static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{};
149
0
    return const_cast<GenerationType*>(
150
0
        &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);
151
0
  }
152
0
  return nullptr;
153
0
}
154
155
bool CommonFieldsGenerationInfoEnabled::
156
0
    should_rehash_for_bug_detection_on_insert(size_t capacity) const {
157
0
  if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
158
0
  if (reserved_growth_ > 0) return false;
159
0
  return ShouldRehashForBugDetection(capacity);
160
0
}
161
162
bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
163
0
    size_t capacity) const {
164
0
  return ShouldRehashForBugDetection(capacity);
165
0
}
166
167
namespace {
168
169
// Probes an array of control bits using a probe sequence,
170
// and returns the mask corresponding to the first group with a deleted or empty
171
// slot.
172
inline Group::NonIterableBitMaskType probe_till_first_non_full_group(
173
    const ctrl_t* ctrl, probe_seq<Group::kWidth>& seq,
174
0
    [[maybe_unused]] size_t capacity) {
175
0
  while (true) {
176
0
    GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
177
0
    auto mask = g.MaskEmptyOrDeleted();
178
0
    if (mask) {
179
0
      return mask;
180
0
    }
181
0
    seq.next();
182
0
    ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity && "full table!");
183
0
  }
184
0
}
185
186
FindInfo find_first_non_full_from_h1(const ctrl_t* ctrl, size_t h1,
187
0
                                     size_t capacity) {
188
0
  auto seq = probe_h1(capacity, h1);
189
0
  if (IsEmptyOrDeleted(ctrl[seq.offset()])) {
190
0
    return {seq.offset(), /*probe_length=*/0};
191
0
  }
192
0
  auto mask = probe_till_first_non_full_group(ctrl, seq, capacity);
193
0
  return {seq.offset(mask.LowestBitSet()), seq.index()};
194
0
}
195
196
// Probes an array of control bits using a probe sequence derived from `hash`,
197
// and returns the offset corresponding to the first deleted or empty slot.
198
//
199
// Behavior when the entire table is full is undefined.
200
//
201
// NOTE: this function must work with tables having both empty and deleted
202
// slots in the same group. Such tables appear during `erase()`.
203
0
FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
204
0
  return find_first_non_full_from_h1(common.control(), H1(hash),
205
0
                                     common.capacity());
206
0
}
207
208
// Same as `find_first_non_full`, but returns the mask corresponding to the
209
// first group with a deleted or empty slot.
210
std::pair<FindInfo, Group::NonIterableBitMaskType> find_first_non_full_group(
211
0
    const CommonFields& common, size_t hash) {
212
0
  auto seq = probe(common, hash);
213
0
  auto mask =
214
0
      probe_till_first_non_full_group(common.control(), seq, common.capacity());
215
0
  return {{seq.offset(), seq.index()}, mask};
216
0
}
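
For context, the probe sequence consumed here is quadratic over whole groups: successive offsets are h1, h1+w, h1+3w, h1+6w, ... modulo the table size, where w is Group::kWidth, and with a capacity of 2^k - 1 this visits every group exactly once. A minimal sketch of that sequence shape (an assumption based on `probe_seq` in the header, not a copy of it):

#include <cstddef>
#include <cstdio>

// Assumed shape of the probe sequence (capacity must be 2^k - 1 so the
// bit-mask wraparound is valid).
struct ProbeSeqSketch {
  size_t mask, offset, index = 0, width;
  ProbeSeqSketch(size_t hash, size_t capacity, size_t group_width)
      : mask(capacity), offset(hash & mask), width(group_width) {}
  void next() {
    index += width;                 // stride grows by one group per step
    offset = (offset + index) & mask;
  }
};

int main() {
  ProbeSeqSketch seq(/*hash=*/42, /*capacity=*/63, /*group_width=*/16);
  for (int i = 0; i < 4; ++i) {     // offsets 42, 58, 26, 10 for this input
    std::printf("probe %d: group offset %zu\n", i, seq.offset);
    seq.next();
  }
}
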
217
218
// Whether a table fits in half a group. A half-group table fits entirely into a
219
// probing group, i.e., has a capacity < `Group::kWidth`.
220
//
221
// In half-group mode we are able to use the whole capacity. The extra control
222
// bytes give us at least one "empty" control byte to stop the iteration.
223
// This is important to make 1 a valid capacity.
224
//
225
// In half-group mode only the first `capacity` control bytes after the sentinel
226
// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
227
// represent a real slot.
228
0
constexpr bool is_half_group(size_t capacity) {
229
0
  return capacity < Group::kWidth - 1;
230
0
}
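
Concretely, with a 16-wide group the half-group capacities are 1, 3, and 7, and with an 8-wide group they are 1 and 3. A tiny standalone check of the predicate under those width assumptions:

#include <cstddef>

// The predicate above, restated for two assumed group widths.
constexpr bool IsHalfGroupSketch(size_t capacity, size_t group_width) {
  return capacity < group_width - 1;
}
static_assert(IsHalfGroupSketch(7, 16) && !IsHalfGroupSketch(15, 16));
static_assert(IsHalfGroupSketch(3, 8) && !IsHalfGroupSketch(7, 8));
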
231
232
template <class Fn>
233
0
void IterateOverFullSlotsImpl(const CommonFields& c, size_t slot_size, Fn cb) {
234
0
  const size_t cap = c.capacity();
235
0
  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(cap));
236
0
  const ctrl_t* ctrl = c.control();
237
0
  void* slot = c.slot_array();
238
0
  if (is_half_group(cap)) {
239
    // Mirrored/cloned control bytes in a half-group table are also located in the
240
    // first group (starting from position 0). We take the group from position
241
    // `capacity` in order to avoid duplicates.
242
243
    // A half-group table's capacity fits into a portable group, where
244
    // GroupPortableImpl::MaskFull is more efficient for
245
    // capacity <= GroupPortableImpl::kWidth.
246
0
    ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&
247
0
                           "unexpectedly large half-group capacity");
248
0
    static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
249
0
                  "unexpected group width");
250
    // The group starts from the kSentinel slot, so indices in the mask will
251
    // be increased by 1.
252
0
    const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
253
0
    --ctrl;
254
0
    slot = PrevSlot(slot, slot_size);
255
0
    for (uint32_t i : mask) {
256
0
      cb(ctrl + i, SlotAddress(slot, i, slot_size));
257
0
    }
258
0
    return;
259
0
  }
260
0
  size_t remaining = c.size();
261
0
  ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
262
0
  while (remaining != 0) {
263
0
    for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
264
0
      ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&
265
0
                             "hash table was modified unexpectedly");
266
0
      cb(ctrl + i, SlotAddress(slot, i, slot_size));
267
0
      --remaining;
268
0
    }
269
0
    ctrl += Group::kWidth;
270
0
    slot = NextSlot(slot, slot_size, Group::kWidth);
271
0
    ABSL_SWISSTABLE_ASSERT(
272
0
        (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
273
0
        "hash table was modified unexpectedly");
274
0
  }
275
  // NOTE: erasing the current element is allowed in the callback for the
276
  // absl::erase_if specialization, so we use `>=`.
277
0
  ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&
278
0
                         "hash table was modified unexpectedly");
279
0
}
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)> >(absl::container_internal::CommonFields const&, unsigned long, absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)>)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0>(absl::container_internal::CommonFields const&, unsigned long, absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0)
280
281
}  // namespace
282
283
0
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
284
0
  ABSL_SWISSTABLE_ASSERT(ctrl[capacity] == ctrl_t::kSentinel);
285
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
286
0
  for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
287
0
    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
288
0
  }
289
  // Copy the cloned ctrl bytes.
290
0
  std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
291
0
  ctrl[capacity] = ctrl_t::kSentinel;
292
0
}
293
294
void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
295
0
                          absl::FunctionRef<void(const ctrl_t*, void*)> cb) {
296
0
  IterateOverFullSlotsImpl(c, slot_size, cb);
297
0
}
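
The callback contract above is: for every full slot, the callback receives the control byte pointer and the type-erased slot pointer, in table order. A self-contained analog with a toy control array (the 0x80 high-bit convention for non-full bytes is the SwissTable one; the types here are stand-ins):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>

// Toy analog of the visitation contract: walk a control array and invoke the
// callback for every "full" byte. The 0x80 high-bit convention (set for
// kEmpty/kDeleted/kSentinel, clear for full slots holding H2) matches
// SwissTable; everything else here is a stand-in.
constexpr uint8_t kEmptyByte = 0x80, kSentinelByte = 0xFF;

void ForEachFull(const uint8_t* ctrl, size_t capacity,
                 const std::function<void(size_t)>& cb) {
  for (size_t i = 0; i < capacity; ++i) {
    if ((ctrl[i] & 0x80) == 0) cb(i);  // full slots have H2 in the low 7 bits
  }
}

int main() {
  const uint8_t ctrl[8] = {kEmptyByte, 0x12, kEmptyByte, 0x05,
                           kEmptyByte, kEmptyByte, 0x7F, kSentinelByte};
  ForEachFull(ctrl, /*capacity=*/7,
              [](size_t i) { std::printf("full slot %zu\n", i); });
}
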
298
299
namespace {
300
301
4
void ResetGrowthLeft(GrowthInfo& growth_info, size_t capacity, size_t size) {
302
4
  growth_info.InitGrowthLeftNoDeleted(CapacityToGrowth(capacity) - size);
303
4
}
304
305
0
void ResetGrowthLeft(CommonFields& common) {
306
0
  ResetGrowthLeft(common.growth_info(), common.capacity(), common.size());
307
0
}
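
For orientation, SwissTable targets a maximum load factor of 7/8 for large tables, so the growth budget of a capacity-c table is roughly c - c/8 and growth_left is that budget minus the current size. Worked numbers under that assumption (the real CapacityToGrowth has small-table special cases):

#include <cstddef>

// Growth budget under an assumed flat 7/8 rule.
constexpr size_t CapacityToGrowthSketch(size_t capacity) {
  return capacity - capacity / 8;
}
static_assert(CapacityToGrowthSketch(15) == 14);
static_assert(CapacityToGrowthSketch(63) == 56);

// What InitGrowthLeftNoDeleted receives for a table of a given size.
constexpr size_t GrowthLeftSketch(size_t capacity, size_t size) {
  return CapacityToGrowthSketch(capacity) - size;
}
static_assert(GrowthLeftSketch(63, 20) == 36);
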
308
309
// Finds an empty slot that is guaranteed to exist, starting from the given position.
310
// NOTE: this function is almost never triggered inside
311
// DropDeletesWithoutResize, so we keep it simple.
312
// The table is rather sparse, so an empty slot will be found very quickly.
313
0
size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) {
314
0
  for (size_t i = start; i < end; ++i) {
315
0
    if (IsEmpty(ctrl[i])) {
316
0
      return i;
317
0
    }
318
0
  }
319
0
  ABSL_UNREACHABLE();
320
0
}
321
322
// Finds a full slot that is guaranteed to exist, starting from the given position.
323
// NOTE: this function is only triggered for rehash(0), when we need to
324
// go back to the SOO state, so we keep it simple.
325
0
size_t FindFirstFullSlot(size_t start, size_t end, const ctrl_t* ctrl) {
326
0
  for (size_t i = start; i < end; ++i) {
327
0
    if (IsFull(ctrl[i])) {
328
0
      return i;
329
0
    }
330
0
  }
331
0
  ABSL_UNREACHABLE();
332
0
}
333
334
16
void PrepareInsertCommon(CommonFields& common) {
335
16
  common.increment_size();
336
16
  common.maybe_increment_generation_on_insert();
337
16
}
338
339
// Sets sanitizer poisoning for the slot corresponding to the control byte being set.
340
inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
341
12
                                size_t slot_size) {
342
12
  ABSL_SWISSTABLE_ASSERT(i < c.capacity());
343
12
  auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
344
12
  if (IsFull(h)) {
345
12
    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
346
12
  } else {
347
0
    SanitizerPoisonMemoryRegion(slot_i, slot_size);
348
0
  }
349
12
}
350
351
// Sets `ctrl[i]` to `h`.
352
//
353
// Unlike setting it directly, this function will perform bounds checks and
354
// mirror the value to the cloned tail if necessary.
355
inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
356
8
                    size_t slot_size) {
357
8
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
358
8
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
359
8
  ctrl_t* ctrl = c.control();
360
8
  ctrl[i] = h;
361
8
  ctrl[((i - NumClonedBytes()) & c.capacity()) +
362
8
       (NumClonedBytes() & c.capacity())] = h;
363
8
}
364
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
365
8
inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
366
8
  SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
367
8
}
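
The index expression above maps byte i to its clone: bytes in the cloned prefix (i < NumClonedBytes()) land just past the sentinel at capacity + 1 + i, while for larger i the expression collapses to i itself, making the second store a harmless self-overwrite. A worked check assuming the usual NumClonedBytes() == Group::kWidth - 1 == 15:

#include <cstddef>

// Mirror-index sketch, assuming NumClonedBytes() == 15 (group width 16).
constexpr size_t kClonedSketch = 15;
constexpr size_t MirrorIndex(size_t i, size_t cap) {
  return ((i - kClonedSketch) & cap) + (kClonedSketch & cap);
}
// Cloned prefix: byte i < 15 is duplicated just past the sentinel.
static_assert(MirrorIndex(0, 63) == 64);  // == cap + 1 + 0
static_assert(MirrorIndex(5, 63) == 69);  // == cap + 1 + 5
// Beyond the prefix the expression is the identity: a self-overwrite.
static_assert(MirrorIndex(20, 63) == 20);
// Single-group table with cap 7 < 15: still lands past the sentinel.
static_assert(MirrorIndex(3, 7) == 11);   // == cap + 1 + 3
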
368
369
// Like SetCtrl, but in a single-group table, we can save some operations when
370
// setting the cloned control byte.
371
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
372
4
                                      size_t slot_size) {
373
4
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
374
4
  ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity()));
375
4
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
376
4
  ctrl_t* ctrl = c.control();
377
4
  ctrl[i] = h;
378
4
  ctrl[i + c.capacity() + 1] = h;
379
4
}
380
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
381
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
382
4
                                      size_t slot_size) {
383
4
  SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
384
4
}
385
386
// Like SetCtrl, but in a table with capacity >= Group::kWidth - 1,
387
// we can save some operations when setting the cloned control byte.
388
inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, ctrl_t h,
389
0
                                size_t slot_size) {
390
0
  ABSL_SWISSTABLE_ASSERT(c.capacity() >= Group::kWidth - 1);
391
0
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
392
0
  ctrl_t* ctrl = c.control();
393
0
  ctrl[i] = h;
394
0
  ctrl[((i - NumClonedBytes()) & c.capacity()) + NumClonedBytes()] = h;
395
0
}
396
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
397
inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, h2_t h,
398
0
                                size_t slot_size) {
399
0
  SetCtrlInLargeTable(c, i, static_cast<ctrl_t>(h), slot_size);
400
0
}
401
402
size_t DropDeletesWithoutResizeAndPrepareInsert(
403
    CommonFields& common, const PolicyFunctions& __restrict policy,
404
0
    size_t new_hash) {
405
0
  void* set = &common;
406
0
  void* slot_array = common.slot_array();
407
0
  const size_t capacity = common.capacity();
408
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
409
0
  ABSL_SWISSTABLE_ASSERT(!is_single_group(capacity));
410
  // Algorithm:
411
  // - mark all DELETED slots as EMPTY
412
  // - mark all FULL slots as DELETED
413
  // - for each slot marked as DELETED
414
  //     hash = Hash(element)
415
  //     target = find_first_non_full(hash)
416
  //     if target is in the same group
417
  //       mark slot as FULL
418
  //     else if target is EMPTY
419
  //       transfer element to target
420
  //       mark slot as EMPTY
421
  //       mark target as FULL
422
  //     else if target is DELETED
423
  //       swap current element with target element
424
  //       mark target as FULL
425
  //       repeat procedure for current slot with moved from element (target)
426
0
  ctrl_t* ctrl = common.control();
427
0
  ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
428
0
  const void* hash_fn = policy.hash_fn(common);
429
0
  auto hasher = policy.hash_slot;
430
0
  auto transfer_n = policy.transfer_n;
431
0
  const size_t slot_size = policy.slot_size;
432
433
0
  size_t total_probe_length = 0;
434
0
  void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
435
436
  // The index of an empty slot that can be used as temporary memory for
437
  // the swap operation.
438
0
  constexpr size_t kUnknownId = ~size_t{};
439
0
  size_t tmp_space_id = kUnknownId;
440
441
0
  for (size_t i = 0; i != capacity;
442
0
       ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
443
0
    ABSL_SWISSTABLE_ASSERT(slot_ptr == SlotAddress(slot_array, i, slot_size));
444
0
    if (IsEmpty(ctrl[i])) {
445
0
      tmp_space_id = i;
446
0
      continue;
447
0
    }
448
0
    if (!IsDeleted(ctrl[i])) continue;
449
0
    const size_t hash = (*hasher)(hash_fn, slot_ptr, common.seed().seed());
450
0
    const FindInfo target = find_first_non_full(common, hash);
451
0
    const size_t new_i = target.offset;
452
0
    total_probe_length += target.probe_length;
453
454
    // Check whether the old and new i fall within the same group w.r.t. the hash.
455
    // If they do, we don't need to move the object, as it already falls in the
456
    // best probe position we can achieve.
457
0
    const size_t probe_offset = probe(common, hash).offset();
458
0
    const h2_t h2 = H2(hash);
459
0
    const auto probe_index = [probe_offset, capacity](size_t pos) {
460
0
      return ((pos - probe_offset) & capacity) / Group::kWidth;
461
0
    };
462
463
    // Element doesn't move.
464
0
    if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
465
0
      SetCtrlInLargeTable(common, i, h2, slot_size);
466
0
      continue;
467
0
    }
468
469
0
    void* new_slot_ptr = SlotAddress(slot_array, new_i, slot_size);
470
0
    if (IsEmpty(ctrl[new_i])) {
471
      // Transfer element to the empty spot.
472
      // SetCtrl poisons/unpoisons the slots so we have to call it at the
473
      // right time.
474
0
      SetCtrlInLargeTable(common, new_i, h2, slot_size);
475
0
      (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
476
0
      SetCtrlInLargeTable(common, i, ctrl_t::kEmpty, slot_size);
477
      // Initialize or change empty space id.
478
0
      tmp_space_id = i;
479
0
    } else {
480
0
      ABSL_SWISSTABLE_ASSERT(IsDeleted(ctrl[new_i]));
481
0
      SetCtrlInLargeTable(common, new_i, h2, slot_size);
482
      // Until we are done rehashing, DELETED marks previously FULL slots.
483
484
0
      if (tmp_space_id == kUnknownId) {
485
0
        tmp_space_id = FindEmptySlot(i + 1, capacity, ctrl);
486
0
      }
487
0
      void* tmp_space = SlotAddress(slot_array, tmp_space_id, slot_size);
488
0
      SanitizerUnpoisonMemoryRegion(tmp_space, slot_size);
489
490
      // Swap i and new_i elements.
491
0
      (*transfer_n)(set, tmp_space, new_slot_ptr, 1);
492
0
      (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
493
0
      (*transfer_n)(set, slot_ptr, tmp_space, 1);
494
495
0
      SanitizerPoisonMemoryRegion(tmp_space, slot_size);
496
497
      // Repeat the processing of the i-th slot.
498
0
      --i;
499
0
      slot_ptr = PrevSlot(slot_ptr, slot_size);
500
0
    }
501
0
  }
502
  // Prepare insert for the new element.
503
0
  PrepareInsertCommon(common);
504
0
  ResetGrowthLeft(common);
505
0
  FindInfo find_info = find_first_non_full(common, new_hash);
506
0
  SetCtrlInLargeTable(common, find_info.offset, H2(new_hash), slot_size);
507
0
  common.infoz().RecordInsertMiss(new_hash, find_info.probe_length);
508
0
  common.infoz().RecordRehash(total_probe_length);
509
0
  return find_info.offset;
510
0
}
511
512
0
bool WasNeverFull(CommonFields& c, size_t index) {
513
0
  if (is_single_group(c.capacity())) {
514
0
    return true;
515
0
  }
516
0
  const size_t index_before = (index - Group::kWidth) & c.capacity();
517
0
  const auto empty_after = Group(c.control() + index).MaskEmpty();
518
0
  const auto empty_before = Group(c.control() + index_before).MaskEmpty();
519
520
  // We count how many consecutive non-empty slots we have to the right and to
521
  // the left of `index`. If the sum is >= kWidth then there is at least one
522
  // probe window that might have seen a full group.
523
0
  return empty_before && empty_after &&
524
0
         static_cast<size_t>(empty_after.TrailingZeros()) +
525
0
                 empty_before.LeadingZeros() <
526
0
             Group::kWidth;
527
0
}
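
Put differently, the erased slot may be marked kEmpty only if no probe window of kWidth consecutive slots could ever have been entirely non-empty across it; otherwise a kDeleted tombstone must keep ongoing probe sequences walking. A sketch of that window test, assuming an 8-wide group:

#include <cstddef>

// Window test sketch, assuming Group::kWidth == 8. `t` is the run of
// non-empty slots immediately after `index` (TrailingZeros of the after
// group's empty mask); `l` is the run immediately before it (LeadingZeros of
// the before group's mask). Some width-8 probe window could have been
// entirely non-empty across `index` iff t + l >= 8.
constexpr bool WasNeverFullSketch(size_t t, size_t l) { return t + l < 8; }
static_assert(WasNeverFullSketch(3, 2));   // window of 6 < 8: kEmpty is safe
static_assert(!WasNeverFullSketch(4, 4));  // a probe may have skipped past:
                                           // must leave a kDeleted tombstone
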
528
529
// Updates the control bytes to indicate a completely empty table such that all
530
// control bytes are kEmpty except for the kSentinel byte.
531
0
void ResetCtrl(CommonFields& common, size_t slot_size) {
532
0
  const size_t capacity = common.capacity();
533
0
  ctrl_t* ctrl = common.control();
534
0
  static constexpr size_t kTwoGroupCapacity = 2 * Group::kWidth - 1;
535
0
  if (ABSL_PREDICT_TRUE(capacity <= kTwoGroupCapacity)) {
536
0
    if (IsSmallCapacity(capacity)) return;
537
0
    std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
538
0
    std::memset(ctrl + capacity, static_cast<int8_t>(ctrl_t::kEmpty),
539
0
                Group::kWidth);
540
0
    if (capacity == kTwoGroupCapacity) {
541
0
      std::memset(ctrl + Group::kWidth, static_cast<int8_t>(ctrl_t::kEmpty),
542
0
                  Group::kWidth);
543
0
    }
544
0
  } else {
545
0
    std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
546
0
                capacity + 1 + NumClonedBytes());
547
0
  }
548
0
  ctrl[capacity] = ctrl_t::kSentinel;
549
0
  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
550
0
}
551
552
// Initializes control bytes for growing from capacity 1 to 3.
553
// `orig_h2` is placed at position `SooSlotIndex()`.
554
// `new_h2` is placed at position `new_offset`.
555
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void InitializeThreeElementsControlBytes(
556
2
    h2_t orig_h2, h2_t new_h2, size_t new_offset, ctrl_t* new_ctrl) {
557
2
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
558
2
  static_assert(kNewCapacity == 3);
559
2
  static_assert(is_single_group(kNewCapacity));
560
2
  static_assert(SooSlotIndex() == 1);
561
2
  ABSL_SWISSTABLE_ASSERT(new_offset == 0 || new_offset == 2);
562
563
2
  static constexpr uint64_t kEmptyXorSentinel =
564
2
      static_cast<uint8_t>(ctrl_t::kEmpty) ^
565
2
      static_cast<uint8_t>(ctrl_t::kSentinel);
566
2
  static constexpr uint64_t kEmpty64 = static_cast<uint8_t>(ctrl_t::kEmpty);
567
2
  static constexpr size_t kMirroredSooSlotIndex =
568
2
      SooSlotIndex() + kNewCapacity + 1;
569
  // The first 8 bytes, with the SOO slot's original and mirrored positions
570
  // replaced with 0.
571
  // Result will look like: E0ESE0EE
572
2
  static constexpr uint64_t kFirstCtrlBytesWithZeroes =
573
2
      k8EmptyBytes ^ (kEmpty64 << (8 * SooSlotIndex())) ^
574
2
      (kEmptyXorSentinel << (8 * kNewCapacity)) ^
575
2
      (kEmpty64 << (8 * kMirroredSooSlotIndex));
576
577
2
  const uint64_t soo_h2 = static_cast<uint64_t>(orig_h2);
578
2
  const uint64_t new_h2_xor_empty =
579
2
      static_cast<uint64_t>(new_h2 ^ static_cast<uint8_t>(ctrl_t::kEmpty));
580
  // Fill the original and mirrored bytes for SOO slot.
581
  // Result will look like:
582
  // EHESEHEE
583
  // Where H = soo_h2, E = kEmpty, S = kSentinel.
584
2
  uint64_t first_ctrl_bytes =
585
2
      ((soo_h2 << (8 * SooSlotIndex())) | kFirstCtrlBytesWithZeroes) |
586
2
      (soo_h2 << (8 * kMirroredSooSlotIndex));
587
  // Replace original and mirrored empty bytes for the new position.
588
  // Result for new_offset 0 will look like:
589
  // NHESNHEE
590
  // Where H = soo_h2, N = H2(new_hash), E = kEmpty, S = kSentinel.
591
  // Result for new_offset 2 will look like:
592
  // EHNSEHNE
593
2
  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_offset));
594
2
  size_t new_mirrored_offset = new_offset + kNewCapacity + 1;
595
2
  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_mirrored_offset));
596
597
  // Fill last bytes with kEmpty.
598
2
  std::memset(new_ctrl + kNewCapacity, static_cast<int8_t>(ctrl_t::kEmpty),
599
2
              Group::kWidth);
600
  // Overwrite the first 8 bytes with first_ctrl_bytes.
601
2
  absl::little_endian::Store64(new_ctrl, first_ctrl_bytes);
602
603
  // Example for group size 16:
604
  // new_ctrl after 1st memset =      ???EEEEEEEEEEEEEEEE
605
  // new_offset 0:
606
  // new_ctrl after 2nd store  =      NHESNHEEEEEEEEEEEEE
607
  // new_offset 2:
608
  // new_ctrl after 2nd store  =      EHNSEHNEEEEEEEEEEEE
609
610
  // Example for group size 8:
611
  // new_ctrl after 1st memset =      ???EEEEEEEE
612
  // new_offset 0:
613
  // new_ctrl after 2nd store  =      NHESNHEEEEE
614
  // new_offset 2:
615
  // new_ctrl after 2nd store  =      EHNSEHNEEEE
616
2
}
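
The recurring pattern in this function is byte surgery by XOR: since a ^ (a ^ b) == b, XOR-ing (old ^ new) << (8 * pos) into a packed little-endian word replaces just the byte at pos, which is how kEmpty bytes become H2 values above. A standalone check of the identity:

#include <cstdint>

// Replace the byte at `pos` of `word`, currently holding `old`, with `neu`.
constexpr uint64_t ReplaceByte(uint64_t word, int pos, uint8_t old,
                               uint8_t neu) {
  return word ^ (static_cast<uint64_t>(old ^ neu) << (8 * pos));
}
// Eight 0x80 (kEmpty) bytes with byte 2 swapped to an H2 value of 0x2A:
static_assert(ReplaceByte(0x8080808080808080u, 2, 0x80, 0x2A) ==
              0x80808080802A8080u);
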
617
618
}  // namespace
619
620
0
void EraseMetaOnlySmall(CommonFields& c, bool soo_enabled, size_t slot_size) {
621
0
  ABSL_SWISSTABLE_ASSERT(c.is_small());
622
0
  if (soo_enabled) {
623
0
    c.set_empty_soo();
624
0
    return;
625
0
  }
626
0
  c.decrement_size();
627
0
  c.infoz().RecordErase();
628
0
  SanitizerPoisonMemoryRegion(c.slot_array(), slot_size);
629
0
}
630
631
0
void EraseMetaOnlyLarge(CommonFields& c, const ctrl_t* ctrl, size_t slot_size) {
632
0
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
633
0
  ABSL_SWISSTABLE_ASSERT(IsFull(*ctrl) && "erasing a dangling iterator");
634
0
  c.decrement_size();
635
0
  c.infoz().RecordErase();
636
637
0
  size_t index = static_cast<size_t>(ctrl - c.control());
638
639
0
  if (WasNeverFull(c, index)) {
640
0
    SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
641
0
    c.growth_info().OverwriteFullAsEmpty();
642
0
    return;
643
0
  }
644
645
0
  c.growth_info().OverwriteFullAsDeleted();
646
0
  SetCtrlInLargeTable(c, index, ctrl_t::kDeleted, slot_size);
647
0
}
648
649
void ClearBackingArray(CommonFields& c,
650
                       const PolicyFunctions& __restrict policy, void* alloc,
651
0
                       bool reuse, bool soo_enabled) {
652
0
  if (reuse) {
653
0
    c.set_size_to_zero();
654
0
    ABSL_SWISSTABLE_ASSERT(!soo_enabled || c.capacity() > SooCapacity());
655
0
    ResetCtrl(c, policy.slot_size);
656
0
    ResetGrowthLeft(c);
657
0
    c.infoz().RecordStorageChanged(0, c.capacity());
658
0
  } else {
659
    // We need to record infoz before calling dealloc, which will unregister
660
    // infoz.
661
0
    c.infoz().RecordClearedReservation();
662
0
    c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);
663
0
    c.infoz().Unregister();
664
0
    (*policy.dealloc)(alloc, c.capacity(), c.control(), policy.slot_size,
665
0
                      policy.slot_align, c.has_infoz());
666
0
    c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{non_soo_tag_t{}};
667
0
  }
668
0
}
669
670
namespace {
671
672
enum class ResizeNonSooMode {
673
  kGuaranteedEmpty,
674
  kGuaranteedAllocated,
675
};
676
677
// Iterates over full slots in the old table, finds new positions for them,
678
// and transfers the slots.
679
// This function is used for reserving or rehashing non-empty tables.
680
// This use case is rare, so the function is type-erased.
681
// Returns the total probe length.
682
size_t FindNewPositionsAndTransferSlots(
683
    CommonFields& common, const PolicyFunctions& __restrict policy,
684
0
    ctrl_t* old_ctrl, void* old_slots, size_t old_capacity) {
685
0
  void* new_slots = common.slot_array();
686
0
  const void* hash_fn = policy.hash_fn(common);
687
0
  const size_t slot_size = policy.slot_size;
688
0
  const size_t seed = common.seed().seed();
689
690
0
  const auto insert_slot = [&](void* slot) {
691
0
    size_t hash = policy.hash_slot(hash_fn, slot, seed);
692
0
    FindInfo target;
693
0
    if (common.is_small()) {
694
0
      target = FindInfo{0, 0};
695
0
    } else {
696
0
      target = find_first_non_full(common, hash);
697
0
      SetCtrl(common, target.offset, H2(hash), slot_size);
698
0
    }
699
0
    policy.transfer_n(&common, SlotAddress(new_slots, target.offset, slot_size),
700
0
                      slot, 1);
701
0
    return target.probe_length;
702
0
  };
703
0
  if (IsSmallCapacity(old_capacity)) {
704
0
    if (common.size() == 1) insert_slot(old_slots);
705
0
    return 0;
706
0
  }
707
0
  size_t total_probe_length = 0;
708
0
  for (size_t i = 0; i < old_capacity; ++i) {
709
0
    if (IsFull(old_ctrl[i])) {
710
0
      total_probe_length += insert_slot(old_slots);
711
0
    }
712
0
    old_slots = NextSlot(old_slots, slot_size);
713
0
  }
714
0
  return total_probe_length;
715
0
}
716
717
void ReportGrowthToInfozImpl(CommonFields& common, HashtablezInfoHandle infoz,
718
                             size_t hash, size_t total_probe_length,
719
0
                             size_t distance_from_desired) {
720
0
  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
721
0
  infoz.RecordStorageChanged(common.size() - 1, common.capacity());
722
0
  infoz.RecordRehash(total_probe_length);
723
0
  infoz.RecordInsertMiss(hash, distance_from_desired);
724
0
  common.set_has_infoz();
725
  // TODO(b/413062340): we could potentially store infoz in place of the
726
  // control pointer for the capacity 1 case.
727
0
  common.set_infoz(infoz);
728
0
}
729
730
// Specialization to avoid passing two 0s from the hot function.
731
ABSL_ATTRIBUTE_NOINLINE void ReportSingleGroupTableGrowthToInfoz(
732
0
    CommonFields& common, HashtablezInfoHandle infoz, size_t hash) {
733
0
  ReportGrowthToInfozImpl(common, infoz, hash, /*total_probe_length=*/0,
734
0
                          /*distance_from_desired=*/0);
735
0
}
736
737
ABSL_ATTRIBUTE_NOINLINE void ReportGrowthToInfoz(CommonFields& common,
738
                                                 HashtablezInfoHandle infoz,
739
                                                 size_t hash,
740
                                                 size_t total_probe_length,
741
0
                                                 size_t distance_from_desired) {
742
0
  ReportGrowthToInfozImpl(common, infoz, hash, total_probe_length,
743
0
                          distance_from_desired);
744
0
}
745
746
ABSL_ATTRIBUTE_NOINLINE void ReportResizeToInfoz(CommonFields& common,
747
                                                 HashtablezInfoHandle infoz,
748
0
                                                 size_t total_probe_length) {
749
0
  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
750
0
  infoz.RecordStorageChanged(common.size(), common.capacity());
751
0
  infoz.RecordRehash(total_probe_length);
752
0
  common.set_has_infoz();
753
0
  common.set_infoz(infoz);
754
0
}
755
756
struct BackingArrayPtrs {
757
  ctrl_t* ctrl;
758
  void* slots;
759
};
760
761
BackingArrayPtrs AllocBackingArray(CommonFields& common,
762
                                   const PolicyFunctions& __restrict policy,
763
                                   size_t new_capacity, bool has_infoz,
764
8
                                   void* alloc) {
765
8
  RawHashSetLayout layout(new_capacity, policy.slot_size, policy.slot_align,
766
8
                          has_infoz);
767
  // Perform a direct call in the common case to allow profile-guided
768
  // heap optimization (PGHO) to understand which allocation function is used.
769
8
  constexpr size_t kDefaultAlignment = BackingArrayAlignment(alignof(size_t));
770
8
  char* mem = static_cast<char*>(
771
8
      ABSL_PREDICT_TRUE(
772
8
          policy.alloc ==
773
8
          (&AllocateBackingArray<kDefaultAlignment, std::allocator<char>>))
774
8
          ? AllocateBackingArray<kDefaultAlignment, std::allocator<char>>(
775
8
                alloc, layout.alloc_size())
776
8
          : policy.alloc(alloc, layout.alloc_size()));
777
8
  const GenerationType old_generation = common.generation();
778
8
  common.set_generation_ptr(
779
8
      reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
780
8
  common.set_generation(NextGeneration(old_generation));
781
782
8
  return {reinterpret_cast<ctrl_t*>(mem + layout.control_offset()),
783
8
          mem + layout.slot_offset()};
784
8
}
785
786
template <ResizeNonSooMode kMode>
787
void ResizeNonSooImpl(CommonFields& common,
788
                      const PolicyFunctions& __restrict policy,
789
0
                      size_t new_capacity, HashtablezInfoHandle infoz) {
790
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
791
0
  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
792
793
0
  [[maybe_unused]] const size_t old_capacity = common.capacity();
794
0
  [[maybe_unused]] ctrl_t* old_ctrl;
795
0
  [[maybe_unused]] void* old_slots;
796
0
  if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
797
0
    old_ctrl = common.control();
798
0
    old_slots = common.slot_array();
799
0
  }
800
801
0
  const size_t slot_size = policy.slot_size;
802
0
  [[maybe_unused]] const size_t slot_align = policy.slot_align;
803
0
  const bool has_infoz = infoz.IsSampled();
804
0
  void* alloc = policy.get_char_alloc(common);
805
806
0
  common.set_capacity(new_capacity);
807
0
  const auto [new_ctrl, new_slots] =
808
0
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
809
0
  common.set_control(new_ctrl);
810
0
  common.set_slots(new_slots);
811
0
  common.generate_new_seed(has_infoz);
812
813
0
  size_t total_probe_length = 0;
814
0
  ResetCtrl(common, slot_size);
815
0
  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedEmpty ||
816
0
                         old_capacity == policy.soo_capacity());
817
0
  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedAllocated ||
818
0
                         old_capacity > 0);
819
0
  if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
820
0
    total_probe_length = FindNewPositionsAndTransferSlots(
821
0
        common, policy, old_ctrl, old_slots, old_capacity);
822
0
    (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
823
0
                      has_infoz);
824
0
    ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
825
0
                    common.size());
826
0
  } else {
827
0
    GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(
828
0
        CapacityToGrowth(new_capacity));
829
0
  }
830
831
0
  if (ABSL_PREDICT_FALSE(has_infoz)) {
832
0
    ReportResizeToInfoz(common, infoz, total_probe_length);
833
0
  }
834
0
}
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)0>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)1>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
835
836
void ResizeEmptyNonAllocatedTableImpl(CommonFields& common,
837
                                      const PolicyFunctions& __restrict policy,
838
0
                                      size_t new_capacity, bool force_infoz) {
839
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
840
0
  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
841
0
  ABSL_SWISSTABLE_ASSERT(!force_infoz || policy.soo_enabled);
842
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= policy.soo_capacity());
843
0
  ABSL_SWISSTABLE_ASSERT(common.empty());
844
0
  const size_t slot_size = policy.slot_size;
845
0
  HashtablezInfoHandle infoz;
846
0
  const bool should_sample =
847
0
      policy.is_hashtablez_eligible && (force_infoz || ShouldSampleNextTable());
848
0
  if (ABSL_PREDICT_FALSE(should_sample)) {
849
0
    infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
850
0
                            policy.soo_capacity());
851
0
  }
852
0
  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedEmpty>(common, policy,
853
0
                                                       new_capacity, infoz);
854
0
}
855
856
// If the table was SOO, initializes new control bytes and transfers the slot.
857
// After transferring the slot, sets control and slots in CommonFields.
858
// It is rare to resize an SOO table with one element to a large size.
859
// Requires: `c` contains SOO data.
860
void InsertOldSooSlotAndInitializeControlBytes(
861
    CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* new_ctrl,
862
0
    void* new_slots, bool has_infoz) {
863
0
  ABSL_SWISSTABLE_ASSERT(c.size() == policy.soo_capacity());
864
0
  ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
865
0
  size_t new_capacity = c.capacity();
866
867
0
  c.generate_new_seed(has_infoz);
868
869
0
  const size_t soo_slot_hash =
870
0
      policy.hash_slot(policy.hash_fn(c), c.soo_data(), c.seed().seed());
871
0
  size_t offset = probe(new_capacity, soo_slot_hash).offset();
872
0
  offset = offset == new_capacity ? 0 : offset;
873
0
  SanitizerPoisonMemoryRegion(new_slots, policy.slot_size * new_capacity);
874
0
  void* target_slot = SlotAddress(new_slots, offset, policy.slot_size);
875
0
  SanitizerUnpoisonMemoryRegion(target_slot, policy.slot_size);
876
0
  policy.transfer_n(&c, target_slot, c.soo_data(), 1);
877
0
  c.set_control(new_ctrl);
878
0
  c.set_slots(new_slots);
879
0
  ResetCtrl(c, policy.slot_size);
880
0
  SetCtrl(c, offset, H2(soo_slot_hash), policy.slot_size);
881
0
}
882
883
enum class ResizeFullSooTableSamplingMode {
884
  kNoSampling,
885
  // Force sampling. If the table was still not sampled, do not resize.
886
  kForceSampleNoResizeIfUnsampled,
887
};
888
889
void AssertSoo([[maybe_unused]] CommonFields& common,
890
0
               [[maybe_unused]] const PolicyFunctions& policy) {
891
0
  ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
892
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() == policy.soo_capacity());
893
0
}
894
void AssertFullSoo([[maybe_unused]] CommonFields& common,
895
0
                   [[maybe_unused]] const PolicyFunctions& policy) {
896
0
  AssertSoo(common, policy);
897
0
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
898
0
}
899
900
void ResizeFullSooTable(CommonFields& common,
901
                        const PolicyFunctions& __restrict policy,
902
                        size_t new_capacity,
903
0
                        ResizeFullSooTableSamplingMode sampling_mode) {
904
0
  AssertFullSoo(common, policy);
905
0
  const size_t slot_size = policy.slot_size;
906
0
  void* alloc = policy.get_char_alloc(common);
907
908
0
  HashtablezInfoHandle infoz;
909
0
  bool has_infoz = false;
910
0
  if (sampling_mode ==
911
0
      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled) {
912
0
    if (ABSL_PREDICT_FALSE(policy.is_hashtablez_eligible)) {
913
0
      infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
914
0
                              policy.soo_capacity());
915
0
    }
916
917
0
    if (!infoz.IsSampled()) return;
918
0
    has_infoz = true;
919
0
  }
920
921
0
  common.set_capacity(new_capacity);
922
923
  // We do not set control and slots in CommonFields yet to avoid overwriting
924
  // SOO data.
925
0
  const auto [new_ctrl, new_slots] =
926
0
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
927
928
0
  InsertOldSooSlotAndInitializeControlBytes(common, policy, new_ctrl, new_slots,
929
0
                                            has_infoz);
930
0
  ResetGrowthLeft(common);
931
0
  if (has_infoz) {
932
0
    common.set_has_infoz();
933
0
    common.set_infoz(infoz);
934
0
    infoz.RecordStorageChanged(common.size(), new_capacity);
935
0
  }
936
0
}
937
938
void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* __restrict old_ctrl,
939
                                            size_t old_capacity,
940
                                            ctrl_t* __restrict new_ctrl,
941
4
                                            size_t new_capacity) {
942
4
  ABSL_SWISSTABLE_ASSERT(is_single_group(new_capacity));
943
4
  constexpr size_t kHalfWidth = Group::kWidth / 2;
944
4
  ABSL_ASSUME(old_capacity < kHalfWidth);
945
4
  ABSL_ASSUME(old_capacity > 0);
946
4
  static_assert(Group::kWidth == 8 || Group::kWidth == 16,
947
4
                "Group size is not supported.");
948
949
  // NOTE: operations are done with a compile-time-known size of 8 bytes.
950
  // The compiler optimizes that into a single ASM operation.
951
952
  // Load the bytes from old_capacity. This contains
953
  // - the sentinel byte
954
  // - all the old control bytes
955
  // - the rest is filled with kEmpty bytes
956
  // Example:
957
  // old_ctrl =     012S012EEEEEEEEE...
958
  // copied_bytes = S012EEEE
959
4
  uint64_t copied_bytes = absl::little_endian::Load64(old_ctrl + old_capacity);
960
961
  // We change the sentinel byte to kEmpty before storing to both the start of
962
  // the new_ctrl, and past the end of the new_ctrl later for the new cloned
963
  // bytes. Note that this is faster than setting the sentinel byte to kEmpty
964
  // after the copy directly in new_ctrl, because we are limited by store
965
  // bandwidth.
966
4
  static constexpr uint64_t kEmptyXorSentinel =
967
4
      static_cast<uint8_t>(ctrl_t::kEmpty) ^
968
4
      static_cast<uint8_t>(ctrl_t::kSentinel);
969
970
  // Replace the first byte kSentinel with kEmpty.
971
  // The resulting bytes are the old control bytes shifted by one byte.
972
  // Example:
973
  // old_ctrl = 012S012EEEEEEEEE...
974
  // before =   S012EEEE
975
  // after  =   E012EEEE
976
4
  copied_bytes ^= kEmptyXorSentinel;
977
978
4
  if (Group::kWidth == 8) {
979
    // With group size 8, we can grow with two write operations.
980
0
    ABSL_SWISSTABLE_ASSERT(old_capacity < 8 &&
981
0
                           "old_capacity is too large for group size 8");
982
0
    absl::little_endian::Store64(new_ctrl, copied_bytes);
983
984
0
    static constexpr uint64_t kSentinel64 =
985
0
        static_cast<uint8_t>(ctrl_t::kSentinel);
986
987
    // Prepend the kSentinel byte to the beginning of copied_bytes.
988
    // We have at most 3 non-empty bytes at the beginning of copied_bytes for
989
    // group size 8.
990
    // Example:
991
    // old_ctrl = 012S012EEEE
992
    // before =   E012EEEE
993
    // after  =   SE012EEE
994
0
    copied_bytes = (copied_bytes << 8) ^ kSentinel64;
995
0
    absl::little_endian::Store64(new_ctrl + new_capacity, copied_bytes);
996
    // Example for capacity 3:
997
    // old_ctrl = 012S012EEEE
998
    // After the first store:
999
    //           >!
1000
    // new_ctrl = E012EEEE???????
1001
    // After the second store:
1002
    //                  >!
1003
    // new_ctrl = E012EEESE012EEE
1004
0
    return;
1005
0
  }
1006
1007
4
  ABSL_SWISSTABLE_ASSERT(Group::kWidth == 16);  // NOLINT(misc-static-assert)
1008
1009
  // Fill the second half of the main control bytes with kEmpty.
1010
  // For small capacities this may write into the mirrored control bytes.
1011
  // That is fine, as we will overwrite all of those bytes later.
1012
4
  std::memset(new_ctrl + kHalfWidth, static_cast<int8_t>(ctrl_t::kEmpty),
1013
4
              kHalfWidth);
1014
  // Fill the second half of the mirrored control bytes with kEmpty.
1015
4
  std::memset(new_ctrl + new_capacity + kHalfWidth,
1016
4
              static_cast<int8_t>(ctrl_t::kEmpty), kHalfWidth);
1017
  // Copy the first half of the non-mirrored control bytes.
1018
4
  absl::little_endian::Store64(new_ctrl, copied_bytes);
1019
4
  new_ctrl[new_capacity] = ctrl_t::kSentinel;
1020
  // Copy the first half of the mirrored control bytes.
1021
4
  absl::little_endian::Store64(new_ctrl + new_capacity + 1, copied_bytes);
1022
1023
  // Example for growth capacity 1->3:
1024
  // old_ctrl =                  0S0EEEEEEEEEEEEEE
1025
  // new_ctrl at the end =       E0ESE0EEEEEEEEEEEEE
1026
  //                                    >!
1027
  // new_ctrl after 1st memset = ????????EEEEEEEE???
1028
  //                                       >!
1029
  // new_ctrl after 2nd memset = ????????EEEEEEEEEEE
1030
  //                            >!
1031
  // new_ctrl after 1st store =  E0EEEEEEEEEEEEEEEEE
1032
  // new_ctrl after kSentinel =  E0ESEEEEEEEEEEEEEEE
1033
  //                                >!
1034
  // new_ctrl after 2nd store =  E0ESE0EEEEEEEEEEEEE
1035
1036
  // Example for growth capacity 3->7:
1037
  // old_ctrl =                  012S012EEEEEEEEEEEE
1038
  // new_ctrl at the end =       E012EEESE012EEEEEEEEEEE
1039
  //                                    >!
1040
  // new_ctrl after 1st memset = ????????EEEEEEEE???????
1041
  //                                           >!
1042
  // new_ctrl after 2nd memset = ????????EEEEEEEEEEEEEEE
1043
  //                            >!
1044
  // new_ctrl after 1st store =  E012EEEEEEEEEEEEEEEEEEE
1045
  // new_ctrl after kSentinel =  E012EEESEEEEEEEEEEEEEEE
1046
  //                                >!
1047
  // new_ctrl after 2nd store =  E012EEESE012EEEEEEEEEEE
1048
1049
  // Example for growth capacity 7->15:
1050
  // old_ctrl =                  0123456S0123456EEEEEEEE
1051
  // new_ctrl at the end =       E0123456EEEEEEESE0123456EEEEEEE
1052
  //                                    >!
1053
  // new_ctrl after 1st memset = ????????EEEEEEEE???????????????
1054
  //                                                   >!
1055
  // new_ctrl after 2nd memset = ????????EEEEEEEE???????EEEEEEEE
1056
  //                            >!
1057
  // new_ctrl after 1st store =  E0123456EEEEEEEE???????EEEEEEEE
1058
  // new_ctrl after kSentinel =  E0123456EEEEEEES???????EEEEEEEE
1059
  //                                            >!
1060
  // new_ctrl after 2nd store =  E0123456EEEEEEESE0123456EEEEEEE
1061
4
}
1062
1063
// Size of the buffer we allocate on the stack for storing probed elements in
1064
// the GrowToNextCapacity algorithm.
1065
constexpr size_t kProbedElementsBufferSize = 512;
1066
1067
// Decodes information about probed elements from contiguous memory.
1068
// Finds new position for each element and transfers it to the new slots.
1069
// Returns the total probe length.
1070
template <typename ProbedItem>
1071
ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertImpl(
1072
    CommonFields& c, const PolicyFunctions& __restrict policy,
1073
0
    const ProbedItem* start, const ProbedItem* end, void* old_slots) {
1074
0
  const size_t new_capacity = c.capacity();
1075
1076
0
  void* new_slots = c.slot_array();
1077
0
  ctrl_t* new_ctrl = c.control();
1078
0
  size_t total_probe_length = 0;
1079
1080
0
  const size_t slot_size = policy.slot_size;
1081
0
  auto transfer_n = policy.transfer_n;
1082
1083
0
  for (; start < end; ++start) {
1084
0
    const FindInfo target = find_first_non_full_from_h1(
1085
0
        new_ctrl, static_cast<size_t>(start->h1), new_capacity);
1086
0
    total_probe_length += target.probe_length;
1087
0
    const size_t old_index = static_cast<size_t>(start->source_offset);
1088
0
    const size_t new_i = target.offset;
1089
0
    ABSL_SWISSTABLE_ASSERT(old_index < new_capacity / 2);
1090
0
    ABSL_SWISSTABLE_ASSERT(new_i < new_capacity);
1091
0
    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_i]));
1092
0
    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
1093
0
    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
1094
0
    SanitizerUnpoisonMemoryRegion(dst_slot, slot_size);
1095
0
    transfer_n(&c, dst_slot, src_slot, 1);
1096
0
    SetCtrlInLargeTable(c, new_i, static_cast<h2_t>(start->h2), slot_size);
1097
0
  }
1098
0
  return total_probe_length;
1099
0
}
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned int, 32ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned int, 32ul> const*, absl::container_internal::ProbedItemImpl<unsigned int, 32ul> const*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned long, 64ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned long, 64ul> const*, absl::container_internal::ProbedItemImpl<unsigned long, 64ul> const*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned long, 122ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned long, 122ul> const*, absl::container_internal::ProbedItemImpl<unsigned long, 122ul> const*, void*)
1100
1101
// Sentinel value for the start of marked elements.
1102
// Signals that there are no marked elements.
1103
constexpr size_t kNoMarkedElementsSentinel = ~size_t{};
1104
1105
// Processes probed elements that did not fit into the available buffers.
1106
// We marked them as kSentinel in the control bytes.
1107
// Hash recomputation and full probing are done here.
1108
// This use case should be extremely rare.
1109
ABSL_ATTRIBUTE_NOINLINE size_t ProcessProbedMarkedElements(
1110
    CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl,
1111
0
    void* old_slots, size_t start) {
1112
0
  size_t old_capacity = PreviousCapacity(c.capacity());
1113
0
  const size_t slot_size = policy.slot_size;
1114
0
  void* new_slots = c.slot_array();
1115
0
  size_t total_probe_length = 0;
1116
0
  const void* hash_fn = policy.hash_fn(c);
1117
0
  auto hash_slot = policy.hash_slot;
1118
0
  auto transfer_n = policy.transfer_n;
1119
0
  const size_t seed = c.seed().seed();
1120
0
  for (size_t old_index = start; old_index < old_capacity; ++old_index) {
1121
0
    if (old_ctrl[old_index] != ctrl_t::kSentinel) {
1122
0
      continue;
1123
0
    }
1124
0
    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
1125
0
    const size_t hash = hash_slot(hash_fn, src_slot, seed);
1126
0
    const FindInfo target = find_first_non_full(c, hash);
1127
0
    total_probe_length += target.probe_length;
1128
0
    const size_t new_i = target.offset;
1129
0
    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
1130
0
    SetCtrlInLargeTable(c, new_i, H2(hash), slot_size);
1131
0
    transfer_n(&c, dst_slot, src_slot, 1);
1132
0
  }
1133
0
  return total_probe_length;
1134
0
}
1135
1136
// The largest old capacity for which it is guaranteed that all probed elements
1137
// fit in ProbedItemEncoder's local buffer.
1138
// For such tables, `encode_probed_element` is trivial.
1139
constexpr size_t kMaxLocalBufferOldCapacity =
1140
    kProbedElementsBufferSize / sizeof(ProbedItem4Bytes) - 1;
1141
static_assert(IsValidCapacity(kMaxLocalBufferOldCapacity));
1142
constexpr size_t kMaxLocalBufferNewCapacity =
1143
    NextCapacity(kMaxLocalBufferOldCapacity);
1144
static_assert(kMaxLocalBufferNewCapacity <= ProbedItem4Bytes::kMaxNewCapacity);
1145
static_assert(NextCapacity(kMaxLocalBufferNewCapacity) <=
1146
              ProbedItem4Bytes::kMaxNewCapacity);
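
The arithmetic behind these asserts, assuming sizeof(ProbedItem4Bytes) == 4: a 512-byte stack buffer holds 128 items, and a table of capacity 127 (the largest 2^k - 1 below 128) can produce at most 127 probed elements, so every one of them fits locally. A worked restatement:

#include <cstddef>

// Assumes sizeof(ProbedItem4Bytes) == 4.
constexpr size_t kBufferBytesSketch = 512;
constexpr size_t kMaxOldCapSketch = kBufferBytesSketch / 4 - 1;  // 127
static_assert(kMaxOldCapSketch == 127);  // == 2^7 - 1, a valid capacity
// A capacity-127 table holds at most 127 elements, all of which fit in the
// 128-item local buffer, so growths from such tables never spill.
static_assert(kMaxOldCapSketch < kBufferBytesSketch / 4);
// The next two capacities in the 2^k - 1 sequence, which the asserts above
// require to stay within ProbedItem4Bytes' encodable range:
static_assert(2 * (kMaxOldCapSketch + 1) - 1 == 255);
static_assert(2 * 256 - 1 == 511);
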
1147
1148
// Initializes mirrored control bytes after
1149
// transfer_unprobed_elements_to_next_capacity.
1150
0
void InitializeMirroredControlBytes(ctrl_t* new_ctrl, size_t new_capacity) {
1151
0
  std::memcpy(new_ctrl + new_capacity,
1152
              // We own the GrowthInfo just before the control bytes, so it is OK
1153
              // to read one byte from it.
1154
0
              new_ctrl - 1, Group::kWidth);
1155
0
  new_ctrl[new_capacity] = ctrl_t::kSentinel;
1156
0
}
1157
1158
// Encodes probed elements into the available memory.
1159
// At first, a local (on-stack) buffer is used. The size of the buffer is
1160
// kProbedElementsBufferSize bytes.
1161
// When the local buffer is full, we switch to the `control_` buffer. We are
1162
// allowed to overwrite the `control_` buffer up to the `source_offset` byte.
1163
// If there is no space left in the `control_` buffer, we fall back to a naive
1164
// algorithm for all the remaining probed elements: we mark them as kSentinel
1165
// in the control bytes and later process them fully. See
1166
// ProcessProbedMarkedElements for details. This should be extremely rare.
1167
template <typename ProbedItemType,
1168
          // If true, we only use the local buffer and never switch to the
1169
          // control buffer.
1170
          bool kGuaranteedFitToBuffer = false>
1171
class ProbedItemEncoder {
1172
 public:
1173
  using ProbedItem = ProbedItemType;
1174
0
  explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
1175
1176
  // Encodes the item into the best available location.
1177
0
  void EncodeItem(ProbedItem item) {
1178
0
    if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
1179
0
      return ProcessEncodeWithOverflow(item);
1180
0
    }
1181
0
    ABSL_SWISSTABLE_ASSERT(pos_ < end_);
1182
0
    *pos_ = item;
1183
0
    ++pos_;
1184
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned long, 64ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned long, 122ul>)
1185
1186
  // Decodes information about probed elements from all available sources.
1187
  // Finds a new position for each element and transfers it to the new slots.
1188
  // Returns the total probe length.
1189
  size_t DecodeAndInsertToTable(CommonFields& common,
1190
                                const PolicyFunctions& __restrict policy,
1191
0
                                void* old_slots) const {
1192
0
    if (pos_ == buffer_) {
1193
0
      return 0;
1194
0
    }
1195
0
    if constexpr (kGuaranteedFitToBuffer) {
1196
0
      return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
1197
0
    }
1198
0
    size_t total_probe_length = DecodeAndInsertImpl(
1199
0
        common, policy, buffer_,
1200
0
        local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
1201
0
    if (!local_buffer_full_) {
1202
0
      return total_probe_length;
1203
0
    }
1204
0
    total_probe_length +=
1205
0
        DecodeAndInsertToTableOverflow(common, policy, old_slots);
1206
0
    return total_probe_length;
1207
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
1208
1209
 private:
1210
0
  static ProbedItem* AlignToNextItem(void* ptr) {
1211
0
    return reinterpret_cast<ProbedItem*>(AlignUpTo(
1212
0
        reinterpret_cast<uintptr_t>(ptr), alignof(ProbedItem)));
1213
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::AlignToNextItem(void*)
1214
1215
0
  ProbedItem* OverflowBufferStart() const {
1216
    // We reuse GrowthInfo memory as well.
1217
0
    return AlignToNextItem(control_ - ControlOffset(/*has_infoz=*/false));
1218
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::OverflowBufferStart() const
1219
1220
  // Encodes the item when the previously allocated buffer is full.
1221
  // The first time that happens is when the local buffer is full;
1222
  // we then switch from the local buffer to the control buffer.
1223
  // Every time this function is called, the available buffer is extended up
1224
  // to the `item.source_offset` byte in the control buffer.
1225
  // After the buffer is extended, this function will not be called again
1226
  // until the buffer is exhausted.
1227
  //
1228
  // If there is no space left in the control buffer, we fall back to the
1229
  // naive algorithm and mark probed elements as kSentinel in the control
1230
  // buffer. In that case, this function is called for every subsequent probed element.
1231
0
  ABSL_ATTRIBUTE_NOINLINE void ProcessEncodeWithOverflow(ProbedItem item) {
1232
0
    if (!local_buffer_full_) {
1233
0
      local_buffer_full_ = true;
1234
0
      pos_ = OverflowBufferStart();
1235
0
    }
1236
0
    const size_t source_offset = static_cast<size_t>(item.source_offset);
1237
    // We are in fallback mode so we can't reuse control buffer anymore.
1238
    // Probed elements are marked as kSentinel in the control buffer.
1239
0
    if (ABSL_PREDICT_FALSE(marked_elements_starting_position_ !=
1240
0
                           kNoMarkedElementsSentinel)) {
1241
0
      control_[source_offset] = ctrl_t::kSentinel;
1242
0
      return;
1243
0
    }
1244
    // Refresh the end pointer to the new available position.
1245
    // Invariant: if pos < end, then we have at least sizeof(ProbedItem) bytes
1246
    // to write.
1247
0
    end_ = control_ + source_offset + 1 - sizeof(ProbedItem);
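    // To see why this bound is safe: `*pos_ = item` writes sizeof(ProbedItem)
    // bytes starting at pos_. If pos_ < end_, then
    //   (char*)pos_ + sizeof(ProbedItem) <= control_ + source_offset,
    // so the write stays strictly within control_[0 .. source_offset), the
    // already-consumed prefix that we are free to overwrite.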
1248
0
    if (ABSL_PREDICT_TRUE(pos_ < end_)) {
1249
0
      *pos_ = item;
1250
0
      ++pos_;
1251
0
      return;
1252
0
    }
1253
0
    control_[source_offset] = ctrl_t::kSentinel;
1254
0
    marked_elements_starting_position_ = source_offset;
1255
    // From now on, we will always fall through to `ProcessEncodeWithOverflow`.
1256
0
    ABSL_SWISSTABLE_ASSERT(pos_ >= end_);
1257
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned long, 64ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned long, 122ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
1258
1259
  // Decodes information about probed elements from the control buffer and
1260
  // processes marked elements.
1261
  // Finds a new position for each element and transfers it to the new slots.
1262
  // Returns the total probe length.
1263
  ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertToTableOverflow(
1264
      CommonFields& common, const PolicyFunctions& __restrict policy,
1265
0
      void* old_slots) const {
1266
0
    ABSL_SWISSTABLE_ASSERT(local_buffer_full_ &&
1267
0
                           "must not be called when local buffer is not full");
1268
0
    size_t total_probe_length = DecodeAndInsertImpl(
1269
0
        common, policy, OverflowBufferStart(), pos_, old_slots);
1270
0
    if (ABSL_PREDICT_TRUE(marked_elements_starting_position_ ==
1271
0
                          kNoMarkedElementsSentinel)) {
1272
0
      return total_probe_length;
1273
0
    }
1274
0
    total_probe_length +=
1275
0
        ProcessProbedMarkedElements(common, policy, control_, old_slots,
1276
0
                                    marked_elements_starting_position_);
1277
0
    return total_probe_length;
1278
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
1279
1280
  static constexpr size_t kBufferSize =
1281
      kProbedElementsBufferSize / sizeof(ProbedItem);
1282
  ProbedItem buffer_[kBufferSize];
1283
  // If local_buffer_full_ is false, then pos_/end_ are in the local buffer,
1284
  // otherwise, they're in the overflow buffer.
1285
  ProbedItem* pos_ = buffer_;
1286
  const void* end_ = buffer_ + kBufferSize;
1287
  ctrl_t* const control_;
1288
  size_t marked_elements_starting_position_ = kNoMarkedElementsSentinel;
1289
  bool local_buffer_full_ = false;
1290
};
1291
1292
// Grows to the next capacity with the specified encoder type.
1293
// The encoder is used to store probed elements that are processed later.
1294
// A different encoder is used depending on the capacity of the table.
1295
// Returns the total probe length.
1296
template <typename Encoder>
1297
size_t GrowToNextCapacity(CommonFields& common,
1298
                          const PolicyFunctions& __restrict policy,
1299
0
                          ctrl_t* old_ctrl, void* old_slots) {
1300
0
  using ProbedItem = typename Encoder::ProbedItem;
1301
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
1302
0
  Encoder encoder(old_ctrl);
1303
0
  policy.transfer_unprobed_elements_to_next_capacity(
1304
0
      common, old_ctrl, old_slots, &encoder,
1305
0
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1306
0
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1307
0
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1308
0
      });
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
1309
0
  InitializeMirroredControlBytes(common.control(), common.capacity());
1310
0
  return encoder.DecodeAndInsertToTable(common, policy, old_slots);
1311
0
}
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
1312
1313
// Grows to the next capacity for relatively small tables, where even if all
1314
// elements are probed, we do not need to overflow the local buffer.
1315
// Returns the total probe length.
1316
size_t GrowToNextCapacityThatFitsInLocalBuffer(
1317
    CommonFields& common, const PolicyFunctions& __restrict policy,
1318
0
    ctrl_t* old_ctrl, void* old_slots) {
1319
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= kMaxLocalBufferNewCapacity);
1320
0
  return GrowToNextCapacity<
1321
0
      ProbedItemEncoder<ProbedItem4Bytes, /*kGuaranteedFitToBuffer=*/true>>(
1322
0
      common, policy, old_ctrl, old_slots);
1323
0
}
1324
1325
// Grows to the next capacity with different encodings. Returns the total
1326
// probe length. These separate functions simplify profile analysis.
1327
size_t GrowToNextCapacity4BytesEncoder(CommonFields& common,
1328
                                       const PolicyFunctions& __restrict policy,
1329
0
                                       ctrl_t* old_ctrl, void* old_slots) {
1330
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem4Bytes>>(
1331
0
      common, policy, old_ctrl, old_slots);
1332
0
}
1333
size_t GrowToNextCapacity8BytesEncoder(CommonFields& common,
1334
                                       const PolicyFunctions& __restrict policy,
1335
0
                                       ctrl_t* old_ctrl, void* old_slots) {
1336
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem8Bytes>>(
1337
0
      common, policy, old_ctrl, old_slots);
1338
0
}
1339
size_t GrowToNextCapacity16BytesEncoder(
1340
    CommonFields& common, const PolicyFunctions& __restrict policy,
1341
0
    ctrl_t* old_ctrl, void* old_slots) {
1342
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem16Bytes>>(
1343
0
      common, policy, old_ctrl, old_slots);
1344
0
}
1345
1346
// Grows to the next capacity for tables whose capacity is large enough that
1347
// we cannot guarantee all probed elements fit in the local buffer. Returns
1348
// the total probe length.
1349
size_t GrowToNextCapacityOverflowLocalBuffer(
1350
    CommonFields& common, const PolicyFunctions& __restrict policy,
1351
0
    ctrl_t* old_ctrl, void* old_slots) {
1352
0
  const size_t new_capacity = common.capacity();
1353
0
  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem4Bytes::kMaxNewCapacity)) {
1354
0
    return GrowToNextCapacity4BytesEncoder(common, policy, old_ctrl, old_slots);
1355
0
  }
1356
0
  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem8Bytes::kMaxNewCapacity)) {
1357
0
    return GrowToNextCapacity8BytesEncoder(common, policy, old_ctrl, old_slots);
1358
0
  }
1359
  // The 16-byte encoding supports the maximum swisstable capacity.
1360
0
  return GrowToNextCapacity16BytesEncoder(common, policy, old_ctrl, old_slots);
1361
0
}
1362
1363
// Dispatches to the appropriate `GrowToNextCapacity*` function based on the
1364
// capacity of the table. Returns the total probe length.
1365
ABSL_ATTRIBUTE_NOINLINE
1366
size_t GrowToNextCapacityDispatch(CommonFields& common,
1367
                                  const PolicyFunctions& __restrict policy,
1368
0
                                  ctrl_t* old_ctrl, void* old_slots) {
1369
0
  const size_t new_capacity = common.capacity();
1370
0
  if (ABSL_PREDICT_TRUE(new_capacity <= kMaxLocalBufferNewCapacity)) {
1371
0
    return GrowToNextCapacityThatFitsInLocalBuffer(common, policy, old_ctrl,
1372
0
                                                   old_slots);
1373
0
  } else {
1374
0
    return GrowToNextCapacityOverflowLocalBuffer(common, policy, old_ctrl,
1375
0
                                                 old_slots);
1376
0
  }
1377
0
}
1378
1379
void IncrementSmallSizeNonSoo(CommonFields& common,
1380
0
                              const PolicyFunctions& __restrict policy) {
1381
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1382
0
  common.increment_size();
1383
0
  SanitizerUnpoisonMemoryRegion(common.slot_array(), policy.slot_size);
1384
0
}
1385
1386
void IncrementSmallSize(CommonFields& common,
1387
0
                        const PolicyFunctions& __restrict policy) {
1388
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1389
0
  if (policy.soo_enabled) {
1390
0
    common.set_full_soo();
1391
0
  } else {
1392
0
    IncrementSmallSizeNonSoo(common, policy);
1393
0
  }
1394
0
}
1395
1396
std::pair<ctrl_t*, void*> Grow1To3AndPrepareInsert(
1397
    CommonFields& common, const PolicyFunctions& __restrict policy,
1398
2
    absl::FunctionRef<size_t(size_t)> get_hash) {
1399
  // TODO(b/413062340): Refactor to reuse more code with
1400
  // GrowSooTableToNextCapacityAndPrepareInsert.
1401
2
  ABSL_SWISSTABLE_ASSERT(common.capacity() == 1);
1402
2
  ABSL_SWISSTABLE_ASSERT(!common.empty());
1403
2
  ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
1404
2
  constexpr size_t kOldCapacity = 1;
1405
2
  constexpr size_t kNewCapacity = NextCapacity(kOldCapacity);
1406
2
  ctrl_t* old_ctrl = common.control();
1407
2
  void* old_slots = common.slot_array();
1408
1409
2
  const size_t slot_size = policy.slot_size;
1410
2
  const size_t slot_align = policy.slot_align;
1411
2
  void* alloc = policy.get_char_alloc(common);
1412
2
  HashtablezInfoHandle infoz = common.infoz();
1413
2
  const bool has_infoz = infoz.IsSampled();
1414
2
  common.set_capacity(kNewCapacity);
1415
1416
2
  const auto [new_ctrl, new_slots] =
1417
2
      AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
1418
2
  common.set_control(new_ctrl);
1419
2
  common.set_slots(new_slots);
1420
2
  SanitizerPoisonMemoryRegion(new_slots, kNewCapacity * slot_size);
1421
1422
2
  if (ABSL_PREDICT_TRUE(!has_infoz)) {
1423
    // When we're sampled, we already have a seed.
1424
2
    common.generate_new_seed(/*has_infoz=*/false);
1425
2
  }
1426
2
  const size_t new_hash = get_hash(common.seed().seed());
1427
2
  h2_t new_h2 = H2(new_hash);
1428
2
  size_t orig_hash =
1429
2
      policy.hash_slot(policy.hash_fn(common), old_slots, common.seed().seed());
1430
2
  size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1431
2
  InitializeThreeElementsControlBytes(H2(orig_hash), new_h2, offset, new_ctrl);
1432
1433
2
  void* old_element_target = NextSlot(new_slots, slot_size);
1434
2
  SanitizerUnpoisonMemoryRegion(old_element_target, slot_size);
1435
2
  policy.transfer_n(&common, old_element_target, old_slots, 1);
1436
1437
2
  void* new_element_target_slot = SlotAddress(new_slots, offset, slot_size);
1438
2
  SanitizerUnpoisonMemoryRegion(new_element_target_slot, slot_size);
1439
1440
2
  policy.dealloc(alloc, kOldCapacity, old_ctrl, slot_size, slot_align,
1441
2
                 has_infoz);
1442
2
  PrepareInsertCommon(common);
1443
2
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1444
2
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1445
1446
2
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1447
0
    ReportSingleGroupTableGrowthToInfoz(common, infoz, new_hash);
1448
0
  }
1449
2
  return {new_ctrl + offset, new_element_target_slot};
1450
2
}
1451
1452
// Grows to the next capacity and prepares the insert for the given new_hash.
1453
// Returns the offset of the new element.
1454
size_t GrowToNextCapacityAndPrepareInsert(
1455
    CommonFields& common, const PolicyFunctions& __restrict policy,
1456
4
    size_t new_hash) {
1457
4
  ABSL_SWISSTABLE_ASSERT(common.growth_left() == 0);
1458
4
  const size_t old_capacity = common.capacity();
1459
4
  ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
1460
4
  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(old_capacity));
1461
1462
4
  const size_t new_capacity = NextCapacity(old_capacity);
1463
4
  ctrl_t* old_ctrl = common.control();
1464
4
  void* old_slots = common.slot_array();
1465
1466
4
  common.set_capacity(new_capacity);
1467
4
  const size_t slot_size = policy.slot_size;
1468
4
  const size_t slot_align = policy.slot_align;
1469
4
  void* alloc = policy.get_char_alloc(common);
1470
4
  HashtablezInfoHandle infoz = common.infoz();
1471
4
  const bool has_infoz = infoz.IsSampled();
1472
1473
4
  const auto [new_ctrl, new_slots] =
1474
4
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
1475
4
  common.set_control(new_ctrl);
1476
4
  common.set_slots(new_slots);
1477
4
  SanitizerPoisonMemoryRegion(new_slots, new_capacity * slot_size);
1478
1479
4
  h2_t new_h2 = H2(new_hash);
1480
4
  size_t total_probe_length = 0;
1481
4
  FindInfo find_info;
1482
4
  if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) {
1483
4
    size_t offset;
1484
4
    GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl,
1485
4
                                           new_capacity);
1486
    // We put the new element either at the beginning or at the end of the
1487
    // table with approximately equal probability.
1488
4
    offset =
1489
4
        SingleGroupTableH1(new_hash, common.seed()) & 1 ? 0 : new_capacity - 1;
1490
1491
4
    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset]));
1492
4
    SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);
1493
4
    find_info = FindInfo{offset, 0};
1494
    // Single-group tables have all slots full on resize, so we can transfer
1495
    // all slots without checking the control bytes.
1496
4
    ABSL_SWISSTABLE_ASSERT(common.size() == old_capacity);
1497
4
    void* target = NextSlot(new_slots, slot_size);
1498
4
    SanitizerUnpoisonMemoryRegion(target, old_capacity * slot_size);
1499
4
    policy.transfer_n(&common, target, old_slots, old_capacity);
1500
4
  } else {
1501
0
    total_probe_length =
1502
0
        GrowToNextCapacityDispatch(common, policy, old_ctrl, old_slots);
1503
0
    find_info = find_first_non_full(common, new_hash);
1504
0
    SetCtrlInLargeTable(common, find_info.offset, new_h2, policy.slot_size);
1505
0
  }
1506
4
  ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
1507
4
  (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
1508
4
                    has_infoz);
1509
4
  PrepareInsertCommon(common);
1510
4
  ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
1511
4
                  common.size());
1512
1513
4
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1514
0
    ReportGrowthToInfoz(common, infoz, new_hash, total_probe_length,
1515
0
                        find_info.probe_length);
1516
0
  }
1517
4
  return find_info.offset;
1518
4
}
1519
1520
}  // namespace
1521
1522
std::pair<ctrl_t*, void*> PrepareInsertSmallNonSoo(
1523
    CommonFields& common, const PolicyFunctions& __restrict policy,
1524
4
    absl::FunctionRef<size_t(size_t)> get_hash) {
1525
4
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1526
4
  ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
1527
4
  if (common.capacity() == 1) {
1528
2
    if (common.empty()) {
1529
0
      IncrementSmallSizeNonSoo(common, policy);
1530
0
      return {SooControl(), common.slot_array()};
1531
2
    } else {
1532
2
      return Grow1To3AndPrepareInsert(common, policy, get_hash);
1533
2
    }
1534
2
  }
1535
1536
  // Growing from 0 to 1 capacity.
1537
2
  ABSL_SWISSTABLE_ASSERT(common.capacity() == 0);
1538
2
  constexpr size_t kNewCapacity = 1;
1539
1540
2
  common.set_capacity(kNewCapacity);
1541
2
  HashtablezInfoHandle infoz;
1542
2
  const bool should_sample =
1543
2
      policy.is_hashtablez_eligible && ShouldSampleNextTable();
1544
2
  if (ABSL_PREDICT_FALSE(should_sample)) {
1545
0
    infoz = ForcedTrySample(policy.slot_size, policy.key_size,
1546
0
                            policy.value_size, policy.soo_capacity());
1547
0
  }
1548
2
  const bool has_infoz = infoz.IsSampled();
1549
2
  void* alloc = policy.get_char_alloc(common);
1550
1551
2
  const auto [new_ctrl, new_slots] =
1552
2
      AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
1553
2
  common.set_control(new_ctrl);
1554
2
  common.set_slots(new_slots);
1555
1556
2
  static_assert(NextCapacity(0) == 1);
1557
2
  PrepareInsertCommon(common);
1558
1559
2
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1560
0
    common.generate_new_seed(/*has_infoz=*/true);
1561
0
    ReportSingleGroupTableGrowthToInfoz(common, infoz,
1562
0
                                        get_hash(common.seed().seed()));
1563
0
  }
1564
2
  return {SooControl(), new_slots};
1565
2
}
1566
1567
namespace {
1568
1569
// Called whenever the table needs to make empty slots available, either by
1570
// removing tombstones via a rehash or by growing to the next capacity.
1571
ABSL_ATTRIBUTE_NOINLINE
1572
size_t RehashOrGrowToNextCapacityAndPrepareInsert(
1573
    CommonFields& common, const PolicyFunctions& __restrict policy,
1574
0
    size_t new_hash) {
1575
0
  const size_t cap = common.capacity();
1576
0
  ABSL_ASSUME(cap > 0);
1577
0
  if (cap > Group::kWidth &&
1578
      // Do these calculations in 64-bit to avoid overflow.
1579
0
      common.size() * uint64_t{32} <= cap * uint64_t{25}) {
1580
    // Squash DELETED without growing if there is enough capacity.
1581
    //
1582
    // Rehash in place if the current size is <= 25/32 of capacity.
1583
    // Rationale for such a high factor: 1) DropDeletesWithoutResize() is
1584
    // faster than resize, and 2) it takes quite a bit of work to add
1585
    // tombstones.  In the worst case, seems to take approximately 4
1586
    // insert/erase pairs to create a single tombstone and so if we are
1587
    // rehashing because of tombstones, we can afford to rehash-in-place as
1588
    // long as we are reclaiming at least 1/8 the capacity without doing more
1589
    // than 2X the work.  (Where "work" is defined to be size() for rehashing
1590
    // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
1591
    // place is faster per operation than inserting or even doubling the size
1592
    // of the table, so we actually afford to reclaim even less space from a
1593
    // resize-in-place.  The decision is to rehash in place if we can reclaim
1594
    // at about 1/8th of the usable capacity (specifically 3/28 of the
1595
    // capacity) which means that the total cost of rehashing will be a small
1596
    // fraction of the total work.
1597
    //
1598
    // Here is output of an experiment using the BM_CacheInSteadyState
1599
    // benchmark running the old case (where we rehash-in-place only if we can
1600
    // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
1601
    // if we can recover 3/32*capacity).
1602
    //
1603
    // Note that although the worst-case number of rehashes jumped up from
1604
    // 15 to 190, the number of operations per second is almost the same.
1605
    //
1606
    // Abridged output of running BM_CacheInSteadyState benchmark from
1607
    // raw_hash_set_benchmark.   N is the number of insert/erase operations.
1608
    //
1609
    //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
1610
    // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
1611
    //  448 | 145284       0.44        18 | 140118       0.44        19
1612
    //  493 | 152546       0.24        11 | 151417       0.48        28
1613
    //  538 | 151439       0.26        11 | 151152       0.53        38
1614
    //  583 | 151765       0.28        11 | 150572       0.57        50
1615
    //  628 | 150241       0.31        11 | 150853       0.61        66
1616
    //  672 | 149602       0.33        12 | 150110       0.66        90
1617
    //  717 | 149998       0.35        12 | 149531       0.70       129
1618
    //  762 | 149836       0.37        13 | 148559       0.74       190
1619
    //  807 | 149736       0.39        14 | 151107       0.39        14
1620
    //  852 | 150204       0.42        15 | 151019       0.42        15
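    // A concrete instance of the 25/32 threshold above: for capacity 1023 the
    // rehash-in-place branch is taken while size() * 32 <= 1023 * 25, i.e.
    // size() <= 799 (~78% of capacity). Assuming the usual maximum load
    // factor of 7/8, growth_left == 0 means size() + tombstones ~= 895, so at
    // least ~96 ~= 3/32 * 1023 slots are reclaimable, matching the
    // "recover >= 3/32" label in the NEW column above.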
1621
0
    return DropDeletesWithoutResizeAndPrepareInsert(common, policy, new_hash);
1622
0
  } else {
1623
    // Otherwise grow the container.
1624
0
    return GrowToNextCapacityAndPrepareInsert(common, policy, new_hash);
1625
0
  }
1626
0
}
1627
1628
// Slow path for PrepareInsertLarge that is called when the table has deleted
1629
// slots or needs to be resized or rehashed.
1630
size_t PrepareInsertLargeSlow(CommonFields& common,
1631
                              const PolicyFunctions& __restrict policy,
1632
4
                              size_t hash) {
1633
4
  const GrowthInfo growth_info = common.growth_info();
1634
4
  ABSL_SWISSTABLE_ASSERT(!growth_info.HasNoDeletedAndGrowthLeft());
1635
4
  if (ABSL_PREDICT_TRUE(growth_info.HasNoGrowthLeftAndNoDeleted())) {
1636
    // Table without deleted slots (>95% cases) that needs to be resized.
1637
4
    ABSL_SWISSTABLE_ASSERT(growth_info.HasNoDeleted() &&
1638
4
                           growth_info.GetGrowthLeft() == 0);
1639
4
    return GrowToNextCapacityAndPrepareInsert(common, policy, hash);
1640
4
  }
1641
0
  if (ABSL_PREDICT_FALSE(growth_info.HasNoGrowthLeftAssumingMayHaveDeleted())) {
1642
    // Table with deleted slots that needs to be rehashed or resized.
1643
0
    return RehashOrGrowToNextCapacityAndPrepareInsert(common, policy, hash);
1644
0
  }
1645
  // Table with deleted slots that has space for the inserting element.
1646
0
  FindInfo target = find_first_non_full(common, hash);
1647
0
  PrepareInsertCommon(common);
1648
0
  common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);
1649
0
  SetCtrlInLargeTable(common, target.offset, H2(hash), policy.slot_size);
1650
0
  common.infoz().RecordInsertMiss(hash, target.probe_length);
1651
0
  return target.offset;
1652
0
}
1653
1654
// Resizes empty non-allocated SOO table to NextCapacity(SooCapacity()),
1655
// forces the table to be sampled and prepares the insert.
1656
// SOO tables need to switch from SOO to heap in order to store the infoz.
1657
// Requires:
1658
//   1. `c.capacity() == SooCapacity()`.
1659
//   2. `c.empty()`.
1660
ABSL_ATTRIBUTE_NOINLINE size_t
1661
GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1662
    CommonFields& common, const PolicyFunctions& __restrict policy,
1663
0
    absl::FunctionRef<size_t(size_t)> get_hash) {
1664
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, NextCapacity(SooCapacity()),
1665
0
                                   /*force_infoz=*/true);
1666
0
  PrepareInsertCommon(common);
1667
0
  common.growth_info().OverwriteEmptyAsFull();
1668
0
  const size_t new_hash = get_hash(common.seed().seed());
1669
0
  SetCtrlInSingleGroupTable(common, SooSlotIndex(), H2(new_hash),
1670
0
                            policy.slot_size);
1671
0
  common.infoz().RecordInsertMiss(new_hash, /*distance_from_desired=*/0);
1672
0
  return SooSlotIndex();
1673
0
}
1674
1675
// Resizes an empty non-allocated table to a capacity that fits new_size elements.
1676
// Requires:
1677
//   1. `c.capacity() == policy.soo_capacity()`.
1678
//   2. `c.empty()`.
1679
//   3. `new_size > policy.soo_capacity()`.
1680
// We will attempt to sample the table.
1681
void ReserveEmptyNonAllocatedTableToFitNewSize(
1682
    CommonFields& common, const PolicyFunctions& __restrict policy,
1683
0
    size_t new_size) {
1684
0
  ValidateMaxSize(new_size, policy.slot_size);
1685
0
  ABSL_ASSUME(new_size > 0);
1686
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, SizeToCapacity(new_size),
1687
0
                                   /*force_infoz=*/false);
1688
  // This is after resize, to ensure that we have completed the allocation
1689
  // and have potentially sampled the hashtable.
1690
0
  common.infoz().RecordReservation(new_size);
1691
0
}
1692
1693
// Type-erased version of raw_hash_set::reserve for tables that have an
1694
// allocated backing array.
1695
//
1696
// Requires:
1697
//   1. `c.capacity() > policy.soo_capacity()` OR `!c.empty()`.
1698
// Reserving already-allocated tables is considered a rare case.
1699
ABSL_ATTRIBUTE_NOINLINE void ReserveAllocatedTable(
1700
    CommonFields& common, const PolicyFunctions& __restrict policy,
1701
0
    size_t new_size) {
1702
0
  const size_t cap = common.capacity();
1703
0
  ValidateMaxSize(new_size, policy.slot_size);
1704
0
  ABSL_ASSUME(new_size > 0);
1705
0
  const size_t new_capacity = SizeToCapacity(new_size);
1706
0
  if (cap == policy.soo_capacity()) {
1707
0
    ABSL_SWISSTABLE_ASSERT(!common.empty());
1708
0
    ResizeFullSooTable(common, policy, new_capacity,
1709
0
                       ResizeFullSooTableSamplingMode::kNoSampling);
1710
0
  } else {
1711
0
    ABSL_SWISSTABLE_ASSERT(cap > policy.soo_capacity());
1712
    // TODO(b/382423690): consider using GrowToNextCapacity, when applicable.
1713
0
    ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
1714
0
  }
1715
0
  common.infoz().RecordReservation(new_size);
1716
0
}
1717
1718
// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO
1719
// table to be sampled. SOO tables need to switch from SOO to heap in order to
1720
// store the infoz. No-op if sampling is disabled or not possible.
1721
void GrowFullSooTableToNextCapacityForceSampling(
1722
0
    CommonFields& common, const PolicyFunctions& __restrict policy) {
1723
0
  AssertFullSoo(common, policy);
1724
0
  ResizeFullSooTable(
1725
0
      common, policy, NextCapacity(SooCapacity()),
1726
0
      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled);
1727
0
}
1728
1729
}  // namespace
1730
1731
10
void* GetRefForEmptyClass(CommonFields& common) {
1732
  // The empty base optimization typically makes the empty base class address
1733
  // the same as the first address of the derived class object.
1734
  // But we generally assume that for empty classes we can return any valid
1735
  // pointer.
1736
10
  return &common;
1737
10
}
1738
1739
void ResizeAllocatedTableWithSeedChange(
1740
    CommonFields& common, const PolicyFunctions& __restrict policy,
1741
0
    size_t new_capacity) {
1742
0
  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedAllocated>(
1743
0
      common, policy, new_capacity, common.infoz());
1744
0
}
1745
1746
void ReserveEmptyNonAllocatedTableToFitBucketCount(
1747
    CommonFields& common, const PolicyFunctions& __restrict policy,
1748
0
    size_t bucket_count) {
1749
0
  size_t new_capacity = NormalizeCapacity(bucket_count);
1750
0
  ValidateMaxSize(CapacityToGrowth(new_capacity), policy.slot_size);
1751
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
1752
0
                                   /*force_infoz=*/false);
1753
0
}
1754
1755
// Resizes a full SOO table to the NextCapacity(SooCapacity()).
1756
template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
1757
size_t GrowSooTableToNextCapacityAndPrepareInsert(
1758
    CommonFields& common, const PolicyFunctions& __restrict policy,
1759
0
    absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling) {
1760
0
  AssertSoo(common, policy);
1761
0
  if (ABSL_PREDICT_FALSE(force_sampling)) {
1762
    // The table is empty; this path is only used for forced sampling of SOO tables.
1763
0
    return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1764
0
        common, policy, get_hash);
1765
0
  }
1766
0
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
1767
0
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
1768
0
  const size_t slot_size = policy.slot_size;
1769
0
  void* alloc = policy.get_char_alloc(common);
1770
0
  common.set_capacity(kNewCapacity);
1771
1772
  // Since the table is not empty, it will not be sampled.
1773
  // The decision to sample was already made during the first insertion.
1774
  //
1775
  // We do not set control and slots in CommonFields yet to avoid overwriting
1777
  // the SOO data.
1777
0
  const auto [new_ctrl, new_slots] = AllocBackingArray(
1778
0
      common, policy, kNewCapacity, /*has_infoz=*/false, alloc);
1779
1780
0
  PrepareInsertCommon(common);
1781
0
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1782
0
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1783
0
  common.generate_new_seed(/*has_infoz=*/false);
1784
0
  const h2_t soo_slot_h2 = H2(policy.hash_slot(
1785
0
      policy.hash_fn(common), common.soo_data(), common.seed().seed()));
1786
0
  const size_t new_hash = get_hash(common.seed().seed());
1787
1788
0
  const size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1789
0
  InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset,
1790
0
                                      new_ctrl);
1791
1792
0
  SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
1793
0
  void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
1794
0
  SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
1795
0
  if constexpr (TransferUsesMemcpy) {
1796
    // The target slot is placed at index 1, but the capacity is at
1797
    // least 3, so we are allowed to copy at least twice as much
1798
    // memory.
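    // For example, with a hypothetical 12-byte slot type rounded up to a
    // 16-byte SooSlotMemcpySize, the memcpy below writes the 12 bytes of
    // slot 1 plus 4 bytes spilling into slot 2; that is safe because the
    // capacity is at least 3, and the spilled-over bytes are unpoisoned
    // around the copy and re-poisoned afterwards.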
1799
0
    static_assert(SooSlotIndex() == 1);
1800
0
    static_assert(SooSlotMemcpySize > 0);
1801
0
    static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
1802
0
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
1803
0
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
1804
0
    void* next_slot = SlotAddress(target_slot, 1, slot_size);
1805
0
    SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1806
0
    std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
1807
0
    SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1808
0
  } else {
1809
0
    static_assert(SooSlotMemcpySize == 0);
1810
0
    policy.transfer_n(&common, target_slot, common.soo_data(), 1);
1811
0
  }
1812
0
  common.set_control(new_ctrl);
1813
0
  common.set_slots(new_slots);
1814
1815
  // A full SOO table cannot have been sampled: if the SOO table were sampled,
1816
  // it would have been resized to the next capacity.
1817
0
  ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());
1818
0
  SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
1819
0
                                slot_size);
1820
0
  return offset;
1821
0
}
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<0ul, false>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<1ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<4ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<8ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<16ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
1822
1823
void Rehash(CommonFields& common, const PolicyFunctions& __restrict policy,
1824
0
            size_t n) {
1825
0
  const size_t cap = common.capacity();
1826
1827
0
  auto clear_backing_array = [&]() {
1828
0
    ClearBackingArray(common, policy, policy.get_char_alloc(common),
1829
0
                      /*reuse=*/false, policy.soo_enabled);
1830
0
  };
1831
1832
0
  const size_t slot_size = policy.slot_size;
1833
1834
0
  if (n == 0) {
1835
0
    if (cap <= policy.soo_capacity()) return;
1836
0
    if (common.empty()) {
1837
0
      clear_backing_array();
1838
0
      return;
1839
0
    }
1840
0
    if (common.size() <= policy.soo_capacity()) {
1841
      // When the table is already sampled, we keep it sampled.
1842
0
      if (common.infoz().IsSampled()) {
1843
0
        static constexpr size_t kInitialSampledCapacity =
1844
0
            NextCapacity(SooCapacity());
1845
0
        if (cap > kInitialSampledCapacity) {
1846
0
          ResizeAllocatedTableWithSeedChange(common, policy,
1847
0
                                             kInitialSampledCapacity);
1848
0
        }
1849
        // This asserts that we didn't lose sampling coverage in `resize`.
1850
0
        ABSL_SWISSTABLE_ASSERT(common.infoz().IsSampled());
1851
0
        return;
1852
0
      }
1853
0
      ABSL_SWISSTABLE_ASSERT(slot_size <= sizeof(HeapOrSoo));
1854
0
      ABSL_SWISSTABLE_ASSERT(policy.slot_align <= alignof(HeapOrSoo));
1855
0
      HeapOrSoo tmp_slot;
1856
0
      size_t begin_offset = FindFirstFullSlot(0, cap, common.control());
1857
0
      policy.transfer_n(
1858
0
          &common, &tmp_slot,
1859
0
          SlotAddress(common.slot_array(), begin_offset, slot_size), 1);
1860
0
      clear_backing_array();
1861
0
      policy.transfer_n(&common, common.soo_data(), &tmp_slot, 1);
1862
0
      common.set_full_soo();
1863
0
      return;
1864
0
    }
1865
0
  }
1866
1867
0
  ValidateMaxSize(n, policy.slot_size);
1868
  // bitor is a faster way of doing `max` here. We will round up to the next
1869
  // power-of-2-minus-1, so bitor is good enough.
1870
0
  const size_t new_capacity =
1871
0
      NormalizeCapacity(n | SizeToCapacity(common.size()));
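  // For example (hypothetical values): with n == 20 and
  // SizeToCapacity(common.size()) == 7, max would give 20 and bitor gives
  // 20 | 7 == 23; both round up to the same NormalizeCapacity result, 31.
  // The bitor can only set bits below the leading bit of the larger operand,
  // and those bits are set by the round-up to 2^k - 1 anyway.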
1872
  // n == 0 unconditionally rehashes as per the standard.
1873
0
  if (n == 0 || new_capacity > cap) {
1874
0
    if (cap == policy.soo_capacity()) {
1875
0
      if (common.empty()) {
1876
0
        ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
1877
0
                                         /*force_infoz=*/false);
1878
0
      } else {
1879
0
        ResizeFullSooTable(common, policy, new_capacity,
1880
0
                           ResizeFullSooTableSamplingMode::kNoSampling);
1881
0
      }
1882
0
    } else {
1883
0
      ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
1884
0
    }
1885
    // This is after resize, to ensure that we have completed the allocation
1886
    // and have potentially sampled the hashtable.
1887
0
    common.infoz().RecordReservation(n);
1888
0
  }
1889
0
}
1890
1891
void Copy(CommonFields& common, const PolicyFunctions& __restrict policy,
1892
          const CommonFields& other,
1893
0
          absl::FunctionRef<void(void*, const void*)> copy_fn) {
1894
0
  const size_t size = other.size();
1895
0
  ABSL_SWISSTABLE_ASSERT(size > 0);
1896
0
  const size_t soo_capacity = policy.soo_capacity();
1897
0
  const size_t slot_size = policy.slot_size;
1898
0
  const bool soo_enabled = policy.soo_enabled;
1899
0
  if (size == 1) {
1900
0
    if (!soo_enabled) ReserveTableToFitNewSize(common, policy, 1);
1901
0
    IncrementSmallSize(common, policy);
1902
0
    const size_t other_capacity = other.capacity();
1903
0
    const void* other_slot =
1904
0
        other_capacity <= soo_capacity ? other.soo_data()
1905
0
        : other.is_small()
1906
0
            ? other.slot_array()
1907
0
            : SlotAddress(other.slot_array(),
1908
0
                          FindFirstFullSlot(0, other_capacity, other.control()),
1909
0
                          slot_size);
1910
0
    copy_fn(soo_enabled ? common.soo_data() : common.slot_array(), other_slot);
1911
1912
0
    if (soo_enabled && policy.is_hashtablez_eligible &&
1913
0
        ShouldSampleNextTable()) {
1914
0
      GrowFullSooTableToNextCapacityForceSampling(common, policy);
1915
0
    }
1916
0
    return;
1917
0
  }
1918
1919
0
  ReserveTableToFitNewSize(common, policy, size);
1920
0
  auto infoz = common.infoz();
1921
0
  ABSL_SWISSTABLE_ASSERT(other.capacity() > soo_capacity);
1922
0
  const size_t cap = common.capacity();
1923
0
  ABSL_SWISSTABLE_ASSERT(cap > soo_capacity);
1924
0
  size_t offset = cap;
1925
0
  const void* hash_fn = policy.hash_fn(common);
1926
0
  auto hasher = policy.hash_slot;
1927
0
  const size_t seed = common.seed().seed();
1928
0
  IterateOverFullSlotsImpl(
1929
0
      other, slot_size, [&](const ctrl_t*, void* that_slot) {
1930
        // The table is guaranteed to be empty, so we can do this faster than
1931
        // a full `insert`.
1932
0
        const size_t hash = (*hasher)(hash_fn, that_slot, seed);
1933
0
        FindInfo target = find_first_non_full(common, hash);
1934
0
        infoz.RecordInsertMiss(hash, target.probe_length);
1935
0
        offset = target.offset;
1936
0
        SetCtrl(common, offset, H2(hash), slot_size);
1937
0
        copy_fn(SlotAddress(common.slot_array(), offset, slot_size), that_slot);
1938
0
        common.maybe_increment_generation_on_insert();
1939
0
      });
1940
0
  common.increment_size(size);
1941
0
  common.growth_info().OverwriteManyEmptyAsFull(size);
1942
0
}
1943
1944
void ReserveTableToFitNewSize(CommonFields& common,
1945
                              const PolicyFunctions& __restrict policy,
1946
0
                              size_t new_size) {
1947
0
  common.reset_reserved_growth(new_size);
1948
0
  common.set_reservation_size(new_size);
1949
0
  ABSL_SWISSTABLE_ASSERT(new_size > policy.soo_capacity());
1950
0
  const size_t cap = common.capacity();
1951
0
  if (ABSL_PREDICT_TRUE(common.empty() && cap <= policy.soo_capacity())) {
1952
0
    return ReserveEmptyNonAllocatedTableToFitNewSize(common, policy, new_size);
1953
0
  }
1954
1955
0
  ABSL_SWISSTABLE_ASSERT(!common.empty() || cap > policy.soo_capacity());
1956
0
  ABSL_SWISSTABLE_ASSERT(cap > 0);
1957
0
  const size_t max_size_before_growth =
1958
0
      IsSmallCapacity(cap) ? cap : common.size() + common.growth_left();
1959
0
  if (new_size <= max_size_before_growth) {
1960
0
    return;
1961
0
  }
1962
0
  ReserveAllocatedTable(common, policy, new_size);
1963
0
}
1964
1965
namespace {
1966
size_t PrepareInsertLargeImpl(CommonFields& common,
1967
                              const PolicyFunctions& __restrict policy,
1968
                              size_t hash,
1969
                              Group::NonIterableBitMaskType mask_empty,
1970
12
                              FindInfo target_group) {
1971
12
  ABSL_SWISSTABLE_ASSERT(!common.is_small());
1972
12
  const GrowthInfo growth_info = common.growth_info();
1973
  // When there are no deleted slots in the table
1974
  // and growth_left is positive, we can insert at the first
1975
  // empty slot in the probe sequence (target).
1976
12
  if (ABSL_PREDICT_FALSE(!growth_info.HasNoDeletedAndGrowthLeft())) {
1977
4
    return PrepareInsertLargeSlow(common, policy, hash);
1978
4
  }
1979
8
  PrepareInsertCommon(common);
1980
8
  common.growth_info().OverwriteEmptyAsFull();
1981
8
  target_group.offset += mask_empty.LowestBitSet();
1982
8
  target_group.offset &= common.capacity();
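  // Since capacities have the form 2^k - 1, `& capacity()` is a cheap modulo
  // for the wrap-around: e.g. with a hypothetical capacity of 15 (0b1111), an
  // offset of 18 wraps to 18 & 15 == 2.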
1983
8
  SetCtrl(common, target_group.offset, H2(hash), policy.slot_size);
1984
8
  common.infoz().RecordInsertMiss(hash, target_group.probe_length);
1985
8
  return target_group.offset;
1986
12
}
1987
}  // namespace
1988
1989
size_t PrepareInsertLarge(CommonFields& common,
1990
                          const PolicyFunctions& __restrict policy, size_t hash,
1991
                          Group::NonIterableBitMaskType mask_empty,
1992
12
                          FindInfo target_group) {
1993
  // NOLINTNEXTLINE(misc-static-assert)
1994
12
  ABSL_SWISSTABLE_ASSERT(!SwisstableGenerationsEnabled());
1995
12
  return PrepareInsertLargeImpl(common, policy, hash, mask_empty, target_group);
1996
12
}
1997
1998
size_t PrepareInsertLargeGenerationsEnabled(
1999
    CommonFields& common, const PolicyFunctions& policy, size_t hash,
2000
    Group::NonIterableBitMaskType mask_empty, FindInfo target_group,
2001
0
    absl::FunctionRef<size_t(size_t)> recompute_hash) {
2002
  // NOLINTNEXTLINE(misc-static-assert)
2003
0
  ABSL_SWISSTABLE_ASSERT(SwisstableGenerationsEnabled());
2004
0
  if (common.should_rehash_for_bug_detection_on_insert()) {
2005
    // Move to a different heap allocation in order to detect bugs.
2006
0
    const size_t cap = common.capacity();
2007
0
    ResizeAllocatedTableWithSeedChange(
2008
0
        common, policy, common.growth_left() > 0 ? cap : NextCapacity(cap));
2009
0
    hash = recompute_hash(common.seed().seed());
2010
0
    std::tie(target_group, mask_empty) =
2011
0
        find_first_non_full_group(common, hash);
2012
0
  }
2013
0
  return PrepareInsertLargeImpl(common, policy, hash, mask_empty, target_group);
2014
0
}
2015
2016
namespace {
2017
// Returns true if both of the following hold:
2019
// 1. OptimalMemcpySizeForSooSlotTransfer(left) >
2020
//    OptimalMemcpySizeForSooSlotTransfer(left - 1)
2021
// 2. OptimalMemcpySizeForSooSlotTransfer(i) is the same for all i in [left,
2022
//    right].
2023
// This function is used to verify that we have all the possible template
2024
// instantiations for GrowSooTableToNextCapacityAndPrepareInsert.
2025
// With this verification, the problem is detected at compile time instead of
2026
// link time.
2026
constexpr bool VerifyOptimalMemcpySizeForSooSlotTransferRange(size_t left,
2027
0
                                                              size_t right) {
2028
0
  size_t optimal_size_for_range = OptimalMemcpySizeForSooSlotTransfer(left);
2029
0
  if (optimal_size_for_range <= OptimalMemcpySizeForSooSlotTransfer(left - 1)) {
2030
0
    return false;
2031
0
  }
2032
0
  for (size_t i = left + 1; i <= right; ++i) {
2033
0
    if (OptimalMemcpySizeForSooSlotTransfer(i) != optimal_size_for_range) {
2034
0
      return false;
2035
0
    }
2036
0
  }
2037
0
  return true;
2038
0
}
2039
}  // namespace
2040
2041
// Extern template instantiation for inline function.
2042
template size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
2043
                                              size_t old_capacity,
2044
                                              ctrl_t* new_ctrl,
2045
                                              size_t new_capacity);
2046
2047
// We need to instantiate ALL possible template combinations because we define
2048
// the function in the cc file.
2049
template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
2050
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2051
    bool);
2052
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
2053
    OptimalMemcpySizeForSooSlotTransfer(1), true>(
2054
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2055
    bool);
2056
2057
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(2, 3));
2058
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
2059
    OptimalMemcpySizeForSooSlotTransfer(3), true>(
2060
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2061
    bool);
2062
2063
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(4, 8));
2064
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
2065
    OptimalMemcpySizeForSooSlotTransfer(8), true>(
2066
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2067
    bool);
2068
2069
#if UINTPTR_MAX == UINT32_MAX
2070
static_assert(MaxSooSlotSize() == 8);
2071
#else
2072
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(9, 16));
2073
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
2074
    OptimalMemcpySizeForSooSlotTransfer(16), true>(
2075
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
2076
    bool);
2077
static_assert(MaxSooSlotSize() == 16);
2078
#endif
2079
2080
template void* AllocateBackingArray<BackingArrayAlignment(alignof(size_t)),
2081
                                    std::allocator<char>>(void* alloc,
2082
                                                          size_t n);
2083
template void DeallocateBackingArray<BackingArrayAlignment(alignof(size_t)),
2084
                                     std::allocator<char>>(
2085
    void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
2086
    size_t slot_align, bool had_infoz);
2087
2088
}  // namespace container_internal
2089
ABSL_NAMESPACE_END
2090
}  // namespace absl