Coverage Report

Created: 2025-07-17 06:14

/src/abseil-cpp/absl/container/internal/raw_hash_set.cc
Line
Count
Source
1
// Copyright 2018 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
15
#include "absl/container/internal/raw_hash_set.h"
16
17
#include <atomic>
18
#include <cassert>
19
#include <cstddef>
20
#include <cstdint>
21
#include <cstring>
22
#include <utility>
23
24
#include "absl/base/attributes.h"
25
#include "absl/base/config.h"
26
#include "absl/base/dynamic_annotations.h"
27
#include "absl/base/internal/endian.h"
28
#include "absl/base/internal/raw_logging.h"
29
#include "absl/base/optimization.h"
30
#include "absl/container/internal/container_memory.h"
31
#include "absl/container/internal/hashtable_control_bytes.h"
32
#include "absl/container/internal/hashtablez_sampler.h"
33
#include "absl/container/internal/raw_hash_set_resize_impl.h"
34
#include "absl/functional/function_ref.h"
35
#include "absl/hash/hash.h"
36
37
namespace absl {
38
ABSL_NAMESPACE_BEGIN
39
namespace container_internal {
40
41
// Represents a control byte corresponding to a full slot with arbitrary hash.
42
0
constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
43
44
// A single control byte for default-constructed iterators. We leave it
45
// uninitialized because reading this memory is a bug.
46
ABSL_DLL ctrl_t kDefaultIterControl;
47
48
// We need one full byte followed by a sentinel byte for iterator::operator++.
49
ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[2] = {ZeroCtrlT(),
50
                                                        ctrl_t::kSentinel};
51
52
namespace {
53
54
#ifdef ABSL_SWISSTABLE_ASSERT
55
#error ABSL_SWISSTABLE_ASSERT cannot be directly set
56
#else
57
// We use this macro for assertions that users may see when the table is in an
58
// invalid state that sanitizers may help diagnose.
59
#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
60
19.9M
  assert((CONDITION) && "Try enabling sanitizers.")
61
#endif
62
63
0
[[noreturn]] ABSL_ATTRIBUTE_NOINLINE void HashTableSizeOverflow() {
64
0
  ABSL_RAW_LOG(FATAL, "Hash table size overflow");
65
0
}
66
67
0
void ValidateMaxSize(size_t size, size_t slot_size) {
68
0
  if (IsAboveValidSize(size, slot_size)) {
69
0
    HashTableSizeOverflow();
70
0
  }
71
0
}
72
73
// Returns "random" seed.
74
0
inline size_t RandomSeed() {
75
0
#ifdef ABSL_HAVE_THREAD_LOCAL
76
0
  static thread_local size_t counter = 0;
77
0
  size_t value = ++counter;
78
#else   // ABSL_HAVE_THREAD_LOCAL
79
  static std::atomic<size_t> counter(0);
80
  size_t value = counter.fetch_add(1, std::memory_order_relaxed);
81
#endif  // ABSL_HAVE_THREAD_LOCAL
82
0
  return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
83
0
}
84
85
0
bool ShouldRehashForBugDetection(size_t capacity) {
86
  // Note: we can't use the abseil-random library because abseil-random
87
  // depends on swisstable. We want to return true with probability
88
  // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
89
  // we probe based on a random hash and see if the offset is less than
90
  // RehashProbabilityConstant().
91
0
  return probe(capacity, absl::HashOf(RandomSeed())).offset() <
92
0
         RehashProbabilityConstant();
93
0
}
94
95
// Finds a non-deterministic hash for a single-group table.
96
// The last two bits are used to find a position for a newly inserted element
97
// after resize.
98
// This function essentially uses the last bits of H2 to save a shift operation.
99
121k
size_t SingleGroupTableH1(size_t hash, PerTableSeed seed) {
100
121k
  return hash ^ seed.seed();
101
121k
}
102
103
// Returns the offset of the new element after resize from capacity 1 to 3.
104
55.0k
size_t Resize1To3NewOffset(size_t hash, PerTableSeed seed) {
105
  // After resize from capacity 1 to 3, we always have exactly the slot with
106
  // index 1 occupied, so we need to insert either at index 0 or index 2.
107
55.0k
  static_assert(SooSlotIndex() == 1);
108
55.0k
  return SingleGroupTableH1(hash, seed) & 2;
109
55.0k
}
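Because SingleGroupTableH1 has already mixed the per-table seed into the hash, the `& 2` above picks pseudo-randomly between the only two free indices, 0 and 2. A tiny self-contained check of the masking property (illustrative sketch, not library code):

  #include <cassert>
  #include <cstddef>

  int main() {
    for (size_t h1 = 0; h1 < 16; ++h1) {
      size_t offset = h1 & 2;              // mirrors the mask above
      assert(offset == 0 || offset == 2);  // never the occupied index 1
    }
  }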
110
111
// Returns the address of the slot `i` iterations after `slot` assuming each
112
// slot has the specified size.
113
66.0k
inline void* NextSlot(void* slot, size_t slot_size, size_t i = 1) {
114
66.0k
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) +
115
66.0k
                                 slot_size * i);
116
66.0k
}
117
118
// Returns the address of the slot just before `slot` assuming each slot has the
119
// specified size.
120
0
inline void* PrevSlot(void* slot, size_t slot_size) {
121
0
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
122
0
}
123
124
}  // namespace
125
126
0
GenerationType* EmptyGeneration() {
127
0
  if (SwisstableGenerationsEnabled()) {
128
0
    constexpr size_t kNumEmptyGenerations = 1024;
129
0
    static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{};
130
0
    return const_cast<GenerationType*>(
131
0
        &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);
132
0
  }
133
0
  return nullptr;
134
0
}
135
136
bool CommonFieldsGenerationInfoEnabled::
137
0
    should_rehash_for_bug_detection_on_insert(size_t capacity) const {
138
0
  if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
139
0
  if (reserved_growth_ > 0) return false;
140
0
  return ShouldRehashForBugDetection(capacity);
141
0
}
142
143
bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
144
0
    size_t capacity) const {
145
0
  return ShouldRehashForBugDetection(capacity);
146
0
}
147
148
namespace {
149
150
FindInfo find_first_non_full_from_h1(const ctrl_t* ctrl, size_t h1,
151
178k
                                     size_t capacity) {
152
178k
  auto seq = probe_h1(capacity, h1);
153
178k
  if (IsEmptyOrDeleted(ctrl[seq.offset()])) {
154
91.3k
    return {seq.offset(), /*probe_length=*/0};
155
91.3k
  }
156
88.6k
  while (true) {
157
88.6k
    GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
158
88.6k
    auto mask = g.MaskEmptyOrDeleted();
159
88.6k
    if (mask) {
160
87.3k
      return {seq.offset(mask.LowestBitSet()), seq.index()};
161
87.3k
    }
162
1.30k
    seq.next();
163
1.30k
    ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity && "full table!");
164
1.30k
  }
165
87.3k
}
166
167
// Probes an array of control bits using a probe sequence derived from `hash`,
168
// and returns the offset corresponding to the first deleted or empty slot.
169
//
170
// Behavior when the entire table is full is undefined.
171
//
172
// NOTE: this function must work with tables having both empty and deleted
173
// slots in the same group. Such tables appear during `erase()`.
174
102k
FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
175
102k
  return find_first_non_full_from_h1(common.control(), H1(hash),
176
102k
                                     common.capacity());
177
102k
}
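The probe sequence used above is triangular: it starts at `h1 & capacity` and advances by one additional group width per step, which visits every group exactly once when the capacity is 2^k - 1. A simplified standalone model of that sequence (a sketch under those assumptions; the names are illustrative, not the library's API):

  #include <cstddef>

  struct ToyProbeSeq {
    size_t mask, offset, index = 0;
    ToyProbeSeq(size_t h1, size_t capacity)  // capacity must be 2^k - 1
        : mask(capacity), offset(h1 & mask) {}
    void next(size_t width) {
      index += width;                    // stride grows: w, 2w, 3w, ...
      offset = (offset + index) & mask;  // triangular probing
    }
  };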
178
179
// Whether a table fits in half a group. A half-group table fits entirely into a
180
// probing group, i.e., has a capacity < `Group::kWidth`.
181
//
182
// In half-group mode we are able to use the whole capacity. The extra control
183
// bytes give us at least one "empty" control byte to stop the iteration.
184
// This is important to make 1 a valid capacity.
185
//
186
// In half-group mode only the first `capacity` control bytes after the sentinel
187
// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
188
// represent a real slot.
189
0
constexpr bool is_half_group(size_t capacity) {
190
0
  return capacity < Group::kWidth - 1;
191
0
}
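For concreteness, assuming Group::kWidth == 16 (the SIMD group size; portable builds use 8), capacities 1, 3, and 7 are half-group tables, while 15 and above are not:

  #include <cstddef>

  constexpr bool toy_is_half_group(size_t capacity, size_t width = 16) {
    return capacity < width - 1;  // same predicate as above
  }
  static_assert(toy_is_half_group(1) && toy_is_half_group(3) &&
                toy_is_half_group(7));
  static_assert(!toy_is_half_group(15) && !toy_is_half_group(31));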
192
193
template <class Fn>
194
0
void IterateOverFullSlotsImpl(const CommonFields& c, size_t slot_size, Fn cb) {
195
0
  const size_t cap = c.capacity();
196
0
  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(cap));
197
0
  const ctrl_t* ctrl = c.control();
198
0
  void* slot = c.slot_array();
199
0
  if (is_half_group(cap)) {
200
    // Mirrored/cloned control bytes in a half-group table are also located in
201
    // the first group (starting from position 0). We take the group from
202
    // position `capacity` in order to avoid duplicates.
203
204
    // A half-group table's capacity fits into a portable group, where
205
    // GroupPortableImpl::MaskFull is more efficient for
206
    // capacity <= GroupPortableImpl::kWidth.
207
0
    ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&
208
0
                           "unexpectedly large half-group capacity");
209
0
    static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
210
0
                  "unexpected group width");
211
    // The group starts from the kSentinel slot, so indices in the mask will
212
    // be increased by 1.
213
0
    const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
214
0
    --ctrl;
215
0
    slot = PrevSlot(slot, slot_size);
216
0
    for (uint32_t i : mask) {
217
0
      cb(ctrl + i, SlotAddress(slot, i, slot_size));
218
0
    }
219
0
    return;
220
0
  }
221
0
  size_t remaining = c.size();
222
0
  ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
223
0
  while (remaining != 0) {
224
0
    for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
225
0
      ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&
226
0
                             "hash table was modified unexpectedly");
227
0
      cb(ctrl + i, SlotAddress(slot, i, slot_size));
228
0
      --remaining;
229
0
    }
230
0
    ctrl += Group::kWidth;
231
0
    slot = NextSlot(slot, slot_size, Group::kWidth);
232
0
    ABSL_SWISSTABLE_ASSERT(
233
0
        (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
234
0
        "hash table was modified unexpectedly");
235
0
  }
236
  // NOTE: erasure of the current element is allowed in the callback for the
237
  // absl::erase_if specialization, so we use `>=`.
238
0
  ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&
239
0
                         "hash table was modified unexpectedly");
240
0
}
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)> >(absl::container_internal::CommonFields const&, unsigned long, absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)>)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0>(absl::container_internal::CommonFields const&, unsigned long, absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0)
241
242
}  // namespace
243
244
0
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
245
0
  ABSL_SWISSTABLE_ASSERT(ctrl[capacity] == ctrl_t::kSentinel);
246
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
247
0
  for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
248
0
    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
249
0
  }
250
  // Copy the cloned ctrl bytes.
251
0
  std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
252
0
  ctrl[capacity] = ctrl_t::kSentinel;
253
0
}
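The SIMD conversion above applies a simple per-byte mapping over the raw ctrl_t values (full h2 bytes are 0..127; kEmpty is -128, kDeleted is -2, kSentinel is -1). A scalar model of that mapping (illustrative sketch only):

  #include <cstdint>

  int8_t ToyConvertByte(int8_t c) {
    if (c >= 0) return -2;  // full -> kDeleted ("needs re-placement")
    return -128;            // kEmpty/kDeleted/kSentinel -> kEmpty
  }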
254
255
void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
256
0
                          absl::FunctionRef<void(const ctrl_t*, void*)> cb) {
257
0
  IterateOverFullSlotsImpl(c, slot_size, cb);
258
0
}
259
260
namespace {
261
262
354k
void ResetGrowthLeft(GrowthInfo& growth_info, size_t capacity, size_t size) {
263
354k
  growth_info.InitGrowthLeftNoDeleted(CapacityToGrowth(capacity) - size);
264
354k
}
265
266
185k
void ResetGrowthLeft(CommonFields& common) {
267
185k
  ResetGrowthLeft(common.growth_info(), common.capacity(), common.size());
268
185k
}
269
270
// Finds an empty slot that is guaranteed to exist, from the given position.
271
// NOTE: this function is almost never triggered inside of
272
// DropDeletesWithoutResize, so we keep it simple.
273
// The table is rather sparse, so an empty slot will be found very quickly.
274
0
size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) {
275
0
  for (size_t i = start; i < end; ++i) {
276
0
    if (IsEmpty(ctrl[i])) {
277
0
      return i;
278
0
    }
279
0
  }
280
0
  ABSL_UNREACHABLE();
281
0
}
282
283
// Finds a full slot that is guaranteed to exist, starting from the given position.
284
// NOTE: this function is only triggered for rehash(0), when we need to
285
// go back to SOO state, so we keep it simple.
286
0
size_t FindFirstFullSlot(size_t start, size_t end, const ctrl_t* ctrl) {
287
0
  for (size_t i = start; i < end; ++i) {
288
0
    if (IsFull(ctrl[i])) {
289
0
      return i;
290
0
    }
291
0
  }
292
0
  ABSL_UNREACHABLE();
293
0
}
294
295
8.82M
void PrepareInsertCommon(CommonFields& common) {
296
8.82M
  common.increment_size();
297
8.82M
  common.maybe_increment_generation_on_insert();
298
8.82M
}
299
300
size_t DropDeletesWithoutResizeAndPrepareInsert(
301
    CommonFields& common, const PolicyFunctions& __restrict policy,
302
0
    size_t new_hash) {
303
0
  void* set = &common;
304
0
  void* slot_array = common.slot_array();
305
0
  const size_t capacity = common.capacity();
306
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
307
0
  ABSL_SWISSTABLE_ASSERT(!is_single_group(capacity));
308
  // Algorithm:
309
  // - mark all DELETED slots as EMPTY
310
  // - mark all FULL slots as DELETED
311
  // - for each slot marked as DELETED
312
  //     hash = Hash(element)
313
  //     target = find_first_non_full(hash)
314
  //     if target is in the same group
315
  //       mark slot as FULL
316
  //     else if target is EMPTY
317
  //       transfer element to target
318
  //       mark slot as EMPTY
319
  //       mark target as FULL
320
  //     else if target is DELETED
321
  //       swap current element with target element
322
  //       mark target as FULL
323
  //       repeat procedure for current slot with moved from element (target)
324
0
  ctrl_t* ctrl = common.control();
325
0
  ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
326
0
  const void* hash_fn = policy.hash_fn(common);
327
0
  auto hasher = policy.hash_slot;
328
0
  auto transfer_n = policy.transfer_n;
329
0
  const size_t slot_size = policy.slot_size;
330
331
0
  size_t total_probe_length = 0;
332
0
  void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
333
334
  // The index of an empty slot that can be used as temporary memory for
335
  // the swap operation.
336
0
  constexpr size_t kUnknownId = ~size_t{};
337
0
  size_t tmp_space_id = kUnknownId;
338
339
0
  for (size_t i = 0; i != capacity;
340
0
       ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
341
0
    ABSL_SWISSTABLE_ASSERT(slot_ptr == SlotAddress(slot_array, i, slot_size));
342
0
    if (IsEmpty(ctrl[i])) {
343
0
      tmp_space_id = i;
344
0
      continue;
345
0
    }
346
0
    if (!IsDeleted(ctrl[i])) continue;
347
0
    const size_t hash = (*hasher)(hash_fn, slot_ptr, common.seed().seed());
348
0
    const FindInfo target = find_first_non_full(common, hash);
349
0
    const size_t new_i = target.offset;
350
0
    total_probe_length += target.probe_length;
351
352
    // Check whether the old and new indices fall within the same probe group
353
    // with respect to the hash. If they do, we don't need to move the object,
354
    // as it already falls in the best probe group we can achieve.
355
0
    const size_t probe_offset = probe(common, hash).offset();
356
0
    const h2_t h2 = H2(hash);
357
0
    const auto probe_index = [probe_offset, capacity](size_t pos) {
358
0
      return ((pos - probe_offset) & capacity) / Group::kWidth;
359
0
    };
360
361
    // Element doesn't move.
362
0
    if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
363
0
      SetCtrlInLargeTable(common, i, h2, slot_size);
364
0
      continue;
365
0
    }
366
367
0
    void* new_slot_ptr = SlotAddress(slot_array, new_i, slot_size);
368
0
    if (IsEmpty(ctrl[new_i])) {
369
      // Transfer element to the empty spot.
370
      // SetCtrl poisons/unpoisons the slots so we have to call it at the
371
      // right time.
372
0
      SetCtrlInLargeTable(common, new_i, h2, slot_size);
373
0
      (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
374
0
      SetCtrlInLargeTable(common, i, ctrl_t::kEmpty, slot_size);
375
      // Initialize or change empty space id.
376
0
      tmp_space_id = i;
377
0
    } else {
378
0
      ABSL_SWISSTABLE_ASSERT(IsDeleted(ctrl[new_i]));
379
0
      SetCtrlInLargeTable(common, new_i, h2, slot_size);
380
      // Until we are done rehashing, DELETED marks previously FULL slots.
381
382
0
      if (tmp_space_id == kUnknownId) {
383
0
        tmp_space_id = FindEmptySlot(i + 1, capacity, ctrl);
384
0
      }
385
0
      void* tmp_space = SlotAddress(slot_array, tmp_space_id, slot_size);
386
0
      SanitizerUnpoisonMemoryRegion(tmp_space, slot_size);
387
388
      // Swap i and new_i elements.
389
0
      (*transfer_n)(set, tmp_space, new_slot_ptr, 1);
390
0
      (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
391
0
      (*transfer_n)(set, slot_ptr, tmp_space, 1);
392
393
0
      SanitizerPoisonMemoryRegion(tmp_space, slot_size);
394
395
      // repeat the processing of the ith slot
396
0
      --i;
397
0
      slot_ptr = PrevSlot(slot_ptr, slot_size);
398
0
    }
399
0
  }
400
  // Prepare insert for the new element.
401
0
  PrepareInsertCommon(common);
402
0
  ResetGrowthLeft(common);
403
0
  FindInfo find_info = find_first_non_full(common, new_hash);
404
0
  SetCtrlInLargeTable(common, find_info.offset, H2(new_hash), slot_size);
405
0
  common.infoz().RecordInsert(new_hash, find_info.probe_length);
406
0
  common.infoz().RecordRehash(total_probe_length);
407
0
  return find_info.offset;
408
0
}
409
410
0
bool WasNeverFull(CommonFields& c, size_t index) {
411
0
  if (is_single_group(c.capacity())) {
412
0
    return true;
413
0
  }
414
0
  const size_t index_before = (index - Group::kWidth) & c.capacity();
415
0
  const auto empty_after = Group(c.control() + index).MaskEmpty();
416
0
  const auto empty_before = Group(c.control() + index_before).MaskEmpty();
417
418
  // We count how many consecutive non-empties we have to the right and to the
419
  // left of `it`. If the sum is >= kWidth then there is at least one probe
420
  // window that might have seen a full group.
421
0
  return empty_before && empty_after &&
422
0
         static_cast<size_t>(empty_after.TrailingZeros()) +
423
0
                 empty_before.LeadingZeros() <
424
0
             Group::kWidth;
425
0
}
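In other words, the erased slot may become kEmpty rather than a kDeleted tombstone only if no window of Group::kWidth consecutive control bytes containing it was ever fully non-empty; otherwise some probe may have skipped past it. A scalar model of the same window check (a sketch assuming a 2^k - 1 capacity used as the mask; illustrative only):

  #include <cstddef>

  bool ToyWasNeverFull(const bool* is_empty, size_t mask, size_t index,
                       size_t width) {
    size_t after = 0;   // non-empty run starting at `index`
    while (after < width && !is_empty[(index + after) & mask]) ++after;
    size_t before = 0;  // non-empty run ending just before `index`
    while (before < width && !is_empty[(index - 1 - before) & mask]) ++before;
    return after + before < width;  // no width-wide window was ever full
  }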
426
427
// Updates the control bytes to indicate a completely empty table such that all
428
// control bytes are kEmpty except for the kSentinel byte.
429
185k
void ResetCtrl(CommonFields& common, size_t slot_size) {
430
185k
  const size_t capacity = common.capacity();
431
185k
  ctrl_t* ctrl = common.control();
432
185k
  static constexpr size_t kTwoGroupCapacity = 2 * Group::kWidth - 1;
433
185k
  if (ABSL_PREDICT_TRUE(capacity <= kTwoGroupCapacity)) {
434
171k
    if (IsSmallCapacity(capacity)) return;
435
171k
    std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
436
171k
    std::memset(ctrl + capacity, static_cast<int8_t>(ctrl_t::kEmpty),
437
171k
                Group::kWidth);
438
171k
    if (capacity == kTwoGroupCapacity) {
439
79.8k
      std::memset(ctrl + Group::kWidth, static_cast<int8_t>(ctrl_t::kEmpty),
440
79.8k
                  Group::kWidth);
441
79.8k
    }
442
171k
  } else {
443
14.0k
    std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
444
14.0k
                capacity + 1 + NumClonedBytes());
445
14.0k
  }
446
185k
  ctrl[capacity] = ctrl_t::kSentinel;
447
185k
  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
448
185k
}
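The fast path above trades a capacity-dependent memset for at most three fixed-width stores that are allowed to overlap. A standalone sketch for capacity 15, assuming Group::kWidth == 16: the array needs capacity + 1 + (kWidth - 1) = 31 bytes, and two 16-byte stores cover them, overlapping at index 15 where the sentinel is then written.

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  int main() {
    constexpr size_t kWidth = 16, kCap = 15;
    uint8_t ctrl[kCap + kWidth];             // 31 bytes total
    std::memset(ctrl, 0x80, kWidth);         // kEmpty over [0, 16)
    std::memset(ctrl + kCap, 0x80, kWidth);  // kEmpty over [15, 31), overlap ok
    ctrl[kCap] = 0xFF;                       // kSentinel at index 15
    for (size_t i = 0; i < sizeof ctrl; ++i)
      assert(ctrl[i] == (i == kCap ? 0xFF : 0x80));
  }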
449
450
// Initializes control bytes for growing from capacity 1 to 3.
451
// `orig_h2` is placed in the position `SooSlotIndex()`.
452
// `new_h2` is placed in the position `new_offset`.
453
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void InitializeThreeElementsControlBytes(
454
55.0k
    h2_t orig_h2, h2_t new_h2, size_t new_offset, ctrl_t* new_ctrl) {
455
55.0k
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
456
55.0k
  static_assert(kNewCapacity == 3);
457
55.0k
  static_assert(is_single_group(kNewCapacity));
458
55.0k
  static_assert(SooSlotIndex() == 1);
459
55.0k
  ABSL_SWISSTABLE_ASSERT(new_offset == 0 || new_offset == 2);
460
461
55.0k
  static constexpr uint64_t kEmptyXorSentinel =
462
55.0k
      static_cast<uint8_t>(ctrl_t::kEmpty) ^
463
55.0k
      static_cast<uint8_t>(ctrl_t::kSentinel);
464
55.0k
  static constexpr uint64_t kEmpty64 = static_cast<uint8_t>(ctrl_t::kEmpty);
465
55.0k
  static constexpr size_t kMirroredSooSlotIndex =
466
55.0k
      SooSlotIndex() + kNewCapacity + 1;
467
  // The first 8 bytes, where the original and mirrored SOO slot positions are
468
  // replaced with 0.
469
  // Result will look like: E0ESE0EE
470
55.0k
  static constexpr uint64_t kFirstCtrlBytesWithZeroes =
471
55.0k
      k8EmptyBytes ^ (kEmpty64 << (8 * SooSlotIndex())) ^
472
55.0k
      (kEmptyXorSentinel << (8 * kNewCapacity)) ^
473
55.0k
      (kEmpty64 << (8 * kMirroredSooSlotIndex));
474
475
55.0k
  const uint64_t soo_h2 = static_cast<uint64_t>(orig_h2);
476
55.0k
  const uint64_t new_h2_xor_empty =
477
55.0k
      static_cast<uint64_t>(new_h2 ^ static_cast<uint8_t>(ctrl_t::kEmpty));
478
  // Fill the original and mirrored bytes for SOO slot.
479
  // Result will look like:
480
  // EHESEHEE
481
  // Where H = soo_h2, E = kEmpty, S = kSentinel.
482
55.0k
  uint64_t first_ctrl_bytes =
483
55.0k
      ((soo_h2 << (8 * SooSlotIndex())) | kFirstCtrlBytesWithZeroes) |
484
55.0k
      (soo_h2 << (8 * kMirroredSooSlotIndex));
485
  // Replace original and mirrored empty bytes for the new position.
486
  // Result for new_offset 0 will look like:
487
  // NHESNHEE
488
  // Where H = soo_h2, N = H2(new_hash), E = kEmpty, S = kSentinel.
489
  // Result for new_offset 2 will look like:
490
  // EHNSEHNE
491
55.0k
  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_offset));
492
55.0k
  size_t new_mirrored_offset = new_offset + kNewCapacity + 1;
493
55.0k
  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_mirrored_offset));
494
495
  // Fill last bytes with kEmpty.
496
55.0k
  std::memset(new_ctrl + kNewCapacity, static_cast<int8_t>(ctrl_t::kEmpty),
497
55.0k
              Group::kWidth);
498
  // Overwrite the first 8 bytes with first_ctrl_bytes.
499
55.0k
  absl::little_endian::Store64(new_ctrl, first_ctrl_bytes);
500
501
  // Example for group size 16:
502
  // new_ctrl after 1st memset =      ???EEEEEEEEEEEEEEEE
503
  // new_offset 0:
504
  // new_ctrl after 2nd store  =      NHESNHEEEEEEEEEEEEE
505
  // new_offset 2:
506
  // new_ctrl after 2nd store  =      EHNSEHNEEEEEEEEEEEE
507
508
  // Example for group size 8:
509
  // new_ctrl after 1st memset =      ???EEEEEEEE
510
  // new_offset 0:
511
  // new_ctrl after 2nd store  =      NHESNHEEEEE
512
  // new_offset 2:
513
  // new_ctrl after 2nd store  =      EHNSEHNEEEE
514
55.0k
}
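A worked instance of the xor scheme above (raw little-endian byte values; kEmpty = 0x80 and kSentinel = 0xFF as unsigned bytes): with orig_h2 = 0x11, new_h2 = 0x22, and new_offset = 0, the mirrored new offset is 0 + 3 + 1 = 4, and the resulting first 8 control bytes are "NHESNHEE". An illustrative compile-time reconstruction:

  #include <cstdint>

  constexpr uint64_t kE = 0x80, kS = 0xFF;
  // "E0ESE0EE": the zeroed word, with zero bytes at indices 1 and 5.
  constexpr uint64_t kZeroed = kE | (kE << 16) | (kS << 24) | (kE << 32) |
                               (kE << 48) | (kE << 56);
  constexpr uint64_t kWord =
      (kZeroed | (0x11ULL << 8) | (0x11ULL << 40))  // orig_h2 at 1 and 5
      ^ ((0x22 ^ kE) << 0) ^ ((0x22 ^ kE) << 32);   // new_h2 at 0 and 4
  static_assert((kWord & 0xFF) == 0x22 &&           // byte 0: N
                ((kWord >> 8) & 0xFF) == 0x11 &&    // byte 1: H
                ((kWord >> 24) & 0xFF) == 0xFF);    // byte 3: S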
515
516
}  // namespace
517
518
0
void EraseMetaOnlySmall(CommonFields& c, bool soo_enabled, size_t slot_size) {
519
0
  ABSL_SWISSTABLE_ASSERT(c.is_small());
520
0
  if (soo_enabled) {
521
0
    c.set_empty_soo();
522
0
    return;
523
0
  }
524
0
  c.decrement_size();
525
0
  c.infoz().RecordErase();
526
0
  SanitizerPoisonMemoryRegion(c.slot_array(), slot_size);
527
0
}
528
529
0
void EraseMetaOnlyLarge(CommonFields& c, const ctrl_t* ctrl, size_t slot_size) {
530
0
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
531
0
  ABSL_SWISSTABLE_ASSERT(IsFull(*ctrl) && "erasing a dangling iterator");
532
0
  c.decrement_size();
533
0
  c.infoz().RecordErase();
534
535
0
  size_t index = static_cast<size_t>(ctrl - c.control());
536
537
0
  if (WasNeverFull(c, index)) {
538
0
    SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
539
0
    c.growth_info().OverwriteFullAsEmpty();
540
0
    return;
541
0
  }
542
543
0
  c.growth_info().OverwriteFullAsDeleted();
544
0
  SetCtrlInLargeTable(c, index, ctrl_t::kDeleted, slot_size);
545
0
}
546
547
void ClearBackingArray(CommonFields& c,
548
                       const PolicyFunctions& __restrict policy, void* alloc,
549
202k
                       bool reuse, bool soo_enabled) {
550
202k
  if (reuse) {
551
185k
    c.set_size_to_zero();
552
185k
    ABSL_SWISSTABLE_ASSERT(!soo_enabled || c.capacity() > SooCapacity());
553
185k
    ResetCtrl(c, policy.slot_size);
554
185k
    ResetGrowthLeft(c);
555
185k
    c.infoz().RecordStorageChanged(0, c.capacity());
556
185k
  } else {
557
    // We need to record infoz before calling dealloc, which will unregister
558
    // infoz.
559
16.3k
    c.infoz().RecordClearedReservation();
560
16.3k
    c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);
561
16.3k
    c.infoz().Unregister();
562
16.3k
    (*policy.dealloc)(alloc, c.capacity(), c.control(), policy.slot_size,
563
16.3k
                      policy.slot_align, c.has_infoz());
564
16.3k
    c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{non_soo_tag_t{}};
565
16.3k
  }
566
202k
}
567
568
namespace {
569
570
enum class ResizeNonSooMode {
571
  kGuaranteedEmpty,
572
  kGuaranteedAllocated,
573
};
574
575
// Iterates over full slots in the old table, finds new positions for them,
576
// and transfers the slots.
577
// This function is used for reserving or rehashing non-empty tables.
578
// This use case is rare, so the function is type-erased.
579
// Returns the total probe length.
580
size_t FindNewPositionsAndTransferSlots(
581
    CommonFields& common, const PolicyFunctions& __restrict policy,
582
0
    ctrl_t* old_ctrl, void* old_slots, size_t old_capacity) {
583
0
  void* new_slots = common.slot_array();
584
0
  const void* hash_fn = policy.hash_fn(common);
585
0
  const size_t slot_size = policy.slot_size;
586
0
  const size_t seed = common.seed().seed();
587
588
0
  const auto insert_slot = [&](void* slot) {
589
0
    size_t hash = policy.hash_slot(hash_fn, slot, seed);
590
0
    FindInfo target;
591
0
    if (common.is_small()) {
592
0
      target = FindInfo{0, 0};
593
0
    } else {
594
0
      target = find_first_non_full(common, hash);
595
0
      SetCtrl(common, target.offset, H2(hash), slot_size);
596
0
    }
597
0
    policy.transfer_n(&common, SlotAddress(new_slots, target.offset, slot_size),
598
0
                      slot, 1);
599
0
    return target.probe_length;
600
0
  };
601
0
  if (IsSmallCapacity(old_capacity)) {
602
0
    if (common.size() == 1) insert_slot(old_slots);
603
0
    return 0;
604
0
  }
605
0
  size_t total_probe_length = 0;
606
0
  for (size_t i = 0; i < old_capacity; ++i) {
607
0
    if (IsFull(old_ctrl[i])) {
608
0
      total_probe_length += insert_slot(old_slots);
609
0
    }
610
0
    old_slots = NextSlot(old_slots, slot_size);
611
0
  }
612
0
  return total_probe_length;
613
0
}
614
615
void ReportGrowthToInfozImpl(CommonFields& common, HashtablezInfoHandle infoz,
616
                             size_t hash, size_t total_probe_length,
617
0
                             size_t distance_from_desired) {
618
0
  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
619
0
  infoz.RecordStorageChanged(common.size() - 1, common.capacity());
620
0
  infoz.RecordRehash(total_probe_length);
621
0
  infoz.RecordInsert(hash, distance_from_desired);
622
0
  common.set_has_infoz();
623
  // TODO(b/413062340): we could potentially store infoz in place of the
624
  // control pointer for the capacity 1 case.
625
0
  common.set_infoz(infoz);
626
0
}
627
628
// Specialization to avoid passing two 0s from the hot function.
629
ABSL_ATTRIBUTE_NOINLINE void ReportSingleGroupTableGrowthToInfoz(
630
0
    CommonFields& common, HashtablezInfoHandle infoz, size_t hash) {
631
0
  ReportGrowthToInfozImpl(common, infoz, hash, /*total_probe_length=*/0,
632
0
                          /*distance_from_desired=*/0);
633
0
}
634
635
ABSL_ATTRIBUTE_NOINLINE void ReportGrowthToInfoz(CommonFields& common,
636
                                                 HashtablezInfoHandle infoz,
637
                                                 size_t hash,
638
                                                 size_t total_probe_length,
639
0
                                                 size_t distance_from_desired) {
640
0
  ReportGrowthToInfozImpl(common, infoz, hash, total_probe_length,
641
0
                          distance_from_desired);
642
0
}
643
644
ABSL_ATTRIBUTE_NOINLINE void ReportResizeToInfoz(CommonFields& common,
645
                                                 HashtablezInfoHandle infoz,
646
0
                                                 size_t total_probe_length) {
647
0
  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
648
0
  infoz.RecordStorageChanged(common.size(), common.capacity());
649
0
  infoz.RecordRehash(total_probe_length);
650
0
  common.set_has_infoz();
651
0
  common.set_infoz(infoz);
652
0
}
653
654
struct BackingArrayPtrs {
655
  ctrl_t* ctrl;
656
  void* slots;
657
};
658
659
BackingArrayPtrs AllocBackingArray(CommonFields& common,
660
                                   const PolicyFunctions& __restrict policy,
661
                                   size_t new_capacity, bool has_infoz,
662
223k
                                   void* alloc) {
663
223k
  RawHashSetLayout layout(new_capacity, policy.slot_size, policy.slot_align,
664
223k
                          has_infoz);
665
223k
  char* mem = static_cast<char*>(policy.alloc(alloc, layout.alloc_size()));
666
223k
  const GenerationType old_generation = common.generation();
667
223k
  common.set_generation_ptr(
668
223k
      reinterpret_cast<GenerationType*>(mem + layout.generation_offset()));
669
223k
  common.set_generation(NextGeneration(old_generation));
670
671
223k
  return {reinterpret_cast<ctrl_t*>(mem + layout.control_offset()),
672
223k
          mem + layout.slot_offset()};
673
223k
}
674
675
template <ResizeNonSooMode kMode>
676
void ResizeNonSooImpl(CommonFields& common,
677
                      const PolicyFunctions& __restrict policy,
678
0
                      size_t new_capacity, HashtablezInfoHandle infoz) {
679
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
680
0
  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
681
682
0
  const size_t old_capacity = common.capacity();
683
0
  [[maybe_unused]] ctrl_t* old_ctrl;
684
0
  [[maybe_unused]] void* old_slots;
685
0
  if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
686
0
    old_ctrl = common.control();
687
0
    old_slots = common.slot_array();
688
0
  }
689
690
0
  const size_t slot_size = policy.slot_size;
691
0
  const size_t slot_align = policy.slot_align;
692
0
  const bool has_infoz = infoz.IsSampled();
693
0
  void* alloc = policy.get_char_alloc(common);
694
695
0
  common.set_capacity(new_capacity);
696
0
  const auto [new_ctrl, new_slots] =
697
0
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
698
0
  common.set_control(new_ctrl);
699
0
  common.set_slots(new_slots);
700
0
  common.generate_new_seed(has_infoz);
701
702
0
  size_t total_probe_length = 0;
703
0
  ResetCtrl(common, slot_size);
704
0
  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedEmpty ||
705
0
                         old_capacity == policy.soo_capacity());
706
0
  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedAllocated ||
707
0
                         old_capacity > 0);
708
0
  if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
709
0
    total_probe_length = FindNewPositionsAndTransferSlots(
710
0
        common, policy, old_ctrl, old_slots, old_capacity);
711
0
    (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
712
0
                      has_infoz);
713
0
    ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
714
0
                    common.size());
715
0
  } else {
716
0
    GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(
717
0
        CapacityToGrowth(new_capacity));
718
0
  }
719
720
0
  if (ABSL_PREDICT_FALSE(has_infoz)) {
721
0
    ReportResizeToInfoz(common, infoz, total_probe_length);
722
0
  }
723
0
}
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)0>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)1>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
724
725
void ResizeEmptyNonAllocatedTableImpl(CommonFields& common,
726
                                      const PolicyFunctions& __restrict policy,
727
0
                                      size_t new_capacity, bool force_infoz) {
728
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
729
0
  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
730
0
  ABSL_SWISSTABLE_ASSERT(!force_infoz || policy.soo_enabled);
731
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= policy.soo_capacity());
732
0
  ABSL_SWISSTABLE_ASSERT(common.empty());
733
0
  const size_t slot_size = policy.slot_size;
734
0
  HashtablezInfoHandle infoz;
735
0
  const bool should_sample =
736
0
      policy.is_hashtablez_eligible && (force_infoz || ShouldSampleNextTable());
737
0
  if (ABSL_PREDICT_FALSE(should_sample)) {
738
0
    infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
739
0
                            policy.soo_capacity());
740
0
  }
741
0
  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedEmpty>(common, policy,
742
0
                                                       new_capacity, infoz);
743
0
}
744
745
// If the table was SOO, initializes new control bytes and transfers the slot.
746
// After transferring the slot, sets control and slots in CommonFields.
747
// It is rare to resize an SOO table with one element to a large size.
748
// Requires: `c` contains SOO data.
749
void InsertOldSooSlotAndInitializeControlBytes(
750
    CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* new_ctrl,
751
0
    void* new_slots, bool has_infoz) {
752
0
  ABSL_SWISSTABLE_ASSERT(c.size() == policy.soo_capacity());
753
0
  ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
754
0
  size_t new_capacity = c.capacity();
755
756
0
  c.generate_new_seed(has_infoz);
757
758
0
  const size_t soo_slot_hash =
759
0
      policy.hash_slot(policy.hash_fn(c), c.soo_data(), c.seed().seed());
760
0
  size_t offset = probe(new_capacity, soo_slot_hash).offset();
761
0
  offset = offset == new_capacity ? 0 : offset;
762
0
  SanitizerPoisonMemoryRegion(new_slots, policy.slot_size * new_capacity);
763
0
  void* target_slot = SlotAddress(new_slots, offset, policy.slot_size);
764
0
  SanitizerUnpoisonMemoryRegion(target_slot, policy.slot_size);
765
0
  policy.transfer_n(&c, target_slot, c.soo_data(), 1);
766
0
  c.set_control(new_ctrl);
767
0
  c.set_slots(new_slots);
768
0
  ResetCtrl(c, policy.slot_size);
769
0
  SetCtrl(c, offset, H2(soo_slot_hash), policy.slot_size);
770
0
}
771
772
enum class ResizeFullSooTableSamplingMode {
773
  kNoSampling,
774
  // Force sampling. If the table was still not sampled, do not resize.
775
  kForceSampleNoResizeIfUnsampled,
776
};
777
778
void AssertSoo([[maybe_unused]] CommonFields& common,
779
55.0k
               [[maybe_unused]] const PolicyFunctions& policy) {
780
55.0k
  ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
781
55.0k
  ABSL_SWISSTABLE_ASSERT(common.capacity() == policy.soo_capacity());
782
55.0k
}
783
void AssertFullSoo([[maybe_unused]] CommonFields& common,
784
0
                   [[maybe_unused]] const PolicyFunctions& policy) {
785
0
  AssertSoo(common, policy);
786
0
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
787
0
}
788
789
void ResizeFullSooTable(CommonFields& common,
790
                        const PolicyFunctions& __restrict policy,
791
                        size_t new_capacity,
792
0
                        ResizeFullSooTableSamplingMode sampling_mode) {
793
0
  AssertFullSoo(common, policy);
794
0
  const size_t slot_size = policy.slot_size;
795
0
  void* alloc = policy.get_char_alloc(common);
796
797
0
  HashtablezInfoHandle infoz;
798
0
  bool has_infoz = false;
799
0
  if (sampling_mode ==
800
0
      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled) {
801
0
    if (ABSL_PREDICT_FALSE(policy.is_hashtablez_eligible)) {
802
0
      infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
803
0
                              policy.soo_capacity());
804
0
    }
805
806
0
    if (!infoz.IsSampled()) return;
807
0
    has_infoz = true;
808
0
  }
809
810
0
  common.set_capacity(new_capacity);
811
812
  // We do not set control and slots in CommonFields yet, to avoid overwriting
813
  // SOO data.
814
0
  const auto [new_ctrl, new_slots] =
815
0
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
816
817
0
  InsertOldSooSlotAndInitializeControlBytes(common, policy, new_ctrl, new_slots,
818
0
                                            has_infoz);
819
0
  ResetGrowthLeft(common);
820
0
  if (has_infoz) {
821
0
    common.set_has_infoz();
822
0
    common.set_infoz(infoz);
823
0
    infoz.RecordStorageChanged(common.size(), new_capacity);
824
0
  }
825
0
}
826
827
void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* __restrict old_ctrl,
828
                                            size_t old_capacity,
829
                                            ctrl_t* __restrict new_ctrl,
830
66.0k
                                            size_t new_capacity) {
831
66.0k
  ABSL_SWISSTABLE_ASSERT(is_single_group(new_capacity));
832
66.0k
  constexpr size_t kHalfWidth = Group::kWidth / 2;
833
66.0k
  ABSL_ASSUME(old_capacity < kHalfWidth);
834
66.0k
  ABSL_ASSUME(old_capacity > 0);
835
66.0k
  static_assert(Group::kWidth == 8 || Group::kWidth == 16,
836
66.0k
                "Group size is not supported.");
837
838
  // NOTE: operations are done with a compile-time-known size of 8 bytes.
840
  // The compiler optimizes that into a single ASM operation.
840
841
  // Load the bytes from old_capacity. This contains
842
  // - the sentinel byte
843
  // - all the old control bytes
844
  // - the rest is filled with kEmpty bytes
845
  // Example:
846
  // old_ctrl =     012S012EEEEEEEEE...
847
  // copied_bytes = S012EEEE
848
66.0k
  uint64_t copied_bytes = absl::little_endian::Load64(old_ctrl + old_capacity);
849
850
  // We change the sentinel byte to kEmpty before storing both to the start of
851
  // new_ctrl and, later, past the end of new_ctrl for the new cloned
852
  // bytes. Note that this is faster than setting the sentinel byte to kEmpty
853
  // directly in new_ctrl after the copy because we are limited by store
854
  // bandwidth.
855
66.0k
  static constexpr uint64_t kEmptyXorSentinel =
856
66.0k
      static_cast<uint8_t>(ctrl_t::kEmpty) ^
857
66.0k
      static_cast<uint8_t>(ctrl_t::kSentinel);
858
859
  // Replace the first byte kSentinel with kEmpty.
860
  // The result is the old control bytes shifted by one byte.
861
  // Example:
862
  // old_ctrl = 012S012EEEEEEEEE...
863
  // before =   S012EEEE
864
  // after  =   E012EEEE
865
66.0k
  copied_bytes ^= kEmptyXorSentinel;
866
867
66.0k
  if (Group::kWidth == 8) {
868
    // With group size 8, we can grow with two write operations.
869
0
    ABSL_SWISSTABLE_ASSERT(old_capacity < 8 &&
870
0
                           "old_capacity is too large for group size 8");
871
0
    absl::little_endian::Store64(new_ctrl, copied_bytes);
872
873
0
    static constexpr uint64_t kSentinel64 =
874
0
        static_cast<uint8_t>(ctrl_t::kSentinel);
875
876
    // Prepend kSentinel byte to the beginning of copied_bytes.
877
    // We have at most 3 non-empty bytes at the beginning of copied_bytes for
878
    // group size 8.
879
    // Example:
880
    // old_ctrl = 012S012EEEE
881
    // before =   E012EEEE
882
    // after  =   SE012EEE
883
0
    copied_bytes = (copied_bytes << 8) ^ kSentinel64;
884
0
    absl::little_endian::Store64(new_ctrl + new_capacity, copied_bytes);
885
    // Example for capacity 3:
886
    // old_ctrl = 012S012EEEE
887
    // After the first store:
888
    //           >!
889
    // new_ctrl = E012EEEE???????
890
    // After the second store:
891
    //                  >!
892
    // new_ctrl = E012EEESE012EEE
893
0
    return;
894
0
  }
895
896
66.0k
  ABSL_SWISSTABLE_ASSERT(Group::kWidth == 16);  // NOLINT(misc-static-assert)
897
898
  // Fill the second half of the main control bytes with kEmpty.
899
  // For small capacities this may write into the mirrored control bytes.
901
  // That is fine, as we will overwrite all of those bytes later.
901
66.0k
  std::memset(new_ctrl + kHalfWidth, static_cast<int8_t>(ctrl_t::kEmpty),
902
66.0k
              kHalfWidth);
903
  // Fill the second half of the mirrored control bytes with kEmpty.
904
66.0k
  std::memset(new_ctrl + new_capacity + kHalfWidth,
905
66.0k
              static_cast<int8_t>(ctrl_t::kEmpty), kHalfWidth);
906
  // Copy the first half of the non-mirrored control bytes.
907
66.0k
  absl::little_endian::Store64(new_ctrl, copied_bytes);
908
66.0k
  new_ctrl[new_capacity] = ctrl_t::kSentinel;
909
  // Copy the first half of the mirrored control bytes.
910
66.0k
  absl::little_endian::Store64(new_ctrl + new_capacity + 1, copied_bytes);
911
912
  // Example for growth capacity 1->3:
913
  // old_ctrl =                  0S0EEEEEEEEEEEEEE
914
  // new_ctrl at the end =       E0ESE0EEEEEEEEEEEEE
915
  //                                    >!
916
  // new_ctrl after 1st memset = ????????EEEEEEEE???
917
  //                                       >!
918
  // new_ctrl after 2nd memset = ????????EEEEEEEEEEE
919
  //                            >!
920
  // new_ctrl after 1st store =  E0EEEEEEEEEEEEEEEEE
921
  // new_ctrl after kSentinel =  E0ESEEEEEEEEEEEEEEE
922
  //                                >!
923
  // new_ctrl after 2nd store =  E0ESE0EEEEEEEEEEEEE
924
925
  // Example for growth capacity 3->7:
926
  // old_ctrl =                  012S012EEEEEEEEEEEE
927
  // new_ctrl at the end =       E012EEESE012EEEEEEEEEEE
928
  //                                    >!
929
  // new_ctrl after 1st memset = ????????EEEEEEEE???????
930
  //                                           >!
931
  // new_ctrl after 2nd memset = ????????EEEEEEEEEEEEEEE
932
  //                            >!
933
  // new_ctrl after 1st store =  E012EEEEEEEEEEEEEEEEEEE
934
  // new_ctrl after kSentinel =  E012EEESEEEEEEEEEEEEEEE
935
  //                                >!
936
  // new_ctrl after 2nd store =  E012EEESE012EEEEEEEEEEE
937
938
  // Example for growth capacity 7->15:
939
  // old_ctrl =                  0123456S0123456EEEEEEEE
940
  // new_ctrl at the end =       E0123456EEEEEEESE0123456EEEEEEE
941
  //                                    >!
942
  // new_ctrl after 1st memset = ????????EEEEEEEE???????????????
943
  //                                                   >!
944
  // new_ctrl after 2nd memset = ????????EEEEEEEE???????EEEEEEEE
945
  //                            >!
946
  // new_ctrl after 1st store =  E0123456EEEEEEEE???????EEEEEEEE
947
  // new_ctrl after kSentinel =  E0123456EEEEEEES???????EEEEEEEE
948
  //                                            >!
949
  // new_ctrl after 2nd store =  E0123456EEEEEEESE0123456EEEEEEE
950
66.0k
}
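The kEmptyXorSentinel trick above works because xoring a byte with (kEmpty ^ kSentinel) swaps those two values, and the loaded word holds kSentinel only in its lowest byte; the higher bytes of the constant are zero, so every other byte is untouched. In isolation (raw byte values, little-endian; illustrative only):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t kEmptyXorSentinel = 0x80 ^ 0xFF;       // 0x7F
    uint64_t word = 0x80808080020100FFULL;                // "S012EEEE"
    word ^= kEmptyXorSentinel;                            // -> "E012EEEE"
    assert((word & 0xFF) == 0x80);                        // kSentinel -> kEmpty
    assert((word >> 8) == (0x80808080020100FFULL >> 8));  // rest untouched
  }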
951
952
// Size of the buffer we allocate on the stack for storing probed elements in
953
// the GrowToNextCapacity algorithm.
954
constexpr size_t kProbedElementsBufferSize = 512;
955
956
// Decodes information about probed elements from contiguous memory.
957
// Finds a new position for each element and transfers it to the new slots.
958
// Returns the total probe length.
959
template <typename ProbedItem>
960
ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertImpl(
961
    CommonFields& c, const PolicyFunctions& __restrict policy,
962
31.4k
    const ProbedItem* start, const ProbedItem* end, void* old_slots) {
963
31.4k
  const size_t new_capacity = c.capacity();
964
965
31.4k
  void* new_slots = c.slot_array();
966
31.4k
  ctrl_t* new_ctrl = c.control();
967
31.4k
  size_t total_probe_length = 0;
968
969
31.4k
  const size_t slot_size = policy.slot_size;
970
31.4k
  auto transfer_n = policy.transfer_n;
971
972
107k
  for (; start < end; ++start) {
973
76.5k
    const FindInfo target = find_first_non_full_from_h1(
974
76.5k
        new_ctrl, static_cast<size_t>(start->h1), new_capacity);
975
76.5k
    total_probe_length += target.probe_length;
976
76.5k
    const size_t old_index = static_cast<size_t>(start->source_offset);
977
76.5k
    const size_t new_i = target.offset;
978
76.5k
    ABSL_SWISSTABLE_ASSERT(old_index < new_capacity / 2);
979
76.5k
    ABSL_SWISSTABLE_ASSERT(new_i < new_capacity);
980
76.5k
    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_i]));
981
76.5k
    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
982
76.5k
    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
983
76.5k
    SanitizerUnpoisonMemoryRegion(dst_slot, slot_size);
984
76.5k
    transfer_n(&c, dst_slot, src_slot, 1);
985
76.5k
    SetCtrlInLargeTable(c, new_i, static_cast<h2_t>(start->h2), slot_size);
986
76.5k
  }
987
31.4k
  return total_probe_length;
988
31.4k
}
raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned int, 32ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned int, 32ul> const*, absl::container_internal::ProbedItemImpl<unsigned int, 32ul> const*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned long, 64ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned long, 64ul> const*, absl::container_internal::ProbedItemImpl<unsigned long, 64ul> const*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned long, 122ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned long, 122ul> const*, absl::container_internal::ProbedItemImpl<unsigned long, 122ul> const*, void*)
989
990
// Sentinel value for the start of marked elements.
991
// Signals that there are no marked elements.
992
constexpr size_t kNoMarkedElementsSentinel = ~size_t{};
993
994
// Processes probed elements that did not fit into the available buffers.
995
// We marked them as kSentinel in the control bytes.
996
// Hash recomputation and full probing are done here.
997
// This use case should be extremely rare.
998
ABSL_ATTRIBUTE_NOINLINE size_t ProcessProbedMarkedElements(
999
    CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl,
1000
0
    void* old_slots, size_t start) {
1001
0
  size_t old_capacity = PreviousCapacity(c.capacity());
1002
0
  const size_t slot_size = policy.slot_size;
1003
0
  void* new_slots = c.slot_array();
1004
0
  size_t total_probe_length = 0;
1005
0
  const void* hash_fn = policy.hash_fn(c);
1006
0
  auto hash_slot = policy.hash_slot;
1007
0
  auto transfer_n = policy.transfer_n;
1008
0
  const size_t seed = c.seed().seed();
1009
0
  for (size_t old_index = start; old_index < old_capacity; ++old_index) {
1010
0
    if (old_ctrl[old_index] != ctrl_t::kSentinel) {
1011
0
      continue;
1012
0
    }
1013
0
    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
1014
0
    const size_t hash = hash_slot(hash_fn, src_slot, seed);
1015
0
    const FindInfo target = find_first_non_full(c, hash);
1016
0
    total_probe_length += target.probe_length;
1017
0
    const size_t new_i = target.offset;
1018
0
    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
1019
0
    SetCtrlInLargeTable(c, new_i, H2(hash), slot_size);
1020
0
    transfer_n(&c, dst_slot, src_slot, 1);
1021
0
  }
1022
0
  return total_probe_length;
1023
0
}
1024
1025
// The largest old capacity for which it is guaranteed that all probed elements
1026
// fit in ProbedItemEncoder's local buffer.
1027
// For such tables, `encode_probed_element` is trivial.
1028
constexpr size_t kMaxLocalBufferOldCapacity =
1029
    kProbedElementsBufferSize / sizeof(ProbedItem4Bytes) - 1;
1030
static_assert(IsValidCapacity(kMaxLocalBufferOldCapacity));
1031
constexpr size_t kMaxLocalBufferNewCapacity =
1032
    NextCapacity(kMaxLocalBufferOldCapacity);
1033
static_assert(kMaxLocalBufferNewCapacity <= ProbedItem4Bytes::kMaxNewCapacity);
1034
static_assert(NextCapacity(kMaxLocalBufferNewCapacity) <=
1035
              ProbedItem4Bytes::kMaxNewCapacity);
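For concreteness (assuming sizeof(ProbedItem4Bytes) == 4, as its name suggests): the local buffer holds 512 / 4 = 128 items, so kMaxLocalBufferOldCapacity is 127 = 2^7 - 1, a valid capacity, and since NextCapacity takes 2^k - 1 to 2^(k+1) - 1, the two static_asserts above cover new capacities 255 and 511:

  static_assert(512 / 4 - 1 == 127);  // kMaxLocalBufferOldCapacity
  static_assert(2 * 127 + 1 == 255);  // kMaxLocalBufferNewCapacity
  static_assert(2 * 255 + 1 == 511);  // one further growth step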
1036
1037
// Initializes mirrored control bytes after
1038
// transfer_unprobed_elements_to_next_capacity.
1039
102k
void InitializeMirroredControlBytes(ctrl_t* new_ctrl, size_t new_capacity) {
1040
102k
  std::memcpy(new_ctrl + new_capacity,
1041
              // We own GrowthInfo just before control bytes. So it is ok
1042
              // to read one byte from it.
1043
102k
              new_ctrl - 1, Group::kWidth);
1044
102k
  new_ctrl[new_capacity] = ctrl_t::kSentinel;
1045
102k
}
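The memcpy above establishes the cloned-bytes invariant: the kWidth - 1 bytes after the sentinel mirror the first kWidth - 1 main bytes, so a full-group load starting at any main index reads valid data. A minimal sketch, assuming kWidth == 8 and capacity 7 (illustrative only):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  int main() {
    constexpr size_t kWidth = 8, kCap = 7;
    uint8_t ctrl[kCap + kWidth] = {10, 11, 12, 13, 14, 15, 16};
    std::memcpy(ctrl + kCap + 1, ctrl, kWidth - 1);  // clone leading bytes
    ctrl[kCap] = 0xFF;                               // kSentinel
    for (size_t i = 0; i + 1 < kWidth; ++i)
      assert(ctrl[kCap + 1 + i] == ctrl[i]);
  }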
1046
1047
// Encodes probed elements into available memory.
1048
// At first, a local (on stack) buffer is used. The size of the buffer is
1049
// kProbedElementsBufferSize bytes.
1050
// When the local buffer is full, we switch to the `control_` buffer. We are
1051
// allowed to overwrite the `control_` buffer up to the `source_offset` byte.
1052
// If there is no space left in the `control_` buffer, we fall back to a naive
1053
// algorithm for all the remaining probed elements: we mark them as kSentinel
1054
// in the control bytes and process them fully later. See
1055
// ProcessProbedMarkedElements for details. This should be extremely rare.
1056
template <typename ProbedItemType,
1057
          // If true, we only use the local buffer and never switch to the
1058
          // control buffer.
1059
          bool kGuaranteedFitToBuffer = false>
1060
class ProbedItemEncoder {
1061
 public:
1062
  using ProbedItem = ProbedItemType;
1063
102k
  explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Line
Count
Source
1063
85.8k
  explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Line
Count
Source
1063
16.3k
  explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
1064
1065
  // Encode item into the best available location.
1066
76.5k
  void EncodeItem(ProbedItem item) {
1067
76.5k
    if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
1068
0
      return ProcessEncodeWithOverflow(item);
1069
0
    }
1070
76.5k
    ABSL_SWISSTABLE_ASSERT(pos_ < end_);
1071
76.5k
    *pos_ = item;
1072
76.5k
    ++pos_;
1073
76.5k
  }
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Line
Count
Source
1066
47.3k
  void EncodeItem(ProbedItem item) {
1067
47.3k
    if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
1068
0
      return ProcessEncodeWithOverflow(item);
1069
0
    }
1070
47.3k
    ABSL_SWISSTABLE_ASSERT(pos_ < end_);
1071
47.3k
    *pos_ = item;
1072
47.3k
    ++pos_;
1073
47.3k
  }
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Line
Count
Source
1066
29.2k
  void EncodeItem(ProbedItem item) {
1067
29.2k
    if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
1068
0
      return ProcessEncodeWithOverflow(item);
1069
0
    }
1070
29.2k
    ABSL_SWISSTABLE_ASSERT(pos_ < end_);
1071
29.2k
    *pos_ = item;
1072
29.2k
    ++pos_;
1073
29.2k
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned long, 64ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned long, 122ul>)
1074
1075
  // Decodes information about probed elements from all available sources.
1076
  // Finds a new position for each element and transfers it to the new slots.
1077
  // Returns the total probe length.
1078
  size_t DecodeAndInsertToTable(CommonFields& common,
1079
                                const PolicyFunctions& __restrict policy,
1080
102k
                                void* old_slots) const {
1081
102k
    if (pos_ == buffer_) {
1082
70.7k
      return 0;
1083
70.7k
    }
1084
31.4k
    if constexpr (kGuaranteedFitToBuffer) {
1085
23.0k
      return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
1086
23.0k
    }
1087
0
    size_t total_probe_length = DecodeAndInsertImpl(
1088
31.4k
        common, policy, buffer_,
1089
31.4k
        local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
1090
31.4k
    if (!local_buffer_full_) {
1091
8.34k
      return total_probe_length;
1092
8.34k
    }
1093
23.0k
    total_probe_length +=
1094
23.0k
        DecodeAndInsertToTableOverflow(common, policy, old_slots);
1095
23.0k
    return total_probe_length;
1096
31.4k
  }
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Line
Count
Source
1080
85.8k
                                void* old_slots) const {
1081
85.8k
    if (pos_ == buffer_) {
1082
62.7k
      return 0;
1083
62.7k
    }
1084
23.0k
    if constexpr (kGuaranteedFitToBuffer) {
1085
23.0k
      return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
1086
23.0k
    }
1087
0
    size_t total_probe_length = DecodeAndInsertImpl(
1088
23.0k
        common, policy, buffer_,
1089
23.0k
        local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
1090
23.0k
    if (!local_buffer_full_) {
1091
0
      return total_probe_length;
1092
0
    }
1093
23.0k
    total_probe_length +=
1094
23.0k
        DecodeAndInsertToTableOverflow(common, policy, old_slots);
1095
23.0k
    return total_probe_length;
1096
23.0k
  }
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Line
Count
Source
1080
16.3k
                                void* old_slots) const {
1081
16.3k
    if (pos_ == buffer_) {
1082
7.96k
      return 0;
1083
7.96k
    }
1084
    if constexpr (kGuaranteedFitToBuffer) {
1085
      return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
1086
    }
1087
8.34k
    size_t total_probe_length = DecodeAndInsertImpl(
1088
8.34k
        common, policy, buffer_,
1089
8.34k
        local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
1090
8.34k
    if (!local_buffer_full_) {
1091
8.34k
      return total_probe_length;
1092
8.34k
    }
1093
0
    total_probe_length +=
1094
0
        DecodeAndInsertToTableOverflow(common, policy, old_slots);
1095
0
    return total_probe_length;
1096
8.34k
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
1097
1098
 private:
1099
0
  static ProbedItem* AlignToNextItem(void* ptr) {
1100
0
    return reinterpret_cast<ProbedItem*>(AlignUpTo(
1101
0
        reinterpret_cast<uintptr_t>(ptr), alignof(ProbedItem)));
1102
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::AlignToNextItem(void*)
1103
1104
0
  ProbedItem* OverflowBufferStart() const {
1105
    // We reuse GrowthInfo memory as well.
1106
0
    return AlignToNextItem(control_ - ControlOffset(/*has_infoz=*/false));
1107
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::OverflowBufferStart() const
1108
1109
  // Encodes the item when the previously allocated buffer is full.
1110
  // The first time this happens is when the local buffer is full.
1111
  // We switch from the local buffer to the control buffer.
1112
  // Every time this function is called, the available buffer is extended up to
1113
  // the `item.source_offset` byte in the control buffer.
1114
  // After the buffer is extended, this function won't be called again until the
1115
  // buffer is exhausted.
1116
  //
1117
  // If there's no space in the control buffer, we fall back to a naive algorithm
1118
  // and mark probed elements as kSentinel in the control buffer. In this case,
1119
  // we will call this function for every subsequent probed element.
1120
0
  ABSL_ATTRIBUTE_NOINLINE void ProcessEncodeWithOverflow(ProbedItem item) {
1121
0
    if (!local_buffer_full_) {
1122
0
      local_buffer_full_ = true;
1123
0
      pos_ = OverflowBufferStart();
1124
0
    }
1125
0
    const size_t source_offset = static_cast<size_t>(item.source_offset);
1126
    // We are in fallback mode, so we can't reuse the control buffer anymore.
1127
    // Probed elements are marked as kSentinel in the control buffer.
1128
0
    if (ABSL_PREDICT_FALSE(marked_elements_starting_position_ !=
1129
0
                           kNoMarkedElementsSentinel)) {
1130
0
      control_[source_offset] = ctrl_t::kSentinel;
1131
0
      return;
1132
0
    }
1133
    // Refresh the end pointer to the new available position.
1134
    // Invariant: if pos < end, then we have at least sizeof(ProbedItem) bytes
1135
    // to write.
1136
0
    end_ = control_ + source_offset + 1 - sizeof(ProbedItem);
1137
0
    if (ABSL_PREDICT_TRUE(pos_ < end_)) {
1138
0
      *pos_ = item;
1139
0
      ++pos_;
1140
0
      return;
1141
0
    }
1142
0
    control_[source_offset] = ctrl_t::kSentinel;
1143
0
    marked_elements_starting_position_ = source_offset;
1144
    // From now on we will always fall through to `ProcessEncodeWithOverflow`.
1145
0
    ABSL_SWISSTABLE_ASSERT(pos_ >= end_);
1146
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned long, 64ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned long, 122ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
1147
1148
  // Decodes information about probed elements from the control buffer and
1149
  // marked elements.
1150
  // Finds a new position for each element and transfers it to the new slots.
1151
  // Returns the total probe length.
1152
  ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertToTableOverflow(
1153
      CommonFields& common, const PolicyFunctions& __restrict policy,
1154
0
      void* old_slots) const {
1155
0
    ABSL_SWISSTABLE_ASSERT(local_buffer_full_ &&
1156
0
                           "must not be called when local buffer is not full");
1157
0
    size_t total_probe_length = DecodeAndInsertImpl(
1158
0
        common, policy, OverflowBufferStart(), pos_, old_slots);
1159
0
    if (ABSL_PREDICT_TRUE(marked_elements_starting_position_ ==
1160
0
                          kNoMarkedElementsSentinel)) {
1161
0
      return total_probe_length;
1162
0
    }
1163
0
    total_probe_length +=
1164
0
        ProcessProbedMarkedElements(common, policy, control_, old_slots,
1165
0
                                    marked_elements_starting_position_);
1166
0
    return total_probe_length;
1167
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
1168
1169
  static constexpr size_t kBufferSize =
1170
      kProbedElementsBufferSize / sizeof(ProbedItem);
1171
  ProbedItem buffer_[kBufferSize];
1172
  // If local_buffer_full_ is false, then pos_/end_ are in the local buffer,
1173
  // otherwise, they're in the overflow buffer.
1174
  ProbedItem* pos_ = buffer_;
1175
  const void* end_ = buffer_ + kBufferSize;
1176
  ctrl_t* const control_;
1177
  size_t marked_elements_starting_position_ = kNoMarkedElementsSentinel;
1178
  bool local_buffer_full_ = false;
1179
};
1180
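The class above implements a two-tier buffer: probed items first fill a fixed-size local array, then spill into memory reused from the old control bytes, and finally degrade to marking kSentinel in the control bytes when even that space runs out. Below is a minimal, self-contained sketch of the first two tiers; `Item` and `TwoTierBuffer` are hypothetical names for illustration, not the real encoder API.

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for ProbedItemImpl; only the pattern matters.
struct Item {
  unsigned offset;
};

// Two-tier buffer: items go into a small inline array first and spill
// into a caller-provided region (analogous to reusing the old
// control-byte allocation) once the inline array is full. Push()
// reports failure when both tiers are full so the caller can fall
// back to marking sentinels.
class TwoTierBuffer {
 public:
  TwoTierBuffer(Item* spill, size_t spill_capacity)
      : spill_(spill), spill_capacity_(spill_capacity) {}

  bool Push(Item item) {
    if (local_size_ < kLocalSize) {
      local_[local_size_++] = item;
      return true;
    }
    if (spill_size_ < spill_capacity_) {
      spill_[spill_size_++] = item;
      return true;
    }
    return false;  // Exhausted: caller falls back to sentinel marking.
  }

  // Visits all stored items: first the local buffer, then the spill.
  template <typename Fn>
  void ForEach(Fn fn) const {
    for (size_t i = 0; i < local_size_; ++i) fn(local_[i]);
    for (size_t i = 0; i < spill_size_; ++i) fn(spill_[i]);
  }

 private:
  static constexpr size_t kLocalSize = 4;
  Item local_[kLocalSize];
  size_t local_size_ = 0;
  Item* spill_;
  size_t spill_size_ = 0;
  size_t spill_capacity_;
};

int main() {
  Item spill[2];
  TwoTierBuffer buf(spill, 2);
  for (unsigned i = 0; i < 6; ++i) assert(buf.Push(Item{i}));
  assert(!buf.Push(Item{6}));  // Both tiers full.
  unsigned seen = 0;
  buf.ForEach([&](Item) { ++seen; });
  assert(seen == 6);
}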
1181
// Grows to next capacity with specified encoder type.
1182
// The encoder is used to store probed elements that are processed later.
1183
// A different encoder is used depending on the capacity of the table.
1184
// Returns total probe length.
1185
template <typename Encoder>
1186
size_t GrowToNextCapacity(CommonFields& common,
1187
                          const PolicyFunctions& __restrict policy,
1188
102k
                          ctrl_t* old_ctrl, void* old_slots) {
1189
102k
  using ProbedItem = typename Encoder::ProbedItem;
1190
102k
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
1191
102k
  Encoder encoder(old_ctrl);
1192
102k
  policy.transfer_unprobed_elements_to_next_capacity(
1193
102k
      common, old_ctrl, old_slots, &encoder,
1194
102k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1195
76.5k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1196
76.5k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1197
76.5k
      });
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Line
Count
Source
1194
47.3k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1195
47.3k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1196
47.3k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1197
47.3k
      });
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Line
Count
Source
1194
29.2k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1195
29.2k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1196
29.2k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1197
29.2k
      });
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
1198
102k
  InitializeMirroredControlBytes(common.control(), common.capacity());
1199
102k
  return encoder.DecodeAndInsertToTable(common, policy, old_slots);
1200
102k
}
raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Line
Count
Source
1188
85.8k
                          ctrl_t* old_ctrl, void* old_slots) {
1189
85.8k
  using ProbedItem = typename Encoder::ProbedItem;
1190
85.8k
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
1191
85.8k
  Encoder encoder(old_ctrl);
1192
85.8k
  policy.transfer_unprobed_elements_to_next_capacity(
1193
85.8k
      common, old_ctrl, old_slots, &encoder,
1194
85.8k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1195
85.8k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1196
85.8k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1197
85.8k
      });
1198
85.8k
  InitializeMirroredControlBytes(common.control(), common.capacity());
1199
85.8k
  return encoder.DecodeAndInsertToTable(common, policy, old_slots);
1200
85.8k
}
raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Line
Count
Source
1188
16.3k
                          ctrl_t* old_ctrl, void* old_slots) {
1189
16.3k
  using ProbedItem = typename Encoder::ProbedItem;
1190
16.3k
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
1191
16.3k
  Encoder encoder(old_ctrl);
1192
16.3k
  policy.transfer_unprobed_elements_to_next_capacity(
1193
16.3k
      common, old_ctrl, old_slots, &encoder,
1194
16.3k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1195
16.3k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1196
16.3k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1197
16.3k
      });
1198
16.3k
  InitializeMirroredControlBytes(common.control(), common.capacity());
1199
16.3k
  return encoder.DecodeAndInsertToTable(common, policy, old_slots);
1200
16.3k
}
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
1201
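Worth noting in `GrowToNextCapacity` above: the encoder reaches the type-erased policy hook as a `void*` together with a capture-less lambda that casts it back, so the hook keeps one function-pointer-compatible signature for every table type. A small sketch of that pattern under assumed names (`VisitProbed`, `Encoder`):

#include <cassert>
#include <cstddef>

// Hypothetical type-erased hook, similar in shape to
// transfer_unprobed_elements_to_next_capacity: the context travels as
// void* and the callback is a plain function pointer.
using ProbedFn = void (*)(void* ctx, size_t offset);

void VisitProbed(void* ctx, ProbedFn fn) {
  // Pretend slots at offsets 1 and 3 were probed.
  fn(ctx, 1);
  fn(ctx, 3);
}

struct Encoder {
  size_t count = 0;
  void Encode(size_t /*offset*/) { ++count; }
};

int main() {
  Encoder encoder;
  // A capture-less lambda converts to ProbedFn; the typed context is
  // recovered with a static_cast, exactly the shape used above.
  VisitProbed(&encoder, [](void* ctx, size_t offset) {
    static_cast<Encoder*>(ctx)->Encode(offset);
  });
  assert(encoder.count == 2);
}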
1202
// Grows to next capacity for relatively small tables so that even if all
1203
// elements are probed, we don't need to overflow the local buffer.
1204
// Returns total probe length.
1205
size_t GrowToNextCapacityThatFitsInLocalBuffer(
1206
    CommonFields& common, const PolicyFunctions& __restrict policy,
1207
85.8k
    ctrl_t* old_ctrl, void* old_slots) {
1208
85.8k
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= kMaxLocalBufferNewCapacity);
1209
85.8k
  return GrowToNextCapacity<
1210
85.8k
      ProbedItemEncoder<ProbedItem4Bytes, /*kGuaranteedFitToBuffer=*/true>>(
1211
85.8k
      common, policy, old_ctrl, old_slots);
1212
85.8k
}
1213
1214
// Grows to next capacity with different encodings. Returns total probe length.
1215
// These functions are useful to simplify profile analysis.
1216
size_t GrowToNextCapacity4BytesEncoder(CommonFields& common,
1217
                                       const PolicyFunctions& __restrict policy,
1218
16.3k
                                       ctrl_t* old_ctrl, void* old_slots) {
1219
16.3k
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem4Bytes>>(
1220
16.3k
      common, policy, old_ctrl, old_slots);
1221
16.3k
}
1222
size_t GrowToNextCapacity8BytesEncoder(CommonFields& common,
1223
                                       const PolicyFunctions& __restrict policy,
1224
0
                                       ctrl_t* old_ctrl, void* old_slots) {
1225
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem8Bytes>>(
1226
0
      common, policy, old_ctrl, old_slots);
1227
0
}
1228
size_t GrowToNextCapacity16BytesEncoder(
1229
    CommonFields& common, const PolicyFunctions& __restrict policy,
1230
0
    ctrl_t* old_ctrl, void* old_slots) {
1231
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem16Bytes>>(
1232
0
      common, policy, old_ctrl, old_slots);
1233
0
}
1234
1235
// Grows to next capacity for tables with a large enough capacity that we
1236
// can't guarantee that all probed elements fit in the local buffer. Returns
1237
// total probe length.
1238
size_t GrowToNextCapacityOverflowLocalBuffer(
1239
    CommonFields& common, const PolicyFunctions& __restrict policy,
1240
16.3k
    ctrl_t* old_ctrl, void* old_slots) {
1241
16.3k
  const size_t new_capacity = common.capacity();
1242
16.3k
  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem4Bytes::kMaxNewCapacity)) {
1243
16.3k
    return GrowToNextCapacity4BytesEncoder(common, policy, old_ctrl, old_slots);
1244
16.3k
  }
1245
0
  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem8Bytes::kMaxNewCapacity)) {
1246
0
    return GrowToNextCapacity8BytesEncoder(common, policy, old_ctrl, old_slots);
1247
0
  }
1248
  // The 16-byte encoding supports the maximum swisstable capacity.
1249
0
  return GrowToNextCapacity16BytesEncoder(common, policy, old_ctrl, old_slots);
1250
0
}
1251
1252
// Dispatches to the appropriate `GrowToNextCapacity*` function based on the
1253
// capacity of the table. Returns total probe length.
1254
ABSL_ATTRIBUTE_NOINLINE
1255
size_t GrowToNextCapacityDispatch(CommonFields& common,
1256
                                  const PolicyFunctions& __restrict policy,
1257
102k
                                  ctrl_t* old_ctrl, void* old_slots) {
1258
102k
  const size_t new_capacity = common.capacity();
1259
102k
  if (ABSL_PREDICT_TRUE(new_capacity <= kMaxLocalBufferNewCapacity)) {
1260
85.8k
    return GrowToNextCapacityThatFitsInLocalBuffer(common, policy, old_ctrl,
1261
85.8k
                                                   old_slots);
1262
85.8k
  } else {
1263
16.3k
    return GrowToNextCapacityOverflowLocalBuffer(common, policy, old_ctrl,
1264
16.3k
                                                 old_slots);
1265
16.3k
  }
1266
102k
}
1267
1268
void IncrementSmallSizeNonSoo(CommonFields& common,
1269
0
                              const PolicyFunctions& __restrict policy) {
1270
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1271
0
  common.increment_size();
1272
0
  SanitizerUnpoisonMemoryRegion(common.slot_array(), policy.slot_size);
1273
0
}
1274
1275
void IncrementSmallSize(CommonFields& common,
1276
0
                        const PolicyFunctions& __restrict policy) {
1277
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1278
0
  if (policy.soo_enabled) {
1279
0
    common.set_full_soo();
1280
0
  } else {
1281
0
    IncrementSmallSizeNonSoo(common, policy);
1282
0
  }
1283
0
}
1284
1285
std::pair<ctrl_t*, void*> Grow1To3AndPrepareInsert(
1286
    CommonFields& common, const PolicyFunctions& __restrict policy,
1287
0
    absl::FunctionRef<size_t(size_t)> get_hash) {
1288
  // TODO(b/413062340): Refactor to reuse more code with
1289
  // GrowSooTableToNextCapacityAndPrepareInsert.
1290
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() == 1);
1291
0
  ABSL_SWISSTABLE_ASSERT(!common.empty());
1292
0
  ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
1293
0
  constexpr size_t kOldCapacity = 1;
1294
0
  constexpr size_t kNewCapacity = NextCapacity(kOldCapacity);
1295
0
  ctrl_t* old_ctrl = common.control();
1296
0
  void* old_slots = common.slot_array();
1297
1298
0
  common.set_capacity(kNewCapacity);
1299
0
  const size_t slot_size = policy.slot_size;
1300
0
  const size_t slot_align = policy.slot_align;
1301
0
  void* alloc = policy.get_char_alloc(common);
1302
0
  HashtablezInfoHandle infoz = common.infoz();
1303
0
  const bool has_infoz = infoz.IsSampled();
1304
1305
0
  const auto [new_ctrl, new_slots] =
1306
0
      AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
1307
0
  common.set_control(new_ctrl);
1308
0
  common.set_slots(new_slots);
1309
0
  SanitizerPoisonMemoryRegion(new_slots, kNewCapacity * slot_size);
1310
1311
0
  if (ABSL_PREDICT_TRUE(!has_infoz)) {
1312
    // When we're sampled, we already have a seed.
1313
0
    common.generate_new_seed(/*has_infoz=*/false);
1314
0
  }
1315
0
  const size_t new_hash = get_hash(common.seed().seed());
1316
0
  h2_t new_h2 = H2(new_hash);
1317
0
  size_t orig_hash =
1318
0
      policy.hash_slot(policy.hash_fn(common), old_slots, common.seed().seed());
1319
0
  size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1320
0
  InitializeThreeElementsControlBytes(H2(orig_hash), new_h2, offset, new_ctrl);
1321
1322
0
  void* old_element_target = NextSlot(new_slots, slot_size);
1323
0
  SanitizerUnpoisonMemoryRegion(old_element_target, slot_size);
1324
0
  policy.transfer_n(&common, old_element_target, old_slots, 1);
1325
1326
0
  void* new_element_target_slot = SlotAddress(new_slots, offset, slot_size);
1327
0
  SanitizerUnpoisonMemoryRegion(new_element_target_slot, slot_size);
1328
1329
0
  policy.dealloc(alloc, kOldCapacity, old_ctrl, slot_size, slot_align,
1330
0
                 has_infoz);
1331
0
  PrepareInsertCommon(common);
1332
0
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1333
0
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1334
1335
0
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1336
0
    ReportSingleGroupTableGrowthToInfoz(common, infoz, new_hash);
1337
0
  }
1338
0
  return {new_ctrl + offset, new_element_target_slot};
1339
0
}
1340
1341
// Grows to next capacity and prepares the insert for the given new_hash.
1342
// Returns the offset of the new element.
1343
size_t GrowToNextCapacityAndPrepareInsert(
1344
    CommonFields& common, const PolicyFunctions& __restrict policy,
1345
168k
    size_t new_hash) {
1346
168k
  ABSL_SWISSTABLE_ASSERT(common.growth_left() == 0);
1347
168k
  const size_t old_capacity = common.capacity();
1348
168k
  ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
1349
168k
  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(old_capacity));
1350
1351
168k
  const size_t new_capacity = NextCapacity(old_capacity);
1352
168k
  ctrl_t* old_ctrl = common.control();
1353
168k
  void* old_slots = common.slot_array();
1354
1355
168k
  common.set_capacity(new_capacity);
1356
168k
  const size_t slot_size = policy.slot_size;
1357
168k
  const size_t slot_align = policy.slot_align;
1358
168k
  void* alloc = policy.get_char_alloc(common);
1359
168k
  HashtablezInfoHandle infoz = common.infoz();
1360
168k
  const bool has_infoz = infoz.IsSampled();
1361
1362
168k
  const auto [new_ctrl, new_slots] =
1363
168k
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
1364
168k
  common.set_control(new_ctrl);
1365
168k
  common.set_slots(new_slots);
1366
168k
  SanitizerPoisonMemoryRegion(new_slots, new_capacity * slot_size);
1367
1368
168k
  h2_t new_h2 = H2(new_hash);
1369
168k
  size_t total_probe_length = 0;
1370
168k
  FindInfo find_info;
1371
168k
  if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) {
1372
66.0k
    size_t offset;
1373
66.0k
    GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl,
1374
66.0k
                                           new_capacity);
1375
    // We put the new element either at the beginning or at the end of the
1376
    // table with approximately equal probability.
1377
66.0k
    offset =
1378
66.0k
        SingleGroupTableH1(new_hash, common.seed()) & 1 ? 0 : new_capacity - 1;
1379
1380
66.0k
    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset]));
1381
66.0k
    SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);
1382
66.0k
    find_info = FindInfo{offset, 0};
1383
    // Single-group tables have all slots full on resize, so we can transfer
1384
    // all slots without checking the control bytes.
1385
66.0k
    ABSL_SWISSTABLE_ASSERT(common.size() == old_capacity);
1386
66.0k
    void* target = NextSlot(new_slots, slot_size);
1387
66.0k
    SanitizerUnpoisonMemoryRegion(target, old_capacity * slot_size);
1388
66.0k
    policy.transfer_n(&common, target, old_slots, old_capacity);
1389
102k
  } else {
1390
102k
    total_probe_length =
1391
102k
        GrowToNextCapacityDispatch(common, policy, old_ctrl, old_slots);
1392
102k
    find_info = find_first_non_full(common, new_hash);
1393
102k
    SetCtrlInLargeTable(common, find_info.offset, new_h2, policy.slot_size);
1394
102k
  }
1395
168k
  ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
1396
168k
  (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
1397
168k
                    has_infoz);
1398
168k
  PrepareInsertCommon(common);
1399
168k
  ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
1400
168k
                  common.size());
1401
1402
168k
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1403
0
    ReportGrowthToInfoz(common, infoz, new_hash, total_probe_length,
1404
0
                        find_info.probe_length);
1405
0
  }
1406
168k
  return find_info.offset;
1407
168k
}
1408
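A quick restatement of the single-group placement rule above (the `& 1` branch): one hash bit picks the first or the last slot, so insertions spread to both ends of the group with roughly equal probability. `NewElementOffset` is a hypothetical helper name:

#include <cassert>
#include <cstddef>

// Hypothetical restatement of the single-group placement rule:
// a single hash bit selects offset 0 or new_capacity - 1.
std::size_t NewElementOffset(std::size_t h1, std::size_t new_capacity) {
  return (h1 & 1) ? 0 : new_capacity - 1;
}

int main() {
  assert(NewElementOffset(/*h1=*/1, /*new_capacity=*/7) == 0);
  assert(NewElementOffset(/*h1=*/2, /*new_capacity=*/7) == 6);
}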
1409
}  // namespace
1410
1411
std::pair<ctrl_t*, void*> PrepareInsertSmallNonSoo(
1412
    CommonFields& common, const PolicyFunctions& __restrict policy,
1413
0
    absl::FunctionRef<size_t(size_t)> get_hash) {
1414
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1415
0
  ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
1416
0
  if (common.capacity() == 1) {
1417
0
    if (common.empty()) {
1418
0
      IncrementSmallSizeNonSoo(common, policy);
1419
0
      return {SooControl(), common.slot_array()};
1420
0
    } else {
1421
0
      return Grow1To3AndPrepareInsert(common, policy, get_hash);
1422
0
    }
1423
0
  }
1424
1425
  // Growing from 0 to 1 capacity.
1426
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() == 0);
1427
0
  constexpr size_t kNewCapacity = 1;
1428
1429
0
  common.set_capacity(kNewCapacity);
1430
0
  HashtablezInfoHandle infoz;
1431
0
  const bool should_sample =
1432
0
      policy.is_hashtablez_eligible && ShouldSampleNextTable();
1433
0
  if (ABSL_PREDICT_FALSE(should_sample)) {
1434
0
    infoz = ForcedTrySample(policy.slot_size, policy.key_size,
1435
0
                            policy.value_size, policy.soo_capacity());
1436
0
  }
1437
0
  const bool has_infoz = infoz.IsSampled();
1438
0
  void* alloc = policy.get_char_alloc(common);
1439
1440
0
  const auto [new_ctrl, new_slots] =
1441
0
      AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
1442
0
  common.set_control(new_ctrl);
1443
0
  common.set_slots(new_slots);
1444
1445
0
  static_assert(NextCapacity(0) == 1);
1446
0
  PrepareInsertCommon(common);
1447
1448
0
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1449
0
    common.generate_new_seed(/*has_infoz=*/true);
1450
0
    ReportSingleGroupTableGrowthToInfoz(common, infoz,
1451
0
                                        get_hash(common.seed().seed()));
1452
0
  }
1453
0
  return {SooControl(), new_slots};
1454
0
}
1455
1456
namespace {
1457
1458
// Called whenever the table needs to make empty slots available, either by
1459
// removing tombstones via rehash or by growing to the next capacity.
1460
ABSL_ATTRIBUTE_NOINLINE
1461
size_t RehashOrGrowToNextCapacityAndPrepareInsert(
1462
    CommonFields& common, const PolicyFunctions& __restrict policy,
1463
0
    size_t new_hash) {
1464
0
  const size_t cap = common.capacity();
1465
0
  ABSL_ASSUME(cap > 0);
1466
0
  if (cap > Group::kWidth &&
1467
      // Do these calculations in 64-bit to avoid overflow.
1468
0
      common.size() * uint64_t{32} <= cap * uint64_t{25}) {
1469
    // Squash DELETED without growing if there is enough capacity.
1470
    //
1471
    // Rehash in place if the current size is <= 25/32 of capacity.
1472
    // Rationale for such a high factor: 1) DropDeletesWithoutResize() is
1473
    // faster than resize, and 2) it takes quite a bit of work to add
1474
    // tombstones.  In the worst case, seems to take approximately 4
1475
    // insert/erase pairs to create a single tombstone and so if we are
1476
    // rehashing because of tombstones, we can afford to rehash-in-place as
1477
    // long as we are reclaiming at least 1/8 the capacity without doing more
1478
    // than 2X the work.  (Where "work" is defined to be size() for rehashing
1479
    // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
1480
    // place is faster per operation than inserting or even doubling the size
1481
    // of the table, so we actually afford to reclaim even less space from a
1482
    // resize-in-place.  The decision is to rehash in place if we can reclaim
1483
    // at about 1/8th of the usable capacity (specifically 3/28 of the
1484
    // capacity) which means that the total cost of rehashing will be a small
1485
    // fraction of the total work.
1486
    //
1487
    // Here is output of an experiment using the BM_CacheInSteadyState
1488
    // benchmark running the old case (where we rehash-in-place only if we can
1489
    // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
1490
    // if we can recover 3/32*capacity).
1491
    //
1492
    // Note that although the worst-case number of rehashes jumped up from
1493
    // 15 to 190, the number of operations per second is almost the same.
1494
    //
1495
    // Abridged output of running BM_CacheInSteadyState benchmark from
1496
    // raw_hash_set_benchmark.  N is the number of insert/erase operations.
1497
    //
1498
    //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
1499
    // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
1500
    //  448 | 145284       0.44        18 | 140118       0.44        19
1501
    //  493 | 152546       0.24        11 | 151417       0.48        28
1502
    //  538 | 151439       0.26        11 | 151152       0.53        38
1503
    //  583 | 151765       0.28        11 | 150572       0.57        50
1504
    //  628 | 150241       0.31        11 | 150853       0.61        66
1505
    //  672 | 149602       0.33        12 | 150110       0.66        90
1506
    //  717 | 149998       0.35        12 | 149531       0.70       129
1507
    //  762 | 149836       0.37        13 | 148559       0.74       190
1508
    //  807 | 149736       0.39        14 | 151107       0.39        14
1509
    //  852 | 150204       0.42        15 | 151019       0.42        15
1510
0
    return DropDeletesWithoutResizeAndPrepareInsert(common, policy, new_hash);
1511
0
  } else {
1512
    // Otherwise grow the container.
1513
0
    return GrowToNextCapacityAndPrepareInsert(common, policy, new_hash);
1514
0
  }
1515
0
}
1516
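The guard `common.size() * uint64_t{32} <= cap * uint64_t{25}` above is an overflow-safe integer form of "load factor is at most 25/32" (about 78%). A minimal numeric sketch of that check, with a hypothetical helper name:

#include <cassert>
#include <cstdint>

// Hypothetical restatement of the rehash-in-place condition above:
// rehash in place when size() <= 25/32 of capacity, computed in
// 64-bit to avoid overflow, exactly as in the original expression.
bool RehashInPlaceEligible(uint64_t size, uint64_t cap) {
  return size * 32 <= cap * 25;
}

int main() {
  // For cap = 1023, 25/32 * 1023 = 799.2..., so 799 qualifies and 800 doesn't.
  assert(RehashInPlaceEligible(799, 1023));
  assert(!RehashInPlaceEligible(800, 1023));
}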
1517
// Slow path for PrepareInsertLarge that is called when the table has deleted
1518
// slots or needs to be resized or rehashed.
1519
size_t PrepareInsertLargeSlow(CommonFields& common,
1520
                              const PolicyFunctions& __restrict policy,
1521
168k
                              size_t hash) {
1522
168k
  const GrowthInfo growth_info = common.growth_info();
1523
168k
  ABSL_SWISSTABLE_ASSERT(!growth_info.HasNoDeletedAndGrowthLeft());
1524
168k
  if (ABSL_PREDICT_TRUE(growth_info.HasNoGrowthLeftAndNoDeleted())) {
1525
    // Table without deleted slots (>95% cases) that needs to be resized.
1526
168k
    ABSL_SWISSTABLE_ASSERT(growth_info.HasNoDeleted() &&
1527
168k
                           growth_info.GetGrowthLeft() == 0);
1528
168k
    return GrowToNextCapacityAndPrepareInsert(common, policy, hash);
1529
168k
  }
1530
0
  if (ABSL_PREDICT_FALSE(growth_info.HasNoGrowthLeftAssumingMayHaveDeleted())) {
1531
    // Table with deleted slots that needs to be rehashed or resized.
1532
0
    return RehashOrGrowToNextCapacityAndPrepareInsert(common, policy, hash);
1533
0
  }
1534
  // Table with deleted slots that has space for the inserting element.
1535
0
  FindInfo target = find_first_non_full(common, hash);
1536
0
  PrepareInsertCommon(common);
1537
0
  common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);
1538
0
  SetCtrlInLargeTable(common, target.offset, H2(hash), policy.slot_size);
1539
0
  common.infoz().RecordInsert(hash, target.probe_length);
1540
0
  return target.offset;
1541
0
}
1542
1543
// Resizes an empty non-allocated SOO table to NextCapacity(SooCapacity()),
1544
// forces the table to be sampled, and prepares the insert.
1545
// SOO tables need to switch from SOO to heap in order to store the infoz.
1546
// Requires:
1547
//   1. `c.capacity() == SooCapacity()`.
1548
//   2. `c.empty()`.
1549
ABSL_ATTRIBUTE_NOINLINE size_t
1550
GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1551
    CommonFields& common, const PolicyFunctions& __restrict policy,
1552
0
    absl::FunctionRef<size_t(size_t)> get_hash) {
1553
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, NextCapacity(SooCapacity()),
1554
0
                                   /*force_infoz=*/true);
1555
0
  PrepareInsertCommon(common);
1556
0
  common.growth_info().OverwriteEmptyAsFull();
1557
0
  const size_t new_hash = get_hash(common.seed().seed());
1558
0
  SetCtrlInSingleGroupTable(common, SooSlotIndex(), H2(new_hash),
1559
0
                            policy.slot_size);
1560
0
  common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0);
1561
0
  return SooSlotIndex();
1562
0
}
1563
1564
// Resizes an empty non-allocated table to a capacity that fits new_size elements.
1565
// Requires:
1566
//   1. `c.capacity() == policy.soo_capacity()`.
1567
//   2. `c.empty()`.
1568
//   3. `new_size > policy.soo_capacity()`.
1569
// We will attempt to sample the table.
1570
void ReserveEmptyNonAllocatedTableToFitNewSize(
1571
    CommonFields& common, const PolicyFunctions& __restrict policy,
1572
0
    size_t new_size) {
1573
0
  ValidateMaxSize(new_size, policy.slot_size);
1574
0
  ABSL_ASSUME(new_size > 0);
1575
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, SizeToCapacity(new_size),
1576
0
                                   /*force_infoz=*/false);
1577
  // This is after resize, to ensure that we have completed the allocation
1578
  // and have potentially sampled the hashtable.
1579
0
  common.infoz().RecordReservation(new_size);
1580
0
}
1581
1582
// Type erased version of raw_hash_set::reserve for tables that have an
1583
// allocated backing array.
1584
//
1585
// Requires:
1586
//   1. `c.capacity() > policy.soo_capacity()` OR `!c.empty()`.
1587
// Reserving already-allocated tables is considered a rare case.
1588
ABSL_ATTRIBUTE_NOINLINE void ReserveAllocatedTable(
1589
    CommonFields& common, const PolicyFunctions& __restrict policy,
1590
0
    size_t new_size) {
1591
0
  const size_t cap = common.capacity();
1592
0
  ValidateMaxSize(new_size, policy.slot_size);
1593
0
  ABSL_ASSUME(new_size > 0);
1594
0
  const size_t new_capacity = SizeToCapacity(new_size);
1595
0
  if (cap == policy.soo_capacity()) {
1596
0
    ABSL_SWISSTABLE_ASSERT(!common.empty());
1597
0
    ResizeFullSooTable(common, policy, new_capacity,
1598
0
                       ResizeFullSooTableSamplingMode::kNoSampling);
1599
0
  } else {
1600
0
    ABSL_SWISSTABLE_ASSERT(cap > policy.soo_capacity());
1601
    // TODO(b/382423690): consider using GrowToNextCapacity, when applicable.
1602
0
    ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
1603
0
  }
1604
0
  common.infoz().RecordReservation(new_size);
1605
0
}
1606
1607
// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO
1608
// table to be sampled. SOO tables need to switch from SOO to heap in order to
1609
// store the infoz. No-op if sampling is disabled or not possible.
1610
void GrowFullSooTableToNextCapacityForceSampling(
1611
0
    CommonFields& common, const PolicyFunctions& __restrict policy) {
1612
0
  AssertFullSoo(common, policy);
1613
0
  ResizeFullSooTable(
1614
0
      common, policy, NextCapacity(SooCapacity()),
1615
0
      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled);
1616
0
}
1617
1618
}  // namespace
1619
1620
278k
void* GetRefForEmptyClass(CommonFields& common) {
1621
  // Empty base optimization typically makes the empty base class address
1622
  // the same as the first address of the derived class object.
1623
  // But we generally assume that for empty classes we can return any valid
1624
  // pointer.
1625
278k
  return &common;
1626
278k
}
1627
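A minimal sketch of the empty-base-optimization behavior the comment above relies on; `Empty` and `Derived` are illustrative stand-ins, and the address identity holds on mainstream ABIs rather than by strict guarantee:

#include <cassert>

struct Empty {};  // An empty class, e.g. a stateless hasher.

struct Derived : Empty {
  int first_member;
};

int main() {
  Derived d;
  // With the empty base optimization, the base subobject occupies no
  // storage and typically shares its address with the first member.
  assert(static_cast<void*>(static_cast<Empty*>(&d)) ==
         static_cast<void*>(&d.first_member));
}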
1628
void ResizeAllocatedTableWithSeedChange(
1629
    CommonFields& common, const PolicyFunctions& __restrict policy,
1630
0
    size_t new_capacity) {
1631
0
  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedAllocated>(
1632
0
      common, policy, new_capacity, common.infoz());
1633
0
}
1634
1635
void ReserveEmptyNonAllocatedTableToFitBucketCount(
1636
    CommonFields& common, const PolicyFunctions& __restrict policy,
1637
0
    size_t bucket_count) {
1638
0
  size_t new_capacity = NormalizeCapacity(bucket_count);
1639
0
  ValidateMaxSize(CapacityToGrowth(new_capacity), policy.slot_size);
1640
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
1641
0
                                   /*force_infoz=*/false);
1642
0
}
1643
1644
// Resizes a full SOO table to NextCapacity(SooCapacity()).
1645
template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
1646
size_t GrowSooTableToNextCapacityAndPrepareInsert(
1647
    CommonFields& common, const PolicyFunctions& __restrict policy,
1648
55.0k
    absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling) {
1649
55.0k
  AssertSoo(common, policy);
1650
55.0k
  if (ABSL_PREDICT_FALSE(force_sampling)) {
1651
    // The table is empty; this path is only used for forced sampling of SOO tables.
1652
0
    return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1653
0
        common, policy, get_hash);
1654
0
  }
1655
55.0k
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
1656
55.0k
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
1657
55.0k
  const size_t slot_size = policy.slot_size;
1658
55.0k
  void* alloc = policy.get_char_alloc(common);
1659
55.0k
  common.set_capacity(kNewCapacity);
1660
1661
  // Since the table is not empty, it will not be sampled.
1662
  // The decision to sample was already made during the first insertion.
1663
  //
1664
  // We do not set control and slots in CommonFields yet to avoid overwriting
1665
  // SOO data.
1666
55.0k
  const auto [new_ctrl, new_slots] = AllocBackingArray(
1667
55.0k
      common, policy, kNewCapacity, /*has_infoz=*/false, alloc);
1668
1669
55.0k
  PrepareInsertCommon(common);
1670
55.0k
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1671
55.0k
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1672
55.0k
  common.generate_new_seed(/*has_infoz=*/false);
1673
55.0k
  const h2_t soo_slot_h2 = H2(policy.hash_slot(
1674
55.0k
      policy.hash_fn(common), common.soo_data(), common.seed().seed()));
1675
55.0k
  const size_t new_hash = get_hash(common.seed().seed());
1676
1677
55.0k
  const size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1678
55.0k
  InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset,
1679
55.0k
                                      new_ctrl);
1680
1681
55.0k
  SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
1682
55.0k
  void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
1683
55.0k
  SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
1684
55.0k
  if constexpr (TransferUsesMemcpy) {
1685
    // Target slot is placed at index 1, but capacity is at
1686
    // minimum 3. So we are allowed to copy at least twice as much
1687
    // memory.
1688
55.0k
    static_assert(SooSlotIndex() == 1);
1689
55.0k
    static_assert(SooSlotMemcpySize > 0);
1690
55.0k
    static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
1691
55.0k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
1692
55.0k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
1693
55.0k
    void* next_slot = SlotAddress(target_slot, 1, slot_size);
1694
55.0k
    SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1695
55.0k
    std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
1696
55.0k
    SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1697
55.0k
  } else {
1698
0
    static_assert(SooSlotMemcpySize == 0);
1699
0
    policy.transfer_n(&common, target_slot, common.soo_data(), 1);
1700
0
  }
1701
0
  common.set_control(new_ctrl);
1702
55.0k
  common.set_slots(new_slots);
1703
1704
  // A full SOO table couldn't have been sampled: if it were sampled, it
1705
  // would have been resized to the next capacity.
1706
55.0k
  ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());
1707
55.0k
  SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
1708
55.0k
                                slot_size);
1709
55.0k
  return offset;
1710
55.0k
}
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<0ul, false>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<1ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<4ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<8ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Line
Count
Source
1648
25.0k
    absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling) {
1649
25.0k
  AssertSoo(common, policy);
1650
25.0k
  if (ABSL_PREDICT_FALSE(force_sampling)) {
1651
    // The table is empty, it is only used for forced sampling of SOO tables.
1652
0
    return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1653
0
        common, policy, get_hash);
1654
0
  }
1655
25.0k
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
1656
25.0k
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
1657
25.0k
  const size_t slot_size = policy.slot_size;
1658
25.0k
  void* alloc = policy.get_char_alloc(common);
1659
25.0k
  common.set_capacity(kNewCapacity);
1660
1661
  // Since the table is not empty, it will not be sampled.
1662
  // The decision to sample was already made during the first insertion.
1663
  //
1664
  // We do not set control and slots in CommonFields yet to avoid overriding
1665
  // SOO data.
1666
25.0k
  const auto [new_ctrl, new_slots] = AllocBackingArray(
1667
25.0k
      common, policy, kNewCapacity, /*has_infoz=*/false, alloc);
1668
1669
25.0k
  PrepareInsertCommon(common);
1670
25.0k
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1671
25.0k
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1672
25.0k
  common.generate_new_seed(/*has_infoz=*/false);
1673
25.0k
  const h2_t soo_slot_h2 = H2(policy.hash_slot(
1674
25.0k
      policy.hash_fn(common), common.soo_data(), common.seed().seed()));
1675
25.0k
  const size_t new_hash = get_hash(common.seed().seed());
1676
1677
25.0k
  const size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1678
25.0k
  InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset,
1679
25.0k
                                      new_ctrl);
1680
1681
25.0k
  SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
1682
25.0k
  void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
1683
25.0k
  SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
1684
25.0k
  if constexpr (TransferUsesMemcpy) {
1685
    // Target slot is placed at index 1, but capacity is at
1686
    // minimum 3. So we are allowed to copy at least twice as much
1687
    // memory.
1688
25.0k
    static_assert(SooSlotIndex() == 1);
1689
25.0k
    static_assert(SooSlotMemcpySize > 0);
1690
25.0k
    static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
1691
25.0k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
1692
25.0k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
1693
25.0k
    void* next_slot = SlotAddress(target_slot, 1, slot_size);
1694
25.0k
    SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1695
25.0k
    std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
1696
25.0k
    SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1697
  } else {
1698
    static_assert(SooSlotMemcpySize == 0);
1699
    policy.transfer_n(&common, target_slot, common.soo_data(), 1);
1700
  }
1701
0
  common.set_control(new_ctrl);
1702
25.0k
  common.set_slots(new_slots);
1703
1704
  // A full SOO table couldn't have been sampled: if it were sampled, it
1705
  // would have been resized to the next capacity.
1706
25.0k
  ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());
1707
25.0k
  SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
1708
25.0k
                                slot_size);
1709
25.0k
  return offset;
1710
25.0k
}
unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<16ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Line
Count
Source
1648
30.0k
    absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling) {
1649
30.0k
  AssertSoo(common, policy);
1650
30.0k
  if (ABSL_PREDICT_FALSE(force_sampling)) {
1651
    // The table is empty; this path is only used for forced sampling of SOO tables.
1652
0
    return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1653
0
        common, policy, get_hash);
1654
0
  }
1655
30.0k
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
1656
30.0k
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
1657
30.0k
  const size_t slot_size = policy.slot_size;
1658
30.0k
  void* alloc = policy.get_char_alloc(common);
1659
30.0k
  common.set_capacity(kNewCapacity);
1660
1661
  // Since the table is not empty, it will not be sampled.
1662
  // The decision to sample was already made during the first insertion.
1663
  //
1664
  // We do not set control and slots in CommonFields yet to avoid overwriting
1665
  // SOO data.
1666
30.0k
  const auto [new_ctrl, new_slots] = AllocBackingArray(
1667
30.0k
      common, policy, kNewCapacity, /*has_infoz=*/false, alloc);
1668
1669
30.0k
  PrepareInsertCommon(common);
1670
30.0k
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1671
30.0k
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1672
30.0k
  common.generate_new_seed(/*has_infoz=*/false);
1673
30.0k
  const h2_t soo_slot_h2 = H2(policy.hash_slot(
1674
30.0k
      policy.hash_fn(common), common.soo_data(), common.seed().seed()));
1675
30.0k
  const size_t new_hash = get_hash(common.seed().seed());
1676
1677
30.0k
  const size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1678
30.0k
  InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset,
1679
30.0k
                                      new_ctrl);
1680
1681
30.0k
  SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
1682
30.0k
  void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
1683
30.0k
  SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
1684
30.0k
  if constexpr (TransferUsesMemcpy) {
1685
    // Target slot is placed at index 1, but capacity is at
1686
    // minimum 3. So we are allowed to copy at least twice as much
1687
    // memory.
1688
30.0k
    static_assert(SooSlotIndex() == 1);
1689
30.0k
    static_assert(SooSlotMemcpySize > 0);
1690
30.0k
    static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
1691
30.0k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
1692
30.0k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
1693
30.0k
    void* next_slot = SlotAddress(target_slot, 1, slot_size);
1694
30.0k
    SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1695
30.0k
    std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
1696
30.0k
    SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1697
  } else {
1698
    static_assert(SooSlotMemcpySize == 0);
1699
    policy.transfer_n(&common, target_slot, common.soo_data(), 1);
1700
  }
1701
0
  common.set_control(new_ctrl);
1702
30.0k
  common.set_slots(new_slots);
1703
1704
  // A full SOO table couldn't have been sampled: if it were sampled, it
1705
  // would have been resized to the next capacity.
1706
30.0k
  ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());
1707
30.0k
  SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
1708
30.0k
                                slot_size);
1709
30.0k
  return offset;
1710
30.0k
}
1711
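The memcpy branch above copies a fixed `SooSlotMemcpySize` that may exceed the slot size; that is safe because the target slot sits at index 1 of a capacity-3 table, so the surplus bytes land in the adjacent, unoccupied slot. A small sketch of the same over-copy trick with made-up sizes:

#include <cassert>
#include <cstddef>
#include <cstring>

int main() {
  constexpr std::size_t kSlotSize = 5;    // Hypothetical odd-sized slot type.
  constexpr std::size_t kMemcpySize = 8;  // Fixed compile-time copy size.
  static_assert(kMemcpySize >= kSlotSize && kMemcpySize <= 2 * kSlotSize);

  unsigned char soo_data[kMemcpySize] = {1, 2, 3, 4, 5};
  unsigned char slots[3 * kSlotSize] = {};  // Capacity 3; slot 1 is the target.

  // One fixed-size memcpy instead of a variable-sized one: the copy's
  // tail spills into slot 2, which is in bounds and unoccupied.
  std::memcpy(slots + 1 * kSlotSize, soo_data, kMemcpySize);
  assert(std::memcmp(slots + kSlotSize, soo_data, kSlotSize) == 0);
}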
1712
void Rehash(CommonFields& common, const PolicyFunctions& __restrict policy,
1713
0
            size_t n) {
1714
0
  const size_t cap = common.capacity();
1715
1716
0
  auto clear_backing_array = [&]() {
1717
0
    ClearBackingArray(common, policy, policy.get_char_alloc(common),
1718
0
                      /*reuse=*/false, policy.soo_enabled);
1719
0
  };
1720
1721
0
  const size_t slot_size = policy.slot_size;
1722
1723
0
  if (n == 0) {
1724
0
    if (cap <= policy.soo_capacity()) return;
1725
0
    if (common.empty()) {
1726
0
      clear_backing_array();
1727
0
      return;
1728
0
    }
1729
0
    if (common.size() <= policy.soo_capacity()) {
1730
      // When the table is already sampled, we keep it sampled.
1731
0
      if (common.infoz().IsSampled()) {
1732
0
        static constexpr size_t kInitialSampledCapacity =
1733
0
            NextCapacity(SooCapacity());
1734
0
        if (cap > kInitialSampledCapacity) {
1735
0
          ResizeAllocatedTableWithSeedChange(common, policy,
1736
0
                                             kInitialSampledCapacity);
1737
0
        }
1738
        // This asserts that we didn't lose sampling coverage in `resize`.
1739
0
        ABSL_SWISSTABLE_ASSERT(common.infoz().IsSampled());
1740
0
        return;
1741
0
      }
1742
0
      ABSL_SWISSTABLE_ASSERT(slot_size <= sizeof(HeapOrSoo));
1743
0
      ABSL_SWISSTABLE_ASSERT(policy.slot_align <= alignof(HeapOrSoo));
1744
0
      HeapOrSoo tmp_slot;
1745
0
      size_t begin_offset = FindFirstFullSlot(0, cap, common.control());
1746
0
      policy.transfer_n(
1747
0
          &common, &tmp_slot,
1748
0
          SlotAddress(common.slot_array(), begin_offset, slot_size), 1);
1749
0
      clear_backing_array();
1750
0
      policy.transfer_n(&common, common.soo_data(), &tmp_slot, 1);
1751
0
      common.set_full_soo();
1752
0
      return;
1753
0
    }
1754
0
  }
1755
1756
0
  ValidateMaxSize(n, policy.slot_size);
1757
  // bitor is a faster way of doing `max` here. We will round up to the next
1758
  // power-of-2-minus-1, so bitor is good enough.
1759
0
  const size_t new_capacity =
1760
0
      NormalizeCapacity(n | SizeToCapacity(common.size()));
1761
  // n == 0 unconditionally rehashes as per the standard.
1762
0
  if (n == 0 || new_capacity > cap) {
1763
0
    if (cap == policy.soo_capacity()) {
1764
0
      if (common.empty()) {
1765
0
        ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
1766
0
                                         /*force_infoz=*/false);
1767
0
      } else {
1768
0
        ResizeFullSooTable(common, policy, new_capacity,
1769
0
                           ResizeFullSooTableSamplingMode::kNoSampling);
1770
0
      }
1771
0
    } else {
1772
0
      ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
1773
0
    }
1774
    // This is after resize, to ensure that we have completed the allocation
1775
    // and have potentially sampled the hashtable.
1776
0
    common.infoz().RecordReservation(n);
1777
0
  }
1778
0
}
1779
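The `n | SizeToCapacity(common.size())` trick in `Rehash` above relies on a small fact: `a | b` has the same highest set bit as `max(a, b)`, and rounding up to the next `2^k - 1` depends only on that bit. A sketch checking this with a hypothetical `NormalizeCap` that mirrors the rounding:

#include <algorithm>
#include <cassert>
#include <cstddef>

// Hypothetical mirror of capacity normalization: round up to the next
// value of the form 2^k - 1.
std::size_t NormalizeCap(std::size_t n) {
  std::size_t cap = 1;
  while (cap < n + 1) cap <<= 1;  // Smallest power of two > n.
  return cap - 1;
}

int main() {
  // a | b shares its highest set bit with max(a, b), so normalizing
  // the bitwise-or gives the same capacity as normalizing the max.
  const std::size_t as[] = {3, 9, 20, 100};
  const std::size_t bs[] = {1, 7, 33, 90};
  for (std::size_t a : as) {
    for (std::size_t b : bs) {
      assert(NormalizeCap(a | b) == NormalizeCap(std::max(a, b)));
    }
  }
}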
1780
void Copy(CommonFields& common, const PolicyFunctions& __restrict policy,
1781
          const CommonFields& other,
1782
0
          absl::FunctionRef<void(void*, const void*)> copy_fn) {
1783
0
  const size_t size = other.size();
1784
0
  ABSL_SWISSTABLE_ASSERT(size > 0);
1785
0
  const size_t soo_capacity = policy.soo_capacity();
1786
0
  const size_t slot_size = policy.slot_size;
1787
0
  const bool soo_enabled = policy.soo_enabled;
1788
0
  if (size == 1) {
1789
0
    if (!soo_enabled) ReserveTableToFitNewSize(common, policy, 1);
1790
0
    IncrementSmallSize(common, policy);
1791
0
    const size_t other_capacity = other.capacity();
1792
0
    const void* other_slot =
1793
0
        other_capacity <= soo_capacity ? other.soo_data()
1794
0
        : other.is_small()
1795
0
            ? other.slot_array()
1796
0
            : SlotAddress(other.slot_array(),
1797
0
                          FindFirstFullSlot(0, other_capacity, other.control()),
1798
0
                          slot_size);
1799
0
    copy_fn(soo_enabled ? common.soo_data() : common.slot_array(), other_slot);
1800
1801
0
    if (soo_enabled && policy.is_hashtablez_eligible &&
1802
0
        ShouldSampleNextTable()) {
1803
0
      GrowFullSooTableToNextCapacityForceSampling(common, policy);
1804
0
    }
1805
0
    return;
1806
0
  }
1807
1808
0
  ReserveTableToFitNewSize(common, policy, size);
1809
0
  auto infoz = common.infoz();
1810
0
  ABSL_SWISSTABLE_ASSERT(other.capacity() > soo_capacity);
1811
0
  const size_t cap = common.capacity();
1812
0
  ABSL_SWISSTABLE_ASSERT(cap > soo_capacity);
1813
0
  size_t offset = cap;
1814
0
  const void* hash_fn = policy.hash_fn(common);
1815
0
  auto hasher = policy.hash_slot;
1816
0
  const size_t seed = common.seed().seed();
1817
0
  IterateOverFullSlotsImpl(
1818
0
      other, slot_size, [&](const ctrl_t*, void* that_slot) {
1819
        // The table is guaranteed to be empty, so we can do this faster than
1820
        // a full `insert`.
1821
0
        const size_t hash = (*hasher)(hash_fn, that_slot, seed);
1822
0
        FindInfo target = find_first_non_full(common, hash);
1823
0
        infoz.RecordInsert(hash, target.probe_length);
1824
0
        offset = target.offset;
1825
0
        SetCtrl(common, offset, H2(hash), slot_size);
1826
0
        copy_fn(SlotAddress(common.slot_array(), offset, slot_size), that_slot);
1827
0
        common.maybe_increment_generation_on_insert();
1828
0
      });
1829
0
  common.increment_size(size);
1830
0
  common.growth_info().OverwriteManyEmptyAsFull(size);
1831
0
}
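// A usage-level sketch of the kind of operation that reaches Copy() above:
// copying a non-empty table. The destination is freshly sized and empty, so
// each element is placed at its first non-full probe position without any
// equality comparison against existing keys.
#include <string>
#include "absl/container/flat_hash_set.h"

void CopyExample() {
  absl::flat_hash_set<std::string> a = {"x", "y", "z"};
  absl::flat_hash_set<std::string> b = a;  // copy: no duplicate checks needed
}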
1832
1833
void ReserveTableToFitNewSize(CommonFields& common,
1834
                              const PolicyFunctions& __restrict policy,
1835
0
                              size_t new_size) {
1836
0
  common.reset_reserved_growth(new_size);
1837
0
  common.set_reservation_size(new_size);
1838
0
  ABSL_SWISSTABLE_ASSERT(new_size > policy.soo_capacity());
1839
0
  const size_t cap = common.capacity();
1840
0
  if (ABSL_PREDICT_TRUE(common.empty() && cap <= policy.soo_capacity())) {
1841
0
    return ReserveEmptyNonAllocatedTableToFitNewSize(common, policy, new_size);
1842
0
  }
1843
1844
0
  ABSL_SWISSTABLE_ASSERT(!common.empty() || cap > policy.soo_capacity());
1845
0
  ABSL_SWISSTABLE_ASSERT(cap > 0);
1846
0
  const size_t max_size_before_growth =
1847
0
      IsSmallCapacity(cap) ? cap : common.size() + common.growth_left();
1848
0
  if (new_size <= max_size_before_growth) {
1849
0
    return;
1850
0
  }
1851
0
  ReserveAllocatedTable(common, policy, new_size);
1852
0
}
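// A usage-level sketch of the early return above: a reserve() that already
// fits within the remaining growth budget does not resize.
#include "absl/container/flat_hash_set.h"

void ReserveExample() {
  absl::flat_hash_set<int> s;
  s.reserve(100);  // allocates capacity for at least 100 elements
  s.insert(1);
  s.reserve(50);   // 50 <= size() + growth_left: returns without resizing
}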
1853
1854
namespace {
1855
size_t PrepareInsertLargeImpl(CommonFields& common,
1856
                              const PolicyFunctions& __restrict policy,
1857
8.76M
                              size_t hash, FindInfo target) {
1858
8.76M
  ABSL_SWISSTABLE_ASSERT(!common.is_small());
1859
8.76M
  const GrowthInfo growth_info = common.growth_info();
1860
  // When there are no deleted slots in the table
1861
  // and growth_left is positive, we can insert at the first
1862
  // empty slot in the probe sequence (target).
1863
8.76M
  if (ABSL_PREDICT_FALSE(!growth_info.HasNoDeletedAndGrowthLeft())) {
1864
168k
    return PrepareInsertLargeSlow(common, policy, hash);
1865
168k
  }
1866
8.60M
  PrepareInsertCommon(common);
1867
8.60M
  common.growth_info().OverwriteEmptyAsFull();
1868
8.60M
  SetCtrl(common, target.offset, H2(hash), policy.slot_size);
1869
8.60M
  common.infoz().RecordInsert(hash, target.probe_length);
1870
8.60M
  return target.offset;
1871
8.76M
}
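// A minimal sketch of how the single-branch check above can be packed into
// one word. PackedGrowthInfo is hypothetical; the real GrowthInfo lives in
// raw_hash_set.h and may differ in detail. The top bit records "has deleted
// slots", the low bits store growth_left, and "no deleted AND growth left"
// is exactly "value, read as signed, is strictly positive".
#include <cstddef>
#include <type_traits>

class PackedGrowthInfo {
 public:
  explicit PackedGrowthInfo(std::size_t growth_left) : rep_(growth_left) {}
  void MarkHasDeleted() { rep_ |= kDeletedBit; }
  void OverwriteEmptyAsFull() { --rep_; }  // consumes one unit of growth
  bool HasNoDeletedAndGrowthLeft() const {
    // Negative iff the deleted bit is set; zero iff no growth left.
    return static_cast<std::make_signed_t<std::size_t>>(rep_) > 0;
  }

 private:
  static constexpr std::size_t kDeletedBit = ~(~std::size_t{} >> 1);
  std::size_t rep_;
};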
1872
}  // namespace
1873
1874
size_t PrepareInsertLarge(CommonFields& common,
1875
                          const PolicyFunctions& __restrict policy, size_t hash,
1876
8.76M
                          FindInfo target) {
1877
  // NOLINTNEXTLINE(misc-static-assert)
1878
8.76M
  ABSL_SWISSTABLE_ASSERT(!SwisstableGenerationsEnabled());
1879
8.76M
  return PrepareInsertLargeImpl(common, policy, hash, target);
1880
8.76M
}
1881
1882
size_t PrepareInsertLargeGenerationsEnabled(
1883
    CommonFields& common, const PolicyFunctions& policy, size_t hash,
1884
0
    FindInfo target, absl::FunctionRef<size_t(size_t)> recompute_hash) {
1885
  // NOLINTNEXTLINE(misc-static-assert)
1886
0
  ABSL_SWISSTABLE_ASSERT(SwisstableGenerationsEnabled());
1887
0
  if (common.should_rehash_for_bug_detection_on_insert()) {
1888
    // Move to a different heap allocation in order to detect bugs.
1889
0
    const size_t cap = common.capacity();
1890
0
    ResizeAllocatedTableWithSeedChange(
1891
0
        common, policy, common.growth_left() > 0 ? cap : NextCapacity(cap));
1892
0
    hash = recompute_hash(common.seed().seed());
1893
0
    target = find_first_non_full(common, hash);
1894
0
  }
1895
0
  return PrepareInsertLargeImpl(common, policy, hash, target);
1896
0
}
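// A sketch of the bug class the reallocation above helps catch; the snippet
// assumes a sanitizer build with generations enabled.
#include "absl/container/flat_hash_set.h"

void IteratorInvalidationBug() {
  absl::flat_hash_set<int> s = {1, 2, 3};
  auto it = s.begin();
  s.insert(4);  // insertion may (probabilistically) move the table to a
                // fresh heap allocation even when capacity would suffice.
  // *it;       // BUG: `it` points into the old allocation; dereferencing it
                // is then a heap-use-after-free that sanitizers report.
}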
1897
1898
namespace {
1899
// Returns true if both of the following hold:
1900
// 1. OptimalMemcpySizeForSooSlotTransfer(left) >
1901
//    OptimalMemcpySizeForSooSlotTransfer(left - 1)
1902
// 2. OptimalMemcpySizeForSooSlotTransfer(i) is the same for all i in [left,
1903
// right].
1904
// This function is used to verify that we have all the possible template
1905
// instantiations for GrowFullSooTableToNextCapacity.
1906
// With this verification, a missing instantiation is detected at compile
1907
// time rather than at link time.
1908
constexpr bool VerifyOptimalMemcpySizeForSooSlotTransferRange(size_t left,
1909
0
                                                              size_t right) {
1910
0
  size_t optimal_size_for_range = OptimalMemcpySizeForSooSlotTransfer(left);
1911
0
  if (optimal_size_for_range <= OptimalMemcpySizeForSooSlotTransfer(left - 1)) {
1912
0
    return false;
1913
0
  }
1914
0
  for (size_t i = left + 1; i <= right; ++i) {
1915
0
    if (OptimalMemcpySizeForSooSlotTransfer(i) != optimal_size_for_range) {
1916
0
      return false;
1917
0
    }
1918
0
  }
1919
0
  return true;
1920
0
}
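// Concretely: VerifyOptimalMemcpySizeForSooSlotTransferRange(left, right)
// holds iff OptimalMemcpySizeForSooSlotTransfer steps up at `left` and stays
// constant through `right`, so the single instantiation below that uses the
// value at `right` covers every slot size in [left, right].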
1921
}  // namespace
1922
1923
// Extern template instantiation for an inline function.
1924
template size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
1925
                                              size_t old_capacity,
1926
                                              ctrl_t* new_ctrl,
1927
                                              size_t new_capacity);
1928
1929
// We need to instantiate ALL possible template combinations because we define
1930
// the function in the .cc file.
1931
template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
1932
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
1933
    bool);
1934
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
1935
    OptimalMemcpySizeForSooSlotTransfer(1), true>(
1936
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
1937
    bool);
1938
1939
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(2, 3));
1940
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
1941
    OptimalMemcpySizeForSooSlotTransfer(3), true>(
1942
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
1943
    bool);
1944
1945
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(4, 8));
1946
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
1947
    OptimalMemcpySizeForSooSlotTransfer(8), true>(
1948
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
1949
    bool);
1950
1951
#if UINTPTR_MAX == UINT32_MAX
1952
static_assert(MaxSooSlotSize() == 8);
1953
#else
1954
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(9, 16));
1955
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
1956
    OptimalMemcpySizeForSooSlotTransfer(16), true>(
1957
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
1958
    bool);
1959
static_assert(MaxSooSlotSize() == 16);
1960
#endif
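// A minimal sketch of the explicit-instantiation pattern above (the widget
// names are hypothetical, not Abseil's): the template body is visible only
// in this translation unit, so every specialization other TUs may call must
// be instantiated here. A forgotten specialization fails only at link time
// as an undefined symbol; the static_asserts above move that failure to
// compile time.
//
//   // widget.h
//   template <int kSize> int WidgetBytes();    // declared, defined elsewhere
//
//   // widget.cc
//   template <int kSize> int WidgetBytes() { return kSize * 8; }
//   template int WidgetBytes<8>();             // explicit instantiations for
//   template int WidgetBytes<16>();            // every kSize callers use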
1961
1962
}  // namespace container_internal
1963
ABSL_NAMESPACE_END
1964
}  // namespace absl