Coverage Report

Created: 2026-02-26 07:14

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/abseil-cpp/absl/container/internal/raw_hash_set.cc
Line
Count
Source
1
// Copyright 2018 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
15
#include "absl/container/internal/raw_hash_set.h"
16
17
#include <atomic>
18
#include <cassert>
19
#include <cstddef>
20
#include <cstdint>
21
#include <cstring>
22
#include <memory>
23
#include <tuple>
24
#include <utility>
25
26
#include "absl/base/attributes.h"
27
#include "absl/base/config.h"
28
#include "absl/base/dynamic_annotations.h"
29
#include "absl/base/internal/endian.h"
30
#include "absl/base/internal/raw_logging.h"
31
#include "absl/base/optimization.h"
32
#include "absl/container/internal/container_memory.h"
33
#include "absl/container/internal/hashtable_control_bytes.h"
34
#include "absl/container/internal/hashtablez_sampler.h"
35
#include "absl/container/internal/raw_hash_set_resize_impl.h"
36
#include "absl/functional/function_ref.h"
37
#include "absl/hash/hash.h"
38
39
namespace absl {
40
ABSL_NAMESPACE_BEGIN
41
namespace container_internal {
42
43
// Represents a control byte corresponding to a full slot with arbitrary hash.
44
0
constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
45
46
// A single control byte for default-constructed iterators. We leave it
47
// uninitialized because reading this memory is a bug.
48
ABSL_DLL ctrl_t kDefaultIterControl;
49
50
// We need one full byte followed by a sentinel byte for iterator::operator++.
51
ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[2] = {ZeroCtrlT(),
52
                                                        ctrl_t::kSentinel};
53
54
namespace {
55
56
#ifdef ABSL_SWISSTABLE_ASSERT
57
#error ABSL_SWISSTABLE_ASSERT cannot be directly set
58
#else
59
// We use this macro for assertions that users may see when the table is in an
60
// invalid state that sanitizers may help diagnose.
61
#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
62
35.5M
  assert((CONDITION) && "Try enabling sanitizers.")
63
#endif
64
65
void ValidateMaxSize([[maybe_unused]] size_t size,
66
                     [[maybe_unused]] size_t key_size,
67
0
                     [[maybe_unused]] size_t slot_size) {
68
0
  ABSL_SWISSTABLE_ASSERT(size <= MaxValidSize(key_size, slot_size));
69
0
}
70
0
void ValidateMaxCapacity(size_t capacity, size_t key_size, size_t slot_size) {
71
0
  if (capacity <= 1) return;
72
0
  ValidateMaxSize(CapacityToGrowth(PreviousCapacity(capacity)), key_size,
73
0
                  slot_size);
74
0
}
75
76
// Returns "random" seed.
77
0
inline size_t RandomSeed() {
78
0
#ifdef ABSL_HAVE_THREAD_LOCAL
79
0
  static thread_local size_t counter = 0;
80
0
  size_t value = ++counter;
81
#else   // ABSL_HAVE_THREAD_LOCAL
82
  static std::atomic<size_t> counter(0);
83
  size_t value = counter.fetch_add(1, std::memory_order_relaxed);
84
#endif  // ABSL_HAVE_THREAD_LOCAL
85
0
  return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
86
0
}
87
88
0
bool ShouldRehashForBugDetection(size_t capacity) {
89
  // Note: we can't use the abseil-random library because abseil-random
90
  // depends on swisstable. We want to return true with probability
91
  // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
92
  // we probe based on a random hash and see if the offset is less than
93
  // RehashProbabilityConstant().
94
0
  return probe(capacity, absl::HashOf(RandomSeed())).offset() <
95
0
         RehashProbabilityConstant();
96
0
}
97
98
// Find a non-deterministic hash for single group table.
99
// Last two bits are used to find a position for a newly inserted element after
100
// resize.
101
// This function basically uses the last bits of H2 to save on a shift operation.
102
108k
size_t SingleGroupTableH1(size_t hash, PerTableSeed seed) {
103
108k
  return hash ^ seed.seed();
104
108k
}
105
106
// Returns the offset of the new element after resize from capacity 1 to 3.
107
49.0k
size_t Resize1To3NewOffset(size_t hash, PerTableSeed seed) {
108
  // After resize from capacity 1 to 3, we always have exactly the slot with
109
  // index 1 occupied, so we need to insert either at index 0 or index 2.
110
49.0k
  static_assert(SooSlotIndex() == 1);
111
49.0k
  return SingleGroupTableH1(hash, seed) & 2;
112
49.0k
}
113
114
// Returns the address of the ith slot in slots where each slot occupies
115
// slot_size.
116
288k
inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
117
288k
  return static_cast<void*>(static_cast<char*>(slot_array) +
118
288k
                            (slot * slot_size));
119
288k
}
120
121
// Returns the address of the slot `i` iterations after `slot` assuming each
122
// slot has the specified size.
123
59.6k
inline void* NextSlot(void* slot, size_t slot_size, size_t i = 1) {
124
59.6k
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) +
125
59.6k
                                 slot_size * i);
126
59.6k
}
127
128
// Returns the address of the slot just before `slot` assuming each slot has the
129
// specified size.
130
0
inline void* PrevSlot(void* slot, size_t slot_size) {
131
0
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
132
0
}
133
134
}  // namespace
135
136
// Must be defined out-of-line to avoid MSVC error C2482 on some platforms,
137
// which is caused by non-constexpr initialization.
138
49.0k
uint16_t HashtableSize::NextSeed() {
139
49.0k
  static_assert(PerTableSeed::kBitCount == 16);
140
49.0k
  thread_local uint16_t seed =
141
49.0k
      static_cast<uint16_t>(reinterpret_cast<uintptr_t>(&seed));
142
49.0k
  seed += uint16_t{0xad53};
143
49.0k
  return seed;
144
49.0k
}
145
146
0
GenerationType* EmptyGeneration() {
147
0
  if (SwisstableGenerationsEnabled()) {
148
0
    constexpr size_t kNumEmptyGenerations = 1024;
149
0
    static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{};
150
0
    return const_cast<GenerationType*>(
151
0
        &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);
152
0
  }
153
0
  return nullptr;
154
0
}
155
156
bool CommonFieldsGenerationInfoEnabled::
157
0
    should_rehash_for_bug_detection_on_insert(size_t capacity) const {
158
0
  if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
159
0
  if (reserved_growth_ > 0) return false;
160
0
  return ShouldRehashForBugDetection(capacity);
161
0
}
162
163
bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
164
0
    size_t capacity) const {
165
0
  return ShouldRehashForBugDetection(capacity);
166
0
}
167
168
namespace {
169
170
// Probes an array of control bits using a probe sequence,
171
// and returns the mask corresponding to the first group with a deleted or empty
172
// slot.
173
inline Group::NonIterableBitMaskType probe_till_first_non_full_group(
174
    const ctrl_t* ctrl, probe_seq<Group::kWidth>& seq,
175
80.4k
    [[maybe_unused]] size_t capacity) {
176
83.3k
  while (true) {
177
83.3k
    GroupFullEmptyOrDeleted g{ctrl + seq.offset()};
178
83.3k
    auto mask = g.MaskEmptyOrDeleted();
179
83.3k
    if (mask) {
180
80.4k
      return mask;
181
80.4k
    }
182
2.88k
    seq.next();
183
2.88k
    ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity && "full table!");
184
2.88k
  }
185
80.4k
}
186
187
FindInfo find_first_non_full_from_h1(const ctrl_t* ctrl, size_t h1,
188
163k
                                     size_t capacity) {
189
163k
  auto seq = probe_h1(capacity, h1);
190
163k
  if (IsEmptyOrDeleted(ctrl[seq.offset()])) {
191
82.7k
    return {seq.offset(), /*probe_length=*/0};
192
82.7k
  }
193
80.4k
  auto mask = probe_till_first_non_full_group(ctrl, seq, capacity);
194
80.4k
  return {seq.offset(mask.LowestBitSet()), seq.index()};
195
163k
}
196
197
// Probes an array of control bits using a probe sequence derived from `hash`,
198
// and returns the offset corresponding to the first deleted or empty slot.
199
//
200
// Behavior when the entire table is full is undefined.
201
//
202
// NOTE: this function must work with tables having both empty and deleted
203
// slots in the same group. Such tables appear during `erase()`.
204
92.4k
FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
205
92.4k
  return find_first_non_full_from_h1(common.control(), H1(hash),
206
92.4k
                                     common.capacity());
207
92.4k
}
208
209
// Same as `find_first_non_full`, but returns the mask corresponding to the
210
// first group with a deleted or empty slot.
211
std::pair<FindInfo, Group::NonIterableBitMaskType> find_first_non_full_group(
212
0
    const CommonFields& common, size_t hash) {
213
0
  auto seq = probe(common, hash);
214
0
  auto mask =
215
0
      probe_till_first_non_full_group(common.control(), seq, common.capacity());
216
0
  return {{seq.offset(), seq.index()}, mask};
217
0
}
218
219
// Whether a table fits in half a group. A half-group table fits entirely into a
220
// probing group, i.e., has a capacity < `Group::kWidth`.
221
//
222
// In half-group mode we are able to use the whole capacity. The extra control
223
// bytes give us at least one "empty" control byte to stop the iteration.
224
// This is important to make 1 a valid capacity.
225
//
226
// In half-group mode only the first `capacity` control bytes after the sentinel
227
// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
228
// represent a real slot.
229
0
constexpr bool is_half_group(size_t capacity) {
230
0
  return capacity < Group::kWidth - 1;
231
0
}
232
233
template <class Fn>
234
0
void IterateOverFullSlotsImpl(const CommonFields& c, size_t slot_size, Fn cb) {
235
0
  const size_t cap = c.capacity();
236
0
  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(cap));
237
0
  const ctrl_t* ctrl = c.control();
238
0
  void* slot = c.slot_array();
239
0
  if (is_half_group(cap)) {
240
    // Mirrored/cloned control bytes in half-group table are also located in the
241
    // first group (starting from position 0). We are taking group from position
242
    // `capacity` in order to avoid duplicates.
243
244
    // Half-group tables capacity fits into portable group, where
245
    // GroupPortableImpl::MaskFull is more efficient for the
246
    // capacity <= GroupPortableImpl::kWidth.
247
0
    ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth &&
248
0
                           "unexpectedly large half-group capacity");
249
0
    static_assert(Group::kWidth >= GroupPortableImpl::kWidth,
250
0
                  "unexpected group width");
251
    // Group starts from kSentinel slot, so indices in the mask will
252
    // be increased by 1.
253
0
    const auto mask = GroupPortableImpl(ctrl + cap).MaskFull();
254
0
    --ctrl;
255
0
    slot = PrevSlot(slot, slot_size);
256
0
    for (uint32_t i : mask) {
257
0
      cb(ctrl + i, SlotAddress(slot, i, slot_size));
258
0
    }
259
0
    return;
260
0
  }
261
0
  size_t remaining = c.size();
262
0
  ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
263
0
  while (remaining != 0) {
264
0
    for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
265
0
      ABSL_SWISSTABLE_ASSERT(IsFull(ctrl[i]) &&
266
0
                             "hash table was modified unexpectedly");
267
0
      cb(ctrl + i, SlotAddress(slot, i, slot_size));
268
0
      --remaining;
269
0
    }
270
0
    ctrl += Group::kWidth;
271
0
    slot = NextSlot(slot, slot_size, Group::kWidth);
272
0
    ABSL_SWISSTABLE_ASSERT(
273
0
        (remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
274
0
        "hash table was modified unexpectedly");
275
0
  }
276
  // NOTE: erasure of the current element is allowed in callback for
277
  // absl::erase_if specialization. So we use `>=`.
278
0
  ABSL_SWISSTABLE_ASSERT(original_size_for_assert >= c.size() &&
279
0
                         "hash table was modified unexpectedly");
280
0
}
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)> >(absl::container_internal::CommonFields const&, unsigned long, absl::FunctionRef<void (absl::container_internal::ctrl_t const*, void*)>)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::IterateOverFullSlotsImpl<absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0>(absl::container_internal::CommonFields const&, unsigned long, absl::container_internal::Copy(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::CommonFields const&, absl::FunctionRef<void (void*, void const*)>)::$_0)
281
282
}  // namespace
283
284
0
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
285
0
  ABSL_SWISSTABLE_ASSERT(ctrl[capacity] == ctrl_t::kSentinel);
286
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
287
0
  for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
288
0
    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
289
0
  }
290
  // Copy the cloned ctrl bytes.
291
0
  std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
292
0
  ctrl[capacity] = ctrl_t::kSentinel;
293
0
}
294
295
void IterateOverFullSlots(const CommonFields& c, size_t slot_size,
296
0
                          absl::FunctionRef<void(const ctrl_t*, void*)> cb) {
297
0
  IterateOverFullSlotsImpl(c, slot_size, cb);
298
0
}
299
300
namespace {
301
302
372k
void ResetGrowthLeft(GrowthInfo& growth_info, size_t capacity, size_t size) {
303
372k
  growth_info.InitGrowthLeftNoDeleted(CapacityToGrowth(capacity) - size);
304
372k
}
305
306
220k
void ResetGrowthLeft(CommonFields& common) {
307
220k
  ResetGrowthLeft(common.growth_info(), common.capacity(), common.size());
308
220k
}
309
310
// Finds an empty slot (guaranteed to exist) starting from the given position.
311
// NOTE: this function is almost never triggered inside of the
312
// DropDeletesWithoutResize, so we keep it simple.
313
// The table is rather sparse, so empty slot will be found very quickly.
314
0
size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) {
315
0
  for (size_t i = start; i < end; ++i) {
316
0
    if (IsEmpty(ctrl[i])) {
317
0
      return i;
318
0
    }
319
0
  }
320
0
  ABSL_UNREACHABLE();
321
0
}
322
323
// Finds guaranteed to exist full slot starting from the given position.
324
// NOTE: this function is only triggered for rehash(0), when we need to
325
// go back to SOO state, so we keep it simple.
326
0
size_t FindFirstFullSlot(size_t start, size_t end, const ctrl_t* ctrl) {
327
0
  for (size_t i = start; i < end; ++i) {
328
0
    if (IsFull(ctrl[i])) {
329
0
      return i;
330
0
    }
331
0
  }
332
0
  ABSL_UNREACHABLE();
333
0
}
334
335
8.33M
void PrepareInsertCommon(CommonFields& common) {
336
8.33M
  common.increment_size();
337
8.33M
  common.maybe_increment_generation_on_insert();
338
8.33M
}
339
340
// Sets sanitizer poisoning for slot corresponding to control byte being set.
341
inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
342
8.35M
                                size_t slot_size) {
343
8.35M
  ABSL_SWISSTABLE_ASSERT(i < c.capacity());
344
8.35M
  auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
345
8.35M
  if (IsFull(h)) {
346
8.35M
    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
347
8.35M
  } else {
348
0
    SanitizerPoisonMemoryRegion(slot_i, slot_size);
349
0
  }
350
8.35M
}
351
352
// Sets `ctrl[i]` to `h`.
353
//
354
// Unlike setting it directly, this function will perform bounds checks and
355
// mirror the value to the cloned tail if necessary.
356
inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
357
8.13M
                    size_t slot_size) {
358
8.13M
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
359
8.13M
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
360
8.13M
  ctrl_t* ctrl = c.control();
361
8.13M
  ctrl[i] = h;
362
8.13M
  ctrl[((i - NumClonedBytes()) & c.capacity()) +
363
8.13M
       (NumClonedBytes() & c.capacity())] = h;
364
8.13M
}
365
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
366
8.13M
inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
367
8.13M
  SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
368
8.13M
}
369
370
// Like SetCtrl, but in a single group table, we can save some operations when
371
// setting the cloned control byte.
372
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
373
59.6k
                                      size_t slot_size) {
374
59.6k
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
375
59.6k
  ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity()));
376
59.6k
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
377
59.6k
  ctrl_t* ctrl = c.control();
378
59.6k
  ctrl[i] = h;
379
59.6k
  ctrl[i + c.capacity() + 1] = h;
380
59.6k
}
381
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
382
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
383
59.6k
                                      size_t slot_size) {
384
59.6k
  SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
385
59.6k
}
386
387
// Like SetCtrl, but in a table with capacity >= Group::kWidth - 1,
388
// we can save some operations when setting the cloned control byte.
389
inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, ctrl_t h,
390
163k
                                size_t slot_size) {
391
163k
  ABSL_SWISSTABLE_ASSERT(c.capacity() >= Group::kWidth - 1);
392
163k
  DoSanitizeOnSetCtrl(c, i, h, slot_size);
393
163k
  ctrl_t* ctrl = c.control();
394
163k
  ctrl[i] = h;
395
163k
  ctrl[((i - NumClonedBytes()) & c.capacity()) + NumClonedBytes()] = h;
396
163k
}
397
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
398
inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, h2_t h,
399
163k
                                size_t slot_size) {
400
163k
  SetCtrlInLargeTable(c, i, static_cast<ctrl_t>(h), slot_size);
401
163k
}
402
403
size_t DropDeletesWithoutResizeAndPrepareInsert(
404
    CommonFields& common, const PolicyFunctions& __restrict policy,
405
0
    size_t new_hash) {
406
0
  void* set = &common;
407
0
  void* slot_array = common.slot_array();
408
0
  const size_t capacity = common.capacity();
409
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity));
410
0
  ABSL_SWISSTABLE_ASSERT(!is_single_group(capacity));
411
  // Algorithm:
412
  // - mark all DELETED slots as EMPTY
413
  // - mark all FULL slots as DELETED
414
  // - for each slot marked as DELETED
415
  //     hash = Hash(element)
416
  //     target = find_first_non_full(hash)
417
  //     if target is in the same group
418
  //       mark slot as FULL
419
  //     else if target is EMPTY
420
  //       transfer element to target
421
  //       mark slot as EMPTY
422
  //       mark target as FULL
423
  //     else if target is DELETED
424
  //       swap current element with target element
425
  //       mark target as FULL
426
  //       repeat procedure for current slot with moved from element (target)
427
0
  ctrl_t* ctrl = common.control();
428
0
  ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
429
0
  const void* hash_fn = policy.hash_fn(common);
430
0
  auto hasher = policy.hash_slot;
431
0
  auto transfer_n = policy.transfer_n;
432
0
  const size_t slot_size = policy.slot_size;
433
434
0
  size_t total_probe_length = 0;
435
0
  void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
436
437
  // The index of an empty slot that can be used as temporary memory for
438
  // the swap operation.
439
0
  constexpr size_t kUnknownId = ~size_t{};
440
0
  size_t tmp_space_id = kUnknownId;
441
442
0
  for (size_t i = 0; i != capacity;
443
0
       ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
444
0
    ABSL_SWISSTABLE_ASSERT(slot_ptr == SlotAddress(slot_array, i, slot_size));
445
0
    if (IsEmpty(ctrl[i])) {
446
0
      tmp_space_id = i;
447
0
      continue;
448
0
    }
449
0
    if (!IsDeleted(ctrl[i])) continue;
450
0
    const size_t hash = (*hasher)(hash_fn, slot_ptr, common.seed().seed());
451
0
    const FindInfo target = find_first_non_full(common, hash);
452
0
    const size_t new_i = target.offset;
453
0
    total_probe_length += target.probe_length;
454
455
    // Verify if the old and new i fall within the same group wrt the hash.
456
    // If they do, we don't need to move the object as it falls already in the
457
    // best probe we can.
458
0
    const size_t probe_offset = probe(common, hash).offset();
459
0
    const h2_t h2 = H2(hash);
460
0
    const auto probe_index = [probe_offset, capacity](size_t pos) {
461
0
      return ((pos - probe_offset) & capacity) / Group::kWidth;
462
0
    };
463
464
    // Element doesn't move.
465
0
    if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
466
0
      SetCtrlInLargeTable(common, i, h2, slot_size);
467
0
      continue;
468
0
    }
469
470
0
    void* new_slot_ptr = SlotAddress(slot_array, new_i, slot_size);
471
0
    if (IsEmpty(ctrl[new_i])) {
472
      // Transfer element to the empty spot.
473
      // SetCtrl poisons/unpoisons the slots so we have to call it at the
474
      // right time.
475
0
      SetCtrlInLargeTable(common, new_i, h2, slot_size);
476
0
      (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
477
0
      SetCtrlInLargeTable(common, i, ctrl_t::kEmpty, slot_size);
478
      // Initialize or change empty space id.
479
0
      tmp_space_id = i;
480
0
    } else {
481
0
      ABSL_SWISSTABLE_ASSERT(IsDeleted(ctrl[new_i]));
482
0
      SetCtrlInLargeTable(common, new_i, h2, slot_size);
483
      // Until we are done rehashing, DELETED marks previously FULL slots.
484
485
0
      if (tmp_space_id == kUnknownId) {
486
0
        tmp_space_id = FindEmptySlot(i + 1, capacity, ctrl);
487
0
      }
488
0
      void* tmp_space = SlotAddress(slot_array, tmp_space_id, slot_size);
489
0
      SanitizerUnpoisonMemoryRegion(tmp_space, slot_size);
490
491
      // Swap i and new_i elements.
492
0
      (*transfer_n)(set, tmp_space, new_slot_ptr, 1);
493
0
      (*transfer_n)(set, new_slot_ptr, slot_ptr, 1);
494
0
      (*transfer_n)(set, slot_ptr, tmp_space, 1);
495
496
0
      SanitizerPoisonMemoryRegion(tmp_space, slot_size);
497
498
      // repeat the processing of the ith slot
499
0
      --i;
500
0
      slot_ptr = PrevSlot(slot_ptr, slot_size);
501
0
    }
502
0
  }
503
  // Prepare insert for the new element.
504
0
  PrepareInsertCommon(common);
505
0
  ResetGrowthLeft(common);
506
0
  FindInfo find_info = find_first_non_full(common, new_hash);
507
0
  SetCtrlInLargeTable(common, find_info.offset, H2(new_hash), slot_size);
508
0
  common.infoz().RecordInsertMiss(new_hash, find_info.probe_length);
509
0
  common.infoz().RecordRehash(total_probe_length);
510
0
  return find_info.offset;
511
0
}
512
513
0
bool WasNeverFull(CommonFields& c, size_t index) {
514
0
  if (is_single_group(c.capacity())) {
515
0
    return true;
516
0
  }
517
0
  const size_t index_before = (index - Group::kWidth) & c.capacity();
518
0
  const auto empty_after = Group(c.control() + index).MaskEmpty();
519
0
  const auto empty_before = Group(c.control() + index_before).MaskEmpty();
520
521
  // We count how many consecutive non empties we have to the right and to the
522
  // left of `it`. If the sum is >= kWidth then there is at least one probe
523
  // window that might have seen a full group.
524
0
  return empty_before && empty_after &&
525
0
         static_cast<size_t>(empty_after.TrailingZeros()) +
526
0
                 empty_before.LeadingZeros() <
527
0
             Group::kWidth;
528
0
}
529
530
// Updates the control bytes to indicate a completely empty table such that all
531
// control bytes are kEmpty except for the kSentinel byte.
532
220k
void ResetCtrl(CommonFields& common, size_t slot_size) {
533
220k
  const size_t capacity = common.capacity();
534
220k
  ctrl_t* ctrl = common.control();
535
220k
  static constexpr size_t kTwoGroupCapacity = 2 * Group::kWidth - 1;
536
220k
  if (ABSL_PREDICT_TRUE(capacity <= kTwoGroupCapacity)) {
537
179k
    if (IsSmallCapacity(capacity)) return;
538
179k
    std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth);
539
179k
    std::memset(ctrl + capacity, static_cast<int8_t>(ctrl_t::kEmpty),
540
179k
                Group::kWidth);
541
179k
    if (capacity == kTwoGroupCapacity) {
542
49.7k
      std::memset(ctrl + Group::kWidth, static_cast<int8_t>(ctrl_t::kEmpty),
543
49.7k
                  Group::kWidth);
544
49.7k
    }
545
179k
  } else {
546
41.7k
    std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
547
41.7k
                capacity + 1 + NumClonedBytes());
548
41.7k
  }
549
220k
  ctrl[capacity] = ctrl_t::kSentinel;
550
220k
  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
551
220k
}
552
553
// Initializes control bytes for growing from capacity 1 to 3.
554
// `orig_h2` is placed in the position `SooSlotIndex()`.
555
// `new_h2` is placed in the position `new_offset`.
556
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void InitializeThreeElementsControlBytes(
557
49.0k
    h2_t orig_h2, h2_t new_h2, size_t new_offset, ctrl_t* new_ctrl) {
558
49.0k
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
559
49.0k
  static_assert(kNewCapacity == 3);
560
49.0k
  static_assert(is_single_group(kNewCapacity));
561
49.0k
  static_assert(SooSlotIndex() == 1);
562
49.0k
  ABSL_SWISSTABLE_ASSERT(new_offset == 0 || new_offset == 2);
563
564
49.0k
  static constexpr uint64_t kEmptyXorSentinel =
565
49.0k
      static_cast<uint8_t>(ctrl_t::kEmpty) ^
566
49.0k
      static_cast<uint8_t>(ctrl_t::kSentinel);
567
49.0k
  static constexpr uint64_t kEmpty64 = static_cast<uint8_t>(ctrl_t::kEmpty);
568
49.0k
  static constexpr size_t kMirroredSooSlotIndex =
569
49.0k
      SooSlotIndex() + kNewCapacity + 1;
570
  // The first 8 bytes, where SOO slot original and mirrored positions are
571
  // replaced with 0.
572
  // Result will look like: E0ESE0EE
573
49.0k
  static constexpr uint64_t kFirstCtrlBytesWithZeroes =
574
49.0k
      k8EmptyBytes ^ (kEmpty64 << (8 * SooSlotIndex())) ^
575
49.0k
      (kEmptyXorSentinel << (8 * kNewCapacity)) ^
576
49.0k
      (kEmpty64 << (8 * kMirroredSooSlotIndex));
577
578
49.0k
  const uint64_t soo_h2 = static_cast<uint64_t>(orig_h2);
579
49.0k
  const uint64_t new_h2_xor_empty =
580
49.0k
      static_cast<uint64_t>(new_h2 ^ static_cast<uint8_t>(ctrl_t::kEmpty));
581
  // Fill the original and mirrored bytes for SOO slot.
582
  // Result will look like:
583
  // EHESEHEE
584
  // Where H = soo_h2, E = kEmpty, S = kSentinel.
585
49.0k
  uint64_t first_ctrl_bytes =
586
49.0k
      ((soo_h2 << (8 * SooSlotIndex())) | kFirstCtrlBytesWithZeroes) |
587
49.0k
      (soo_h2 << (8 * kMirroredSooSlotIndex));
588
  // Replace original and mirrored empty bytes for the new position.
589
  // Result for new_offset 0 will look like:
590
  // NHESNHEE
591
  // Where H = soo_h2, N = H2(new_hash), E = kEmpty, S = kSentinel.
592
  // Result for new_offset 2 will look like:
593
  // EHNSEHNE
594
49.0k
  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_offset));
595
49.0k
  size_t new_mirrored_offset = new_offset + kNewCapacity + 1;
596
49.0k
  first_ctrl_bytes ^= (new_h2_xor_empty << (8 * new_mirrored_offset));
597
598
  // Fill last bytes with kEmpty.
599
49.0k
  std::memset(new_ctrl + kNewCapacity, static_cast<int8_t>(ctrl_t::kEmpty),
600
49.0k
              Group::kWidth);
601
  // Overwrite the first 8 bytes with first_ctrl_bytes.
602
49.0k
  absl::little_endian::Store64(new_ctrl, first_ctrl_bytes);
603
604
  // Example for group size 16:
605
  // new_ctrl after 1st memset =      ???EEEEEEEEEEEEEEEE
606
  // new_offset 0:
607
  // new_ctrl after 2nd store  =      NHESNHEEEEEEEEEEEEE
608
  // new_offset 2:
609
  // new_ctrl after 2nd store  =      EHNSEHNEEEEEEEEEEEE
610
611
  // Example for group size 8:
612
  // new_ctrl after 1st memset =      ???EEEEEEEE
613
  // new_offset 0:
614
  // new_ctrl after 2nd store  =      NHESNHEEEEE
615
  // new_offset 2:
616
  // new_ctrl after 2nd store  =      EHNSEHNEEEE
617
49.0k
}
618
619
}  // namespace
620
621
0
void EraseMetaOnlySmall(CommonFields& c, bool soo_enabled, size_t slot_size) {
622
0
  ABSL_SWISSTABLE_ASSERT(c.is_small());
623
0
  if (soo_enabled) {
624
0
    c.set_empty_soo();
625
0
    return;
626
0
  }
627
0
  c.decrement_size();
628
0
  c.infoz().RecordErase();
629
0
  SanitizerPoisonMemoryRegion(c.slot_array(), slot_size);
630
0
}
631
632
0
void EraseMetaOnlyLarge(CommonFields& c, const ctrl_t* ctrl, size_t slot_size) {
633
0
  ABSL_SWISSTABLE_ASSERT(!c.is_small());
634
0
  ABSL_SWISSTABLE_ASSERT(IsFull(*ctrl) && "erasing a dangling iterator");
635
0
  c.decrement_size();
636
0
  c.infoz().RecordErase();
637
638
0
  size_t index = static_cast<size_t>(ctrl - c.control());
639
640
0
  if (WasNeverFull(c, index)) {
641
0
    SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
642
0
    c.growth_info().OverwriteFullAsEmpty();
643
0
    return;
644
0
  }
645
646
0
  c.growth_info().OverwriteFullAsDeleted();
647
0
  SetCtrlInLargeTable(c, index, ctrl_t::kDeleted, slot_size);
648
0
}
649
650
void ClearBackingArray(CommonFields& c,
651
                       const PolicyFunctions& __restrict policy, void* alloc,
652
235k
                       bool reuse, bool soo_enabled) {
653
235k
  if (reuse) {
654
220k
    c.set_size_to_zero();
655
220k
    ABSL_SWISSTABLE_ASSERT(!soo_enabled || c.capacity() > SooCapacity());
656
220k
    ResetCtrl(c, policy.slot_size);
657
220k
    ResetGrowthLeft(c);
658
220k
    c.infoz().RecordStorageChanged(0, c.capacity());
659
220k
  } else {
660
    // We need to record infoz before calling dealloc, which will unregister
661
    // infoz.
662
14.9k
    c.infoz().RecordClearedReservation();
663
14.9k
    c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);
664
14.9k
    c.infoz().Unregister();
665
14.9k
    (*policy.dealloc)(alloc, c.capacity(), c.control(), policy.slot_size,
666
14.9k
                      policy.slot_align, c.has_infoz());
667
14.9k
    c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{non_soo_tag_t{}};
668
14.9k
  }
669
235k
}
670
671
namespace {
672
673
enum class ResizeNonSooMode {
674
  kGuaranteedEmpty,
675
  kGuaranteedAllocated,
676
};
677
678
// Iterates over full slots in old table, finds new positions for them and
679
// transfers the slots.
680
// This function is used for reserving or rehashing non-empty tables.
681
// This use case is rare so the function is type erased.
682
// Returns the total probe length.
683
size_t FindNewPositionsAndTransferSlots(
684
    CommonFields& common, const PolicyFunctions& __restrict policy,
685
0
    ctrl_t* old_ctrl, void* old_slots, size_t old_capacity) {
686
0
  void* new_slots = common.slot_array();
687
0
  const void* hash_fn = policy.hash_fn(common);
688
0
  const size_t slot_size = policy.slot_size;
689
0
  const size_t seed = common.seed().seed();
690
691
0
  const auto insert_slot = [&](void* slot) {
692
0
    size_t hash = policy.hash_slot(hash_fn, slot, seed);
693
0
    FindInfo target;
694
0
    if (common.is_small()) {
695
0
      target = FindInfo{0, 0};
696
0
    } else {
697
0
      target = find_first_non_full(common, hash);
698
0
      SetCtrl(common, target.offset, H2(hash), slot_size);
699
0
    }
700
0
    policy.transfer_n(&common, SlotAddress(new_slots, target.offset, slot_size),
701
0
                      slot, 1);
702
0
    return target.probe_length;
703
0
  };
704
0
  if (IsSmallCapacity(old_capacity)) {
705
0
    if (common.size() == 1) insert_slot(old_slots);
706
0
    return 0;
707
0
  }
708
0
  size_t total_probe_length = 0;
709
0
  for (size_t i = 0; i < old_capacity; ++i) {
710
0
    if (IsFull(old_ctrl[i])) {
711
0
      total_probe_length += insert_slot(old_slots);
712
0
    }
713
0
    old_slots = NextSlot(old_slots, slot_size);
714
0
  }
715
0
  return total_probe_length;
716
0
}
717
718
// Shared implementation for reporting a growth event (a resize triggered by
// an insertion) to the hashtablez sampling handle, and attaching the handle
// to the table.
void ReportGrowthToInfozImpl(CommonFields& common, HashtablezInfoHandle infoz,
                             size_t hash, size_t total_probe_length,
                             size_t distance_from_desired) {
  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
  // size() - 1: presumably excludes the element whose insertion triggered
  // this growth — TODO confirm against callers.
  infoz.RecordStorageChanged(common.size() - 1, common.capacity());
  infoz.RecordRehash(total_probe_length);
  infoz.RecordInsertMiss(hash, distance_from_desired);
  common.set_has_infoz();
  // TODO(b/413062340): we could potentially store infoz in place of the
  // control pointer for the capacity 1 case.
  common.set_infoz(infoz);
}
730
731
// Specialization to avoid passing two 0s from hot function.
// Forwards to ReportGrowthToInfozImpl with total_probe_length = 0 and
// distance_from_desired = 0.
ABSL_ATTRIBUTE_NOINLINE void ReportSingleGroupTableGrowthToInfoz(
    CommonFields& common, HashtablezInfoHandle infoz, size_t hash) {
  ReportGrowthToInfozImpl(common, infoz, hash, /*total_probe_length=*/0,
                          /*distance_from_desired=*/0);
}
737
738
// Out-of-line forwarder to ReportGrowthToInfozImpl — marked noinline,
// presumably to keep this cold sampling path out of callers' fast paths.
ABSL_ATTRIBUTE_NOINLINE void ReportGrowthToInfoz(CommonFields& common,
                                                 HashtablezInfoHandle infoz,
                                                 size_t hash,
                                                 size_t total_probe_length,
                                                 size_t distance_from_desired) {
  ReportGrowthToInfozImpl(common, infoz, hash, total_probe_length,
                          distance_from_desired);
}
746
747
// Records a completed resize (current size, new capacity, total probe
// length) into the sampling handle and attaches the handle to the table.
ABSL_ATTRIBUTE_NOINLINE void ReportResizeToInfoz(CommonFields& common,
                                                 HashtablezInfoHandle infoz,
                                                 size_t total_probe_length) {
  ABSL_SWISSTABLE_ASSERT(infoz.IsSampled());
  infoz.RecordStorageChanged(common.size(), common.capacity());
  infoz.RecordRehash(total_probe_length);
  common.set_has_infoz();
  common.set_infoz(infoz);
}
756
757
// Pointers into a freshly allocated backing array, as returned by
// AllocBackingArray: the control bytes and the slot array.
struct BackingArrayPtrs {
  ctrl_t* ctrl;   // start of the control bytes
  void* slots;    // start of the slot array
};
761
762
// Allocates a backing array for `new_capacity` slots and advances the
// table's generation. The control/slot pointers are returned rather than
// installed into `common`, so the caller decides when to overwrite the
// table's current state.
BackingArrayPtrs AllocBackingArray(CommonFields& common,
                                   const PolicyFunctions& __restrict policy,
                                   size_t new_capacity, bool has_infoz,
                                   void* alloc) {
  RawHashSetLayout layout(new_capacity, policy.slot_size, policy.slot_align,
                          has_infoz);
  // Perform a direct call in the common case to allow for profile-guided
  // heap optimization (PGHO) to understand which allocation function is used.
  constexpr size_t kDefaultAlignment = BackingArrayAlignment(alignof(size_t));
  using DefaultCharAlloc = std::allocator<char>;
  const bool uses_default_alloc =
      policy.alloc ==
      (&AllocateBackingArray<kDefaultAlignment, DefaultCharAlloc>);
  char* base;
  if (ABSL_PREDICT_TRUE(uses_default_alloc)) {
    base = static_cast<char*>(
        AllocateBackingArray<kDefaultAlignment, DefaultCharAlloc>(
            alloc, layout.alloc_size()));
  } else {
    base = static_cast<char*>(policy.alloc(alloc, layout.alloc_size()));
  }
  // The generation lives inside the new allocation; carry the old value
  // forward and advance it.
  const GenerationType old_generation = common.generation();
  common.set_generation_ptr(
      reinterpret_cast<GenerationType*>(base + layout.generation_offset()));
  common.set_generation(NextGeneration(old_generation));

  return {reinterpret_cast<ctrl_t*>(base + layout.control_offset()),
          base + layout.slot_offset()};
}
786
787
// Resizes a non-SOO table to `new_capacity`, allocating a fresh backing
// array. kMode selects the invariant about the old table:
//  - kGuaranteedEmpty: nothing to transfer; only initialize the new array.
//  - kGuaranteedAllocated: transfer every element from the old heap-backed
//    array, then deallocate it.
// If `infoz` is sampled, it is attached to the table at the end.
template <ResizeNonSooMode kMode>
void ResizeNonSooImpl(CommonFields& common,
                      const PolicyFunctions& __restrict policy,
                      size_t new_capacity, HashtablezInfoHandle infoz) {
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());

  // Capture the old backing array before `common` is redirected to the new
  // one; only needed when elements must be transferred.
  [[maybe_unused]] const size_t old_capacity = common.capacity();
  [[maybe_unused]] ctrl_t* old_ctrl;
  [[maybe_unused]] void* old_slots;
  if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
    old_ctrl = common.control();
    old_slots = common.slot_array();
  }

  const size_t slot_size = policy.slot_size;
  [[maybe_unused]] const size_t slot_align = policy.slot_align;
  const bool has_infoz = infoz.IsSampled();
  void* alloc = policy.get_char_alloc(common);

  common.set_capacity(new_capacity);
  const auto [new_ctrl, new_slots] =
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
  common.set_control(new_ctrl);
  common.set_slots(new_slots);
  common.generate_new_seed(has_infoz);

  size_t total_probe_length = 0;
  ResetCtrl(common, slot_size);
  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedEmpty ||
                         old_capacity == policy.soo_capacity());
  ABSL_SWISSTABLE_ASSERT(kMode != ResizeNonSooMode::kGuaranteedAllocated ||
                         old_capacity > 0);
  if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) {
    // Move all elements into the new array, free the old one, and rebuild
    // the growth accounting from the (unchanged) size.
    total_probe_length = FindNewPositionsAndTransferSlots(
        common, policy, old_ctrl, old_slots, old_capacity);
    (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
                      has_infoz);
    ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
                    common.size());
  } else {
    // Empty table: full growth budget, no deleted slots.
    GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(
        CapacityToGrowth(new_capacity));
  }

  if (ABSL_PREDICT_FALSE(has_infoz)) {
    ReportResizeToInfoz(common, infoz, total_probe_length);
  }
}
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)0>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
Unexecuted instantiation: raw_hash_set.cc:void absl::container_internal::(anonymous namespace)::ResizeNonSooImpl<(absl::container_internal::(anonymous namespace)::ResizeNonSooMode)1>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, unsigned long, absl::container_internal::HashtablezInfoHandle)
836
837
void ResizeEmptyNonAllocatedTableImpl(CommonFields& common,
838
                                      const PolicyFunctions& __restrict policy,
839
0
                                      size_t new_capacity, bool force_infoz) {
840
0
  ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity));
841
0
  ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity());
842
0
  ABSL_SWISSTABLE_ASSERT(!force_infoz || policy.soo_enabled);
843
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= policy.soo_capacity());
844
0
  ABSL_SWISSTABLE_ASSERT(common.empty());
845
0
  const size_t slot_size = policy.slot_size;
846
0
  HashtablezInfoHandle infoz;
847
0
  const bool should_sample =
848
0
      policy.is_hashtablez_eligible && (force_infoz || ShouldSampleNextTable());
849
0
  if (ABSL_PREDICT_FALSE(should_sample)) {
850
0
    infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
851
0
                            policy.soo_capacity());
852
0
  }
853
0
  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedEmpty>(common, policy,
854
0
                                                       new_capacity, infoz);
855
0
}
856
857
// If the table was SOO, initializes new control bytes and transfers slot.
// After transferring the slot, sets control and slots in CommonFields.
// It is rare to resize an SOO table with one element to a large size.
// Requires: `c` contains SOO data.
void InsertOldSooSlotAndInitializeControlBytes(
    CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* new_ctrl,
    void* new_slots, bool has_infoz) {
  ABSL_SWISSTABLE_ASSERT(c.size() == policy.soo_capacity());
  ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
  size_t new_capacity = c.capacity();

  // Generate the table's new seed first; the hash computed below uses it.
  c.generate_new_seed(has_infoz);

  const size_t soo_slot_hash =
      policy.hash_slot(policy.hash_fn(c), c.soo_data(), c.seed().seed());
  size_t offset = probe(new_capacity, soo_slot_hash).offset();
  // Wrap an off-the-end probe start back to slot 0.
  offset = offset == new_capacity ? 0 : offset;
  // Poison the whole new slot array, then unpoison only the target slot.
  SanitizerPoisonMemoryRegion(new_slots, policy.slot_size * new_capacity);
  void* target_slot = SlotAddress(new_slots, offset, policy.slot_size);
  SanitizerUnpoisonMemoryRegion(target_slot, policy.slot_size);
  // Transfer while `c` still refers to the SOO buffer, then install the new
  // backing array and write its control bytes.
  policy.transfer_n(&c, target_slot, c.soo_data(), 1);
  c.set_control(new_ctrl);
  c.set_slots(new_slots);
  ResetCtrl(c, policy.slot_size);
  SetCtrl(c, offset, H2(soo_slot_hash), policy.slot_size);
}
883
884
// Controls hashtablez sampling behavior in ResizeFullSooTable.
enum class ResizeFullSooTableSamplingMode {
  // Do not attempt to sample the table.
  kNoSampling,
  // Force sampling. If the table was still not sampled, do not resize.
  kForceSampleNoResizeIfUnsampled,
};
889
890
// Asserts that the table is in SOO mode: SOO is enabled by the policy and
// the capacity equals the SOO capacity. The body consists only of asserts;
// parameters are otherwise unused.
void AssertSoo([[maybe_unused]] CommonFields& common,
               [[maybe_unused]] const PolicyFunctions& policy) {
  ABSL_SWISSTABLE_ASSERT(policy.soo_enabled);
  ABSL_SWISSTABLE_ASSERT(common.capacity() == policy.soo_capacity());
}
895
// Asserts that the table is in SOO mode AND holds a full SOO payload
// (size equals the SOO capacity).
void AssertFullSoo([[maybe_unused]] CommonFields& common,
                   [[maybe_unused]] const PolicyFunctions& policy) {
  AssertSoo(common, policy);
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
}
900
901
void ResizeFullSooTable(CommonFields& common,
902
                        const PolicyFunctions& __restrict policy,
903
                        size_t new_capacity,
904
0
                        ResizeFullSooTableSamplingMode sampling_mode) {
905
0
  AssertFullSoo(common, policy);
906
0
  const size_t slot_size = policy.slot_size;
907
0
  void* alloc = policy.get_char_alloc(common);
908
909
0
  HashtablezInfoHandle infoz;
910
0
  bool has_infoz = false;
911
0
  if (sampling_mode ==
912
0
      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled) {
913
0
    if (ABSL_PREDICT_FALSE(policy.is_hashtablez_eligible)) {
914
0
      infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size,
915
0
                              policy.soo_capacity());
916
0
    }
917
918
0
    if (!infoz.IsSampled()) return;
919
0
    has_infoz = true;
920
0
  }
921
922
0
  common.set_capacity(new_capacity);
923
924
  // We do not set control and slots in CommonFields yet to avoid overriding
925
  // SOO data.
926
0
  const auto [new_ctrl, new_slots] =
927
0
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
928
929
0
  InsertOldSooSlotAndInitializeControlBytes(common, policy, new_ctrl, new_slots,
930
0
                                            has_infoz);
931
0
  ResetGrowthLeft(common);
932
0
  if (has_infoz) {
933
0
    common.set_has_infoz();
934
0
    common.set_infoz(infoz);
935
0
    infoz.RecordStorageChanged(common.size(), new_capacity);
936
0
  }
937
0
}
938
939
void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* __restrict old_ctrl,
940
                                            size_t old_capacity,
941
                                            ctrl_t* __restrict new_ctrl,
942
59.6k
                                            size_t new_capacity) {
943
59.6k
  ABSL_SWISSTABLE_ASSERT(is_single_group(new_capacity));
944
59.6k
  constexpr size_t kHalfWidth = Group::kWidth / 2;
945
59.6k
  ABSL_ASSUME(old_capacity < kHalfWidth);
946
59.6k
  ABSL_ASSUME(old_capacity > 0);
947
59.6k
  static_assert(Group::kWidth == 8 || Group::kWidth == 16,
948
59.6k
                "Group size is not supported.");
949
950
  // NOTE: operations are done with compile time known size = 8.
951
  // Compiler optimizes that into single ASM operation.
952
953
  // Load the bytes from old_capacity. This contains
954
  // - the sentinel byte
955
  // - all the old control bytes
956
  // - the rest is filled with kEmpty bytes
957
  // Example:
958
  // old_ctrl =     012S012EEEEEEEEE...
959
  // copied_bytes = S012EEEE
960
59.6k
  uint64_t copied_bytes = absl::little_endian::Load64(old_ctrl + old_capacity);
961
962
  // We change the sentinel byte to kEmpty before storing to both the start of
963
  // the new_ctrl, and past the end of the new_ctrl later for the new cloned
964
  // bytes. Note that this is faster than setting the sentinel byte to kEmpty
965
  // after the copy directly in new_ctrl because we are limited on store
966
  // bandwidth.
967
59.6k
  static constexpr uint64_t kEmptyXorSentinel =
968
59.6k
      static_cast<uint8_t>(ctrl_t::kEmpty) ^
969
59.6k
      static_cast<uint8_t>(ctrl_t::kSentinel);
970
971
  // Replace the first byte kSentinel with kEmpty.
972
  // Resulting bytes will be shifted by one byte old control blocks.
973
  // Example:
974
  // old_ctrl = 012S012EEEEEEEEE...
975
  // before =   S012EEEE
976
  // after  =   E012EEEE
977
59.6k
  copied_bytes ^= kEmptyXorSentinel;
978
979
59.6k
  if (Group::kWidth == 8) {
980
    // With group size 8, we can grow with two write operations.
981
0
    ABSL_SWISSTABLE_ASSERT(old_capacity < 8 &&
982
0
                           "old_capacity is too large for group size 8");
983
0
    absl::little_endian::Store64(new_ctrl, copied_bytes);
984
985
0
    static constexpr uint64_t kSentinal64 =
986
0
        static_cast<uint8_t>(ctrl_t::kSentinel);
987
988
    // Prepend kSentinel byte to the beginning of copied_bytes.
989
    // We have maximum 3 non-empty bytes at the beginning of copied_bytes for
990
    // group size 8.
991
    // Example:
992
    // old_ctrl = 012S012EEEE
993
    // before =   E012EEEE
994
    // after  =   SE012EEE
995
0
    copied_bytes = (copied_bytes << 8) ^ kSentinal64;
996
0
    absl::little_endian::Store64(new_ctrl + new_capacity, copied_bytes);
997
    // Example for capacity 3:
998
    // old_ctrl = 012S012EEEE
999
    // After the first store:
1000
    //           >!
1001
    // new_ctrl = E012EEEE???????
1002
    // After the second store:
1003
    //                  >!
1004
    // new_ctrl = E012EEESE012EEE
1005
0
    return;
1006
0
  }
1007
1008
59.6k
  ABSL_SWISSTABLE_ASSERT(Group::kWidth == 16);  // NOLINT(misc-static-assert)
1009
1010
  // Fill the second half of the main control bytes with kEmpty.
1011
  // For small capacity that may write into mirrored control bytes.
1012
  // It is fine as we will overwrite all the bytes later.
1013
59.6k
  std::memset(new_ctrl + kHalfWidth, static_cast<int8_t>(ctrl_t::kEmpty),
1014
59.6k
              kHalfWidth);
1015
  // Fill the second half of the mirrored control bytes with kEmpty.
1016
59.6k
  std::memset(new_ctrl + new_capacity + kHalfWidth,
1017
59.6k
              static_cast<int8_t>(ctrl_t::kEmpty), kHalfWidth);
1018
  // Copy the first half of the non-mirrored control bytes.
1019
59.6k
  absl::little_endian::Store64(new_ctrl, copied_bytes);
1020
59.6k
  new_ctrl[new_capacity] = ctrl_t::kSentinel;
1021
  // Copy the first half of the mirrored control bytes.
1022
59.6k
  absl::little_endian::Store64(new_ctrl + new_capacity + 1, copied_bytes);
1023
1024
  // Example for growth capacity 1->3:
1025
  // old_ctrl =                  0S0EEEEEEEEEEEEEE
1026
  // new_ctrl at the end =       E0ESE0EEEEEEEEEEEEE
1027
  //                                    >!
1028
  // new_ctrl after 1st memset = ????????EEEEEEEE???
1029
  //                                       >!
1030
  // new_ctrl after 2nd memset = ????????EEEEEEEEEEE
1031
  //                            >!
1032
  // new_ctrl after 1st store =  E0EEEEEEEEEEEEEEEEE
1033
  // new_ctrl after kSentinel =  E0ESEEEEEEEEEEEEEEE
1034
  //                                >!
1035
  // new_ctrl after 2nd store =  E0ESE0EEEEEEEEEEEEE
1036
1037
  // Example for growth capacity 3->7:
1038
  // old_ctrl =                  012S012EEEEEEEEEEEE
1039
  // new_ctrl at the end =       E012EEESE012EEEEEEEEEEE
1040
  //                                    >!
1041
  // new_ctrl after 1st memset = ????????EEEEEEEE???????
1042
  //                                           >!
1043
  // new_ctrl after 2nd memset = ????????EEEEEEEEEEEEEEE
1044
  //                            >!
1045
  // new_ctrl after 1st store =  E012EEEEEEEEEEEEEEEEEEE
1046
  // new_ctrl after kSentinel =  E012EEESEEEEEEEEEEEEEEE
1047
  //                                >!
1048
  // new_ctrl after 2nd store =  E012EEESE012EEEEEEEEEEE
1049
1050
  // Example for growth capacity 7->15:
1051
  // old_ctrl =                  0123456S0123456EEEEEEEE
1052
  // new_ctrl at the end =       E0123456EEEEEEESE0123456EEEEEEE
1053
  //                                    >!
1054
  // new_ctrl after 1st memset = ????????EEEEEEEE???????????????
1055
  //                                                   >!
1056
  // new_ctrl after 2nd memset = ????????EEEEEEEE???????EEEEEEEE
1057
  //                            >!
1058
  // new_ctrl after 1st store =  E0123456EEEEEEEE???????EEEEEEEE
1059
  // new_ctrl after kSentinel =  E0123456EEEEEEES???????EEEEEEEE
1060
  //                                            >!
1061
  // new_ctrl after 2nd store =  E0123456EEEEEEESE0123456EEEEEEE
1062
59.6k
}
1063
1064
// Size in bytes of the buffer we allocate on stack for storing probed
// elements in GrowToNextCapacity algorithm.
constexpr size_t kProbedElementsBufferSize = 512;
1067
1068
// Decodes information about probed elements from contiguous memory.
// Finds new position for each element and transfers it to the new slots.
// Returns the total probe length.
// Each ProbedItem carries the element's h1 (probe start), h2 (control byte
// value) and the index of the slot it came from in the old table.
template <typename ProbedItem>
ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertImpl(
    CommonFields& c, const PolicyFunctions& __restrict policy,
    const ProbedItem* start, const ProbedItem* end, void* old_slots) {
  const size_t new_capacity = c.capacity();

  void* new_slots = c.slot_array();
  ctrl_t* new_ctrl = c.control();
  size_t total_probe_length = 0;

  const size_t slot_size = policy.slot_size;
  auto transfer_n = policy.transfer_n;

  for (; start < end; ++start) {
    // Probe from the stored h1 — no rehash of the element is needed.
    const FindInfo target = find_first_non_full_from_h1(
        new_ctrl, static_cast<size_t>(start->h1), new_capacity);
    total_probe_length += target.probe_length;
    const size_t old_index = static_cast<size_t>(start->source_offset);
    const size_t new_i = target.offset;
    // Old indexes come from the previous (half-sized) table.
    ABSL_SWISSTABLE_ASSERT(old_index < new_capacity / 2);
    ABSL_SWISSTABLE_ASSERT(new_i < new_capacity);
    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_i]));
    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
    SanitizerUnpoisonMemoryRegion(dst_slot, slot_size);
    transfer_n(&c, dst_slot, src_slot, 1);
    SetCtrlInLargeTable(c, new_i, static_cast<h2_t>(start->h2), slot_size);
  }
  return total_probe_length;
}
raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned int, 32ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned int, 32ul> const*, absl::container_internal::ProbedItemImpl<unsigned int, 32ul> const*, void*)
Line
Count
Source
1074
28.4k
    const ProbedItem* start, const ProbedItem* end, void* old_slots) {
1075
28.4k
  const size_t new_capacity = c.capacity();
1076
1077
28.4k
  void* new_slots = c.slot_array();
1078
28.4k
  ctrl_t* new_ctrl = c.control();
1079
28.4k
  size_t total_probe_length = 0;
1080
1081
28.4k
  const size_t slot_size = policy.slot_size;
1082
28.4k
  auto transfer_n = policy.transfer_n;
1083
1084
99.2k
  for (; start < end; ++start) {
1085
70.7k
    const FindInfo target = find_first_non_full_from_h1(
1086
70.7k
        new_ctrl, static_cast<size_t>(start->h1), new_capacity);
1087
70.7k
    total_probe_length += target.probe_length;
1088
70.7k
    const size_t old_index = static_cast<size_t>(start->source_offset);
1089
70.7k
    const size_t new_i = target.offset;
1090
70.7k
    ABSL_SWISSTABLE_ASSERT(old_index < new_capacity / 2);
1091
70.7k
    ABSL_SWISSTABLE_ASSERT(new_i < new_capacity);
1092
70.7k
    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[new_i]));
1093
70.7k
    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
1094
70.7k
    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
1095
70.7k
    SanitizerUnpoisonMemoryRegion(dst_slot, slot_size);
1096
70.7k
    transfer_n(&c, dst_slot, src_slot, 1);
1097
70.7k
    SetCtrlInLargeTable(c, new_i, static_cast<h2_t>(start->h2), slot_size);
1098
70.7k
  }
1099
28.4k
  return total_probe_length;
1100
28.4k
}
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned long, 64ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned long, 64ul> const*, absl::container_internal::ProbedItemImpl<unsigned long, 64ul> const*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::DecodeAndInsertImpl<absl::container_internal::ProbedItemImpl<unsigned long, 122ul> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ProbedItemImpl<unsigned long, 122ul> const*, absl::container_internal::ProbedItemImpl<unsigned long, 122ul> const*, void*)
1101
1102
// Sentinel value for the start of marked elements.
// Signals that there are no marked elements.
// All bits set (the maximum size_t), which can never be a valid index.
constexpr size_t kNoMarkedElementsSentinel = ~size_t{};
1105
1106
// Process probed elements that did not fit into available buffers.
// We marked them in control bytes as kSentinel.
// Hash recomputation and full probing is done here.
// This use case should be extremely rare.
// Scans old control bytes from `start` and re-inserts every slot whose
// control byte is kSentinel; returns the total probe length used.
ABSL_ATTRIBUTE_NOINLINE size_t ProcessProbedMarkedElements(
    CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl,
    void* old_slots, size_t start) {
  // `c` already has the grown capacity; the old table is one capacity back.
  size_t old_capacity = PreviousCapacity(c.capacity());
  const size_t slot_size = policy.slot_size;
  void* new_slots = c.slot_array();
  size_t total_probe_length = 0;
  const void* hash_fn = policy.hash_fn(c);
  auto hash_slot = policy.hash_slot;
  auto transfer_n = policy.transfer_n;
  const size_t seed = c.seed().seed();
  for (size_t old_index = start; old_index < old_capacity; ++old_index) {
    if (old_ctrl[old_index] != ctrl_t::kSentinel) {
      continue;
    }
    // Marked element: recompute its hash and do a full probe into the new
    // table.
    void* src_slot = SlotAddress(old_slots, old_index, slot_size);
    const size_t hash = hash_slot(hash_fn, src_slot, seed);
    const FindInfo target = find_first_non_full(c, hash);
    total_probe_length += target.probe_length;
    const size_t new_i = target.offset;
    void* dst_slot = SlotAddress(new_slots, new_i, slot_size);
    SetCtrlInLargeTable(c, new_i, H2(hash), slot_size);
    transfer_n(&c, dst_slot, src_slot, 1);
  }
  return total_probe_length;
}
1136
1137
// The largest old capacity for which it is guaranteed that all probed
// elements fit in ProbedItemEncoder's local buffer.
// For such tables, `encode_probed_element` is trivial.
constexpr size_t kMaxLocalBufferOldCapacity =
    kProbedElementsBufferSize / sizeof(ProbedItem4Bytes) - 1;
// The derived value must itself be a valid swisstable capacity.
static_assert(IsValidCapacity(kMaxLocalBufferOldCapacity));
constexpr size_t kMaxLocalBufferNewCapacity =
    NextCapacity(kMaxLocalBufferOldCapacity);
// 4-byte probed items must be usable for the next capacity and for one
// further growth step beyond it.
static_assert(kMaxLocalBufferNewCapacity <= ProbedItem4Bytes::kMaxNewCapacity);
static_assert(NextCapacity(kMaxLocalBufferNewCapacity) <=
              ProbedItem4Bytes::kMaxNewCapacity);
1148
1149
// Initializes mirrored control bytes after
// transfer_unprobed_elements_to_next_capacity.
// Clones the Group::kWidth bytes preceding the sentinel position to the
// mirrored region past the end, then writes the sentinel itself.
void InitializeMirroredControlBytes(ctrl_t* new_ctrl, size_t new_capacity) {
  std::memcpy(new_ctrl + new_capacity,
              // We own GrowthInfo just before control bytes. So it is ok
              // to read one byte from it.
              new_ctrl - 1, Group::kWidth);
  // Overwrites the first byte copied by the memcpy above, so it must come
  // after it.
  new_ctrl[new_capacity] = ctrl_t::kSentinel;
}
1158
1159
// Encodes probed elements into available memory.
1160
// At first, a local (on stack) buffer is used. The size of the buffer is
1161
// kProbedElementsBufferSize bytes.
1162
// When the local buffer is full, we switch to `control_` buffer. We are allowed
1163
// to overwrite `control_` buffer till the `source_offset` byte. In case we have
1164
// no space in `control_` buffer, we fallback to a naive algorithm for all the
1165
// rest of the probed elements. We mark elements as kSentinel in control bytes
1166
// and later process them fully. See ProcessMarkedElements for details. It
1167
// should be extremely rare.
1168
template <typename ProbedItemType,
1169
          // If true, we only use the local buffer and never switch to the
1170
          // control buffer.
1171
          bool kGuaranteedFitToBuffer = false>
1172
class ProbedItemEncoder {
1173
 public:
1174
  using ProbedItem = ProbedItemType;
1175
92.4k
  explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Line
Count
Source
1175
77.7k
  explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Line
Count
Source
1175
14.7k
  explicit ProbedItemEncoder(ctrl_t* control) : control_(control) {}
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::ProbedItemEncoder(absl::container_internal::ctrl_t*)
1176
1177
  // Encode item into the best available location.
1178
70.7k
  void EncodeItem(ProbedItem item) {
1179
70.7k
    if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
1180
0
      return ProcessEncodeWithOverflow(item);
1181
0
    }
1182
70.7k
    ABSL_SWISSTABLE_ASSERT(pos_ < end_);
1183
70.7k
    *pos_ = item;
1184
70.7k
    ++pos_;
1185
70.7k
  }
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Line
Count
Source
1178
44.3k
  void EncodeItem(ProbedItem item) {
1179
44.3k
    if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
1180
0
      return ProcessEncodeWithOverflow(item);
1181
0
    }
1182
44.3k
    ABSL_SWISSTABLE_ASSERT(pos_ < end_);
1183
44.3k
    *pos_ = item;
1184
44.3k
    ++pos_;
1185
44.3k
  }
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Line
Count
Source
1178
26.4k
  void EncodeItem(ProbedItem item) {
1179
26.4k
    if (ABSL_PREDICT_FALSE(!kGuaranteedFitToBuffer && pos_ >= end_)) {
1180
0
      return ProcessEncodeWithOverflow(item);
1181
0
    }
1182
26.4k
    ABSL_SWISSTABLE_ASSERT(pos_ < end_);
1183
26.4k
    *pos_ = item;
1184
26.4k
    ++pos_;
1185
26.4k
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned long, 64ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::EncodeItem(absl::container_internal::ProbedItemImpl<unsigned long, 122ul>)
1186
1187
  // Decodes information about probed elements from all available sources.
1188
  // Finds new position for each element and transfers it to the new slots.
1189
  // Returns the total probe length.
1190
  size_t DecodeAndInsertToTable(CommonFields& common,
1191
                                const PolicyFunctions& __restrict policy,
1192
92.4k
                                void* old_slots) const {
1193
92.4k
    if (pos_ == buffer_) {
1194
64.0k
      return 0;
1195
64.0k
    }
1196
28.4k
    if constexpr (kGuaranteedFitToBuffer) {
1197
21.0k
      return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
1198
21.0k
    }
1199
0
    size_t total_probe_length = DecodeAndInsertImpl(
1200
28.4k
        common, policy, buffer_,
1201
28.4k
        local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
1202
28.4k
    if (!local_buffer_full_) {
1203
7.41k
      return total_probe_length;
1204
7.41k
    }
1205
21.0k
    total_probe_length +=
1206
21.0k
        DecodeAndInsertToTableOverflow(common, policy, old_slots);
1207
21.0k
    return total_probe_length;
1208
28.4k
  }
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Line
Count
Source
1192
77.7k
                                void* old_slots) const {
1193
77.7k
    if (pos_ == buffer_) {
1194
56.7k
      return 0;
1195
56.7k
    }
1196
21.0k
    if constexpr (kGuaranteedFitToBuffer) {
1197
21.0k
      return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
1198
21.0k
    }
1199
0
    size_t total_probe_length = DecodeAndInsertImpl(
1200
21.0k
        common, policy, buffer_,
1201
21.0k
        local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
1202
21.0k
    if (!local_buffer_full_) {
1203
0
      return total_probe_length;
1204
0
    }
1205
21.0k
    total_probe_length +=
1206
21.0k
        DecodeAndInsertToTableOverflow(common, policy, old_slots);
1207
21.0k
    return total_probe_length;
1208
21.0k
  }
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Line
Count
Source
1192
14.7k
                                void* old_slots) const {
1193
14.7k
    if (pos_ == buffer_) {
1194
7.29k
      return 0;
1195
7.29k
    }
1196
    if constexpr (kGuaranteedFitToBuffer) {
1197
      return DecodeAndInsertImpl(common, policy, buffer_, pos_, old_slots);
1198
    }
1199
7.41k
    size_t total_probe_length = DecodeAndInsertImpl(
1200
7.41k
        common, policy, buffer_,
1201
7.41k
        local_buffer_full_ ? buffer_ + kBufferSize : pos_, old_slots);
1202
7.41k
    if (!local_buffer_full_) {
1203
7.41k
      return total_probe_length;
1204
7.41k
    }
1205
0
    total_probe_length +=
1206
0
        DecodeAndInsertToTableOverflow(common, policy, old_slots);
1207
0
    return total_probe_length;
1208
7.41k
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::DecodeAndInsertToTable(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
1209
1210
 private:
1211
0
  static ProbedItem* AlignToNextItem(void* ptr) {
1212
0
    return reinterpret_cast<ProbedItem*>(AlignUpTo(
1213
0
        reinterpret_cast<uintptr_t>(ptr), alignof(ProbedItem)));
1214
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::AlignToNextItem(void*)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::AlignToNextItem(void*)
1215
1216
0
  ProbedItem* OverflowBufferStart() const {
1217
    // We reuse GrowthInfo memory as well.
1218
0
    return AlignToNextItem(control_ - ControlOffset(/*has_infoz=*/false));
1219
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::OverflowBufferStart() const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::OverflowBufferStart() const
1220
1221
  // Encodes item when previously allocated buffer is full.
1222
  // At first that happens when local buffer is full.
1223
  // We switch from the local buffer to the control buffer.
1224
  // Every time this function is called, the available buffer is extended till
1225
  // `item.source_offset` byte in the control buffer.
1226
  // After the buffer is extended, this function wouldn't be called till the
1227
  // buffer is exhausted.
1228
  //
1229
  // If there's no space in the control buffer, we fallback to naive algorithm
1230
  // and mark probed elements as kSentinel in the control buffer. In this case,
1231
  // we will call this function for every subsequent probed element.
1232
0
  ABSL_ATTRIBUTE_NOINLINE void ProcessEncodeWithOverflow(ProbedItem item) {
1233
0
    if (!local_buffer_full_) {
1234
0
      local_buffer_full_ = true;
1235
0
      pos_ = OverflowBufferStart();
1236
0
    }
1237
0
    const size_t source_offset = static_cast<size_t>(item.source_offset);
1238
    // We are in fallback mode so we can't reuse control buffer anymore.
1239
    // Probed elements are marked as kSentinel in the control buffer.
1240
0
    if (ABSL_PREDICT_FALSE(marked_elements_starting_position_ !=
1241
0
                           kNoMarkedElementsSentinel)) {
1242
0
      control_[source_offset] = ctrl_t::kSentinel;
1243
0
      return;
1244
0
    }
1245
    // Refresh the end pointer to the new available position.
1246
    // Invariant: if pos < end, then we have at least sizeof(ProbedItem) bytes
1247
    // to write.
1248
0
    end_ = control_ + source_offset + 1 - sizeof(ProbedItem);
1249
0
    if (ABSL_PREDICT_TRUE(pos_ < end_)) {
1250
0
      *pos_ = item;
1251
0
      ++pos_;
1252
0
      return;
1253
0
    }
1254
0
    control_[source_offset] = ctrl_t::kSentinel;
1255
0
    marked_elements_starting_position_ = source_offset;
1256
    // Now we will always fall down to `ProcessEncodeWithOverflow`.
1257
0
    ABSL_SWISSTABLE_ASSERT(pos_ >= end_);
1258
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned long, 64ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned long, 122ul>)
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::ProcessEncodeWithOverflow(absl::container_internal::ProbedItemImpl<unsigned int, 32ul>)
1259
1260
  // Decodes information about probed elements from control buffer and processes
1261
  // marked elements.
1262
  // Finds new position for each element and transfers it to the new slots.
1263
  // Returns the total probe length.
1264
  ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertToTableOverflow(
1265
      CommonFields& common, const PolicyFunctions& __restrict policy,
1266
0
      void* old_slots) const {
1267
0
    ABSL_SWISSTABLE_ASSERT(local_buffer_full_ &&
1268
0
                           "must not be called when local buffer is not full");
1269
0
    size_t total_probe_length = DecodeAndInsertImpl(
1270
0
        common, policy, OverflowBufferStart(), pos_, old_slots);
1271
0
    if (ABSL_PREDICT_TRUE(marked_elements_starting_position_ ==
1272
0
                          kNoMarkedElementsSentinel)) {
1273
0
      return total_probe_length;
1274
0
    }
1275
0
    total_probe_length +=
1276
0
        ProcessProbedMarkedElements(common, policy, control_, old_slots,
1277
0
                                    marked_elements_starting_position_);
1278
0
    return total_probe_length;
1279
0
  }
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true>::DecodeAndInsertToTableOverflow(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, void*) const
1280
1281
  static constexpr size_t kBufferSize =
1282
      kProbedElementsBufferSize / sizeof(ProbedItem);
1283
  ProbedItem buffer_[kBufferSize];
1284
  // If local_buffer_full_ is false, then pos_/end_ are in the local buffer,
1285
  // otherwise, they're in the overflow buffer.
1286
  ProbedItem* pos_ = buffer_;
1287
  const void* end_ = buffer_ + kBufferSize;
1288
  ctrl_t* const control_;
1289
  size_t marked_elements_starting_position_ = kNoMarkedElementsSentinel;
1290
  bool local_buffer_full_ = false;
1291
};
1292
1293
// Grows to next capacity with specified encoder type.
1294
// Encoder is used to store probed elements that are processed later.
1295
// Different encoder is used depending on the capacity of the table.
1296
// Returns total probe length.
1297
template <typename Encoder>
1298
size_t GrowToNextCapacity(CommonFields& common,
1299
                          const PolicyFunctions& __restrict policy,
1300
92.4k
                          ctrl_t* old_ctrl, void* old_slots) {
1301
92.4k
  using ProbedItem = typename Encoder::ProbedItem;
1302
92.4k
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
1303
92.4k
  Encoder encoder(old_ctrl);
1304
92.4k
  policy.transfer_unprobed_elements_to_next_capacity(
1305
92.4k
      common, old_ctrl, old_slots, &encoder,
1306
92.4k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1307
70.7k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1308
70.7k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1309
70.7k
      });
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Line
Count
Source
1306
44.3k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1307
44.3k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1308
44.3k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1309
44.3k
      });
raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Line
Count
Source
1306
26.4k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1307
26.4k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1308
26.4k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1309
26.4k
      });
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
Unexecuted instantiation: raw_hash_set.cc:absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)::{lambda(void*, unsigned char, unsigned long, unsigned long)#1}::operator()(void*, unsigned char, unsigned long, unsigned long) const
1310
92.4k
  InitializeMirroredControlBytes(common.control(), common.capacity());
1311
92.4k
  return encoder.DecodeAndInsertToTable(common, policy, old_slots);
1312
92.4k
}
raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, true> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Line
Count
Source
1300
77.7k
                          ctrl_t* old_ctrl, void* old_slots) {
1301
77.7k
  using ProbedItem = typename Encoder::ProbedItem;
1302
77.7k
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
1303
77.7k
  Encoder encoder(old_ctrl);
1304
77.7k
  policy.transfer_unprobed_elements_to_next_capacity(
1305
77.7k
      common, old_ctrl, old_slots, &encoder,
1306
77.7k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1307
77.7k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1308
77.7k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1309
77.7k
      });
1310
77.7k
  InitializeMirroredControlBytes(common.control(), common.capacity());
1311
77.7k
  return encoder.DecodeAndInsertToTable(common, policy, old_slots);
1312
77.7k
}
raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned int, 32ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Line
Count
Source
1300
14.7k
                          ctrl_t* old_ctrl, void* old_slots) {
1301
14.7k
  using ProbedItem = typename Encoder::ProbedItem;
1302
14.7k
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity);
1303
14.7k
  Encoder encoder(old_ctrl);
1304
14.7k
  policy.transfer_unprobed_elements_to_next_capacity(
1305
14.7k
      common, old_ctrl, old_slots, &encoder,
1306
14.7k
      [](void* probed_storage, h2_t h2, size_t source_offset, size_t h1) {
1307
14.7k
        auto encoder_ptr = static_cast<Encoder*>(probed_storage);
1308
14.7k
        encoder_ptr->EncodeItem(ProbedItem(h2, source_offset, h1));
1309
14.7k
      });
1310
14.7k
  InitializeMirroredControlBytes(common.control(), common.capacity());
1311
14.7k
  return encoder.DecodeAndInsertToTable(common, policy, old_slots);
1312
14.7k
}
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 64ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
Unexecuted instantiation: raw_hash_set.cc:unsigned long absl::container_internal::(anonymous namespace)::GrowToNextCapacity<absl::container_internal::(anonymous namespace)::ProbedItemEncoder<absl::container_internal::ProbedItemImpl<unsigned long, 122ul>, false> >(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::container_internal::ctrl_t*, void*)
1313
1314
// Grows to next capacity for relatively small tables so that even if all
1315
// elements are probed, we don't need to overflow the local buffer.
1316
// Returns total probe length.
1317
size_t GrowToNextCapacityThatFitsInLocalBuffer(
1318
    CommonFields& common, const PolicyFunctions& __restrict policy,
1319
77.7k
    ctrl_t* old_ctrl, void* old_slots) {
1320
77.7k
  ABSL_SWISSTABLE_ASSERT(common.capacity() <= kMaxLocalBufferNewCapacity);
1321
77.7k
  return GrowToNextCapacity<
1322
77.7k
      ProbedItemEncoder<ProbedItem4Bytes, /*kGuaranteedFitToBuffer=*/true>>(
1323
77.7k
      common, policy, old_ctrl, old_slots);
1324
77.7k
}
1325
1326
// Grows to next capacity with different encodings. Returns total probe length.
1327
// These functions are useful to simplify profile analysis.
1328
size_t GrowToNextCapacity4BytesEncoder(CommonFields& common,
1329
                                       const PolicyFunctions& __restrict policy,
1330
14.7k
                                       ctrl_t* old_ctrl, void* old_slots) {
1331
14.7k
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem4Bytes>>(
1332
14.7k
      common, policy, old_ctrl, old_slots);
1333
14.7k
}
1334
size_t GrowToNextCapacity8BytesEncoder(CommonFields& common,
1335
                                       const PolicyFunctions& __restrict policy,
1336
0
                                       ctrl_t* old_ctrl, void* old_slots) {
1337
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem8Bytes>>(
1338
0
      common, policy, old_ctrl, old_slots);
1339
0
}
1340
size_t GrowToNextCapacity16BytesEncoder(
1341
    CommonFields& common, const PolicyFunctions& __restrict policy,
1342
0
    ctrl_t* old_ctrl, void* old_slots) {
1343
0
  return GrowToNextCapacity<ProbedItemEncoder<ProbedItem16Bytes>>(
1344
0
      common, policy, old_ctrl, old_slots);
1345
0
}
1346
1347
// Grows to next capacity for tables with relatively large capacity so that we
1348
// can't guarantee that all probed elements fit in the local buffer. Returns
1349
// total probe length.
1350
size_t GrowToNextCapacityOverflowLocalBuffer(
1351
    CommonFields& common, const PolicyFunctions& __restrict policy,
1352
14.7k
    ctrl_t* old_ctrl, void* old_slots) {
1353
14.7k
  const size_t new_capacity = common.capacity();
1354
14.7k
  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem4Bytes::kMaxNewCapacity)) {
1355
14.7k
    return GrowToNextCapacity4BytesEncoder(common, policy, old_ctrl, old_slots);
1356
14.7k
  }
1357
0
  if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem8Bytes::kMaxNewCapacity)) {
1358
0
    return GrowToNextCapacity8BytesEncoder(common, policy, old_ctrl, old_slots);
1359
0
  }
1360
  // 16 bytes encoding supports the maximum swisstable capacity.
1361
0
  return GrowToNextCapacity16BytesEncoder(common, policy, old_ctrl, old_slots);
1362
0
}
1363
1364
// Dispatches to the appropriate `GrowToNextCapacity*` function based on the
1365
// capacity of the table. Returns total probe length.
1366
ABSL_ATTRIBUTE_NOINLINE
1367
size_t GrowToNextCapacityDispatch(CommonFields& common,
1368
                                  const PolicyFunctions& __restrict policy,
1369
92.4k
                                  ctrl_t* old_ctrl, void* old_slots) {
1370
92.4k
  const size_t new_capacity = common.capacity();
1371
92.4k
  if (ABSL_PREDICT_TRUE(new_capacity <= kMaxLocalBufferNewCapacity)) {
1372
77.7k
    return GrowToNextCapacityThatFitsInLocalBuffer(common, policy, old_ctrl,
1373
77.7k
                                                   old_slots);
1374
77.7k
  } else {
1375
14.7k
    return GrowToNextCapacityOverflowLocalBuffer(common, policy, old_ctrl,
1376
14.7k
                                                 old_slots);
1377
14.7k
  }
1378
92.4k
}
1379
1380
void IncrementSmallSizeNonSoo(CommonFields& common,
1381
0
                              const PolicyFunctions& __restrict policy) {
1382
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1383
0
  common.increment_size();
1384
0
  SanitizerUnpoisonMemoryRegion(common.slot_array(), policy.slot_size);
1385
0
}
1386
1387
void IncrementSmallSize(CommonFields& common,
1388
0
                        const PolicyFunctions& __restrict policy) {
1389
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1390
0
  if (policy.soo_enabled) {
1391
0
    common.set_full_soo();
1392
0
  } else {
1393
0
    IncrementSmallSizeNonSoo(common, policy);
1394
0
  }
1395
0
}
1396
1397
std::pair<ctrl_t*, void*> Grow1To3AndPrepareInsert(
1398
    CommonFields& common, const PolicyFunctions& __restrict policy,
1399
0
    absl::FunctionRef<size_t(size_t)> get_hash) {
1400
  // TODO(b/413062340): Refactor to reuse more code with
1401
  // GrowSooTableToNextCapacityAndPrepareInsert.
1402
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() == 1);
1403
0
  ABSL_SWISSTABLE_ASSERT(!common.empty());
1404
0
  ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
1405
0
  constexpr size_t kOldCapacity = 1;
1406
0
  constexpr size_t kNewCapacity = NextCapacity(kOldCapacity);
1407
0
  ctrl_t* old_ctrl = common.control();
1408
0
  void* old_slots = common.slot_array();
1409
1410
0
  const size_t slot_size = policy.slot_size;
1411
0
  const size_t slot_align = policy.slot_align;
1412
0
  void* alloc = policy.get_char_alloc(common);
1413
0
  HashtablezInfoHandle infoz = common.infoz();
1414
0
  const bool has_infoz = infoz.IsSampled();
1415
0
  common.set_capacity(kNewCapacity);
1416
1417
0
  const auto [new_ctrl, new_slots] =
1418
0
      AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
1419
0
  common.set_control(new_ctrl);
1420
0
  common.set_slots(new_slots);
1421
0
  SanitizerPoisonMemoryRegion(new_slots, kNewCapacity * slot_size);
1422
1423
0
  if (ABSL_PREDICT_TRUE(!has_infoz)) {
1424
    // When we're sampled, we already have a seed.
1425
0
    common.generate_new_seed(/*has_infoz=*/false);
1426
0
  }
1427
0
  const size_t new_hash = get_hash(common.seed().seed());
1428
0
  h2_t new_h2 = H2(new_hash);
1429
0
  size_t orig_hash =
1430
0
      policy.hash_slot(policy.hash_fn(common), old_slots, common.seed().seed());
1431
0
  size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1432
0
  InitializeThreeElementsControlBytes(H2(orig_hash), new_h2, offset, new_ctrl);
1433
1434
0
  void* old_element_target = NextSlot(new_slots, slot_size);
1435
0
  SanitizerUnpoisonMemoryRegion(old_element_target, slot_size);
1436
0
  policy.transfer_n(&common, old_element_target, old_slots, 1);
1437
1438
0
  void* new_element_target_slot = SlotAddress(new_slots, offset, slot_size);
1439
0
  SanitizerUnpoisonMemoryRegion(new_element_target_slot, slot_size);
1440
1441
0
  policy.dealloc(alloc, kOldCapacity, old_ctrl, slot_size, slot_align,
1442
0
                 has_infoz);
1443
0
  PrepareInsertCommon(common);
1444
0
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1445
0
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1446
1447
0
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1448
0
    ReportSingleGroupTableGrowthToInfoz(common, infoz, new_hash);
1449
0
  }
1450
0
  return {new_ctrl + offset, new_element_target_slot};
1451
0
}
1452
1453
// Grows to next capacity and prepares insert for the given new_hash.
1454
// Returns the offset of the new element.
1455
size_t GrowToNextCapacityAndPrepareInsert(
1456
    CommonFields& common, const PolicyFunctions& __restrict policy,
1457
152k
    size_t new_hash) {
1458
152k
  ABSL_SWISSTABLE_ASSERT(common.growth_left() == 0);
1459
152k
  const size_t old_capacity = common.capacity();
1460
152k
  ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
1461
152k
  ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(old_capacity));
1462
1463
152k
  const size_t new_capacity = NextCapacity(old_capacity);
1464
152k
  ctrl_t* old_ctrl = common.control();
1465
152k
  void* old_slots = common.slot_array();
1466
1467
152k
  common.set_capacity(new_capacity);
1468
152k
  const size_t slot_size = policy.slot_size;
1469
152k
  const size_t slot_align = policy.slot_align;
1470
152k
  void* alloc = policy.get_char_alloc(common);
1471
152k
  HashtablezInfoHandle infoz = common.infoz();
1472
152k
  const bool has_infoz = infoz.IsSampled();
1473
1474
152k
  const auto [new_ctrl, new_slots] =
1475
152k
      AllocBackingArray(common, policy, new_capacity, has_infoz, alloc);
1476
152k
  common.set_control(new_ctrl);
1477
152k
  common.set_slots(new_slots);
1478
152k
  SanitizerPoisonMemoryRegion(new_slots, new_capacity * slot_size);
1479
1480
152k
  h2_t new_h2 = H2(new_hash);
1481
152k
  size_t total_probe_length = 0;
1482
152k
  FindInfo find_info;
1483
152k
  if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) {
1484
59.6k
    size_t offset;
1485
59.6k
    GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl,
1486
59.6k
                                           new_capacity);
1487
    // We put the new element either at the beginning or at the end of the
1488
    // table with approximately equal probability.
1489
59.6k
    offset =
1490
59.6k
        SingleGroupTableH1(new_hash, common.seed()) & 1 ? 0 : new_capacity - 1;
1491
1492
59.6k
    ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset]));
1493
59.6k
    SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size);
1494
59.6k
    find_info = FindInfo{offset, 0};
1495
    // Single group tables have all slots full on resize. So we can transfer
1496
    // all slots without checking the control bytes.
1497
59.6k
    ABSL_SWISSTABLE_ASSERT(common.size() == old_capacity);
1498
59.6k
    void* target = NextSlot(new_slots, slot_size);
1499
59.6k
    SanitizerUnpoisonMemoryRegion(target, old_capacity * slot_size);
1500
59.6k
    policy.transfer_n(&common, target, old_slots, old_capacity);
1501
92.4k
  } else {
1502
92.4k
    total_probe_length =
1503
92.4k
        GrowToNextCapacityDispatch(common, policy, old_ctrl, old_slots);
1504
92.4k
    find_info = find_first_non_full(common, new_hash);
1505
92.4k
    SetCtrlInLargeTable(common, find_info.offset, new_h2, policy.slot_size);
1506
92.4k
  }
1507
152k
  ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity());
1508
152k
  (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align,
1509
152k
                    has_infoz);
1510
152k
  PrepareInsertCommon(common);
1511
152k
  ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity,
1512
152k
                  common.size());
1513
1514
152k
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1515
0
    ReportGrowthToInfoz(common, infoz, new_hash, total_probe_length,
1516
0
                        find_info.probe_length);
1517
0
  }
1518
152k
  return find_info.offset;
1519
152k
}
1520
1521
}  // namespace
1522
1523
std::pair<ctrl_t*, void*> PrepareInsertSmallNonSoo(
1524
    CommonFields& common, const PolicyFunctions& __restrict policy,
1525
0
    absl::FunctionRef<size_t(size_t)> get_hash) {
1526
0
  ABSL_SWISSTABLE_ASSERT(common.is_small());
1527
0
  ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled);
1528
0
  if (common.capacity() == 1) {
1529
0
    if (common.empty()) {
1530
0
      IncrementSmallSizeNonSoo(common, policy);
1531
0
      return {SooControl(), common.slot_array()};
1532
0
    } else {
1533
0
      return Grow1To3AndPrepareInsert(common, policy, get_hash);
1534
0
    }
1535
0
  }
1536
1537
  // Growing from 0 to 1 capacity.
1538
0
  ABSL_SWISSTABLE_ASSERT(common.capacity() == 0);
1539
0
  constexpr size_t kNewCapacity = 1;
1540
1541
0
  common.set_capacity(kNewCapacity);
1542
0
  HashtablezInfoHandle infoz;
1543
0
  const bool should_sample =
1544
0
      policy.is_hashtablez_eligible && ShouldSampleNextTable();
1545
0
  if (ABSL_PREDICT_FALSE(should_sample)) {
1546
0
    infoz = ForcedTrySample(policy.slot_size, policy.key_size,
1547
0
                            policy.value_size, policy.soo_capacity());
1548
0
  }
1549
0
  const bool has_infoz = infoz.IsSampled();
1550
0
  void* alloc = policy.get_char_alloc(common);
1551
1552
0
  const auto [new_ctrl, new_slots] =
1553
0
      AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc);
1554
0
  common.set_control(new_ctrl);
1555
0
  common.set_slots(new_slots);
1556
1557
0
  static_assert(NextCapacity(0) == 1);
1558
0
  PrepareInsertCommon(common);
1559
1560
0
  if (ABSL_PREDICT_FALSE(has_infoz)) {
1561
0
    common.generate_new_seed(/*has_infoz=*/true);
1562
0
    ReportSingleGroupTableGrowthToInfoz(common, infoz,
1563
0
                                        get_hash(common.seed().seed()));
1564
0
  }
1565
0
  return {SooControl(), new_slots};
1566
0
}
1567
1568
namespace {
1569
1570
// Called whenever the table needs to vacate empty slots either by removing
1571
// tombstones via rehash or growth to next capacity.
1572
ABSL_ATTRIBUTE_NOINLINE
1573
size_t RehashOrGrowToNextCapacityAndPrepareInsert(
1574
    CommonFields& common, const PolicyFunctions& __restrict policy,
1575
0
    size_t new_hash) {
1576
0
  const size_t cap = common.capacity();
1577
0
  ABSL_ASSUME(cap > 0);
1578
0
  if (cap > Group::kWidth &&
1579
      // Do these calculations in 64-bit to avoid overflow.
1580
0
      common.size() * uint64_t{32} <= cap * uint64_t{25}) {
1581
    // Squash DELETED without growing if there is enough capacity.
1582
    //
1583
    // Rehash in place if the current size is <= 25/32 of capacity.
1584
    // Rationale for such a high factor: 1) DropDeletesWithoutResize() is
1585
    // faster than resize, and 2) it takes quite a bit of work to add
1586
    // tombstones.  In the worst case, seems to take approximately 4
1587
    // insert/erase pairs to create a single tombstone and so if we are
1588
    // rehashing because of tombstones, we can afford to rehash-in-place as
1589
    // long as we are reclaiming at least 1/8 the capacity without doing more
1590
    // than 2X the work.  (Where "work" is defined to be size() for rehashing
1591
    // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
1592
    // place is faster per operation than inserting or even doubling the size
1593
    // of the table, so we actually afford to reclaim even less space from a
1594
    // resize-in-place.  The decision is to rehash in place if we can reclaim
1595
    // at about 1/8th of the usable capacity (specifically 3/28 of the
1596
    // capacity) which means that the total cost of rehashing will be a small
1597
    // fraction of the total work.
1598
    //
1599
    // Here is output of an experiment using the BM_CacheInSteadyState
1600
    // benchmark running the old case (where we rehash-in-place only if we can
1601
    // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
1602
    // if we can recover 3/32*capacity).
1603
    //
1604
    // Note that although in the worst-case number of rehashes jumped up from
1605
    // 15 to 190, but the number of operations per second is almost the same.
1606
    //
1607
    // Abridged output of running BM_CacheInSteadyState benchmark from
1608
    // raw_hash_set_benchmark.   N is the number of insert/erase operations.
1609
    //
1610
    //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
1611
    // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
1612
    //  448 | 145284       0.44        18 | 140118       0.44        19
1613
    //  493 | 152546       0.24        11 | 151417       0.48        28
1614
    //  538 | 151439       0.26        11 | 151152       0.53        38
1615
    //  583 | 151765       0.28        11 | 150572       0.57        50
1616
    //  628 | 150241       0.31        11 | 150853       0.61        66
1617
    //  672 | 149602       0.33        12 | 150110       0.66        90
1618
    //  717 | 149998       0.35        12 | 149531       0.70       129
1619
    //  762 | 149836       0.37        13 | 148559       0.74       190
1620
    //  807 | 149736       0.39        14 | 151107       0.39        14
1621
    //  852 | 150204       0.42        15 | 151019       0.42        15
1622
0
    return DropDeletesWithoutResizeAndPrepareInsert(common, policy, new_hash);
1623
0
  } else {
1624
    // Otherwise grow the container.
1625
0
    return GrowToNextCapacityAndPrepareInsert(common, policy, new_hash);
1626
0
  }
1627
0
}
1628
1629
// Slow path for PrepareInsertLarge that is called when the table has deleted
1630
// slots or need to be resized or rehashed.
1631
size_t PrepareInsertLargeSlow(CommonFields& common,
1632
                              const PolicyFunctions& __restrict policy,
1633
152k
                              size_t hash) {
1634
152k
  const GrowthInfo growth_info = common.growth_info();
1635
152k
  ABSL_SWISSTABLE_ASSERT(!growth_info.HasNoDeletedAndGrowthLeft());
1636
152k
  if (ABSL_PREDICT_TRUE(growth_info.HasNoGrowthLeftAndNoDeleted())) {
1637
    // Table without deleted slots (>95% cases) that needs to be resized.
1638
152k
    ABSL_SWISSTABLE_ASSERT(growth_info.HasNoDeleted() &&
1639
152k
                           growth_info.GetGrowthLeft() == 0);
1640
152k
    return GrowToNextCapacityAndPrepareInsert(common, policy, hash);
1641
152k
  }
1642
0
  if (ABSL_PREDICT_FALSE(growth_info.HasNoGrowthLeftAssumingMayHaveDeleted())) {
1643
    // Table with deleted slots that needs to be rehashed or resized.
1644
0
    return RehashOrGrowToNextCapacityAndPrepareInsert(common, policy, hash);
1645
0
  }
1646
  // Table with deleted slots that has space for the inserting element.
1647
0
  FindInfo target = find_first_non_full(common, hash);
1648
0
  PrepareInsertCommon(common);
1649
0
  common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);
1650
0
  SetCtrlInLargeTable(common, target.offset, H2(hash), policy.slot_size);
1651
0
  common.infoz().RecordInsertMiss(hash, target.probe_length);
1652
0
  return target.offset;
1653
0
}
1654
1655
// Resizes empty non-allocated SOO table to NextCapacity(SooCapacity()),
1656
// forces the table to be sampled and prepares the insert.
1657
// SOO tables need to switch from SOO to heap in order to store the infoz.
1658
// Requires:
1659
//   1. `c.capacity() == SooCapacity()`.
1660
//   2. `c.empty()`.
1661
ABSL_ATTRIBUTE_NOINLINE size_t
1662
GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1663
    CommonFields& common, const PolicyFunctions& __restrict policy,
1664
0
    absl::FunctionRef<size_t(size_t)> get_hash) {
1665
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, NextCapacity(SooCapacity()),
1666
0
                                   /*force_infoz=*/true);
1667
0
  PrepareInsertCommon(common);
1668
0
  common.growth_info().OverwriteEmptyAsFull();
1669
0
  const size_t new_hash = get_hash(common.seed().seed());
1670
0
  SetCtrlInSingleGroupTable(common, SooSlotIndex(), H2(new_hash),
1671
0
                            policy.slot_size);
1672
0
  common.infoz().RecordInsertMiss(new_hash, /*distance_from_desired=*/0);
1673
0
  return SooSlotIndex();
1674
0
}
1675
1676
// Resizes empty non-allocated table to the capacity to fit new_size elements.
1677
// Requires:
1678
//   1. `c.capacity() == policy.soo_capacity()`.
1679
//   2. `c.empty()`.
1680
//   3. `new_size > policy.soo_capacity()`.
1681
// The table will be attempted to be sampled.
1682
void ReserveEmptyNonAllocatedTableToFitNewSize(
1683
    CommonFields& common, const PolicyFunctions& __restrict policy,
1684
0
    size_t new_size) {
1685
0
  ValidateMaxSize(new_size, policy.key_size, policy.slot_size);
1686
0
  ABSL_ASSUME(new_size > 0);
1687
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, SizeToCapacity(new_size),
1688
0
                                   /*force_infoz=*/false);
1689
  // This is after resize, to ensure that we have completed the allocation
1690
  // and have potentially sampled the hashtable.
1691
0
  common.infoz().RecordReservation(new_size);
1692
0
}
1693
1694
// Type erased version of raw_hash_set::reserve for tables that have an
1695
// allocated backing array.
1696
//
1697
// Requires:
1698
//   1. `c.capacity() > policy.soo_capacity()` OR `!c.empty()`.
1699
// Reserving already allocated tables is considered to be a rare case.
1700
ABSL_ATTRIBUTE_NOINLINE void ReserveAllocatedTable(
1701
    CommonFields& common, const PolicyFunctions& __restrict policy,
1702
0
    size_t new_size) {
1703
0
  const size_t cap = common.capacity();
1704
0
  ValidateMaxSize(new_size, policy.key_size, policy.slot_size);
1705
0
  ABSL_ASSUME(new_size > 0);
1706
0
  const size_t new_capacity = SizeToCapacity(new_size);
1707
0
  if (cap == policy.soo_capacity()) {
1708
0
    ABSL_SWISSTABLE_ASSERT(!common.empty());
1709
0
    ResizeFullSooTable(common, policy, new_capacity,
1710
0
                       ResizeFullSooTableSamplingMode::kNoSampling);
1711
0
  } else {
1712
0
    ABSL_SWISSTABLE_ASSERT(cap > policy.soo_capacity());
1713
    // TODO(b/382423690): consider using GrowToNextCapacity, when applicable.
1714
0
    ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
1715
0
  }
1716
0
  common.infoz().RecordReservation(new_size);
1717
0
}
1718
1719
// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO
1720
// table to be sampled. SOO tables need to switch from SOO to heap in order to
1721
// store the infoz. No-op if sampling is disabled or not possible.
1722
void GrowFullSooTableToNextCapacityForceSampling(
1723
0
    CommonFields& common, const PolicyFunctions& __restrict policy) {
1724
0
  AssertFullSoo(common, policy);
1725
0
  ResizeFullSooTable(
1726
0
      common, policy, NextCapacity(SooCapacity()),
1727
0
      ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled);
1728
0
}
1729
1730
}  // namespace
1731
1732
250k
void* GetRefForEmptyClass(CommonFields& common) {
1733
  // Empty base optimization typically make the empty base class address to be
1734
  // the same as the first address of the derived class object.
1735
  // But we generally assume that for empty classes we can return any valid
1736
  // pointer.
1737
250k
  return &common;
1738
250k
}
1739
1740
void ResizeAllocatedTableWithSeedChange(
1741
    CommonFields& common, const PolicyFunctions& __restrict policy,
1742
0
    size_t new_capacity) {
1743
0
  ResizeNonSooImpl<ResizeNonSooMode::kGuaranteedAllocated>(
1744
0
      common, policy, new_capacity, common.infoz());
1745
0
}
1746
1747
void ReserveEmptyNonAllocatedTableToFitBucketCount(
1748
    CommonFields& common, const PolicyFunctions& __restrict policy,
1749
0
    size_t bucket_count) {
1750
0
  size_t new_capacity = NormalizeCapacity(bucket_count);
1751
0
  ValidateMaxCapacity(new_capacity, policy.key_size, policy.slot_size);
1752
0
  ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
1753
0
                                   /*force_infoz=*/false);
1754
0
}
1755
1756
// Resizes a full SOO table to the NextCapacity(SooCapacity()).
1757
template <size_t SooSlotMemcpySize, bool TransferUsesMemcpy>
1758
size_t GrowSooTableToNextCapacityAndPrepareInsert(
1759
    CommonFields& common, const PolicyFunctions& __restrict policy,
1760
49.0k
    absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling) {
1761
49.0k
  AssertSoo(common, policy);
1762
49.0k
  if (ABSL_PREDICT_FALSE(force_sampling)) {
1763
    // The table is empty, it is only used for forced sampling of SOO tables.
1764
0
    return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1765
0
        common, policy, get_hash);
1766
0
  }
1767
49.0k
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
1768
49.0k
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
1769
49.0k
  const size_t slot_size = policy.slot_size;
1770
49.0k
  void* alloc = policy.get_char_alloc(common);
1771
49.0k
  common.set_capacity(kNewCapacity);
1772
1773
  // Since the table is not empty, it will not be sampled.
1774
  // The decision to sample was already made during the first insertion.
1775
  //
1776
  // We do not set control and slots in CommonFields yet to avoid overriding
1777
  // SOO data.
1778
49.0k
  const auto [new_ctrl, new_slots] = AllocBackingArray(
1779
49.0k
      common, policy, kNewCapacity, /*has_infoz=*/false, alloc);
1780
1781
49.0k
  PrepareInsertCommon(common);
1782
49.0k
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1783
49.0k
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1784
49.0k
  common.generate_new_seed(/*has_infoz=*/false);
1785
49.0k
  const h2_t soo_slot_h2 = H2(policy.hash_slot(
1786
49.0k
      policy.hash_fn(common), common.soo_data(), common.seed().seed()));
1787
49.0k
  const size_t new_hash = get_hash(common.seed().seed());
1788
1789
49.0k
  const size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1790
49.0k
  InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset,
1791
49.0k
                                      new_ctrl);
1792
1793
49.0k
  SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
1794
49.0k
  void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
1795
49.0k
  SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
1796
49.0k
  if constexpr (TransferUsesMemcpy) {
1797
    // Target slot is placed at index 1, but capacity is at
1798
    // minimum 3. So we are allowed to copy at least twice as much
1799
    // memory.
1800
49.0k
    static_assert(SooSlotIndex() == 1);
1801
49.0k
    static_assert(SooSlotMemcpySize > 0);
1802
49.0k
    static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
1803
49.0k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
1804
49.0k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
1805
49.0k
    void* next_slot = SlotAddress(target_slot, 1, slot_size);
1806
49.0k
    SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1807
49.0k
    std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
1808
49.0k
    SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1809
49.0k
  } else {
1810
0
    static_assert(SooSlotMemcpySize == 0);
1811
0
    policy.transfer_n(&common, target_slot, common.soo_data(), 1);
1812
0
  }
1813
0
  common.set_control(new_ctrl);
1814
49.0k
  common.set_slots(new_slots);
1815
1816
  // Full SOO table couldn't be sampled. If SOO table is sampled, it would
1817
  // have been resized to the next capacity.
1818
49.0k
  ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());
1819
49.0k
  SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
1820
49.0k
                                slot_size);
1821
49.0k
  return offset;
1822
49.0k
}
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<0ul, false>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<1ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Unexecuted instantiation: unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<4ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<8ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Line
Count
Source
1760
22.1k
    absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling) {
1761
22.1k
  AssertSoo(common, policy);
1762
22.1k
  if (ABSL_PREDICT_FALSE(force_sampling)) {
1763
    // The table is empty, it is only used for forced sampling of SOO tables.
1764
0
    return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1765
0
        common, policy, get_hash);
1766
0
  }
1767
22.1k
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
1768
22.1k
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
1769
22.1k
  const size_t slot_size = policy.slot_size;
1770
22.1k
  void* alloc = policy.get_char_alloc(common);
1771
22.1k
  common.set_capacity(kNewCapacity);
1772
1773
  // Since the table is not empty, it will not be sampled.
1774
  // The decision to sample was already made during the first insertion.
1775
  //
1776
  // We do not set control and slots in CommonFields yet to avoid overriding
1777
  // SOO data.
1778
22.1k
  const auto [new_ctrl, new_slots] = AllocBackingArray(
1779
22.1k
      common, policy, kNewCapacity, /*has_infoz=*/false, alloc);
1780
1781
22.1k
  PrepareInsertCommon(common);
1782
22.1k
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1783
22.1k
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1784
22.1k
  common.generate_new_seed(/*has_infoz=*/false);
1785
22.1k
  const h2_t soo_slot_h2 = H2(policy.hash_slot(
1786
22.1k
      policy.hash_fn(common), common.soo_data(), common.seed().seed()));
1787
22.1k
  const size_t new_hash = get_hash(common.seed().seed());
1788
1789
22.1k
  const size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1790
22.1k
  InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset,
1791
22.1k
                                      new_ctrl);
1792
1793
22.1k
  SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
1794
22.1k
  void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
1795
22.1k
  SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
1796
22.1k
  if constexpr (TransferUsesMemcpy) {
1797
    // Target slot is placed at index 1, but capacity is at
1798
    // minimum 3. So we are allowed to copy at least twice as much
1799
    // memory.
1800
22.1k
    static_assert(SooSlotIndex() == 1);
1801
22.1k
    static_assert(SooSlotMemcpySize > 0);
1802
22.1k
    static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
1803
22.1k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
1804
22.1k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
1805
22.1k
    void* next_slot = SlotAddress(target_slot, 1, slot_size);
1806
22.1k
    SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1807
22.1k
    std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
1808
22.1k
    SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1809
  } else {
1810
    static_assert(SooSlotMemcpySize == 0);
1811
    policy.transfer_n(&common, target_slot, common.soo_data(), 1);
1812
  }
1813
0
  common.set_control(new_ctrl);
1814
22.1k
  common.set_slots(new_slots);
1815
1816
  // Full SOO table couldn't be sampled. If SOO table is sampled, it would
1817
  // have been resized to the next capacity.
1818
22.1k
  ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());
1819
22.1k
  SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
1820
22.1k
                                slot_size);
1821
22.1k
  return offset;
1822
22.1k
}
unsigned long absl::container_internal::GrowSooTableToNextCapacityAndPrepareInsert<16ul, true>(absl::container_internal::CommonFields&, absl::container_internal::PolicyFunctions const&, absl::FunctionRef<unsigned long (unsigned long)>, bool)
Line
Count
Source
1760
26.9k
    absl::FunctionRef<size_t(size_t)> get_hash, bool force_sampling) {
1761
26.9k
  AssertSoo(common, policy);
1762
26.9k
  if (ABSL_PREDICT_FALSE(force_sampling)) {
1763
    // The table is empty, it is only used for forced sampling of SOO tables.
1764
0
    return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert(
1765
0
        common, policy, get_hash);
1766
0
  }
1767
26.9k
  ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity());
1768
26.9k
  static constexpr size_t kNewCapacity = NextCapacity(SooCapacity());
1769
26.9k
  const size_t slot_size = policy.slot_size;
1770
26.9k
  void* alloc = policy.get_char_alloc(common);
1771
26.9k
  common.set_capacity(kNewCapacity);
1772
1773
  // Since the table is not empty, it will not be sampled.
1774
  // The decision to sample was already made during the first insertion.
1775
  //
1776
  // We do not set control and slots in CommonFields yet to avoid overriding
1777
  // SOO data.
1778
26.9k
  const auto [new_ctrl, new_slots] = AllocBackingArray(
1779
26.9k
      common, policy, kNewCapacity, /*has_infoz=*/false, alloc);
1780
1781
26.9k
  PrepareInsertCommon(common);
1782
26.9k
  ABSL_SWISSTABLE_ASSERT(common.size() == 2);
1783
26.9k
  GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2);
1784
26.9k
  common.generate_new_seed(/*has_infoz=*/false);
1785
26.9k
  const h2_t soo_slot_h2 = H2(policy.hash_slot(
1786
26.9k
      policy.hash_fn(common), common.soo_data(), common.seed().seed()));
1787
26.9k
  const size_t new_hash = get_hash(common.seed().seed());
1788
1789
26.9k
  const size_t offset = Resize1To3NewOffset(new_hash, common.seed());
1790
26.9k
  InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset,
1791
26.9k
                                      new_ctrl);
1792
1793
26.9k
  SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity);
1794
26.9k
  void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size);
1795
26.9k
  SanitizerUnpoisonMemoryRegion(target_slot, slot_size);
1796
26.9k
  if constexpr (TransferUsesMemcpy) {
1797
    // Target slot is placed at index 1, but capacity is at
1798
    // minimum 3. So we are allowed to copy at least twice as much
1799
    // memory.
1800
26.9k
    static_assert(SooSlotIndex() == 1);
1801
26.9k
    static_assert(SooSlotMemcpySize > 0);
1802
26.9k
    static_assert(SooSlotMemcpySize <= MaxSooSlotSize());
1803
26.9k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize <= 2 * slot_size);
1804
26.9k
    ABSL_SWISSTABLE_ASSERT(SooSlotMemcpySize >= slot_size);
1805
26.9k
    void* next_slot = SlotAddress(target_slot, 1, slot_size);
1806
26.9k
    SanitizerUnpoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1807
26.9k
    std::memcpy(target_slot, common.soo_data(), SooSlotMemcpySize);
1808
26.9k
    SanitizerPoisonMemoryRegion(next_slot, SooSlotMemcpySize - slot_size);
1809
  } else {
1810
    static_assert(SooSlotMemcpySize == 0);
1811
    policy.transfer_n(&common, target_slot, common.soo_data(), 1);
1812
  }
1813
0
  common.set_control(new_ctrl);
1814
26.9k
  common.set_slots(new_slots);
1815
1816
  // Full SOO table couldn't be sampled. If SOO table is sampled, it would
1817
  // have been resized to the next capacity.
1818
26.9k
  ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled());
1819
26.9k
  SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size),
1820
26.9k
                                slot_size);
1821
26.9k
  return offset;
1822
26.9k
}
1823
1824
void Rehash(CommonFields& common, const PolicyFunctions& __restrict policy,
1825
0
            size_t n) {
1826
0
  const size_t cap = common.capacity();
1827
1828
0
  auto clear_backing_array = [&]() {
1829
0
    ClearBackingArray(common, policy, policy.get_char_alloc(common),
1830
0
                      /*reuse=*/false, policy.soo_enabled);
1831
0
  };
1832
1833
0
  const size_t slot_size = policy.slot_size;
1834
1835
0
  if (n == 0) {
1836
0
    if (cap <= policy.soo_capacity()) return;
1837
0
    if (common.empty()) {
1838
0
      clear_backing_array();
1839
0
      return;
1840
0
    }
1841
0
    if (common.size() <= policy.soo_capacity()) {
1842
      // When the table is already sampled, we keep it sampled.
1843
0
      if (common.infoz().IsSampled()) {
1844
0
        static constexpr size_t kInitialSampledCapacity =
1845
0
            NextCapacity(SooCapacity());
1846
0
        if (cap > kInitialSampledCapacity) {
1847
0
          ResizeAllocatedTableWithSeedChange(common, policy,
1848
0
                                             kInitialSampledCapacity);
1849
0
        }
1850
        // This asserts that we didn't lose sampling coverage in `resize`.
1851
0
        ABSL_SWISSTABLE_ASSERT(common.infoz().IsSampled());
1852
0
        return;
1853
0
      }
1854
0
      ABSL_SWISSTABLE_ASSERT(slot_size <= sizeof(HeapOrSoo));
1855
0
      ABSL_SWISSTABLE_ASSERT(policy.slot_align <= alignof(HeapOrSoo));
1856
0
      HeapOrSoo tmp_slot;
1857
0
      size_t begin_offset = FindFirstFullSlot(0, cap, common.control());
1858
0
      policy.transfer_n(
1859
0
          &common, &tmp_slot,
1860
0
          SlotAddress(common.slot_array(), begin_offset, slot_size), 1);
1861
0
      clear_backing_array();
1862
0
      policy.transfer_n(&common, common.soo_data(), &tmp_slot, 1);
1863
0
      common.set_full_soo();
1864
0
      return;
1865
0
    }
1866
0
  }
1867
1868
  // bitor is a faster way of doing `max` here. We will round up to the next
1869
  // power-of-2-minus-1, so bitor is good enough.
1870
0
  const size_t new_capacity =
1871
0
      NormalizeCapacity(n | SizeToCapacity(common.size()));
1872
0
  ValidateMaxCapacity(new_capacity, policy.key_size, policy.slot_size);
1873
  // n == 0 unconditionally rehashes as per the standard.
1874
0
  if (n == 0 || new_capacity > cap) {
1875
0
    if (cap == policy.soo_capacity()) {
1876
0
      if (common.empty()) {
1877
0
        ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity,
1878
0
                                         /*force_infoz=*/false);
1879
0
      } else {
1880
0
        ResizeFullSooTable(common, policy, new_capacity,
1881
0
                           ResizeFullSooTableSamplingMode::kNoSampling);
1882
0
      }
1883
0
    } else {
1884
0
      ResizeAllocatedTableWithSeedChange(common, policy, new_capacity);
1885
0
    }
1886
    // This is after resize, to ensure that we have completed the allocation
1887
    // and have potentially sampled the hashtable.
1888
0
    common.infoz().RecordReservation(n);
1889
0
  }
1890
0
}
1891
1892
// Copies all elements from `other` into `common`, which must be empty.
// `copy_fn(dst_slot, src_slot)` performs the element copy; the policy supplies
// slot size, SOO configuration, and hashing.
void Copy(CommonFields& common, const PolicyFunctions& __restrict policy,
          const CommonFields& other,
          absl::FunctionRef<void(void*, const void*)> copy_fn) {
  const size_t size = other.size();
  // Empty-to-empty copies are handled by the caller.
  ABSL_SWISSTABLE_ASSERT(size > 0);
  const size_t soo_capacity = policy.soo_capacity();
  const size_t slot_size = policy.slot_size;
  const bool soo_enabled = policy.soo_enabled;
  if (size == 1) {
    // Single-element fast path: copy directly into the SOO slot (or a
    // freshly reserved heap slot when SOO is disabled).
    if (!soo_enabled) ReserveTableToFitNewSize(common, policy, 1);
    IncrementSmallSize(common, policy);
    const size_t other_capacity = other.capacity();
    // Locate the source slot: SOO buffer, the single small-table slot, or
    // the first full slot of a larger allocated table.
    const void* other_slot =
        other_capacity <= soo_capacity ? other.soo_data()
        : other.is_small()
            ? other.slot_array()
            : SlotAddress(other.slot_array(),
                          FindFirstFullSlot(0, other_capacity, other.control()),
                          slot_size);
    copy_fn(soo_enabled ? common.soo_data() : common.slot_array(), other_slot);

    // SOO tables are normally invisible to hashtablez; grow to the next
    // capacity here when sampling selects this table.
    if (soo_enabled && policy.is_hashtablez_eligible &&
        ShouldSampleNextTable()) {
      GrowFullSooTableToNextCapacityForceSampling(common, policy);
    }
    return;
  }

  ReserveTableToFitNewSize(common, policy, size);
  auto infoz = common.infoz();
  ABSL_SWISSTABLE_ASSERT(other.capacity() > soo_capacity);
  const size_t cap = common.capacity();
  ABSL_SWISSTABLE_ASSERT(cap > soo_capacity);
  size_t offset = cap;
  // Hoist hash state out of the per-slot lambda.
  const void* hash_fn = policy.hash_fn(common);
  auto hasher = policy.hash_slot;
  const size_t seed = common.seed().seed();
  IterateOverFullSlotsImpl(
      other, slot_size, [&](const ctrl_t*, void* that_slot) {
        // The table is guaranteed to be empty, so we can do faster than
        // a full `insert`.
        const size_t hash = (*hasher)(hash_fn, that_slot, seed);
        FindInfo target = find_first_non_full(common, hash);
        infoz.RecordInsertMiss(hash, target.probe_length);
        offset = target.offset;
        SetCtrl(common, offset, H2(hash), slot_size);
        copy_fn(SlotAddress(common.slot_array(), offset, slot_size), that_slot);
        common.maybe_increment_generation_on_insert();
      });
  // Bulk-update size and growth bookkeeping once, instead of per element.
  common.increment_size(size);
  common.growth_info().OverwriteManyEmptyAsFull(size);
}
1944
1945
void ReserveTableToFitNewSize(CommonFields& common,
1946
                              const PolicyFunctions& __restrict policy,
1947
0
                              size_t new_size) {
1948
0
  common.reset_reserved_growth(new_size);
1949
0
  common.set_reservation_size(new_size);
1950
0
  ABSL_SWISSTABLE_ASSERT(new_size > policy.soo_capacity());
1951
0
  const size_t cap = common.capacity();
1952
0
  if (ABSL_PREDICT_TRUE(common.empty() && cap <= policy.soo_capacity())) {
1953
0
    return ReserveEmptyNonAllocatedTableToFitNewSize(common, policy, new_size);
1954
0
  }
1955
1956
0
  ABSL_SWISSTABLE_ASSERT(!common.empty() || cap > policy.soo_capacity());
1957
0
  ABSL_SWISSTABLE_ASSERT(cap > 0);
1958
0
  const size_t max_size_before_growth =
1959
0
      IsSmallCapacity(cap) ? cap : common.size() + common.growth_left();
1960
0
  if (new_size <= max_size_before_growth) {
1961
0
    return;
1962
0
  }
1963
0
  ReserveAllocatedTable(common, policy, new_size);
1964
0
}
1965
1966
namespace {
1967
size_t PrepareInsertLargeImpl(CommonFields& common,
1968
                              const PolicyFunctions& __restrict policy,
1969
                              size_t hash,
1970
                              Group::NonIterableBitMaskType mask_empty,
1971
8.28M
                              FindInfo target_group) {
1972
8.28M
  ABSL_SWISSTABLE_ASSERT(!common.is_small());
1973
8.28M
  const GrowthInfo growth_info = common.growth_info();
1974
  // When there are no deleted slots in the table
1975
  // and growth_left is positive, we can insert at the first
1976
  // empty slot in the probe sequence (target).
1977
8.28M
  if (ABSL_PREDICT_FALSE(!growth_info.HasNoDeletedAndGrowthLeft())) {
1978
152k
    return PrepareInsertLargeSlow(common, policy, hash);
1979
152k
  }
1980
8.13M
  PrepareInsertCommon(common);
1981
8.13M
  common.growth_info().OverwriteEmptyAsFull();
1982
8.13M
  target_group.offset += mask_empty.LowestBitSet();
1983
8.13M
  target_group.offset &= common.capacity();
1984
8.13M
  SetCtrl(common, target_group.offset, H2(hash), policy.slot_size);
1985
8.13M
  common.infoz().RecordInsertMiss(hash, target_group.probe_length);
1986
8.13M
  return target_group.offset;
1987
8.28M
}
1988
}  // namespace
1989
1990
// Prepares an insert into a non-small table when generations are disabled.
// Thin wrapper: the generations-enabled variant below must be used instead
// when SwisstableGenerationsEnabled().
size_t PrepareInsertLarge(CommonFields& common,
                          const PolicyFunctions& __restrict policy, size_t hash,
                          Group::NonIterableBitMaskType mask_empty,
                          FindInfo target_group) {
  // NOLINTNEXTLINE(misc-static-assert)
  ABSL_SWISSTABLE_ASSERT(!SwisstableGenerationsEnabled());
  return PrepareInsertLargeImpl(common, policy, hash, mask_empty, target_group);
}
1998
1999
// Generations-enabled variant of PrepareInsertLarge. May first move the table
// to a fresh heap allocation (to surface iterator-invalidation bugs); since
// that resize changes the seed, `recompute_hash` re-derives the hash from the
// new seed before the probe is redone.
size_t PrepareInsertLargeGenerationsEnabled(
    CommonFields& common, const PolicyFunctions& policy, size_t hash,
    Group::NonIterableBitMaskType mask_empty, FindInfo target_group,
    absl::FunctionRef<size_t(size_t)> recompute_hash) {
  // NOLINTNEXTLINE(misc-static-assert)
  ABSL_SWISSTABLE_ASSERT(SwisstableGenerationsEnabled());
  if (common.should_rehash_for_bug_detection_on_insert()) {
    // Move to a different heap allocation in order to detect bugs.
    // Keep the capacity if there is still room to grow; otherwise grow.
    const size_t cap = common.capacity();
    ResizeAllocatedTableWithSeedChange(
        common, policy, common.growth_left() > 0 ? cap : NextCapacity(cap));
    // The resize changed the seed, so the caller's hash and probe result are
    // stale; recompute both before falling through to the shared impl.
    hash = recompute_hash(common.seed().seed());
    std::tie(target_group, mask_empty) =
        find_first_non_full_group(common, hash);
  }
  return PrepareInsertLargeImpl(common, policy, hash, mask_empty, target_group);
}
2016
2017
namespace {
2018
// Returns true if the following is true
2019
// 1. OptimalMemcpySizeForSooSlotTransfer(left) >
2020
//    OptimalMemcpySizeForSooSlotTransfer(left - 1)
2021
// 2. OptimalMemcpySizeForSooSlotTransfer(left) are equal for all i in [left,
2022
// right].
2023
// This function is used to verify that we have all the possible template
2024
// instantiations for GrowFullSooTableToNextCapacity.
2025
// With this verification the problem may be detected at compile time instead of
2026
// link time.
2027
constexpr bool VerifyOptimalMemcpySizeForSooSlotTransferRange(size_t left,
2028
0
                                                              size_t right) {
2029
0
  size_t optimal_size_for_range = OptimalMemcpySizeForSooSlotTransfer(left);
2030
0
  if (optimal_size_for_range <= OptimalMemcpySizeForSooSlotTransfer(left - 1)) {
2031
0
    return false;
2032
0
  }
2033
0
  for (size_t i = left + 1; i <= right; ++i) {
2034
0
    if (OptimalMemcpySizeForSooSlotTransfer(i) != optimal_size_for_range) {
2035
0
      return false;
2036
0
    }
2037
0
  }
2038
0
  return true;
2039
0
}
2040
}  // namespace
2041
2042
// Extern template instantiation for inline function.
template size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index,
                                              size_t old_capacity,
                                              ctrl_t* new_ctrl,
                                              size_t new_capacity);

// We need to instantiate ALL possible template combinations because we define
// the function in the cc file.
// The static_asserts below (via VerifyOptimalMemcpySizeForSooSlotTransferRange)
// prove that each OptimalMemcpySizeForSooSlotTransfer bucket has exactly one
// instantiation, so a missing combination fails at compile time rather than
// at link time.
template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>(
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
    bool);
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
    OptimalMemcpySizeForSooSlotTransfer(1), true>(
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
    bool);

// Slot sizes 2..3 share one optimal memcpy size.
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(2, 3));
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
    OptimalMemcpySizeForSooSlotTransfer(3), true>(
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
    bool);

// Slot sizes 4..8 share one optimal memcpy size.
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(4, 8));
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
    OptimalMemcpySizeForSooSlotTransfer(8), true>(
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
    bool);

// On 32-bit targets the maximum SOO slot size is 8, so no further
// instantiations are needed; 64-bit targets additionally cover 9..16.
#if UINTPTR_MAX == UINT32_MAX
static_assert(MaxSooSlotSize() == 8);
#else
static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(9, 16));
template size_t GrowSooTableToNextCapacityAndPrepareInsert<
    OptimalMemcpySizeForSooSlotTransfer(16), true>(
    CommonFields&, const PolicyFunctions&, absl::FunctionRef<size_t(size_t)>,
    bool);
static_assert(MaxSooSlotSize() == 16);
#endif

// Instantiations for the default allocator used by the common container
// configurations.
template void* AllocateBackingArray<BackingArrayAlignment(alignof(size_t)),
                                    std::allocator<char>>(void* alloc,
                                                          size_t n);
template void DeallocateBackingArray<BackingArrayAlignment(alignof(size_t)),
                                     std::allocator<char>>(
    void* alloc, size_t capacity, ctrl_t* ctrl, size_t slot_size,
    size_t slot_align, bool had_infoz);
2088
2089
}  // namespace container_internal
2090
ABSL_NAMESPACE_END
2091
}  // namespace absl