Coverage Report

Created: 2025-10-09 07:07

/src/abseil-cpp/absl/container/internal/hashtable_control_bytes.h
Line
Count
Source
1
// Copyright 2025 The Abseil Authors
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//     https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// This file contains the implementation of the hashtable control bytes
16
// manipulation.
17
18
#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_
19
#define ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_
20
21
#include <cassert>
22
#include <cstddef>
23
#include <cstdint>
24
#include <type_traits>
25
26
#include "absl/base/config.h"
27
28
#ifdef ABSL_INTERNAL_HAVE_SSE2
29
#include <emmintrin.h>
30
#endif
31
32
#ifdef ABSL_INTERNAL_HAVE_SSSE3
33
#include <tmmintrin.h>
34
#endif
35
36
#ifdef _MSC_VER
37
#include <intrin.h>
38
#endif
39
40
#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
41
#include <arm_neon.h>
42
#endif
43
44
#include "absl/base/optimization.h"
45
#include "absl/numeric/bits.h"
46
#include "absl/base/internal/endian.h"
47
48
namespace absl {
49
ABSL_NAMESPACE_BEGIN
50
namespace container_internal {
51
52
#ifdef ABSL_SWISSTABLE_ASSERT
53
#error ABSL_SWISSTABLE_ASSERT cannot be directly set
54
#else
55
// We use this macro for assertions that users may see when the table is in an
56
// invalid state that sanitizers may help diagnose.
57
#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
58
0
  assert((CONDITION) && "Try enabling sanitizers.")
59
#endif
60
61
62
template <typename T>
63
8.77M
uint32_t TrailingZeros(T x) {
64
8.77M
  ABSL_ASSUME(x != 0);
65
8.77M
  return static_cast<uint32_t>(countr_zero(x));
66
8.77M
}
unsigned int absl::container_internal::TrailingZeros<unsigned short>(unsigned short)
Line
Count
Source
63
8.77M
uint32_t TrailingZeros(T x) {
64
8.77M
  ABSL_ASSUME(x != 0);
65
8.77M
  return static_cast<uint32_t>(countr_zero(x));
66
8.77M
}
Unexecuted instantiation: unsigned int absl::container_internal::TrailingZeros<unsigned long>(unsigned long)
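For reference, a standalone sketch (not part of the instrumented source) of what TrailingZeros computes: it maps a non-zero mask to the index of its lowest set bit. Shown here with C++20 std::countr_zero, which absl::countr_zero mirrors.

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint16_t mask = 0b0000'0000'0010'0100;  // bits 2 and 5 set
  assert(std::countr_zero(mask) == 2);    // lowest set bit -> candidate slot 2
  mask &= mask - 1;                       // clear the lowest set bit
  assert(std::countr_zero(mask) == 5);    // next candidate slot
}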
67
68
// 8-byte bitmask with the most significant bit set in every byte.
69
constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
70
// 8 kEmpty bytes, useful for small table initialization.
71
constexpr uint64_t k8EmptyBytes = kMsbs8Bytes;
72
73
// An abstract bitmask, such as that emitted by a SIMD instruction.
74
//
75
// Specifically, this type implements a simple bitset whose representation is
76
// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
77
// of abstract bits in the bitset, while `Shift` is the log-base-two of the
78
// width of an abstract bit in the representation.
79
// This mask provides operations for any number of real bits set in an abstract
80
// bit. To support iteration on top of that, the implementation must guarantee
81
// that no bit other than the most significant real bit is set in a set abstract bit.
82
template <class T, int SignificantBits, int Shift = 0>
83
class NonIterableBitMask {
84
 public:
85
80.5k
  explicit NonIterableBitMask(T mask) : mask_(mask) {}
absl::container_internal::NonIterableBitMask<unsigned short, 16, 0>::NonIterableBitMask(unsigned short)
Line
Count
Source
85
80.5k
  explicit NonIterableBitMask(T mask) : mask_(mask) {}
Unexecuted instantiation: absl::container_internal::NonIterableBitMask<unsigned long, 8, 3>::NonIterableBitMask(unsigned long)
86
87
80.5k
  explicit operator bool() const { return this->mask_ != 0; }
88
89
  // Returns the index of the lowest *abstract* bit set in `self`.
90
8.77M
  uint32_t LowestBitSet() const {
91
8.77M
    return container_internal::TrailingZeros(mask_) >> Shift;
92
8.77M
  }
absl::container_internal::NonIterableBitMask<unsigned short, 16, 0>::LowestBitSet() const
Line
Count
Source
90
8.77M
  uint32_t LowestBitSet() const {
91
8.77M
    return container_internal::TrailingZeros(mask_) >> Shift;
92
8.77M
  }
Unexecuted instantiation: absl::container_internal::NonIterableBitMask<unsigned long, 8, 3>::LowestBitSet() const
93
94
  // Returns the number of trailing zero *abstract* bits.
95
0
  uint32_t TrailingZeros() const {
96
0
    return container_internal::TrailingZeros(mask_) >> Shift;
97
0
  }
98
99
  // Returns the number of leading zero *abstract* bits.
100
0
  uint32_t LeadingZeros() const {
101
0
    constexpr int total_significant_bits = SignificantBits << Shift;
102
0
    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
103
0
    return static_cast<uint32_t>(
104
0
               countl_zero(static_cast<T>(mask_ << extra_bits))) >>
105
0
           Shift;
106
0
  }
107
108
  T mask_;
109
};
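A standalone sketch of LowestBitSet() for the Shift == 3 representation used below (illustrative only, with std::countr_zero standing in for absl::countr_zero): the abstract bit for slot i lives in the most significant bit of byte i, so the raw bit index is divided by 8.

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  // Same value as the BitMask<uint64_t, 8, 3> example below: abstract bits 2 and 3.
  uint64_t mask = 0x0000000080800000ULL;
  assert((std::countr_zero(mask) >> 3) == 2);  // LowestBitSet(): 23 >> 3 == 2
  mask &= mask - 1;                            // clear the lowest abstract bit
  assert((std::countr_zero(mask) >> 3) == 3);  // next abstract bit
}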
110
111
// A mask that can be iterated over.
112
//
113
// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
114
// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
115
// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
116
// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
117
// If NullifyBitsOnIteration is true (only allowed for Shift == 3),
118
// a non-zero abstract bit is allowed to have additional bits set
119
// (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
120
//
121
// For example:
122
//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
123
//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
124
template <class T, int SignificantBits, int Shift = 0,
125
          bool NullifyBitsOnIteration = false>
126
class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
127
  using Base = NonIterableBitMask<T, SignificantBits, Shift>;
128
  static_assert(std::is_unsigned<T>::value, "");
129
  static_assert(Shift == 0 || Shift == 3, "");
130
  static_assert(!NullifyBitsOnIteration || Shift == 3, "");
131
132
 public:
133
0
  explicit BitMask(T mask) : Base(mask) {
134
0
    if (Shift == 3 && !NullifyBitsOnIteration) {
135
0
      ABSL_SWISSTABLE_ASSERT(this->mask_ == (this->mask_ & kMsbs8Bytes));
136
0
    }
137
0
  }
Unexecuted instantiation: absl::container_internal::BitMask<unsigned short, 16, 0, false>::BitMask(unsigned short)
Unexecuted instantiation: absl::container_internal::BitMask<unsigned long, 8, 3, false>::BitMask(unsigned long)
138
  // BitMask is an iterator over the indices of its abstract bits.
139
  using value_type = int;
140
  using iterator = BitMask;
141
  using const_iterator = BitMask;
142
143
  BitMask& operator++() {
144
    if (Shift == 3 && NullifyBitsOnIteration) {
145
      this->mask_ &= kMsbs8Bytes;
146
    }
147
    this->mask_ &= (this->mask_ - 1);
148
    return *this;
149
  }
150
151
0
  uint32_t operator*() const { return Base::LowestBitSet(); }
Unexecuted instantiation: absl::container_internal::BitMask<unsigned long, 8, 3, false>::operator*() const
Unexecuted instantiation: absl::container_internal::BitMask<unsigned short, 16, 0, false>::operator*() const
152
153
0
  BitMask begin() const { return *this; }
Unexecuted instantiation: absl::container_internal::BitMask<unsigned long, 8, 3, false>::begin() const
Unexecuted instantiation: absl::container_internal::BitMask<unsigned short, 16, 0, false>::begin() const
154
0
  BitMask end() const { return BitMask(0); }
Unexecuted instantiation: absl::container_internal::BitMask<unsigned long, 8, 3, false>::end() const
Unexecuted instantiation: absl::container_internal::BitMask<unsigned short, 16, 0, false>::end() const
155
156
 private:
157
  friend bool operator==(const BitMask& a, const BitMask& b) {
158
    return a.mask_ == b.mask_;
159
  }
160
0
  friend bool operator!=(const BitMask& a, const BitMask& b) {
161
0
    return a.mask_ != b.mask_;
162
0
  }
Unexecuted instantiation: absl::container_internal::operator!=(absl::container_internal::BitMask<unsigned long, 8, 3, false> const&, absl::container_internal::BitMask<unsigned long, 8, 3, false> const&)
Unexecuted instantiation: absl::container_internal::operator!=(absl::container_internal::BitMask<unsigned short, 16, 0, false> const&, absl::container_internal::BitMask<unsigned short, 16, 0, false> const&)
163
};
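A minimal standalone analogue of the Shift == 0 iteration above (the SimpleBitMask name and structure are illustrative, not part of this header): each set bit's index is visited by repeatedly clearing the lowest set bit, exactly as operator++ does, with the mask acting as its own iterator.

#include <bit>
#include <cstdint>
#include <cstdio>

// Illustrative stripped-down analogue of BitMask<uint16_t, 16> (Shift == 0).
struct SimpleBitMask {
  uint16_t mask;
  SimpleBitMask begin() const { return *this; }
  SimpleBitMask end() const { return {0}; }
  uint32_t operator*() const { return static_cast<uint32_t>(std::countr_zero(mask)); }
  SimpleBitMask& operator++() { mask &= mask - 1; return *this; }  // drop lowest bit
  friend bool operator!=(SimpleBitMask a, SimpleBitMask b) { return a.mask != b.mask; }
};

int main() {
  for (uint32_t i : SimpleBitMask{0b101}) std::printf("%u ", i);  // prints "0 2 "
  std::printf("\n");
}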
164
165
using h2_t = uint8_t;
166
167
// The values here are selected for maximum performance. See the static asserts
168
// below for details.
169
170
// A `ctrl_t` is a single control byte, which can have one of four
171
// states: empty, deleted, full (which has an associated seven-bit h2_t value)
172
// and the sentinel. They have the following bit patterns:
173
//
174
//      empty: 1 0 0 0 0 0 0 0
175
//    deleted: 1 1 1 1 1 1 1 0
176
//       full: 0 h h h h h h h  // h represents the hash bits.
177
//   sentinel: 1 1 1 1 1 1 1 1
178
//
179
// These values are specifically tuned for SSE-flavored SIMD.
180
// The static_asserts below detail the source of these choices.
181
//
182
// We use an enum class so that when strict aliasing is enabled, the compiler
183
// knows ctrl_t doesn't alias other types.
184
enum class ctrl_t : int8_t {
185
  kEmpty = -128,   // 0b10000000
186
  kDeleted = -2,   // 0b11111110
187
  kSentinel = -1,  // 0b11111111
188
};
189
static_assert(
190
    (static_cast<int8_t>(ctrl_t::kEmpty) &
191
     static_cast<int8_t>(ctrl_t::kDeleted) &
192
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
193
    "Special markers need to have the MSB to make checking for them efficient");
194
static_assert(
195
    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
196
    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
197
    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
198
static_assert(
199
    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
200
    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
201
    "registers (pcmpeqd xmm, xmm)");
202
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
203
              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
204
              "existence efficient (psignb xmm, xmm)");
205
static_assert(
206
    (~static_cast<int8_t>(ctrl_t::kEmpty) &
207
     ~static_cast<int8_t>(ctrl_t::kDeleted) &
208
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
209
    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
210
    "shared by ctrl_t::kSentinel to make the scalar test for "
211
    "MaskEmptyOrDeleted() efficient");
212
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
213
              "ctrl_t::kDeleted must be -2 to make the implementation of "
214
              "ConvertSpecialToEmptyAndFullToDeleted efficient");
215
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
216
              "ctrl_t::kEmpty must be -128 to use saturated subtraction in"
217
              " ConvertSpecialToEmptyAndFullToDeleted");
218
219
// Helpers for checking the state of a control byte.
220
124k
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
221
8.92M
inline bool IsFull(ctrl_t c) {
222
  // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
223
  // is not a value in the enum. Both ways are equivalent, but this way makes
224
  // linters happier.
225
8.92M
  return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
226
8.92M
}
227
0
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
228
158k
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
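A standalone sketch of the classification these helpers and the static_asserts above rely on, using plain int8_t values with the same bit patterns (nothing here is part of the instrumented source):

#include <cassert>
#include <cstdint>

int main() {
  const int8_t kEmpty = -128, kDeleted = -2, kSentinel = -1;
  const int8_t full = 0x5a;  // 0b01011010: MSB clear, low 7 bits hold h2
  assert(full >= 0);                                    // IsFull: non-negative byte
  assert(kEmpty < kSentinel && kDeleted < kSentinel);   // IsEmptyOrDeleted holds
  assert((kEmpty & kDeleted & kSentinel & 0x80) != 0);  // all specials share the MSB
}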
229
230
#ifdef ABSL_INTERNAL_HAVE_SSE2
231
// Quick reference guide for intrinsics used below:
232
//
233
// * __m128i: An XMM (128-bit) word.
234
//
235
// * _mm_setzero_si128: Returns a zero vector.
236
// * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
237
//
238
// * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
239
// * _mm_and_si128:    Ands two i128s together.
240
// * _mm_or_si128:     Ors two i128s together.
241
// * _mm_andnot_si128: And-nots two i128s together.
242
//
243
// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
244
//                   filling each lane with 0x00 or 0xff.
245
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
246
//
247
// * _mm_loadu_si128:  Performs an unaligned load of an i128.
248
// * _mm_storeu_si128: Performs an unaligned store of an i128.
249
//
250
// * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
251
//                      argument if the corresponding lane of the second
252
//                      argument is positive, negative, or zero, respectively.
253
// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
254
//                      bitmask consisting of those bits.
255
// * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
256
//                      four bits of each i8 lane in the second argument as
257
//                      indices.
258
259
// https://github.com/abseil/abseil-cpp/issues/209
260
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
261
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
262
// Work around this by using the portable implementation of Group
263
// when using -funsigned-char under GCC.
264
80.5k
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
265
#if defined(__GNUC__) && !defined(__clang__)
266
  if (std::is_unsigned<char>::value) {
267
    const __m128i mask = _mm_set1_epi8(0x80);
268
    const __m128i diff = _mm_subs_epi8(b, a);
269
    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
270
  }
271
#endif
272
80.5k
  return _mm_cmpgt_epi8(a, b);
273
80.5k
}
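A standalone scalar check of the workaround above (illustrative only; SaturatingSub is a hypothetical helper, the scalar analogue of _mm_subs_epi8): a signed i8 comparison a > b is equivalent to testing the sign bit of the saturating difference b - a, which is what the mask-and-compare sequence computes lane by lane.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Saturating signed 8-bit subtraction, the scalar analogue of _mm_subs_epi8.
int8_t SaturatingSub(int8_t b, int8_t a) {
  return static_cast<int8_t>(std::clamp(int{b} - int{a}, -128, 127));
}

int main() {
  for (int a = -128; a <= 127; ++a) {
    for (int b = -128; b <= 127; ++b) {
      const bool sign_bit_set =
          (SaturatingSub(static_cast<int8_t>(b), static_cast<int8_t>(a)) & 0x80) != 0;
      assert(sign_bit_set == (a > b));  // matches _mm_cmpgt_epi8(a, b) per lane
    }
  }
}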
274
275
struct GroupSse2Impl {
276
  static constexpr size_t kWidth = 16;  // the number of slots per group
277
  using BitMaskType = BitMask<uint16_t, kWidth>;
278
  using NonIterableBitMaskType = NonIterableBitMask<uint16_t, kWidth>;
279
280
80.5k
  explicit GroupSse2Impl(const ctrl_t* pos) {
281
80.5k
    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
282
80.5k
  }
283
284
  // Returns a bitmask representing the positions of slots that match hash.
285
0
  BitMaskType Match(h2_t hash) const {
286
0
    auto match = _mm_set1_epi8(static_cast<char>(hash));
287
0
    return BitMaskType(
288
0
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
289
0
  }
290
291
  // Returns a bitmask representing the positions of empty slots.
292
0
  NonIterableBitMaskType MaskEmpty() const {
293
#ifdef ABSL_INTERNAL_HAVE_SSSE3
294
    // This only works because ctrl_t::kEmpty is -128.
295
    return NonIterableBitMaskType(
296
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
297
#else
298
0
    auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
299
0
    return NonIterableBitMaskType(
300
0
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
301
0
#endif
302
0
  }
303
304
  // Returns a bitmask representing the positions of full slots.
305
  // Note: for `is_small()` tables the group may contain the "same" slot twice:
306
  // original and mirrored.
307
0
  BitMaskType MaskFull() const {
308
0
    return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
309
0
  }
310
311
  // Returns a bitmask representing the positions of non-full slots.
312
  // Note: this includes kEmpty, kDeleted, and kSentinel.
313
  // It is useful in contexts where kSentinel is not present.
314
0
  auto MaskNonFull() const {
315
0
    return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
316
0
  }
317
318
  // Returns a bitmask representing the positions of empty or deleted slots.
319
80.5k
  NonIterableBitMaskType MaskEmptyOrDeleted() const {
320
80.5k
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
321
80.5k
    return NonIterableBitMaskType(static_cast<uint16_t>(
322
80.5k
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
323
80.5k
  }
324
325
  // Returns a bitmask representing the positions of full or sentinel slots.
326
  // Note: for `is_small()` tables the group may contain the "same" slot twice:
327
  // original and mirrored.
328
0
  NonIterableBitMaskType MaskFullOrSentinel() const {
329
0
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel) - 1);
330
0
    return NonIterableBitMaskType(static_cast<uint16_t>(
331
0
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(ctrl, special))));
332
0
  }
333
334
0
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
335
    // Take advantage of the fact that kEmpty is already the smallest signed
336
    // char value, and using a saturated subtraction will not affect it.
337
    // All special values have the MSB set, so after an AND with MSBS, we
338
    // are left with -128 for special values and 0 for full. After a saturating
339
    // subtraction of 2, we arrive at -128 (kEmpty) for special values and
340
    // -2 (kDeleted) for full ones.
341
0
    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
342
0
    auto twos = _mm_set1_epi8(static_cast<char>(2));
343
0
    auto res = _mm_subs_epi8(_mm_and_si128(msbs, ctrl), twos);
344
0
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
345
0
  }
346
347
  __m128i ctrl;
348
};
349
#endif  // ABSL_INTERNAL_HAVE_SSE2
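A standalone SSE2 sketch (x86 only; the control-byte values are illustrative) of the Match() idea above: broadcast the 7-bit h2 value, compare it against 16 control bytes at once, and read the result back as a 16-bit mask with one bit per slot.

#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  int8_t ctrl[16];
  for (int i = 0; i < 16; ++i) ctrl[i] = static_cast<int8_t>(-128);  // all kEmpty
  ctrl[3] = 0x12;  // pretend slots 3 and 9 hold elements whose h2 is 0x12
  ctrl[9] = 0x12;

  const __m128i group = _mm_loadu_si128(reinterpret_cast<const __m128i*>(ctrl));
  const __m128i match = _mm_set1_epi8(0x12);
  const auto mask = static_cast<uint16_t>(
      _mm_movemask_epi8(_mm_cmpeq_epi8(match, group)));
  std::printf("0x%04x\n", static_cast<unsigned>(mask));  // 0x0208: bits 3 and 9 set
}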
350
351
#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
352
struct GroupAArch64Impl {
353
  static constexpr size_t kWidth = 8;
354
  using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3,
355
                              /*NullifyBitsOnIteration=*/true>;
356
  using NonIterableBitMaskType =
357
      NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>;
358
359
  explicit GroupAArch64Impl(const ctrl_t* pos) {
360
    ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
361
  }
362
363
  auto Match(h2_t hash) const {
364
    uint8x8_t dup = vdup_n_u8(hash);
365
    auto mask = vceq_u8(ctrl, dup);
366
    return BitMaskType(vget_lane_u64(vreinterpret_u64_u8(mask), 0));
367
  }
368
369
  auto MaskEmpty() const {
370
    uint64_t mask =
371
        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
372
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
373
                          vreinterpret_s8_u8(ctrl))),
374
                      0);
375
    return NonIterableBitMaskType(mask);
376
  }
377
378
  // Returns a bitmask representing the positions of full slots.
379
  // Note: for `is_small()` tables the group may contain the "same" slot twice:
380
  // original and mirrored.
381
  auto MaskFull() const {
382
    uint64_t mask = vget_lane_u64(
383
        vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
384
                                    vdup_n_s8(static_cast<int8_t>(0)))),
385
        0);
386
    return BitMaskType(mask);
387
  }
388
389
  // Returns a bitmask representing the positions of non-full slots.
390
  // Note: this includes kEmpty, kDeleted, and kSentinel.
391
  // It is useful in contexts where kSentinel is not present.
392
  auto MaskNonFull() const {
393
    uint64_t mask = vget_lane_u64(
394
        vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
395
                                    vdup_n_s8(static_cast<int8_t>(0)))),
396
        0);
397
    return BitMaskType(mask);
398
  }
399
400
  auto MaskEmptyOrDeleted() const {
401
    uint64_t mask =
402
        vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
403
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
404
                          vreinterpret_s8_u8(ctrl))),
405
                      0);
406
    return NonIterableBitMaskType(mask);
407
  }
408
409
  NonIterableBitMaskType MaskFullOrSentinel() const {
410
    uint64_t mask = vget_lane_u64(
411
        vreinterpret_u64_u8(
412
            vcgt_s8(vreinterpret_s8_u8(ctrl),
413
                    vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel) - 1))),
414
        0);
415
    return NonIterableBitMaskType(mask);
416
  }
417
418
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
419
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
420
    constexpr uint64_t slsbs = 0x0202020202020202ULL;
421
    constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
422
    auto x = slsbs & (mask >> 6);
423
    auto res = (x + midbs) | kMsbs8Bytes;
424
    little_endian::Store64(dst, res);
425
  }
426
427
  uint8x8_t ctrl;
428
};
429
#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
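A standalone check (plain uint64_t arithmetic, no NEON required; the sample control word is illustrative) of the slsbs/midbs trick in the NEON ConvertSpecialToEmptyAndFullToDeleted above: bytes with the MSB set become kEmpty (0x80) and full bytes become kDeleted (0xfe).

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kMsbs = 0x8080808080808080ULL;
  constexpr uint64_t slsbs = 0x0202020202020202ULL;
  constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
  // Bytes (high to low): kEmpty, kDeleted, kSentinel, then five full bytes.
  const uint64_t mask = 0x80feff0001020304ULL;
  const uint64_t x = slsbs & (mask >> 6);    // 0x02 where the MSB was set, else 0x00
  const uint64_t res = (x + midbs) | kMsbs;  // specials -> 0x80, full -> 0xfe
  assert(res == 0x808080fefefefefeULL);
}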
430
431
struct GroupPortableImpl {
432
  static constexpr size_t kWidth = 8;
433
  using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3,
434
                              /*NullifyBitsOnIteration=*/false>;
435
  using NonIterableBitMaskType =
436
      NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>;
437
438
  explicit GroupPortableImpl(const ctrl_t* pos)
439
0
      : ctrl(little_endian::Load64(pos)) {}
440
441
0
  BitMaskType Match(h2_t hash) const {
442
0
    // For the technique, see:
443
0
    // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
444
0
    // (Determine if a word has a byte equal to n).
445
0
    //
446
0
    // Caveat: there are false positives but:
447
0
    // - they only occur if there is a real match
448
0
    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
449
0
    // - they will be handled gracefully by subsequent checks in code
450
0
    //
451
0
    // Example:
452
0
    //   v = 0x1716151413121110
453
0
    //   hash = 0x12
454
0
    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
455
0
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
456
0
    auto x = ctrl ^ (lsbs * hash);
457
0
    return BitMaskType((x - lsbs) & ~x & kMsbs8Bytes);
458
0
  }
459
460
0
  auto MaskEmpty() const {
461
0
    return NonIterableBitMaskType((ctrl & ~(ctrl << 6)) & kMsbs8Bytes);
462
0
  }
463
464
  // Returns a bitmask representing the positions of full slots.
465
  // Note: for `is_small()` tables the group may contain the "same" slot twice:
466
  // original and mirrored.
467
0
  auto MaskFull() const {
468
0
    return BitMaskType((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
469
0
  }
470
471
  // Returns a bitmask representing the positions of non-full slots.
472
  // Note: this includes kEmpty, kDeleted, and kSentinel.
473
  // It is useful in contexts where kSentinel is not present.
474
0
  auto MaskNonFull() const { return BitMaskType(ctrl & kMsbs8Bytes); }
475
476
0
  auto MaskEmptyOrDeleted() const {
477
0
    return NonIterableBitMaskType((ctrl & ~(ctrl << 7)) & kMsbs8Bytes);
478
0
  }
479
480
0
  auto MaskFullOrSentinel() const {
481
0
    return NonIterableBitMaskType((~ctrl | (ctrl << 7)) & kMsbs8Bytes);
482
0
  }
483
484
0
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
485
0
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
486
0
    auto x = ctrl & kMsbs8Bytes;
487
0
    auto res = (~x + (x >> 7)) & ~lsbs;
488
0
    little_endian::Store64(dst, res);
489
0
  }
490
491
  uint64_t ctrl;
492
};
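Two standalone checks of the bit tricks in GroupPortableImpl above (illustrative values only): the first reproduces the Match() worked example from the comment (v = 0x1716151413121110, hash = 0x12), and the second verifies that the portable ConvertSpecialToEmptyAndFullToDeleted maps special bytes to kEmpty (0x80) and full bytes to kDeleted (0xfe), matching the NEON variant above.

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kMsbs = 0x8080808080808080ULL;
  constexpr uint64_t lsbs = 0x0101010101010101ULL;

  // Match(): XOR broadcasts the hash; (x - lsbs) & ~x & kMsbs flags the zero bytes.
  const uint64_t ctrl = 0x1716151413121110ULL;
  const uint64_t x = ctrl ^ (lsbs * 0x12);
  assert(((x - lsbs) & ~x & kMsbs) == 0x0000000080800000ULL);

  // ConvertSpecialToEmptyAndFullToDeleted(): specials -> 0x80, full -> 0xfe.
  const uint64_t word = 0x80feff0001020304ULL;  // kEmpty, kDeleted, kSentinel, full...
  const uint64_t y = word & kMsbs;
  assert((((~y) + (y >> 7)) & ~lsbs) == 0x808080fefefefefeULL);
}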
493
494
#ifdef ABSL_INTERNAL_HAVE_SSE2
495
using Group = GroupSse2Impl;
496
using GroupFullEmptyOrDeleted = GroupSse2Impl;
497
#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
498
using Group = GroupAArch64Impl;
499
// For AArch64, we use the portable implementation for counting and masking
500
// full, empty, or deleted group elements. This avoids the latency of moving
501
// between data GPRs and Neon registers when doing so provides no benefit.
502
// Using Neon is profitable when we call Match(), but not for the
503
// *EmptyOrDeleted and MaskFull operations.
504
// It is difficult to make a similar approach beneficial on other architectures
505
// such as x86 since they have much lower GPR <-> vector register transfer
506
// latency and 16-wide Groups.
507
using GroupFullEmptyOrDeleted = GroupPortableImpl;
508
#else
509
using Group = GroupPortableImpl;
510
using GroupFullEmptyOrDeleted = GroupPortableImpl;
511
#endif
512
513
}  // namespace container_internal
514
ABSL_NAMESPACE_END
515
}  // namespace absl
516
517
#undef ABSL_SWISSTABLE_ASSERT
518
519
#endif  // ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_