/src/abseil-cpp/absl/container/internal/hashtable_control_bytes.h
Line | Count | Source |
1 | | // Copyright 2025 The Abseil Authors |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | // |
15 | | // This file contains the implementation of hashtable control byte |
16 | | // manipulation. |
17 | | |
18 | | #ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_ |
19 | | #define ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_ |
20 | | |
21 | | #include <cassert> |
22 | | #include <cstddef> |
23 | | #include <cstdint> |
24 | | #include <type_traits> |
25 | | |
26 | | #include "absl/base/config.h" |
27 | | |
28 | | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
29 | | #include <emmintrin.h> |
30 | | #endif |
31 | | |
32 | | #ifdef ABSL_INTERNAL_HAVE_SSSE3 |
33 | | #include <tmmintrin.h> |
34 | | #endif |
35 | | |
36 | | #ifdef _MSC_VER |
37 | | #include <intrin.h> |
38 | | #endif |
39 | | |
40 | | #ifdef ABSL_INTERNAL_HAVE_ARM_NEON |
41 | | #include <arm_neon.h> |
42 | | #endif |
43 | | |
44 | | #include "absl/base/optimization.h" |
45 | | #include "absl/numeric/bits.h" |
46 | | #include "absl/base/internal/endian.h" |
47 | | |
48 | | namespace absl { |
49 | | ABSL_NAMESPACE_BEGIN |
50 | | namespace container_internal { |
51 | | |
52 | | #ifdef ABSL_SWISSTABLE_ASSERT |
53 | | #error ABSL_SWISSTABLE_ASSERT cannot be directly set |
54 | | #else |
55 | | // We use this macro for assertions that users may see when the table is in an |
56 | | // invalid state that sanitizers may help diagnose. |
57 | | #define ABSL_SWISSTABLE_ASSERT(CONDITION) \ |
58 | 0 | assert((CONDITION) && "Try enabling sanitizers.") |
59 | | #endif |
60 | | |
61 | | |
62 | | template <typename T> |
63 | 0 | uint32_t TrailingZeros(T x) { |
64 | 0 | ABSL_ASSUME(x != 0); |
65 | 0 | return static_cast<uint32_t>(countr_zero(x)); |
66 | 0 | } |
67 | | |
68 | | // 8-byte bitmask with the most significant bit set in every byte. |
69 | | constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL; |
70 | | // 8 kEmpty bytes, useful for small table initialization. |
71 | | constexpr uint64_t k8EmptyBytes = kMsbs8Bytes; |
72 | | |
73 | | // An abstract bitmask, such as that emitted by a SIMD instruction. |
74 | | // |
75 | | // Specifically, this type implements a simple bitset whose representation is |
76 | | // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number |
77 | | // of abstract bits in the bitset, while `Shift` is the log-base-two of the |
78 | | // width of an abstract bit in the representation. |
79 | | // This mask provides operations for any number of real bits set in an abstract |
80 | | // bit. To support iteration on top of that, the implementation must guarantee |
81 | | // that only the most significant real bit of a set abstract bit is set. |
82 | | template <class T, int SignificantBits, int Shift = 0> |
83 | | class NonIterableBitMask { |
84 | | public: |
85 | 207M | explicit NonIterableBitMask(T mask) : mask_(mask) {} |
86 | | |
87 | 61.2M | explicit operator bool() const { return this->mask_ != 0; } |
88 | | |
89 | | // Returns the index of the lowest *abstract* bit set in `self`. |
90 | 32.9M | uint32_t LowestBitSet() const { |
91 | 32.9M | return container_internal::TrailingZeros(mask_) >> Shift; |
92 | 32.9M | } |
93 | | |
94 | | // Returns the index of the highest *abstract* bit set in `self`. |
95 | | uint32_t HighestBitSet() const { |
96 | | return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift); |
97 | | } |
98 | | |
99 | | // Returns the number of trailing zero *abstract* bits. |
100 | 0 | uint32_t TrailingZeros() const { |
101 | 0 | return container_internal::TrailingZeros(mask_) >> Shift; |
102 | 0 | } |
103 | | |
104 | | // Returns the number of leading zero *abstract* bits. |
105 | 0 | uint32_t LeadingZeros() const { |
106 | 0 | constexpr int total_significant_bits = SignificantBits << Shift; |
107 | 0 | constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; |
108 | 0 | return static_cast<uint32_t>( |
109 | 0 | countl_zero(static_cast<T>(mask_ << extra_bits))) >> |
110 | 0 | Shift; |
111 | 0 | } |
112 | | |
113 | | T mask_; |
114 | | }; |
115 | | |
116 | | // A mask that can be iterated over. |
117 | | // |
118 | | // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just |
119 | | // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When |
120 | | // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as |
121 | | // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. |
122 | | // If NullifyBitsOnIteration is true (only allowed for Shift == 3), a |
123 | | // non-zero abstract bit is allowed to have additional bits set |
124 | | // (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not). |
125 | | // |
126 | | // For example: |
127 | | // for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2 |
128 | | // for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3 |
129 | | template <class T, int SignificantBits, int Shift = 0, |
130 | | bool NullifyBitsOnIteration = false> |
131 | | class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> { |
132 | | using Base = NonIterableBitMask<T, SignificantBits, Shift>; |
133 | | static_assert(std::is_unsigned<T>::value, ""); |
134 | | static_assert(Shift == 0 || Shift == 3, ""); |
135 | | static_assert(!NullifyBitsOnIteration || Shift == 3, ""); |
136 | | |
137 | | public: |
138 | 0 | explicit BitMask(T mask) : Base(mask) { |
139 | 0 | if (Shift == 3 && !NullifyBitsOnIteration) { |
140 | 0 | ABSL_SWISSTABLE_ASSERT(this->mask_ == (this->mask_ & kMsbs8Bytes)); |
141 | 0 | } |
142 | 0 | } |
143 | | // BitMask is an iterator over the indices of its abstract bits. |
144 | | using value_type = int; |
145 | | using iterator = BitMask; |
146 | | using const_iterator = BitMask; |
147 | | |
148 | 12.0M | BitMask& operator++() { |
149 | 12.0M | if (Shift == 3 && NullifyBitsOnIteration) { |
150 | 0 | this->mask_ &= kMsbs8Bytes; |
151 | 0 | } |
152 | 12.0M | this->mask_ &= (this->mask_ - 1); |
153 | 12.0M | return *this; |
154 | 12.0M | } |
155 | | |
156 | 23.9M | uint32_t operator*() const { return Base::LowestBitSet(); } |
157 | | |
158 | 73.2M | BitMask begin() const { return *this; } |
159 | 73.2M | BitMask end() const { return BitMask(0); } |
160 | | |
161 | | private: |
162 | | friend bool operator==(const BitMask& a, const BitMask& b) { |
163 | | return a.mask_ == b.mask_; |
164 | | } |
165 | 85.3M | friend bool operator!=(const BitMask& a, const BitMask& b) { |
166 | 85.3M | return a.mask_ != b.mask_; |
167 | 85.3M | } |
168 | | }; |
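For illustration, a minimal usage sketch (not part of the header; the function name is hypothetical) showing how iterating a BitMask yields the abstract-bit indices from the examples in the comment above:

  #include <cassert>
  #include <cstdint>
  #include "absl/container/internal/hashtable_control_bytes.h"

  void BitMaskIterationExample() {
    using absl::container_internal::BitMask;
    // Shift == 0: an ordinary 16-bit bitset; 0b101 has abstract bits 0 and 2.
    uint32_t expected16[] = {0, 2};
    int i = 0;
    for (uint32_t idx : BitMask<uint16_t, 16>(0b101)) {
      assert(idx == expected16[i++]);
    }
    // Shift == 3: each abstract bit is one byte, represented as 0x00 or 0x80.
    uint32_t expected64[] = {2, 3};
    int j = 0;
    for (uint32_t idx : BitMask<uint64_t, 8, 3>(uint64_t{0x0000000080800000})) {
      assert(idx == expected64[j++]);
    }
  }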
169 | | |
170 | | using h2_t = uint8_t; |
171 | | |
172 | | // The values here are selected for maximum performance. See the static asserts |
173 | | // below for details. |
174 | | |
175 | | // A `ctrl_t` is a single control byte, which can have one of four |
176 | | // states: empty, deleted, full (which has an associated seven-bit h2_t value) |
177 | | // and the sentinel. They have the following bit patterns: |
178 | | // |
179 | | // empty: 1 0 0 0 0 0 0 0 |
180 | | // deleted: 1 1 1 1 1 1 1 0 |
181 | | // full: 0 h h h h h h h // h represents the hash bits. |
182 | | // sentinel: 1 1 1 1 1 1 1 1 |
183 | | // |
184 | | // These values are specifically tuned for SSE-flavored SIMD. |
185 | | // The static_asserts below detail the source of these choices. |
186 | | // |
187 | | // We use an enum class so that when strict aliasing is enabled, the compiler |
188 | | // knows ctrl_t doesn't alias other types. |
189 | | enum class ctrl_t : int8_t { |
190 | | kEmpty = -128, // 0b10000000 |
191 | | kDeleted = -2, // 0b11111110 |
192 | | kSentinel = -1, // 0b11111111 |
193 | | }; |
194 | | static_assert( |
195 | | (static_cast<int8_t>(ctrl_t::kEmpty) & |
196 | | static_cast<int8_t>(ctrl_t::kDeleted) & |
197 | | static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0, |
198 | | "Special markers need to have the MSB to make checking for them efficient"); |
199 | | static_assert( |
200 | | ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, |
201 | | "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " |
202 | | "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); |
203 | | static_assert( |
204 | | ctrl_t::kSentinel == static_cast<ctrl_t>(-1), |
205 | | "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " |
206 | | "registers (pcmpeqd xmm, xmm)"); |
207 | | static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128), |
208 | | "ctrl_t::kEmpty must be -128 to make the SIMD check for its " |
209 | | "existence efficient (psignb xmm, xmm)"); |
210 | | static_assert( |
211 | | (~static_cast<int8_t>(ctrl_t::kEmpty) & |
212 | | ~static_cast<int8_t>(ctrl_t::kDeleted) & |
213 | | static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0, |
214 | | "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " |
215 | | "shared by ctrl_t::kSentinel to make the scalar test for " |
216 | | "MaskEmptyOrDeleted() efficient"); |
217 | | static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2), |
218 | | "ctrl_t::kDeleted must be -2 to make the implementation of " |
219 | | "ConvertSpecialToEmptyAndFullToDeleted efficient"); |
220 | | |
221 | | // Helpers for checking the state of a control byte. |
222 | 130k | inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } |
223 | 8.73M | inline bool IsFull(ctrl_t c) { |
224 | | // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0` |
225 | | // is not a value in the enum. Both ways are equivalent, but this way makes |
226 | | // linters happier. |
227 | 8.73M | return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0; |
228 | 8.73M | } |
229 | 0 | inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } |
230 | 474k | inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } |
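As a quick illustration of the bit patterns documented above, a minimal sketch (not part of the header; the function name is hypothetical):

  #include <cassert>
  #include "absl/container/internal/hashtable_control_bytes.h"

  void CtrlBytePredicateExample() {
    using namespace absl::container_internal;
    // A full byte is any value with the MSB clear; its low 7 bits are the h2 hash.
    ctrl_t full = static_cast<ctrl_t>(0x5a);
    assert(IsFull(full) && !IsEmpty(full) && !IsEmptyOrDeleted(full));
    // kEmpty and kDeleted both have the MSB set and compare below kSentinel.
    assert(IsEmpty(ctrl_t::kEmpty) && IsEmptyOrDeleted(ctrl_t::kEmpty));
    assert(IsDeleted(ctrl_t::kDeleted) && IsEmptyOrDeleted(ctrl_t::kDeleted));
    // The sentinel is neither full nor empty-or-deleted.
    assert(!IsFull(ctrl_t::kSentinel) && !IsEmptyOrDeleted(ctrl_t::kSentinel));
  }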
231 | | |
232 | | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
233 | | // Quick reference guide for intrinsics used below: |
234 | | // |
235 | | // * __m128i: An XMM (128-bit) word. |
236 | | // |
237 | | // * _mm_setzero_si128: Returns a zero vector. |
238 | | // * _mm_set1_epi8: Returns a vector with the same i8 in each lane. |
239 | | // |
240 | | // * _mm_subs_epi8: Saturating-subtracts two i8 vectors. |
241 | | // * _mm_and_si128: Ands two i128s together. |
242 | | // * _mm_or_si128: Ors two i128s together. |
243 | | // * _mm_andnot_si128: And-nots two i128s together. |
244 | | // |
245 | | // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, |
246 | | // filling each lane with 0x00 or 0xff. |
247 | | // * _mm_cmpgt_epi8: Same as above, but using > rather than ==. |
248 | | // |
249 | | // * _mm_loadu_si128: Performs an unaligned load of an i128. |
250 | | // * _mm_storeu_si128: Performs an unaligned store of an i128. |
251 | | // |
252 | | // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first |
253 | | // argument if the corresponding lane of the second |
254 | | // argument is positive, negative, or zero, respectively. |
255 | | // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a |
256 | | // bitmask consisting of those bits. |
257 | | // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low |
258 | | // four bits of each i8 lane in the second argument as |
259 | | // indices. |
260 | | |
261 | | // https://github.com/abseil/abseil-cpp/issues/209 |
262 | | // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 |
263 | | // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char |
264 | | // Work around this by emulating the signed comparison |
265 | | // when char is unsigned under GCC. |
266 | 167k | inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { |
267 | | #if defined(__GNUC__) && !defined(__clang__) |
268 | | if (std::is_unsigned<char>::value) { |
269 | | const __m128i mask = _mm_set1_epi8(0x80); |
270 | | const __m128i diff = _mm_subs_epi8(b, a); |
271 | | return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); |
272 | | } |
273 | | #endif |
274 | 167k | return _mm_cmpgt_epi8(a, b); |
275 | 167k | } |
276 | | |
277 | | struct GroupSse2Impl { |
278 | | static constexpr size_t kWidth = 16; // the number of slots per group |
279 | | using BitMaskType = BitMask<uint16_t, kWidth>; |
280 | | using NonIterableBitMaskType = NonIterableBitMask<uint16_t, kWidth>; |
281 | | |
282 | 73.6M | explicit GroupSse2Impl(const ctrl_t* pos) { |
283 | 73.6M | ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos)); |
284 | 73.6M | } |
285 | | |
286 | | // Returns a bitmask representing the positions of slots that match hash. |
287 | | BitMaskType Match(h2_t hash) const { |
288 | | auto match = _mm_set1_epi8(static_cast<char>(hash)); |
289 | | return BitMaskType( |
290 | | static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); |
291 | | } |
292 | | |
293 | | // Returns a bitmask representing the positions of empty slots. |
294 | 60.8M | NonIterableBitMaskType MaskEmpty() const { |
295 | | #ifdef ABSL_INTERNAL_HAVE_SSSE3 |
296 | | // This only works because ctrl_t::kEmpty is -128. |
297 | | return NonIterableBitMaskType( |
298 | | static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); |
299 | | #else |
300 | 60.8M | auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty)); |
301 | 60.8M | return NonIterableBitMaskType( |
302 | 60.8M | static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); |
303 | 60.8M | #endif |
304 | 60.8M | } |
305 | | |
306 | | // Returns a bitmask representing the positions of full slots. |
307 | | // Note: for `is_small()` tables the group may contain the "same" slot twice: |
308 | | // original and mirrored. |
309 | 528k | BitMaskType MaskFull() const { |
310 | 528k | return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff)); |
311 | 528k | } |
312 | | |
313 | | // Returns a bitmask representing the positions of non-full slots. |
314 | | // Note: this includes kEmpty, kDeleted, and kSentinel. |
315 | | // It is useful in contexts where kSentinel is not present. |
316 | 197k | auto MaskNonFull() const { |
317 | 197k | return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl))); |
318 | 197k | } |
319 | | |
320 | | // Returns a bitmask representing the positions of empty or deleted slots. |
321 | 81.4k | NonIterableBitMaskType MaskEmptyOrDeleted() const { |
322 | 81.4k | auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel)); |
323 | 81.4k | return NonIterableBitMaskType(static_cast<uint16_t>( |
324 | 81.4k | _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); |
325 | 81.4k | } |
326 | | |
327 | | // Returns a bitmask representing the positions of full or sentinel slots. |
328 | | // Note: for `is_small()` tables the group may contain the "same" slot twice: |
329 | | // original and mirrored. |
330 | | NonIterableBitMaskType MaskFullOrSentinel() const { |
331 | | auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel) - 1); |
332 | | return NonIterableBitMaskType(static_cast<uint16_t>( |
333 | | _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(ctrl, special)))); |
334 | | } |
335 | | |
336 | 0 | void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { |
337 | 0 | auto msbs = _mm_set1_epi8(static_cast<char>(-128)); |
338 | 0 | auto x126 = _mm_set1_epi8(126); |
339 | | #ifdef ABSL_INTERNAL_HAVE_SSSE3 |
340 | | auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); |
341 | | #else |
342 | 0 | auto zero = _mm_setzero_si128(); |
343 | 0 | auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); |
344 | 0 | auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); |
345 | 0 | #endif |
346 | 0 | _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); |
347 | 0 | } |
348 | | |
349 | | __m128i ctrl; |
350 | | }; |
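A minimal sketch (not part of the header, SSE2 builds only) of roughly how a probe loop uses one group; `group_start` and the function name are hypothetical:

  void ProbeOneGroupSse2(const ctrl_t* group_start, h2_t h2) {
    GroupSse2Impl g(group_start);  // Unaligned load of 16 control bytes.
    // Each set bit in Match() marks a slot whose control byte equals the
    // 7-bit h2 value; the caller would compare the full keys at these slots.
    for (uint32_t i : g.Match(h2)) {
      (void)i;  // Candidate slot index within the group.
    }
    // If the group has any kEmpty byte, an unsuccessful lookup can stop here.
    if (g.MaskEmpty()) {
      return;
    }
  }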
351 | | #endif // ABSL_INTERNAL_HAVE_SSE2 |
352 | | |
353 | | #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) |
354 | | struct GroupAArch64Impl { |
355 | | static constexpr size_t kWidth = 8; |
356 | | using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3, |
357 | | /*NullifyBitsOnIteration=*/true>; |
358 | | using NonIterableBitMaskType = |
359 | | NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>; |
360 | | |
361 | | explicit GroupAArch64Impl(const ctrl_t* pos) { |
362 | | ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos)); |
363 | | } |
364 | | |
365 | | auto Match(h2_t hash) const { |
366 | | uint8x8_t dup = vdup_n_u8(hash); |
367 | | auto mask = vceq_u8(ctrl, dup); |
368 | | return BitMaskType(vget_lane_u64(vreinterpret_u64_u8(mask), 0)); |
369 | | } |
370 | | |
371 | | auto MaskEmpty() const { |
372 | | uint64_t mask = |
373 | | vget_lane_u64(vreinterpret_u64_u8(vceq_s8( |
374 | | vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)), |
375 | | vreinterpret_s8_u8(ctrl))), |
376 | | 0); |
377 | | return NonIterableBitMaskType(mask); |
378 | | } |
379 | | |
380 | | // Returns a bitmask representing the positions of full slots. |
381 | | // Note: for `is_small()` tables the group may contain the "same" slot twice: |
382 | | // original and mirrored. |
383 | | auto MaskFull() const { |
384 | | uint64_t mask = vget_lane_u64( |
385 | | vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl), |
386 | | vdup_n_s8(static_cast<int8_t>(0)))), |
387 | | 0); |
388 | | return BitMaskType(mask); |
389 | | } |
390 | | |
391 | | // Returns a bitmask representing the positions of non-full slots. |
392 | | // Note: this includes kEmpty, kDeleted, and kSentinel. |
393 | | // It is useful in contexts where kSentinel is not present. |
394 | | auto MaskNonFull() const { |
395 | | uint64_t mask = vget_lane_u64( |
396 | | vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl), |
397 | | vdup_n_s8(static_cast<int8_t>(0)))), |
398 | | 0); |
399 | | return BitMaskType(mask); |
400 | | } |
401 | | |
402 | | auto MaskEmptyOrDeleted() const { |
403 | | uint64_t mask = |
404 | | vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( |
405 | | vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)), |
406 | | vreinterpret_s8_u8(ctrl))), |
407 | | 0); |
408 | | return NonIterableBitMaskType(mask); |
409 | | } |
410 | | |
411 | | NonIterableBitMaskType MaskFullOrSentinel() const { |
412 | | uint64_t mask = vget_lane_u64( |
413 | | vreinterpret_u64_u8( |
414 | | vcgt_s8(vreinterpret_s8_u8(ctrl), |
415 | | vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel) - 1))), |
416 | | 0); |
417 | | return NonIterableBitMaskType(mask); |
418 | | } |
419 | | |
420 | | void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { |
421 | | uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); |
422 | | constexpr uint64_t slsbs = 0x0202020202020202ULL; |
423 | | constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL; |
424 | | auto x = slsbs & (mask >> 6); |
425 | | auto res = (x + midbs) | kMsbs8Bytes; |
426 | | little_endian::Store64(dst, res); |
427 | | } |
428 | | |
429 | | uint8x8_t ctrl; |
430 | | }; |
431 | | #endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN |
432 | | |
433 | | struct GroupPortableImpl { |
434 | | static constexpr size_t kWidth = 8; |
435 | | using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3, |
436 | | /*NullifyBitsOnIteration=*/false>; |
437 | | using NonIterableBitMaskType = |
438 | | NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>; |
439 | | |
440 | | explicit GroupPortableImpl(const ctrl_t* pos) |
441 | 0 | : ctrl(little_endian::Load64(pos)) {} |
442 | | |
443 | 0 | BitMaskType Match(h2_t hash) const { |
444 | 0 | // For the technique, see: |
445 | 0 | // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord |
446 | 0 | // (Determine if a word has a byte equal to n). |
447 | 0 | // |
448 | 0 | // Caveat: there are false positives but: |
449 | 0 | // - they only occur if there is a real match |
450 | 0 | // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel |
451 | 0 | // - they will be handled gracefully by subsequent checks in code |
452 | 0 | // |
453 | 0 | // Example: |
454 | 0 | // v = 0x1716151413121110 |
455 | 0 | // hash = 0x12 |
456 | 0 | // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 |
457 | 0 | constexpr uint64_t lsbs = 0x0101010101010101ULL; |
458 | 0 | auto x = ctrl ^ (lsbs * hash); |
459 | 0 | return BitMaskType((x - lsbs) & ~x & kMsbs8Bytes); |
460 | 0 | } |
461 | | |
462 | 0 | auto MaskEmpty() const { |
463 | 0 | return NonIterableBitMaskType((ctrl & ~(ctrl << 6)) & kMsbs8Bytes); |
464 | 0 | } |
465 | | |
466 | | // Returns a bitmask representing the positions of full slots. |
467 | | // Note: for `is_small()` tables the group may contain the "same" slot twice: |
468 | | // original and mirrored. |
469 | 0 | auto MaskFull() const { |
470 | 0 | return BitMaskType((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes); |
471 | 0 | } |
472 | | |
473 | | // Returns a bitmask representing the positions of non-full slots. |
474 | | // Note: this includes kEmpty, kDeleted, and kSentinel. |
475 | | // It is useful in contexts where kSentinel is not present. |
476 | 0 | auto MaskNonFull() const { return BitMaskType(ctrl & kMsbs8Bytes); } |
477 | | |
478 | 0 | auto MaskEmptyOrDeleted() const { |
479 | 0 | return NonIterableBitMaskType((ctrl & ~(ctrl << 7)) & kMsbs8Bytes); |
480 | 0 | } |
481 | | |
482 | 0 | auto MaskFullOrSentinel() const { |
483 | 0 | return NonIterableBitMaskType((~ctrl | (ctrl << 7)) & kMsbs8Bytes); |
484 | 0 | } |
485 | | |
486 | 0 | void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { |
487 | 0 | constexpr uint64_t lsbs = 0x0101010101010101ULL; |
488 | 0 | auto x = ctrl & kMsbs8Bytes; |
489 | 0 | auto res = (~x + (x >> 7)) & ~lsbs; |
490 | 0 | little_endian::Store64(dst, res); |
491 | 0 | } |
492 | | |
493 | | uint64_t ctrl; |
494 | | }; |
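A minimal sketch (not part of the header; the function name is hypothetical) that checks the worked example from the Match() comment above: control bytes 0x10..0x17 with h2 == 0x12 match at positions 2 and 3.

  #include <cassert>

  void PortableMatchExample() {
    using namespace absl::container_internal;
    ctrl_t bytes[8];
    for (int i = 0; i < 8; ++i) bytes[i] = static_cast<ctrl_t>(0x10 + i);
    GroupPortableImpl g(bytes);       // ctrl == 0x1716151413121110 after the load.
    auto mask = g.Match(h2_t{0x12});  // mask_ == 0x0000000080800000.
    assert(mask.LowestBitSet() == 2);
    assert(mask.HighestBitSet() == 3);
  }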
495 | | |
496 | | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
497 | | using Group = GroupSse2Impl; |
498 | | using GroupFullEmptyOrDeleted = GroupSse2Impl; |
499 | | #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) |
500 | | using Group = GroupAArch64Impl; |
501 | | // For Aarch64, we use the portable implementation for counting and masking |
502 | | // full, empty or deleted group elements. This is to avoid the latency of moving |
503 | | // between data GPRs and Neon registers when it does not provide a benefit. |
504 | | // Using Neon is profitable when we call Match(), but is not when we don't, |
505 | | // which is the case when we do *EmptyOrDeleted and MaskFull operations. |
506 | | // It is difficult to make a similar approach beneficial on other architectures |
507 | | // such as x86 since they have much lower GPR <-> vector register transfer |
508 | | // latency and 16-wide Groups. |
509 | | using GroupFullEmptyOrDeleted = GroupPortableImpl; |
510 | | #else |
511 | | using Group = GroupPortableImpl; |
512 | | using GroupFullEmptyOrDeleted = GroupPortableImpl; |
513 | | #endif |
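Higher layers are written against the selected aliases, so the same code compiles for every backend. A minimal sketch (not part of the header; the helper and `ctrl_ptr` are hypothetical):

  // Count the full slots in one group using whichever implementation was
  // selected above; every backend provides MaskFull() with the same meaning.
  template <class G>
  uint32_t CountFullInGroup(const ctrl_t* pos) {
    uint32_t n = 0;
    for (uint32_t i : G(pos).MaskFull()) {
      (void)i;
      ++n;
    }
    return n;
  }
  // Usage (hypothetical): CountFullInGroup<GroupFullEmptyOrDeleted>(ctrl_ptr);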
514 | | |
515 | | } // namespace container_internal |
516 | | ABSL_NAMESPACE_END |
517 | | } // namespace absl |
518 | | |
519 | | #undef ABSL_SWISSTABLE_ASSERT |
520 | | |
521 | | #endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_ |