/src/abseil-cpp/absl/container/internal/hashtable_control_bytes.h
Line | Count | Source |
1 | | // Copyright 2025 The Abseil Authors |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | // |
15 | | // This file contains the implementation of hashtable control byte |
16 | | // manipulation. |
17 | | |
18 | | #ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_ |
19 | | #define ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_ |
20 | | |
21 | | #include <cassert> |
22 | | #include <cstddef> |
23 | | #include <cstdint> |
24 | | #include <type_traits> |
25 | | |
26 | | #include "absl/base/config.h" |
27 | | |
28 | | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
29 | | #include <emmintrin.h> |
30 | | #endif |
31 | | |
32 | | #ifdef ABSL_INTERNAL_HAVE_SSSE3 |
33 | | #include <tmmintrin.h> |
34 | | #endif |
35 | | |
36 | | #ifdef _MSC_VER |
37 | | #include <intrin.h> |
38 | | #endif |
39 | | |
40 | | #ifdef ABSL_INTERNAL_HAVE_ARM_NEON |
41 | | #include <arm_neon.h> |
42 | | #endif |
43 | | |
44 | | #include "absl/base/optimization.h" |
45 | | #include "absl/numeric/bits.h" |
46 | | #include "absl/base/internal/endian.h" |
47 | | |
48 | | namespace absl { |
49 | | ABSL_NAMESPACE_BEGIN |
50 | | namespace container_internal { |
51 | | |
52 | | #ifdef ABSL_SWISSTABLE_ASSERT |
53 | | #error ABSL_SWISSTABLE_ASSERT cannot be directly set |
54 | | #else |
55 | | // We use this macro for assertions that users may see when the table is in an |
56 | | // invalid state that sanitizers may help diagnose. |
57 | | #define ABSL_SWISSTABLE_ASSERT(CONDITION) \ |
58 | 0 | assert((CONDITION) && "Try enabling sanitizers.") |
59 | | #endif |
60 | | |
61 | | |
62 | | template <typename T> |
63 | 0 | uint32_t TrailingZeros(T x) { |
64 | 0 | ABSL_ASSUME(x != 0); |
65 | 0 | return static_cast<uint32_t>(countr_zero(x)); |
66 | 0 | } |
67 | | |
68 | | // An 8-byte bitmask with the most significant bit set in every byte. |
69 | | constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL; |
70 | | // Eight kEmpty bytes, useful for small table initialization. |
71 | | constexpr uint64_t k8EmptyBytes = kMsbs8Bytes; |
72 | | |
73 | | // An abstract bitmask, such as that emitted by a SIMD instruction. |
74 | | // |
75 | | // Specifically, this type implements a simple bitset whose representation is |
76 | | // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number |
77 | | // of abstract bits in the bitset, while `Shift` is the log-base-two of the |
78 | | // width of an abstract bit in the representation. |
79 | | // This mask provides operations for any number of real bits set in an |
80 | | // abstract bit. To support iteration on top of that, the implementation |
81 | | // must guarantee that a set abstract bit has only its most significant real bit set. |
82 | | template <class T, int SignificantBits, int Shift = 0> |
83 | | class NonIterableBitMask { |
84 | | public: |
85 | 221M | explicit NonIterableBitMask(T mask) : mask_(mask) {} |
86 | | |
87 | 64.9M | explicit operator bool() const { return this->mask_ != 0; } |
88 | | |
89 | | // Returns the index of the lowest *abstract* bit set in `self`. |
90 | 35.5M | uint32_t LowestBitSet() const { |
91 | 35.5M | return container_internal::TrailingZeros(mask_) >> Shift; |
92 | 35.5M | } |
93 | | |
94 | | // Returns the number of trailing zero *abstract* bits. |
95 | 0 | uint32_t TrailingZeros() const { |
96 | 0 | return container_internal::TrailingZeros(mask_) >> Shift; |
97 | 0 | } |
98 | | |
99 | | // Returns the number of leading zero *abstract* bits. |
100 | 0 | uint32_t LeadingZeros() const { |
101 | 0 | constexpr int total_significant_bits = SignificantBits << Shift; |
102 | 0 | constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; |
103 | 0 | return static_cast<uint32_t>( |
104 | 0 | countl_zero(static_cast<T>(mask_ << extra_bits))) >> |
105 | 0 | Shift; |
106 | 0 | } |
107 | | |
108 | | T mask_; |
109 | | }; |
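To ground the `SignificantBits`/`Shift` description above: with `Shift == 3` every abstract bit is one byte wide, so byte positions double as abstract bit indices. A minimal sketch against this internal header (the demo function name is illustrative):

    #include <cassert>
    #include <cstdint>
    #include "absl/container/internal/hashtable_control_bytes.h"

    void NonIterableBitMaskDemo() {
      using absl::container_internal::NonIterableBitMask;
      // Bytes 2 and 3 have their MSB (0x80) set, so abstract bits 2 and 3
      // are set.
      NonIterableBitMask<uint64_t, 8, 3> m(uint64_t{0x0000000080800000});
      assert(m.LowestBitSet() == 2);  // lowest set byte is byte 2
      assert(m.LeadingZeros() == 4);  // bytes 4..7 are clear
    }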
110 | | |
111 | | // A mask that can be iterated over. |
112 | | // |
113 | | // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just |
114 | | // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When |
115 | | // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as |
116 | | // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. |
117 | | // If NullifyBitsOnIteration is true (only allowed when Shift == 3), a |
118 | | // non-zero abstract bit is allowed to have additional real bits set |
119 | | // (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not). |
120 | | // |
121 | | // For example: |
122 | | // for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2 |
123 | | // for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3 |
124 | | template <class T, int SignificantBits, int Shift = 0, |
125 | | bool NullifyBitsOnIteration = false> |
126 | | class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> { |
127 | | using Base = NonIterableBitMask<T, SignificantBits, Shift>; |
128 | | static_assert(std::is_unsigned<T>::value, ""); |
129 | | static_assert(Shift == 0 || Shift == 3, ""); |
130 | | static_assert(!NullifyBitsOnIteration || Shift == 3, ""); |
131 | | |
132 | | public: |
133 | 0 | explicit BitMask(T mask) : Base(mask) { |
134 | 0 | if (Shift == 3 && !NullifyBitsOnIteration) { |
135 | 0 | ABSL_SWISSTABLE_ASSERT(this->mask_ == (this->mask_ & kMsbs8Bytes)); |
136 | 0 | } |
137 | 0 | } |
138 | | // BitMask is an iterator over the indices of its abstract bits. |
139 | | using value_type = int; |
140 | | using iterator = BitMask; |
141 | | using const_iterator = BitMask; |
142 | | |
143 | 13.2M | BitMask& operator++() { |
144 | 13.2M | if (Shift == 3 && NullifyBitsOnIteration) { |
145 | 0 | this->mask_ &= kMsbs8Bytes; |
146 | 0 | } |
147 | 13.2M | this->mask_ &= (this->mask_ - 1); |
148 | 13.2M | return *this; |
149 | 13.2M | } |
150 | | |
151 | 26.4M | uint32_t operator*() const { return Base::LowestBitSet(); } |
152 | | |
153 | 78.4M | BitMask begin() const { return *this; } |
154 | 78.4M | BitMask end() const { return BitMask(0); } |
155 | | |
156 | | private: |
157 | | friend bool operator==(const BitMask& a, const BitMask& b) { |
158 | | return a.mask_ == b.mask_; |
159 | | } |
160 | 91.6M | friend bool operator!=(const BitMask& a, const BitMask& b) { |
161 | 91.6M | return a.mask_ != b.mask_; |
162 | 91.6M | } Unexecuted instantiation: absl::container_internal::operator!=(absl::container_internal::BitMask<unsigned long, 8, 3, false> const&, absl::container_internal::BitMask<unsigned long, 8, 3, false> const&) absl::container_internal::operator!=(absl::container_internal::BitMask<unsigned short, 16, 0, false> const&, absl::container_internal::BitMask<unsigned short, 16, 0, false> const&) Line | Count | Source | 160 | 91.6M | friend bool operator!=(const BitMask& a, const BitMask& b) { | 161 | 91.6M | return a.mask_ != b.mask_; | 162 | 91.6M | } |
|
163 | | }; |
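To make the iteration contract concrete, here is a hedged sketch mirroring the `0b101` example from the class comment; it assumes only this header:

    #include <cstdint>
    #include <vector>
    #include "absl/container/internal/hashtable_control_bytes.h"

    std::vector<int> BitMaskIterationDemo() {
      using absl::container_internal::BitMask;
      std::vector<int> out;
      // Sixteen 1-bit-wide abstract bits; 0b101 sets bits 0 and 2.
      for (int i : BitMask<uint16_t, 16>(uint16_t{0b101})) out.push_back(i);
      return out;  // yields {0, 2}
    }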
164 | | |
165 | | using h2_t = uint8_t; |
166 | | |
167 | | // The values here are selected for maximum performance. See the static asserts |
168 | | // below for details. |
169 | | |
170 | | // A `ctrl_t` is a single control byte, which can have one of four |
171 | | // states: empty, deleted, full (which has an associated seven-bit h2_t value) |
172 | | // and the sentinel. They have the following bit patterns: |
173 | | // |
174 | | // empty: 1 0 0 0 0 0 0 0 |
175 | | // deleted: 1 1 1 1 1 1 1 0 |
176 | | // full: 0 h h h h h h h // h represents the hash bits. |
177 | | // sentinel: 1 1 1 1 1 1 1 1 |
178 | | // |
179 | | // These values are specifically tuned for SSE-flavored SIMD. |
180 | | // The static_asserts below detail the source of these choices. |
181 | | // |
182 | | // We use an enum class so that when strict aliasing is enabled, the compiler |
183 | | // knows ctrl_t doesn't alias other types. |
184 | | enum class ctrl_t : int8_t { |
185 | | kEmpty = -128, // 0b10000000 |
186 | | kDeleted = -2, // 0b11111110 |
187 | | kSentinel = -1, // 0b11111111 |
188 | | }; |
189 | | static_assert( |
190 | | (static_cast<int8_t>(ctrl_t::kEmpty) & |
191 | | static_cast<int8_t>(ctrl_t::kDeleted) & |
192 | | static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0, |
193 | | "Special markers need to have the MSB to make checking for them efficient"); |
194 | | static_assert( |
195 | | ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, |
196 | | "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " |
197 | | "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); |
198 | | static_assert( |
199 | | ctrl_t::kSentinel == static_cast<ctrl_t>(-1), |
200 | | "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " |
201 | | "registers (pcmpeqd xmm, xmm)"); |
202 | | static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128), |
203 | | "ctrl_t::kEmpty must be -128 to make the SIMD check for its " |
204 | | "existence efficient (psignb xmm, xmm)"); |
205 | | static_assert( |
206 | | (~static_cast<int8_t>(ctrl_t::kEmpty) & |
207 | | ~static_cast<int8_t>(ctrl_t::kDeleted) & |
208 | | static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0, |
209 | | "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " |
210 | | "shared by ctrl_t::kSentinel to make the scalar test for " |
211 | | "MaskEmptyOrDeleted() efficient"); |
212 | | static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2), |
213 | | "ctrl_t::kDeleted must be -2 to make the implementation of " |
214 | | "ConvertSpecialToEmptyAndFullToDeleted efficient"); |
215 | | |
216 | | // Helpers for checking the state of a control byte. |
217 | 142k | inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } |
218 | 8.84M | inline bool IsFull(ctrl_t c) { |
219 | | // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0` |
220 | | // is not a value in the enum. Both ways are equivalent, but this way makes |
221 | | // linters happier. |
222 | 8.84M | return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0; |
223 | 8.84M | } |
224 | 0 | inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } |
225 | 662k | inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } |
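The bit patterns above make every state check a single comparison. A hedged sketch exercising these helpers (demo function name is illustrative; it assumes only this header):

    #include <cassert>
    #include "absl/container/internal/hashtable_control_bytes.h"

    namespace ci = absl::container_internal;

    void CtrlStateDemo() {
      // Full bytes are exactly the non-negative ones: 0x00..0x7f carry the
      // 7-bit h2_t fragment, while every special state has the MSB set.
      assert(ci::IsFull(static_cast<ci::ctrl_t>(0x5a)));
      assert(ci::IsEmpty(ci::ctrl_t::kEmpty));
      assert(ci::IsDeleted(ci::ctrl_t::kDeleted));
      // kEmpty and kDeleted both sort below kSentinel, so a single signed
      // compare implements IsEmptyOrDeleted().
      assert(ci::IsEmptyOrDeleted(ci::ctrl_t::kDeleted));
      assert(!ci::IsEmptyOrDeleted(ci::ctrl_t::kSentinel));
    }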
226 | | |
227 | | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
228 | | // Quick reference guide for intrinsics used below: |
229 | | // |
230 | | // * __m128i: An XMM (128-bit) word. |
231 | | // |
232 | | // * _mm_setzero_si128: Returns a zero vector. |
233 | | // * _mm_set1_epi8: Returns a vector with the same i8 in each lane. |
234 | | // |
235 | | // * _mm_subs_epi8: Saturating-subtracts two i8 vectors. |
236 | | // * _mm_and_si128: Ands two i128s together. |
237 | | // * _mm_or_si128: Ors two i128s together. |
238 | | // * _mm_andnot_si128: And-nots two i128s together. |
239 | | // |
240 | | // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, |
241 | | // filling each lane with 0x00 or 0xff. |
242 | | // * _mm_cmpgt_epi8: Same as above, but using > rather than ==. |
243 | | // |
244 | | // * _mm_loadu_si128: Performs an unaligned load of an i128. |
245 | | // * _mm_storeu_si128: Performs an unaligned store of an i128. |
246 | | // |
247 | | // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first |
248 | | // argument if the corresponding lane of the second |
249 | | // argument is positive, negative, or zero, respectively. |
250 | | // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a |
251 | | // bitmask consisting of those bits. |
252 | | // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low |
253 | | // four bits of each i8 lane in the second argument as |
254 | | // indices. |
255 | | |
256 | | // https://github.com/abseil/abseil-cpp/issues/209 |
257 | | // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 |
258 | | // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char |
259 | | // Work around this by using the portable implementation of Group |
260 | | // when using -funsigned-char under GCC. |
261 | 88.6k | inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { |
262 | | #if defined(__GNUC__) && !defined(__clang__) |
263 | | if (std::is_unsigned<char>::value) { |
264 | | const __m128i mask = _mm_set1_epi8(0x80); |
265 | | const __m128i diff = _mm_subs_epi8(b, a); |
266 | | return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); |
267 | | } |
268 | | #endif |
269 | 88.6k | return _mm_cmpgt_epi8(a, b); |
270 | 88.6k | } |
271 | | |
272 | | struct GroupSse2Impl { |
273 | | static constexpr size_t kWidth = 16; // the number of slots per group |
274 | | using BitMaskType = BitMask<uint16_t, kWidth>; |
275 | | using NonIterableBitMaskType = NonIterableBitMask<uint16_t, kWidth>; |
276 | | |
277 | 78.7M | explicit GroupSse2Impl(const ctrl_t* pos) { |
278 | 78.7M | ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos)); |
279 | 78.7M | } |
280 | | |
281 | | // Returns a bitmask representing the positions of slots that match hash. |
282 | | BitMaskType Match(h2_t hash) const { |
283 | | auto match = _mm_set1_epi8(static_cast<char>(hash)); |
284 | | return BitMaskType( |
285 | | static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); |
286 | | } |
287 | | |
288 | | // Returns a bitmask representing the positions of empty slots. |
289 | 64.5M | NonIterableBitMaskType MaskEmpty() const { |
290 | | #ifdef ABSL_INTERNAL_HAVE_SSSE3 |
291 | | // This only works because ctrl_t::kEmpty is -128. |
292 | | return NonIterableBitMaskType( |
293 | | static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); |
294 | | #else |
295 | 64.5M | auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty)); |
296 | 64.5M | return NonIterableBitMaskType( |
297 | 64.5M | static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); |
298 | 64.5M | #endif |
299 | 64.5M | } |
300 | | |
301 | | // Returns a bitmask representing the positions of full slots. |
302 | | // Note: for `is_small()` tables, the group may contain the "same" slot |
303 | | // twice: original and mirrored. |
304 | 585k | BitMaskType MaskFull() const { |
305 | 585k | return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff)); |
306 | 585k | } |
307 | | |
308 | | // Returns a bitmask representing the positions of non-full slots. |
309 | | // Note: this includes kEmpty, kDeleted, and kSentinel. |
310 | | // It is useful in contexts where kSentinel is not present. |
311 | 218k | auto MaskNonFull() const { |
312 | 218k | return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl))); |
313 | 218k | } |
314 | | |
315 | | // Returns a bitmask representing the positions of empty or deleted slots. |
316 | 88.6k | NonIterableBitMaskType MaskEmptyOrDeleted() const { |
317 | 88.6k | auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel)); |
318 | 88.6k | return NonIterableBitMaskType(static_cast<uint16_t>( |
319 | 88.6k | _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); |
320 | 88.6k | } |
321 | | |
322 | | // Returns a bitmask representing the positions of full or sentinel slots. |
323 | | // Note: for `is_small()` tables, the group may contain the "same" slot |
324 | | // twice: original and mirrored. |
325 | 0 | NonIterableBitMaskType MaskFullOrSentinel() const { |
326 | 0 | auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel) - 1); |
327 | 0 | return NonIterableBitMaskType(static_cast<uint16_t>( |
328 | 0 | _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(ctrl, special)))); |
329 | 0 | } |
330 | | |
331 | 0 | void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { |
332 | 0 | auto msbs = _mm_set1_epi8(static_cast<char>(-128)); |
333 | 0 | auto x126 = _mm_set1_epi8(126); |
334 | | #ifdef ABSL_INTERNAL_HAVE_SSSE3 |
335 | | auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); |
336 | | #else |
337 | 0 | auto zero = _mm_setzero_si128(); |
338 | 0 | auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); |
339 | 0 | auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); |
340 | 0 | #endif |
341 | 0 | _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); |
342 | 0 | } |
343 | | |
344 | | __m128i ctrl; |
345 | | }; |
346 | | #endif // ABSL_INTERNAL_HAVE_SSE2 |
347 | | |
348 | | #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) |
349 | | struct GroupAArch64Impl { |
350 | | static constexpr size_t kWidth = 8; |
351 | | using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3, |
352 | | /*NullifyBitsOnIteration=*/true>; |
353 | | using NonIterableBitMaskType = |
354 | | NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>; |
355 | | |
356 | | explicit GroupAArch64Impl(const ctrl_t* pos) { |
357 | | ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos)); |
358 | | } |
359 | | |
360 | | auto Match(h2_t hash) const { |
361 | | uint8x8_t dup = vdup_n_u8(hash); |
362 | | auto mask = vceq_u8(ctrl, dup); |
363 | | return BitMaskType(vget_lane_u64(vreinterpret_u64_u8(mask), 0)); |
364 | | } |
365 | | |
366 | | auto MaskEmpty() const { |
367 | | uint64_t mask = |
368 | | vget_lane_u64(vreinterpret_u64_u8(vceq_s8( |
369 | | vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)), |
370 | | vreinterpret_s8_u8(ctrl))), |
371 | | 0); |
372 | | return NonIterableBitMaskType(mask); |
373 | | } |
374 | | |
375 | | // Returns a bitmask representing the positions of full slots. |
376 | | // Note: for `is_small()` tables, the group may contain the "same" slot |
377 | | // twice: original and mirrored. |
378 | | auto MaskFull() const { |
379 | | uint64_t mask = vget_lane_u64( |
380 | | vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl), |
381 | | vdup_n_s8(static_cast<int8_t>(0)))), |
382 | | 0); |
383 | | return BitMaskType(mask); |
384 | | } |
385 | | |
386 | | // Returns a bitmask representing the positions of non-full slots. |
387 | | // Note: this includes kEmpty, kDeleted, and kSentinel. |
388 | | // It is useful in contexts where kSentinel is not present. |
389 | | auto MaskNonFull() const { |
390 | | uint64_t mask = vget_lane_u64( |
391 | | vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl), |
392 | | vdup_n_s8(static_cast<int8_t>(0)))), |
393 | | 0); |
394 | | return BitMaskType(mask); |
395 | | } |
396 | | |
397 | | auto MaskEmptyOrDeleted() const { |
398 | | uint64_t mask = |
399 | | vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( |
400 | | vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)), |
401 | | vreinterpret_s8_u8(ctrl))), |
402 | | 0); |
403 | | return NonIterableBitMaskType(mask); |
404 | | } |
405 | | |
406 | | NonIterableBitMaskType MaskFullOrSentinel() const { |
407 | | uint64_t mask = vget_lane_u64( |
408 | | vreinterpret_u64_u8( |
409 | | vcgt_s8(vreinterpret_s8_u8(ctrl), |
410 | | vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel) - 1))), |
411 | | 0); |
412 | | return NonIterableBitMaskType(mask); |
413 | | } |
414 | | |
415 | | void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { |
416 | | uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); |
417 | | constexpr uint64_t slsbs = 0x0202020202020202ULL; |
418 | | constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL; |
419 | | auto x = slsbs & (mask >> 6); |
420 | | auto res = (x + midbs) | kMsbs8Bytes; |
421 | | little_endian::Store64(dst, res); |
422 | | } |
423 | | |
424 | | uint8x8_t ctrl; |
425 | | }; |
426 | | #endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN |
427 | | |
428 | | struct GroupPortableImpl { |
429 | | static constexpr size_t kWidth = 8; |
430 | | using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3, |
431 | | /*NullifyBitsOnIteration=*/false>; |
432 | | using NonIterableBitMaskType = |
433 | | NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>; |
434 | | |
435 | | explicit GroupPortableImpl(const ctrl_t* pos) |
436 | 0 | : ctrl(little_endian::Load64(pos)) {} |
437 | | |
438 | 0 | BitMaskType Match(h2_t hash) const { |
439 | 0 | // For the technique, see: |
441 | 0 | // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord |
441 | 0 | // (Determine if a word has a byte equal to n). |
442 | 0 | // |
443 | 0 | // Caveat: there are false positives but: |
444 | 0 | // - they only occur if there is a real match |
445 | 0 | // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel |
446 | 0 | // - they will be handled gracefully by subsequent checks in code |
447 | 0 | // |
448 | 0 | // Example: |
449 | 0 | // v = 0x1716151413121110 |
450 | 0 | // hash = 0x12 |
451 | 0 | // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 |
452 | 0 | constexpr uint64_t lsbs = 0x0101010101010101ULL; |
453 | 0 | auto x = ctrl ^ (lsbs * hash); |
454 | 0 | return BitMaskType((x - lsbs) & ~x & kMsbs8Bytes); |
455 | 0 | } |
456 | | |
457 | 0 | auto MaskEmpty() const { |
458 | 0 | return NonIterableBitMaskType((ctrl & ~(ctrl << 6)) & kMsbs8Bytes); |
459 | 0 | } |
460 | | |
461 | | // Returns a bitmask representing the positions of full slots. |
462 | | // Note: for `is_small()` tables, the group may contain the "same" slot |
463 | | // twice: original and mirrored. |
464 | 0 | auto MaskFull() const { |
465 | 0 | return BitMaskType((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes); |
466 | 0 | } |
467 | | |
468 | | // Returns a bitmask representing the positions of non-full slots. |
469 | | // Note: this includes kEmpty, kDeleted, and kSentinel. |
470 | | // It is useful in contexts where kSentinel is not present. |
471 | 0 | auto MaskNonFull() const { return BitMaskType(ctrl & kMsbs8Bytes); } |
472 | | |
473 | 0 | auto MaskEmptyOrDeleted() const { |
474 | 0 | return NonIterableBitMaskType((ctrl & ~(ctrl << 7)) & kMsbs8Bytes); |
475 | 0 | } |
476 | | |
477 | 0 | auto MaskFullOrSentinel() const { |
478 | 0 | return NonIterableBitMaskType((~ctrl | (ctrl << 7)) & kMsbs8Bytes); |
479 | 0 | } |
480 | | |
481 | 0 | void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { |
482 | 0 | constexpr uint64_t lsbs = 0x0101010101010101ULL; |
483 | 0 | auto x = ctrl & kMsbs8Bytes; |
484 | 0 | auto res = (~x + (x >> 7)) & ~lsbs; |
485 | 0 | little_endian::Store64(dst, res); |
486 | 0 | } |
487 | | |
488 | | uint64_t ctrl; |
489 | | }; |
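The comment inside Match() above traces v = 0x1716151413121110 and hash = 0x12. The following self-contained sketch reproduces that arithmetic, including the benign false positive at byte 3 that the comment warns about:

    #include <cassert>
    #include <cstdint>

    void SwarMatchDemo() {
      constexpr uint64_t lsbs = 0x0101010101010101ULL;
      constexpr uint64_t msbs = 0x8080808080808080ULL;  // kMsbs8Bytes
      const uint64_t v = 0x1716151413121110ULL;  // eight control bytes
      const uint64_t hash = 0x12;                // byte value to search for
      const uint64_t x = v ^ (lsbs * hash);      // matching bytes become 0x00
      const uint64_t match = (x - lsbs) & ~x & msbs;
      // Byte 2 is a true match; byte 3 is the documented false positive caused
      // by the borrow out of byte 2, resolved later by a full key comparison.
      assert(match == 0x0000000080800000ULL);
    }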
490 | | |
491 | | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
492 | | using Group = GroupSse2Impl; |
493 | | using GroupFullEmptyOrDeleted = GroupSse2Impl; |
494 | | #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) |
495 | | using Group = GroupAArch64Impl; |
496 | | // For AArch64, we use the portable implementation for counting and masking |
497 | | // full, empty, or deleted group elements. This avoids the latency of moving |
498 | | // between general-purpose and NEON registers when doing so provides no benefit. |
499 | | // Using NEON is profitable when we call Match(), but not when we don't, |
500 | | // which is the case for the *EmptyOrDeleted and MaskFull operations. |
501 | | // It is difficult to make a similar approach beneficial on other architectures |
502 | | // such as x86, since they have much lower GPR <-> vector register transfer |
503 | | // latency and 16-wide Groups. |
504 | | using GroupFullEmptyOrDeleted = GroupPortableImpl; |
505 | | #else |
506 | | using Group = GroupPortableImpl; |
507 | | using GroupFullEmptyOrDeleted = GroupPortableImpl; |
508 | | #endif |
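Tying the pieces together, here is a hedged sketch of how a single probe step typically consumes a Group; the function name and return convention are illustrative, not part of this header:

    #include <cstdint>
    #include "absl/container/internal/hashtable_control_bytes.h"

    namespace ci = absl::container_internal;

    // `ctrl` must point at at least Group::kWidth valid control bytes.
    int FindFirstMatch(const ci::ctrl_t* ctrl, ci::h2_t h2) {
      ci::Group g(ctrl);
      for (uint32_t i : g.Match(h2)) {
        return static_cast<int>(i);  // candidate slot; caller verifies the key
      }
      if (g.MaskEmpty()) return -1;  // an empty slot terminates the probe
      return -2;                     // no match here; continue to the next group
    }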
509 | | |
510 | | } // namespace container_internal |
511 | | ABSL_NAMESPACE_END |
512 | | } // namespace absl |
513 | | |
514 | | #undef ABSL_SWISSTABLE_ASSERT |
515 | | |
516 | | #endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_ |