/src/abseil-cpp/absl/hash/internal/city.cc
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file provides CityHash64() and related functions.
//
// It's probably possible to create even faster hash functions by
// writing a program that systematically explores some of the space of
// possible hash functions, by using SIMD instructions, or by
// compromising on hash quality.
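//
// A minimal usage sketch (illustrative only; these functions live in an
// internal namespace and are not a supported public API):
//
//   const char buf[] = "hello";
//   uint64_t h64 = absl::hash_internal::CityHash64(buf, 5);
//   uint32_t h32 = absl::hash_internal::CityHash32(buf, 5);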

#include "absl/hash/internal/city.h"

#include <string.h>  // for memcpy and memset
#include <algorithm>

#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/optimization.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace hash_internal {

#ifdef ABSL_IS_BIG_ENDIAN
#define uint32_in_expected_order(x) (absl::gbswap_32(x))
#define uint64_in_expected_order(x) (absl::gbswap_64(x))
#else
#define uint32_in_expected_order(x) (x)
#define uint64_in_expected_order(x) (x)
#endif

static uint64_t Fetch64(const char *p) {
  return uint64_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

static uint32_t Fetch32(const char *p) {
  return uint32_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

// Some primes between 2^63 and 2^64 for various uses.
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;

// Magic numbers for 32-bit hashing.  Copied from Murmur3.
static const uint32_t c1 = 0xcc9e2d51;
static const uint32_t c2 = 0x1b873593;

// A 32-bit to 32-bit integer hash copied from Murmur3.
static uint32_t fmix(uint32_t h) {
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;
  return h;
}

static uint32_t Rotate32(uint32_t val, int shift) {
  // Avoid shifting by 32: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (32 - shift)));
}

#undef PERMUTE3
#define PERMUTE3(a, b, c) \
  do {                    \
    std::swap(a, b);      \
    std::swap(a, c);      \
  } while (0)

static uint32_t Mur(uint32_t a, uint32_t h) {
  // Helper from Murmur3 for combining two 32-bit values.
  a *= c1;
  a = Rotate32(a, 17);
  a *= c2;
  h ^= a;
  h = Rotate32(h, 19);
  return h * 5 + 0xe6546b64;
}

static uint32_t Hash32Len13to24(const char *s, size_t len) {
  uint32_t a = Fetch32(s - 4 + (len >> 1));
  uint32_t b = Fetch32(s + 4);
  uint32_t c = Fetch32(s + len - 8);
  uint32_t d = Fetch32(s + (len >> 1));
  uint32_t e = Fetch32(s);
  uint32_t f = Fetch32(s + len - 4);
  uint32_t h = static_cast<uint32_t>(len);

  return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h)))))));
}

static uint32_t Hash32Len0to4(const char *s, size_t len) {
  uint32_t b = 0;
  uint32_t c = 9;
  for (size_t i = 0; i < len; i++) {
    signed char v = static_cast<signed char>(s[i]);
    b = b * c1 + static_cast<uint32_t>(v);
    c ^= b;
  }
  return fmix(Mur(b, Mur(static_cast<uint32_t>(len), c)));
}

static uint32_t Hash32Len5to12(const char *s, size_t len) {
  uint32_t a = static_cast<uint32_t>(len), b = a * 5, c = 9, d = b;
  a += Fetch32(s);
  b += Fetch32(s + len - 4);
  c += Fetch32(s + ((len >> 1) & 4));
  return fmix(Mur(c, Mur(b, Mur(a, d))));
}

uint32_t CityHash32(const char *s, size_t len) {
  if (len <= 24) {
    return len <= 12
               ? (len <= 4 ? Hash32Len0to4(s, len) : Hash32Len5to12(s, len))
               : Hash32Len13to24(s, len);
  }

  // len > 24
  uint32_t h = static_cast<uint32_t>(len), g = c1 * h, f = g;

  uint32_t a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2;
  uint32_t a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2;
  uint32_t a2 = Rotate32(Fetch32(s + len - 16) * c1, 17) * c2;
  uint32_t a3 = Rotate32(Fetch32(s + len - 12) * c1, 17) * c2;
  uint32_t a4 = Rotate32(Fetch32(s + len - 20) * c1, 17) * c2;
  h ^= a0;
  h = Rotate32(h, 19);
  h = h * 5 + 0xe6546b64;
  h ^= a2;
  h = Rotate32(h, 19);
  h = h * 5 + 0xe6546b64;
  g ^= a1;
  g = Rotate32(g, 19);
  g = g * 5 + 0xe6546b64;
  g ^= a3;
  g = Rotate32(g, 19);
  g = g * 5 + 0xe6546b64;
  f += a4;
  f = Rotate32(f, 19);
  f = f * 5 + 0xe6546b64;
  size_t iters = (len - 1) / 20;
  do {
    uint32_t b0 = Rotate32(Fetch32(s) * c1, 17) * c2;
    uint32_t b1 = Fetch32(s + 4);
    uint32_t b2 = Rotate32(Fetch32(s + 8) * c1, 17) * c2;
    uint32_t b3 = Rotate32(Fetch32(s + 12) * c1, 17) * c2;
    uint32_t b4 = Fetch32(s + 16);
    h ^= b0;
    h = Rotate32(h, 18);
    h = h * 5 + 0xe6546b64;
    f += b1;
    f = Rotate32(f, 19);
    f = f * c1;
    g += b2;
    g = Rotate32(g, 18);
    g = g * 5 + 0xe6546b64;
    h ^= b3 + b1;
    h = Rotate32(h, 19);
    h = h * 5 + 0xe6546b64;
    g ^= b4;
    g = absl::gbswap_32(g) * 5;
    h += b4 * 5;
    h = absl::gbswap_32(h);
    f += b0;
    PERMUTE3(f, h, g);
    s += 20;
  } while (--iters != 0);
  g = Rotate32(g, 11) * c1;
  g = Rotate32(g, 17) * c1;
  f = Rotate32(f, 11) * c1;
  f = Rotate32(f, 17) * c1;
  h = Rotate32(h + g, 19);
  h = h * 5 + 0xe6546b64;
  h = Rotate32(h, 17) * c1;
  h = Rotate32(h + f, 19);
  h = h * 5 + 0xe6546b64;
  h = Rotate32(h, 17) * c1;
  return h;
}

// Bitwise right rotate.  Normally this will compile to a single
// instruction, especially if the shift is a manifest constant.
static uint64_t Rotate(uint64_t val, int shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

static uint64_t ShiftMix(uint64_t val) { return val ^ (val >> 47); }

static uint64_t HashLen16(uint64_t u, uint64_t v, uint64_t mul) {
  // Murmur-inspired hashing.
  uint64_t a = (u ^ v) * mul;
  a ^= (a >> 47);
  uint64_t b = (v ^ a) * mul;
  b ^= (b >> 47);
  b *= mul;
  return b;
}

static uint64_t HashLen16(uint64_t u, uint64_t v) {
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  return HashLen16(u, v, kMul);
}

static uint64_t HashLen0to16(const char *s, size_t len) {
  if (len >= 8) {
    uint64_t mul = k2 + len * 2;
    uint64_t a = Fetch64(s) + k2;
    uint64_t b = Fetch64(s + len - 8);
    uint64_t c = Rotate(b, 37) * mul + a;
    uint64_t d = (Rotate(a, 25) + b) * mul;
    return HashLen16(c, d, mul);
  }
  if (len >= 4) {
    uint64_t mul = k2 + len * 2;
    uint64_t a = Fetch32(s);
    return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul);
  }
  if (len > 0) {
    uint8_t a = static_cast<uint8_t>(s[0]);
    uint8_t b = static_cast<uint8_t>(s[len >> 1]);
    uint8_t c = static_cast<uint8_t>(s[len - 1]);
    uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
    uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
    return ShiftMix(y * k2 ^ z * k0) * k2;
  }
  return k2;
}

// This probably works well for 16-byte strings as well, but it may be overkill
// in that case.
static uint64_t HashLen17to32(const char *s, size_t len) {
  uint64_t mul = k2 + len * 2;
  uint64_t a = Fetch64(s) * k1;
  uint64_t b = Fetch64(s + 8);
  uint64_t c = Fetch64(s + len - 8) * mul;
  uint64_t d = Fetch64(s + len - 16) * k2;
  return HashLen16(Rotate(a + b, 43) + Rotate(c, 30) + d,
                   a + Rotate(b + k2, 18) + c, mul);
}

// Return a 16-byte hash for 48 bytes.  Quick and dirty.
// Callers do best to use "random-looking" values for a and b.
static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(
    uint64_t w, uint64_t x, uint64_t y, uint64_t z, uint64_t a, uint64_t b) {
  a += w;
  b = Rotate(b + a + z, 21);
  uint64_t c = a;
  a += x;
  a += y;
  b += Rotate(a, 44);
  return std::make_pair(a + z, b + c);
}

// Return a 16-byte hash for s[0] ... s[31], a, and b.  Quick and dirty.
static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(const char *s,
                                                            uint64_t a,
                                                            uint64_t b) {
  return WeakHashLen32WithSeeds(Fetch64(s), Fetch64(s + 8), Fetch64(s + 16),
                                Fetch64(s + 24), a, b);
}

// Return an 8-byte hash for 33 to 64 bytes.
static uint64_t HashLen33to64(const char *s, size_t len) {
  uint64_t mul = k2 + len * 2;
  uint64_t a = Fetch64(s) * k2;
  uint64_t b = Fetch64(s + 8);
  uint64_t c = Fetch64(s + len - 24);
  uint64_t d = Fetch64(s + len - 32);
  uint64_t e = Fetch64(s + 16) * k2;
  uint64_t f = Fetch64(s + 24) * 9;
  uint64_t g = Fetch64(s + len - 8);
  uint64_t h = Fetch64(s + len - 16) * mul;
  uint64_t u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9;
  uint64_t v = ((a + g) ^ d) + f + 1;
  uint64_t w = absl::gbswap_64((u + v) * mul) + h;
  uint64_t x = Rotate(e + f, 42) + c;
  uint64_t y = (absl::gbswap_64((v + w) * mul) + g) * mul;
  uint64_t z = e + f + c;
  a = absl::gbswap_64((x + z) * mul + y) + b;
  b = ShiftMix((z + a) * mul + d + h) * mul;
  return b + x;
}

uint64_t CityHash64(const char *s, size_t len) {
  if (len <= 32) {
    if (len <= 16) {
      return HashLen0to16(s, len);
    } else {
      return HashLen17to32(s, len);
    }
  } else if (len <= 64) {
    return HashLen33to64(s, len);
  }

  // For strings over 64 bytes we hash the end first, and then as we
  // loop we keep 56 bytes of state: v, w, x, y, and z.
  uint64_t x = Fetch64(s + len - 40);
  uint64_t y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
  uint64_t z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
  std::pair<uint64_t, uint64_t> v =
      WeakHashLen32WithSeeds(s + len - 64, len, z);
  std::pair<uint64_t, uint64_t> w =
      WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
  x = x * k1 + Fetch64(s);

  // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
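  // (For example, len == 65 and len == 128 both become 64: the result is the
  // largest multiple of 64 strictly below the original len, so the do/while
  // loop below always executes at least once.)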
  len = (len - 1) & ~static_cast<size_t>(63);
  do {
    x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
    y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
    x ^= w.second;
    y += v.first + Fetch64(s + 40);
    z = Rotate(z + w.first, 33) * k1;
    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
    std::swap(z, x);
    s += 64;
    len -= 64;
  } while (len != 0);
  return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
                   HashLen16(v.second, w.second) + x);
}

uint64_t CityHash64WithSeed(const char *s, size_t len, uint64_t seed) {
  return CityHash64WithSeeds(s, len, k2, seed);
}

uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0,
                             uint64_t seed1) {
  return HashLen16(CityHash64(s, len) - seed0, seed1);
}
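
// Illustrative sketch of how the seeded variants relate (not a public API):
// CityHash64WithSeed(s, len, seed) simply forwards to
// CityHash64WithSeeds(s, len, k2, seed), so for any buffer `buf` of length `n`
//
//   uint64_t h1 = CityHash64WithSeed(buf, n, 0x1234);
//   uint64_t h2 = CityHash64WithSeeds(buf, n, k2, 0x1234);
//
// produce the same value: both mix CityHash64(buf, n) with the seeds through
// HashLen16.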

}  // namespace hash_internal
ABSL_NAMESPACE_END
}  // namespace absl