/src/qtbase/src/3rdparty/siphash/siphash.cpp
// Copyright (C) 2012-2014 Jean-Philippe Aumasson
// Copyright (C) 2012-2014 Daniel J. Bernstein <djb@cr.yp.to>
// Copyright (C) 2016 Intel Corporation.
// SPDX-License-Identifier: CC0-1.0

#include <QtCore/qassert.h>
#include <QtCore/qcompilerdetection.h>
#include <QtCore/qendian.h>

#ifdef Q_CC_GNU
#  define DECL_HOT_FUNCTION __attribute__((hot))
#else
#  define DECL_HOT_FUNCTION
#endif

#include <cstdint>

QT_USE_NAMESPACE

namespace {

// This is an inlined version of the SipHash implementation that tries
// to avoid some memcpy's from uint64 to uint8[] and back.

#define ROTL(x, b) (((x) << (b)) | ((x) >> (sizeof(x) * 8 - (b))))
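
// ROTL(x, b) rotates x left by b bits within its own width: bits shifted out
// at the top re-enter at the bottom. For a uint64_t x, ROTL(x, 13) is
// (x << 13) | (x >> 51). b is always a non-zero constant smaller than the
// word size here, so both shifts are well defined.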

#define SIPROUND \
    do { \
        v0 += v1; \
        v1 = ROTL(v1, 13); \
        v1 ^= v0; \
        v0 = ROTL(v0, 32); \
        v2 += v3; \
        v3 = ROTL(v3, 16); \
        v3 ^= v2; \
        v0 += v3; \
        v3 = ROTL(v3, 21); \
        v3 ^= v0; \
        v2 += v1; \
        v1 = ROTL(v1, 17); \
        v1 ^= v2; \
        v2 = ROTL(v2, 32); \
    } while (0)
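
// SIPROUND is one add-rotate-xor (ARX) round over the four 64-bit state words
// v0..v3, using the rotation amounts from the SipHash reference design.
// cROUNDS of these are applied per message block ("compression") and dROUNDS
// during finalization.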

template <int cROUNDS = 2, int dROUNDS = 4> struct SipHash64
{
    /* "somepseudorandomlygeneratedbytes" */
    uint64_t v0 = 0x736f6d6570736575ULL;
    uint64_t v1 = 0x646f72616e646f6dULL;
    uint64_t v2 = 0x6c7967656e657261ULL;
    uint64_t v3 = 0x7465646279746573ULL;
    uint64_t b;
    uint64_t k0;
    uint64_t k1;

    inline SipHash64(uint64_t fulllen, uint64_t seed, uint64_t seed2);
    inline void addBlock(const uint8_t *in, size_t inlen);
    inline uint64_t finalize(const uint8_t *in, size_t left);
};
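
// Intended call sequence (see siphash() at the bottom of this file):
// construct with the total input length and the two seed words, feed all
// complete 8-byte blocks through addBlock(), then pass the remaining 0..7
// tail bytes to finalize(), which returns the 64-bit hash.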

template <int cROUNDS, int dROUNDS>
SipHash64<cROUNDS, dROUNDS>::SipHash64(uint64_t inlen, uint64_t seed, uint64_t seed2)
{
    b = inlen << 56;
    k0 = seed;
    k1 = seed2;
    v3 ^= k1;
    v2 ^= k0;
    v1 ^= k1;
    v0 ^= k0;
}
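
// Key setup: the 128-bit key (k0, k1) is XORed into the initialization
// constants, and b pre-stores the low byte of the input length in the most
// significant byte of the final message block, as the SipHash padding rule
// requires.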

template <int cROUNDS, int dROUNDS> DECL_HOT_FUNCTION void
SipHash64<cROUNDS, dROUNDS>::addBlock(const uint8_t *in, size_t inlen)
{
    Q_ASSERT((inlen & 7ULL) == 0);
    int i;
    const uint8_t *end = in + inlen;
    for (; in != end; in += 8) {
        uint64_t m = qFromUnaligned<uint64_t>(in);
        v3 ^= m;

        for (i = 0; i < cROUNDS; ++i)
            SIPROUND;

        v0 ^= m;
    }
}
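
// Compression: each 8-byte block is loaded straight from the input with
// qFromUnaligned() (host byte order, no alignment requirement), XORed into
// v3, mixed with cROUNDS SIPROUNDs, and then XORed into v0.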

template <int cROUNDS, int dROUNDS> DECL_HOT_FUNCTION uint64_t
SipHash64<cROUNDS, dROUNDS>::finalize(const uint8_t *in, size_t left)
{
    int i;
    switch (left) {
    case 7:
        b |= ((uint64_t)in[6]) << 48;
        Q_FALLTHROUGH();
    case 6:
        b |= ((uint64_t)in[5]) << 40;
        Q_FALLTHROUGH();
    case 5:
        b |= ((uint64_t)in[4]) << 32;
        Q_FALLTHROUGH();
    case 4:
        b |= ((uint64_t)in[3]) << 24;
        Q_FALLTHROUGH();
    case 3:
        b |= ((uint64_t)in[2]) << 16;
        Q_FALLTHROUGH();
    case 2:
        b |= ((uint64_t)in[1]) << 8;
        Q_FALLTHROUGH();
    case 1:
        b |= ((uint64_t)in[0]);
        break;
    case 0:
        break;
    }

    v3 ^= b;

    for (i = 0; i < cROUNDS; ++i)
        SIPROUND;

    v0 ^= b;

    v2 ^= 0xff;

    for (i = 0; i < dROUNDS; ++i)
        SIPROUND;

    b = v0 ^ v1 ^ v2 ^ v3;
    return b;
}
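
// Finalization: the 0..7 tail bytes are packed into the low bytes of b (whose
// top byte already holds the length), b is absorbed like a normal block,
// v2 is XORed with 0xff to mark finalization, dROUNDS SIPROUNDs are run, and
// the hash is the XOR of the four state words.
//
// A minimal usage sketch (the real entry point is siphash() below):
//     SipHash64<> h(len, seed, seed2);
//     h.addBlock(data, len & ~size_t(7));
//     uint64_t hash = h.finalize(data + (len & ~size_t(7)), len & 7);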
#undef SIPROUND

// This is a "SipHash" implementation adapted for 32bit platforms. It performs
// basically the same operations as the 64bit version, using 4 bytes at a time
// instead of 8.
//
// To make this work, we also need to change the constants for the mixing
// rotations in ROTL. We're simply using half of the 64bit constants, rounded up
// for odd numbers.
//
// For the v0-v3 constants, simply use the first four bytes of the 64 bit versions.
//
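// For example, the 64-bit round's rotations of 13, 21 and 17 become 7, 11
// and 9 here, while 32 and 16 become 16 and 8.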

#define SIPROUND \
    do { \
        v0 += v1; \
        v1 = ROTL(v1, 7); \
        v1 ^= v0; \
        v0 = ROTL(v0, 16); \
        v2 += v3; \
        v3 = ROTL(v3, 8); \
        v3 ^= v2; \
        v0 += v3; \
        v3 = ROTL(v3, 11); \
        v3 ^= v0; \
        v2 += v1; \
        v1 = ROTL(v1, 9); \
        v1 ^= v2; \
        v2 = ROTL(v2, 16); \
    } while (0)

template <int cROUNDS = 2, int dROUNDS = 4> struct SipHash32
{
    /* "somepseudorandomlygeneratedbytes" */
    uint v0 = 0x736f6d65U;
    uint v1 = 0x646f7261U;
    uint v2 = 0x6c796765U;
    uint v3 = 0x74656462U;
    uint b;
    uint k0;
    uint k1;

    inline SipHash32(size_t fulllen, uint seed, uint seed2);
    inline void addBlock(const uint8_t *in, size_t inlen);
    inline uint finalize(const uint8_t *in, size_t left);
};

template <int cROUNDS, int dROUNDS> inline
SipHash32<cROUNDS, dROUNDS>::SipHash32(size_t inlen, uint seed, uint seed2)
{
    k0 = seed;
    k1 = seed2;
    b = inlen << 24;
    v3 ^= k1;
    v2 ^= k0;
    v1 ^= k1;
    v0 ^= k0;
}
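
// Same key setup as the 64-bit version, scaled down: the seeds are XORed into
// the 32-bit initialization constants, and b = inlen << 24 stores the low
// length byte in the top byte of the final 4-byte block.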

template <int cROUNDS, int dROUNDS> inline DECL_HOT_FUNCTION void
SipHash32<cROUNDS, dROUNDS>::addBlock(const uint8_t *in, size_t inlen)
{
    Q_ASSERT((inlen & 3ULL) == 0);
    int i;
    const uint8_t *end = in + inlen;
    for (; in != end; in += 4) {
        uint m = qFromUnaligned<uint>(in);
        v3 ^= m;

        for (i = 0; i < cROUNDS; ++i)
            SIPROUND;

        v0 ^= m;
    }
}

template <int cROUNDS, int dROUNDS> inline DECL_HOT_FUNCTION uint
SipHash32<cROUNDS, dROUNDS>::finalize(const uint8_t *in, size_t left)
{
    int i;
    switch (left) {
    case 3:
        b |= ((uint)in[2]) << 16;
        Q_FALLTHROUGH();
    case 2:
        b |= ((uint)in[1]) << 8;
        Q_FALLTHROUGH();
    case 1:
        b |= ((uint)in[0]);
        break;
    case 0:
        break;
    }

    v3 ^= b;

    for (i = 0; i < cROUNDS; ++i)
        SIPROUND;

    v0 ^= b;

    v2 ^= 0xff;

    for (i = 0; i < dROUNDS; ++i)
        SIPROUND;

    b = v0 ^ v1 ^ v2 ^ v3;
    return b;
}
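
// Mirrors the 64-bit finalization, but with at most 3 tail bytes and a
// 32-bit result.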
#undef SIPROUND
#undef ROTL

// Use SipHash-1-2, which has performance characteristics similar to
// Qt 4's hash implementation, instead of the SipHash-2-4 default.
template <int cROUNDS = 1, int dROUNDS = 2>
using SipHash = std::conditional_t<sizeof(void *) == 8,
                                   SipHash64<cROUNDS, dROUNDS>, SipHash32<cROUNDS, dROUNDS>>;
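
// Where pointers are 8 bytes wide the 64-bit state version is selected,
// otherwise the 32-bit variant, so the result width matches the platform's
// natural word size (and, on common platforms, size_t).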

} // unnamed namespace

Q_NEVER_INLINE DECL_HOT_FUNCTION
static size_t siphash(const uint8_t *in, size_t inlen, size_t seed, size_t seed2)
{
    constexpr size_t TailSizeMask = sizeof(void *) - 1;
    SipHash<> hasher(inlen, seed, seed2);
    hasher.addBlock(in, inlen & ~TailSizeMask);
    return hasher.finalize(in + (inlen & ~TailSizeMask), inlen & TailSizeMask);
}
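
// inlen & ~TailSizeMask rounds the length down to a whole number of state
// words (8 bytes on 64-bit builds, 4 on 32-bit), and inlen & TailSizeMask is
// the leftover tail: for inlen == 13 on a 64-bit build, addBlock() consumes
// 8 bytes and finalize() the remaining 5. The function is static and
// everything above lives in an unnamed namespace, so this file is presumably
// meant to be #include'd into the translation unit that uses it rather than
// compiled on its own.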