Line | Count | Source (jump to first uncovered line) |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include "siphash.h"
/* Build the streaming (incremental) API by default unless the build
 * explicitly disabled it. */
#ifndef SIP_HASH_STREAMING
#define SIP_HASH_STREAMING 1
#endif

/* Endianness detection: establish BYTE_ORDER plus LITTLE_ENDIAN and
 * BIG_ENDIAN on every supported platform. */
#if defined(__MINGW32__)
#include <sys/param.h>

/* MinGW only defines LITTLE_ENDIAN and BIG_ENDIAN macros */
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#define __BIG_ENDIAN BIG_ENDIAN
#elif defined(_WIN32)
/* Windows targets are little-endian. */
#define BYTE_ORDER __LITTLE_ENDIAN
#elif !defined(BYTE_ORDER)
#include <endian.h>
#endif

#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN __LITTLE_ENDIAN
#endif
#ifndef BIG_ENDIAN
#define BIG_ENDIAN __BIG_ENDIAN
#endif

/* In the no-native-uint64 fallback (see the #else branch below), a
 * 64-bit value is a pair of 32-bit words; `lo`/`hi` select the right
 * u32[] slot for this host byte order. */
#if BYTE_ORDER == LITTLE_ENDIAN
#define lo u32[0]
#define hi u32[1]
#elif BYTE_ORDER == BIG_ENDIAN
#define hi u32[0]
#define lo u32[1]
#else
#error "Only strictly little or big endian supported"
#endif

/* __POWERPC__ added to accommodate Darwin case. */
/* Architectures on this whitelist tolerate unaligned word loads; on
 * anything else the code must read byte-by-byte. */
#ifndef UNALIGNED_WORD_ACCESS
# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
     defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || \
     defined(__powerpc64__) || defined(__POWERPC__) || defined(__aarch64__) || \
     defined(__mc68020__)
#   define UNALIGNED_WORD_ACCESS 1
# endif
#endif
#ifndef UNALIGNED_WORD_ACCESS
# define UNALIGNED_WORD_ACCESS 0
#endif
49 | | |
/* Load a 32-bit little-endian value from byte pointer p, one byte at a
 * time (safe for any alignment and any host byte order).
 * Fix: the original definition ended with a stray trailing backslash,
 * splicing the following (blank) line into the macro — fragile if a
 * statement were ever added right after it. */
#define U8TO32_LE(p)                                              \
    (((uint32_t)((p)[0])      ) | ((uint32_t)((p)[1]) <<  8) |    \
     ((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24))

/* Store v as 4 little-endian bytes at p. */
#define U32TO8_LE(p, v)                  \
do {                                     \
    (p)[0] = (uint8_t)((v)      );       \
    (p)[1] = (uint8_t)((v) >>  8);       \
    (p)[2] = (uint8_t)((v) >> 16);       \
    (p)[3] = (uint8_t)((v) >> 24);       \
} while (0)
61 | | |
62 | | #ifdef HAVE_UINT64_T |
/* Native-uint64_t implementations of the 64-bit helper operations. */

/* Load a 64-bit little-endian value from byte pointer p. */
#define U8TO64_LE(p) \
    ((uint64_t)U8TO32_LE(p) | ((uint64_t)U8TO32_LE((p) + 4)) << 32 )

/* Store v as 8 little-endian bytes at p. */
#define U64TO8_LE(p, v)                          \
do {                                             \
    U32TO8_LE((p),     (uint32_t)((v)      ));   \
    U32TO8_LE((p) + 4, (uint32_t)((v) >> 32));   \
} while (0)

/* Rotate v left by s bits (precondition: 0 < s < 64, as all SipHash
 * rotation counts are).
 * Fix: the expansion is now fully parenthesized; the original
 * `((v) << (s)) | ((v) >> (64 - (s)))` misparses when the macro is
 * composed into a larger expression (e.g. `ROTL64(v, s) == x` bound
 * `==` to the right shift operand because `==` outranks `|`). */
#define ROTL64(v, s) \
    (((v) << (s)) | ((v) >> (64 - (s))))

#define ROTL64_TO(v, s) ((v) = ROTL64((v), (s)))

#define ADD64_TO(v, s) ((v) += (s))
#define XOR64_TO(v, s) ((v) ^= (s))
#define XOR64_INT(v, x) ((v) ^= (x))
80 | | #else |
/* Fallback path: here `uint64_t` is a project-declared two-u32 struct
 * (the lo/hi member selectors are defined above), so the 64-bit
 * helpers must be real functions rather than integer expressions. */
#define U8TO64_LE(p) u8to64_le(p)
/* Load a 64-bit little-endian value from p into the lo/hi word pair. */
static inline uint64_t
u8to64_le(const uint8_t *p)
{
    uint64_t ret;
    ret.lo = U8TO32_LE(p);
    ret.hi = U8TO32_LE(p + 4);
    return ret;
}

#define U64TO8_LE(p, v) u64to8_le(p, v)
/* Store the lo/hi word pair v as 8 little-endian bytes at p. */
static inline void
u64to8_le(uint8_t *p, uint64_t v)
{
    U32TO8_LE(p, v.lo);
    U32TO8_LE(p + 4, v.hi);
}
98 | | |
/* Rotate-left for the two-word fallback.  A rotation of >= 32 bits is
 * reduced to a half-swap (rotate by exactly 32) plus a small rotate,
 * so rotl64_to() only ever sees 0 < s < 32. */
#define ROTL64_TO(v, s) ((s) > 32 ? rotl64_swap(rotl64_to(&(v), (s) - 32)) : \
                         (s) == 32 ? rotl64_swap(&(v)) : rotl64_to(&(v), (s)))
/* Rotate the 64-bit word pair left by s bits; requires 0 < s < 32
 * (s == 0 or s >= 32 would shift a uint32_t by 32, which is undefined —
 * the ROTL64_TO dispatcher guarantees the precondition, and SipHash
 * never rotates by 0). */
static inline uint64_t *
rotl64_to(uint64_t *v, unsigned int s)
{
    uint32_t uhi = (v->hi << s) | (v->lo >> (32 - s));
    uint32_t ulo = (v->lo << s) | (v->hi >> (32 - s));
    v->hi = uhi;
    v->lo = ulo;
    return v;
}

/* Swap the 32-bit halves: equivalent to a rotate by exactly 32 bits. */
static inline uint64_t *
rotl64_swap(uint64_t *v)
{
    uint32_t t = v->lo;
    v->lo = v->hi;
    v->hi = t;
    return v;
}
119 | | |
/* 64-bit addition on the word pair, propagating the carry from lo
 * into hi. */
#define ADD64_TO(v, s) add64_to(&(v), (s))
static inline uint64_t *
add64_to(uint64_t *v, const uint64_t s)
{
    v->lo += s.lo;
    v->hi += s.hi;
    /* unsigned wraparound in lo means a carry out of the low word */
    if (v->lo < s.lo) v->hi++;
    return v;
}

/* 64-bit xor, word by word (no carries, so no ordering concerns). */
#define XOR64_TO(v, s) xor64_to(&(v), (s))
static inline uint64_t *
xor64_to(uint64_t *v, const uint64_t s)
{
    v->lo ^= s.lo;
    v->hi ^= s.hi;
    return v;
}

/* Xor a small plain integer into the low 32 bits only (used for the
 * 0xff finalization constant). */
#define XOR64_INT(v, x) ((v).lo ^= (x))
140 | | #endif |
141 | | |
/* SipHash initialization constants: the ASCII string
 * "somepseudorandomlygeneratedbytes" from the SipHash paper, written
 * as four 8-byte chunks with bytes reversed ("uespemos" = "somepseu"
 * backwards) so each u64 holds the reference constant when the bytes
 * are interpreted little-endian.
 * NOTE(review): on a big-endian host with a native uint64_t these
 * union-read words would differ from the reference constants —
 * confirm whether that configuration is actually supported. */
static const union {
    char bin[32];
    uint64_t u64[4];
} sip_init_state_bin = {"uespemos""modnarod""arenegyl""setybdet"};
#define sip_init_state sip_init_state_bin.u64
147 | | |
#if SIP_HASH_STREAMING
/* Method table for the streaming implementation; sip_hash dispatches
 * all work through h->methods. */
struct sip_interface_st {
    void (*init)(sip_state *s, const uint8_t *key);
    void (*update)(sip_state *s, const uint8_t *data, size_t len);
    void (*final)(sip_state *s, uint64_t *digest);
};

static void int_sip_init(sip_state *state, const uint8_t *key);
static void int_sip_update(sip_state *state, const uint8_t *data, size_t len);
static void int_sip_final(sip_state *state, uint64_t *digest);

/* The single concrete method table used by every sip_hash handle. */
static const sip_interface sip_methods = {
    int_sip_init,
    int_sip_update,
    int_sip_final
};
#endif /* SIP_HASH_STREAMING */
165 | | |
/* One SipRound: the add-rotate-xor mixing step over the four state
 * words, exactly as specified in the SipHash paper.  The statement
 * order and rotation counts (13, 16, 32, 17, 21, 32) are part of the
 * algorithm — do not reorder. */
#define SIP_COMPRESS(v0, v1, v2, v3) \
do { \
    ADD64_TO((v0), (v1)); \
    ADD64_TO((v2), (v3)); \
    ROTL64_TO((v1), 13); \
    ROTL64_TO((v3), 16); \
    XOR64_TO((v1), (v0)); \
    XOR64_TO((v3), (v2)); \
    ROTL64_TO((v0), 32); \
    ADD64_TO((v2), (v1)); \
    ADD64_TO((v0), (v3)); \
    ROTL64_TO((v1), 17); \
    ROTL64_TO((v3), 21); \
    XOR64_TO((v1), (v2)); \
    XOR64_TO((v3), (v0)); \
    ROTL64_TO((v2), 32); \
} while(0)
183 | | |
184 | | #if SIP_HASH_STREAMING |
185 | | static void |
186 | | int_sip_dump(sip_state *state) |
187 | | { |
188 | | int v; |
189 | | |
190 | | for (v = 0; v < 4; v++) { |
191 | | #ifdef HAVE_UINT64_T |
192 | | printf("v%d: %" PRIx64 "\n", v, state->v[v]); |
193 | | #else |
194 | | printf("v%d: %" PRIx32 "%.8" PRIx32 "\n", v, state->v[v].hi, state->v[v].lo); |
195 | | #endif |
196 | | } |
197 | | } |
198 | | |
199 | | static void |
200 | | int_sip_init(sip_state *state, const uint8_t key[16]) |
201 | | { |
202 | | uint64_t k0, k1; |
203 | | |
204 | | k0 = U8TO64_LE(key); |
205 | | k1 = U8TO64_LE(key + sizeof(uint64_t)); |
206 | | |
207 | | state->v[0] = k0; XOR64_TO(state->v[0], sip_init_state[0]); |
208 | | state->v[1] = k1; XOR64_TO(state->v[1], sip_init_state[1]); |
209 | | state->v[2] = k0; XOR64_TO(state->v[2], sip_init_state[2]); |
210 | | state->v[3] = k1; XOR64_TO(state->v[3], sip_init_state[3]); |
211 | | } |
212 | | |
213 | | static inline void |
214 | | int_sip_round(sip_state *state, int n) |
215 | | { |
216 | | int i; |
217 | | |
218 | | for (i = 0; i < n; i++) { |
219 | | SIP_COMPRESS(state->v[0], state->v[1], state->v[2], state->v[3]); |
220 | | } |
221 | | } |
222 | | |
/* Absorb one 8-byte message word: xor into v3, run the c compression
 * rounds, then xor into v0 — the SipHash message schedule. */
static inline void
int_sip_update_block(sip_state *state, uint64_t m)
{
    XOR64_TO(state->v[3], m);
    int_sip_round(state, state->c);
    XOR64_TO(state->v[0], m);
}
230 | | |
231 | | static inline void |
232 | | int_sip_pre_update(sip_state *state, const uint8_t **pdata, size_t *plen) |
233 | | { |
234 | | int to_read; |
235 | | uint64_t m; |
236 | | |
237 | | if (!state->buflen) return; |
238 | | |
239 | | to_read = sizeof(uint64_t) - state->buflen; |
240 | | memcpy(state->buf + state->buflen, *pdata, to_read); |
241 | | m = U8TO64_LE(state->buf); |
242 | | int_sip_update_block(state, m); |
243 | | *pdata += to_read; |
244 | | *plen -= to_read; |
245 | | state->buflen = 0; |
246 | | } |
247 | | |
/* Stash the trailing 0-7 bytes that do not fill a whole block; they
 * are either completed by a later update (int_sip_pre_update) or
 * padded at finalization (int_sip_pad_final_block). */
static inline void
int_sip_post_update(sip_state *state, const uint8_t *data, size_t len)
{
    uint8_t r = len % sizeof(uint64_t);
    if (r) {
        memcpy(state->buf, data + len - r, r);
        state->buflen = r;
    }
}
257 | | |
258 | | static void |
259 | | int_sip_update(sip_state *state, const uint8_t *data, size_t len) |
260 | | { |
261 | | uint64_t *end; |
262 | | uint64_t *data64; |
263 | | |
264 | | state->msglen_byte = state->msglen_byte + (len % 256); |
265 | | data64 = (uint64_t *) data; |
266 | | |
267 | | int_sip_pre_update(state, &data, &len); |
268 | | |
269 | | end = data64 + (len / sizeof(uint64_t)); |
270 | | |
271 | | #if BYTE_ORDER == LITTLE_ENDIAN |
272 | | while (data64 != end) { |
273 | | int_sip_update_block(state, *data64++); |
274 | | } |
275 | | #elif BYTE_ORDER == BIG_ENDIAN |
276 | | { |
277 | | uint64_t m; |
278 | | uint8_t *data8 = data; |
279 | | for (; data8 != (uint8_t *) end; data8 += sizeof(uint64_t)) { |
280 | | m = U8TO64_LE(data8); |
281 | | int_sip_update_block(state, m); |
282 | | } |
283 | | } |
284 | | #endif |
285 | | |
286 | | int_sip_post_update(state, data, len); |
287 | | } |
288 | | |
289 | | static inline void |
290 | | int_sip_pad_final_block(sip_state *state) |
291 | | { |
292 | | int i; |
293 | | /* pad with 0's and finalize with msg_len mod 256 */ |
294 | | for (i = state->buflen; i < sizeof(uint64_t); i++) { |
295 | | state->buf[i] = 0x00; |
296 | | } |
297 | | state->buf[sizeof(uint64_t) - 1] = state->msglen_byte; |
298 | | } |
299 | | |
/* Finalize the hash: absorb the padded last block, xor the 0xff
 * finalization constant into v2, run the d finalization rounds, and
 * fold v0 ^ v1 ^ v2 ^ v3 into *digest. */
static void
int_sip_final(sip_state *state, uint64_t *digest)
{
    uint64_t m;

    int_sip_pad_final_block(state);

    m = U8TO64_LE(state->buf);
    int_sip_update_block(state, m);

    /* domain-separation constant from the SipHash specification */
    XOR64_INT(state->v[2], 0xff);

    int_sip_round(state, state->d);

    *digest = state->v[0];
    XOR64_TO(*digest, state->v[1]);
    XOR64_TO(*digest, state->v[2]);
    XOR64_TO(*digest, state->v[3]);
}
319 | | |
320 | | sip_hash * |
321 | | sip_hash_new(const uint8_t key[16], int c, int d) |
322 | | { |
323 | | sip_hash *h = NULL; |
324 | | |
325 | | if (!(h = (sip_hash *) malloc(sizeof(sip_hash)))) return NULL; |
326 | | return sip_hash_init(h, key, c, d); |
327 | | } |
328 | | |
/* Initialize an already-allocated context: record the round counts,
 * clear the buffering state, attach the streaming method table, and
 * key the internal state.  Returns h. */
sip_hash *
sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d)
{
    h->state->c = c;            /* compression rounds per block */
    h->state->d = d;            /* finalization rounds */
    h->state->buflen = 0;
    h->state->msglen_byte = 0;
    h->methods = &sip_methods;
    h->methods->init(h->state, key);
    return h;
}
340 | | |
/* Feed len bytes of message into the hash.  Always returns 1
 * (the streaming update cannot fail). */
int
sip_hash_update(sip_hash *h, const uint8_t *msg, size_t len)
{
    h->methods->update(h->state, msg, len);
    return 1;
}
347 | | |
348 | | int |
349 | | sip_hash_final(sip_hash *h, uint8_t **digest, size_t* len) |
350 | | { |
351 | | uint64_t digest64; |
352 | | uint8_t *ret; |
353 | | |
354 | | h->methods->final(h->state, &digest64); |
355 | | if (!(ret = (uint8_t *)malloc(sizeof(uint64_t)))) return 0; |
356 | | U64TO8_LE(ret, digest64); |
357 | | *len = sizeof(uint64_t); |
358 | | *digest = ret; |
359 | | |
360 | | return 1; |
361 | | } |
362 | | |
/* Finish the hash and write the digest as a native 64-bit value
 * (no allocation).  Always returns 1. */
int
sip_hash_final_integer(sip_hash *h, uint64_t *digest)
{
    h->methods->final(h->state, digest);
    return 1;
}
369 | | |
/* Convenience one-shot: update with data then finalize into a
 * malloc'd byte digest.  Returns 0 on failure (allocation), 1 on
 * success; the caller frees *digest. */
int
sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len)
{
    if (!sip_hash_update(h, data, data_len)) return 0;
    return sip_hash_final(h, digest, digest_len);
}
376 | | |
/* Convenience one-shot: update with data then finalize into a native
 * 64-bit digest.  Returns 1 on success. */
int
sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest)
{
    if (!sip_hash_update(h, data, data_len)) return 0;
    return sip_hash_final_integer(h, digest);
}
383 | | |
/* Release a handle created by sip_hash_new.  Frees only the handle
 * itself (state is stored inside it, per siphash.h). */
void
sip_hash_free(sip_hash *h)
{
    free(h);
}
389 | | |
/* Debug helper: print the handle's internal state words to stdout. */
void
sip_hash_dump(sip_hash *h)
{
    int_sip_dump(h->state);
}
395 | | #endif /* SIP_HASH_STREAMING */ |
396 | | |
/* Absorb one message word m in the one-shot path: xor into v3, run one
 * compression round, xor into v0 (mirrors int_sip_update_block with
 * c = 1). */
#define SIP_ROUND(m, v0, v1, v2, v3) \
do { \
    XOR64_TO((v3), (m)); \
    SIP_COMPRESS(v0, v1, v2, v3); \
    XOR64_TO((v0), (m)); \
} while (0)
403 | | |
/*
 * One-shot SipHash-1-3 (1 compression round, 3 finalization rounds)
 * of len bytes of data under the 16-byte key.  Returns the 64-bit
 * digest as a native value.  This path does not use the streaming
 * machinery above.
 */
uint64_t
sip_hash13(const uint8_t key[16], const uint8_t *data, size_t len)
{
    uint64_t k0, k1;
    uint64_t v0, v1, v2, v3;
    uint64_t m, last;
    /* end marks the last whole-8-byte-block boundary; the 0-7 bytes
     * after it form the final padded block */
    const uint8_t *end = data + len - (len % sizeof(uint64_t));

    k0 = U8TO64_LE(key);
    k1 = U8TO64_LE(key + sizeof(uint64_t));

    /* key the state with the standard SipHash constants */
    v0 = k0; XOR64_TO(v0, sip_init_state[0]);
    v1 = k1; XOR64_TO(v1, sip_init_state[1]);
    v2 = k0; XOR64_TO(v2, sip_init_state[2]);
    v3 = k1; XOR64_TO(v3, sip_init_state[3]);

    /* compression: one SipRound per whole 8-byte block */
#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
    {
        /* direct 64-bit loads; gated on the UNALIGNED_WORD_ACCESS
         * whitelist above */
        uint64_t *data64 = (uint64_t *)data;
        while (data64 != (uint64_t *) end) {
            m = *data64++;
            SIP_ROUND(m, v0, v1, v2, v3);
        }
    }
#else
    /* portable byte-wise loads for strict-alignment / big-endian hosts */
    for (; data != end; data += sizeof(uint64_t)) {
        m = U8TO64_LE(data);
        SIP_ROUND(m, v0, v1, v2, v3);
    }
#endif

    /* build the final block: (len mod 256) in the top byte, then the
     * remaining 0-7 message bytes in little-endian order */
#ifdef HAVE_UINT64_T
    last = (uint64_t)len << 56;
#define OR_BYTE(n) (last |= ((uint64_t) end[n]) << ((n) * 8))
#else
    last.hi = len << 24;
    last.lo = 0;
#define OR_BYTE(n) do { \
        if (n >= 4) \
            last.hi |= ((uint32_t) end[n]) << ((n) >= 4 ? (n) * 8 - 32 : 0); \
        else \
            last.lo |= ((uint32_t) end[n]) << ((n) >= 4 ? 0 : (n) * 8); \
    } while (0)
#endif

    /* deliberate cascade: each case adds its byte then falls into the
     * next lower case */
    switch (len % sizeof(uint64_t)) {
        case 7:
            OR_BYTE(6);
            /* fallthrough */
        case 6:
            OR_BYTE(5);
            /* fallthrough */
        case 5:
            OR_BYTE(4);
            /* fallthrough */
        case 4:
#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
            /* 4 remaining bytes can be loaded in one 32-bit read here */
#ifdef HAVE_UINT64_T
            last |= (uint64_t) ((uint32_t *) end)[0];
#else
            last.lo |= ((uint32_t *) end)[0];
#endif
            break;
#else
            OR_BYTE(3);
            /* fallthrough */
#endif
        case 3:
            OR_BYTE(2);
            /* fallthrough */
        case 2:
            OR_BYTE(1);
            /* fallthrough */
        case 1:
            OR_BYTE(0);
            break;
        case 0:
            break;
    }

    /* absorb the final block, flip v2, run the 3 finalization rounds */
    SIP_ROUND(last, v0, v1, v2, v3);

    XOR64_INT(v2, 0xff);

    SIP_COMPRESS(v0, v1, v2, v3);
    SIP_COMPRESS(v0, v1, v2, v3);
    SIP_COMPRESS(v0, v1, v2, v3);

    XOR64_TO(v0, v1);
    XOR64_TO(v0, v2);
    XOR64_TO(v0, v3);
    return v0;
}