Coverage Report

Created: 2025-06-13 06:55

/work/_deps/lz4-src/lib/xxhash.c
Line
Count
Source
1
/*
2
*  xxHash - Fast Hash algorithm
3
*  Copyright (C) 2012-2016, Yann Collet
4
*
5
*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
*
7
*  Redistribution and use in source and binary forms, with or without
8
*  modification, are permitted provided that the following conditions are
9
*  met:
10
*
11
*  * Redistributions of source code must retain the above copyright
12
*  notice, this list of conditions and the following disclaimer.
13
*  * Redistributions in binary form must reproduce the above
14
*  copyright notice, this list of conditions and the following disclaimer
15
*  in the documentation and/or other materials provided with the
16
*  distribution.
17
*
18
*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21
*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24
*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25
*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26
*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27
*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
*
30
*  You can contact the author at :
31
*  - xxHash homepage: http://www.xxhash.com
32
*  - xxHash source repository : https://github.com/Cyan4973/xxHash
33
*/
34
35
36
/* *************************************
37
*  Tuning parameters
38
***************************************/
39
/*!XXH_FORCE_MEMORY_ACCESS :
40
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
41
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
42
 * The switch below allows selecting a different access method for improved performance.
43
 * Method 0 (default) : use `memcpy()`. Safe and portable.
44
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
45
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
46
 * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard.
47
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
48
 *            But in some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6).
49
 * See http://stackoverflow.com/a/32095106/646947 for details.
50
 * Prefer these methods in priority order (0 > 1 > 2)
51
 */
52
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
53
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
54
                        || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
55
                        || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
56
#    define XXH_FORCE_MEMORY_ACCESS 2
57
#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
58
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
59
                    || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
60
                    || defined(__ARM_ARCH_7S__) ))
61
#    define XXH_FORCE_MEMORY_ACCESS 1
62
#  endif
63
#endif
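For reference, a minimal standalone sketch (not part of xxHash; the sketch_-prefixed helper names are hypothetical) of what methods 0 and 2 amount to for a single unaligned 32-bit read.

#include <string.h>

/* Method 0: memcpy() - safe and portable; compilers usually lower it to one load. */
static unsigned sketch_read32_method0(const void* p)
{
    unsigned v;
    memcpy(&v, p, sizeof(v));
    return v;
}

/* Method 2: direct access - fastest on some targets, but undefined behavior
 * when p is not suitably aligned. */
static unsigned sketch_read32_method2(const void* p)
{
    return *(const unsigned*)p;
}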
64
65
/*!XXH_ACCEPT_NULL_INPUT_POINTER :
66
 * If the input pointer is NULL, xxHash's default behavior is to dereference it, triggering a segfault.
67
 * When this macro is enabled, xxHash actively checks the input for a null pointer.
68
 * If it is, the result for a null input pointer is the same as for a zero-length input.
69
 */
70
#ifndef XXH_ACCEPT_NULL_INPUT_POINTER   /* can be defined externally */
71
#  define XXH_ACCEPT_NULL_INPUT_POINTER 0
72
#endif
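A hedged usage sketch, assuming the library was built with -DXXH_ACCEPT_NULL_INPUT_POINTER=1; in that configuration a NULL input hashes like a zero-length input (the helper name is hypothetical).

#include <stddef.h>
#include "xxhash.h"

/* Sketch: with the flag enabled, a NULL input is tolerated and yields the
 * same value as hashing an empty buffer. */
static void sketch_null_input(void)
{
    unsigned const h_null  = XXH32(NULL, 0, 0);
    unsigned const h_empty = XXH32("",   0, 0);   /* equal to h_null in this mode */
    (void)h_null; (void)h_empty;
}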
73
74
/*!XXH_FORCE_NATIVE_FORMAT :
75
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
76
 * Results are therefore identical on little-endian and big-endian CPUs.
77
 * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
78
 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
79
 * to improve speed on big-endian CPUs.
80
 * This option has no impact on little-endian CPUs.
81
 */
82
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
83
0
#  define XXH_FORCE_NATIVE_FORMAT 0
84
#endif
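A hedged configuration sketch: this switch is normally set on the compiler command line rather than edited here, and only makes sense when hash values never leave machines of one endianness.

/* Sketch: equivalent to compiling this translation unit with
 *   cc -c xxhash.c -DXXH_FORCE_NATIVE_FORMAT=1
 * The value shown is a hypothetical project-local choice. */
#ifndef XXH_FORCE_NATIVE_FORMAT
#  define XXH_FORCE_NATIVE_FORMAT 1
#endif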
85
86
/*!XXH_FORCE_ALIGN_CHECK :
87
 * This is a minor performance trick, only useful with lots of very small keys.
88
 * It means : check whether the input is aligned or unaligned.
89
 * The check costs one initial branch per hash;
90
 * set it to 0 when the input is guaranteed to be aligned,
91
 * or when alignment doesn't matter for performance.
92
 */
93
#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
94
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
95
4.90k
#    define XXH_FORCE_ALIGN_CHECK 0
96
#  else
97
#    define XXH_FORCE_ALIGN_CHECK 1
98
#  endif
99
#endif
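As a reference point, a minimal sketch (helper name hypothetical) of the alignment test that this check boils down to; XXH32() below performs the same test once per hash.

#include <stddef.h>

/* Sketch: the one-branch alignment check referred to above. */
static int sketch_is_4byte_aligned(const void* input)
{
    return (((size_t)input) & 3) == 0;   /* same test as in XXH32() */
}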
100
101
102
/* *************************************
103
*  Includes & Memory related functions
104
***************************************/
105
/*! Modify the local functions below should you wish to use some other memory routines
106
*   for malloc(), free() */
107
#include <stdlib.h>
108
0
static void* XXH_malloc(size_t s) { return malloc(s); }
109
0
static void  XXH_free  (void* p)  { free(p); }
110
/*! and for memcpy() */
111
#include <string.h>
112
110k
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
113
114
#include <assert.h>   /* assert */
115
116
#define XXH_STATIC_LINKING_ONLY
117
#include "xxhash.h"
118
119
120
/* *************************************
121
*  Compiler Specific Options
122
***************************************/
123
#if defined (_MSC_VER) && !defined (__clang__)    /* MSVC */
124
#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
125
#  define FORCE_INLINE static __forceinline
126
#else
127
#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
128
#    if defined (__GNUC__) || defined (__clang__)
129
#      define FORCE_INLINE static inline __attribute__((always_inline))
130
#    else
131
#      define FORCE_INLINE static inline
132
#    endif
133
#  else
134
#    define FORCE_INLINE static
135
#  endif /* __STDC_VERSION__ */
136
#endif
137
138
139
/* *************************************
140
*  Basic Types
141
***************************************/
142
#ifndef MEM_MODULE
143
# if !defined (__VMS) \
144
  && (defined (__cplusplus) \
145
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
146
#   include <stdint.h>
147
    typedef uint8_t  BYTE;
148
    typedef uint16_t U16;
149
    typedef uint32_t U32;
150
# else
151
    typedef unsigned char      BYTE;
152
    typedef unsigned short     U16;
153
    typedef unsigned int       U32;
154
# endif
155
#endif
156
157
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
158
159
/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
160
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
161
162
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
163
164
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
165
/* currently only defined for gcc and icc */
166
typedef union { U32 u32; } __attribute__((packed)) unalign;
167
static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
168
169
#else
170
171
/* portable and safe solution. Generally efficient.
172
 * see : http://stackoverflow.com/a/32095106/646947
173
 */
174
static U32 XXH_read32(const void* memPtr)
175
15.0M
{
176
15.0M
    U32 val;
177
15.0M
    memcpy(&val, memPtr, sizeof(val));
178
15.0M
    return val;
179
15.0M
}
180
181
#endif   /* XXH_FORCE_MEMORY_ACCESS */
182
183
184
/* ****************************************
185
*  Compiler-specific Functions and Macros
186
******************************************/
187
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
188
189
/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
190
#if defined(_MSC_VER)
191
#  define XXH_rotl32(x,r) _rotl(x,r)
192
#  define XXH_rotl64(x,r) _rotl64(x,r)
193
#else
194
15.0M
#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
195
3.88G
#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
196
#endif
197
198
#if defined(_MSC_VER)     /* Visual Studio */
199
#  define XXH_swap32 _byteswap_ulong
200
#elif XXH_GCC_VERSION >= 403
201
#  define XXH_swap32 __builtin_bswap32
202
#else
203
static U32 XXH_swap32 (U32 x)
204
0
{
205
0
    return  ((x << 24) & 0xff000000 ) |
206
0
            ((x <<  8) & 0x00ff0000 ) |
207
0
            ((x >>  8) & 0x0000ff00 ) |
208
0
            ((x >> 24) & 0x000000ff );
209
0
}
210
#endif
211
212
213
/* *************************************
214
*  Architecture Macros
215
***************************************/
216
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianness;
217
218
/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
219
#ifndef XXH_CPU_LITTLE_ENDIAN
220
static int XXH_isLittleEndian(void)
221
13.8k
{
222
13.8k
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
223
13.8k
    return one.c[0];
224
13.8k
}
225
13.9k
#   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
226
#endif
227
228
229
/* ***************************
230
*  Memory reads
231
*****************************/
232
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
233
234
FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianness endian, XXH_alignment align)
235
15.0M
{
236
15.0M
    if (align==XXH_unaligned)
237
18.4E
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
238
6.48k
    else
239
6.48k
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
240
15.0M
}
241
242
FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianness endian)
243
4.98M
{
244
4.98M
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
245
4.98M
}
246
247
static U32 XXH_readBE32(const void* ptr)
248
0
{
249
0
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
250
0
}
251
252
253
/* *************************************
254
*  Macros
255
***************************************/
256
0
#define XXH_STATIC_ASSERT(c)  { enum { XXH_sa = 1/(int)(!!(c)) }; }  /* use after variable declarations */
257
0
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
258
259
260
/* *******************************************************************
261
*  32-bit hash functions
262
*********************************************************************/
263
static const U32 PRIME32_1 = 2654435761U;
264
static const U32 PRIME32_2 = 2246822519U;
265
static const U32 PRIME32_3 = 3266489917U;
266
static const U32 PRIME32_4 =  668265263U;
267
static const U32 PRIME32_5 =  374761393U;
268
269
static U32 XXH32_round(U32 seed, U32 input)
270
15.0M
{
271
15.0M
    seed += input * PRIME32_2;
272
15.0M
    seed  = XXH_rotl32(seed, 13);
273
15.0M
    seed *= PRIME32_1;
274
15.0M
    return seed;
275
15.0M
}
276
277
/* mix all bits */
278
static U32 XXH32_avalanche(U32 h32)
279
8.70k
{
280
8.70k
    h32 ^= h32 >> 15;
281
8.70k
    h32 *= PRIME32_2;
282
8.70k
    h32 ^= h32 >> 13;
283
8.70k
    h32 *= PRIME32_3;
284
8.70k
    h32 ^= h32 >> 16;
285
8.70k
    return(h32);
286
8.70k
}
287
288
10.6M
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
289
290
static U32
291
XXH32_finalize(U32 h32, const void* ptr, size_t len,
292
                XXH_endianness endian, XXH_alignment align)
293
294
8.70k
{
295
8.70k
    const BYTE* p = (const BYTE*)ptr;
296
297
8.70k
#define PROCESS1               \
298
14.9k
    h32 += (*p++) * PRIME32_5; \
299
14.9k
    h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
300
301
8.70k
#define PROCESS4                         \
302
8.70k
    h32 += XXH_get32bits(p) * PRIME32_3; \
303
5.87k
    p+=4;                                \
304
5.87k
    h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
305
306
8.70k
    switch(len&15)  /* or switch(bEnd - p) */
307
8.70k
    {
308
64
      case 12:      PROCESS4;
309
                    /* fallthrough */
310
190
      case 8:       PROCESS4;
311
                    /* fallthrough */
312
329
      case 4:       PROCESS4;
313
329
                    return XXH32_avalanche(h32);
314
315
29
      case 13:      PROCESS4;
316
                    /* fallthrough */
317
62
      case 9:       PROCESS4;
318
                    /* fallthrough */
319
89
      case 5:       PROCESS4;
320
89
                    PROCESS1;
321
89
                    return XXH32_avalanche(h32);
322
323
199
      case 14:      PROCESS4;
324
                    /* fallthrough */
325
262
      case 10:      PROCESS4;
326
                    /* fallthrough */
327
294
      case 6:       PROCESS4;
328
294
                    PROCESS1;
329
294
                    PROCESS1;
330
294
                    return XXH32_avalanche(h32);
331
332
290
      case 15:      PROCESS4;
333
                    /* fallthrough */
334
1.93k
      case 11:      PROCESS4;
335
                    /* fallthrough */
336
2.13k
      case 7:       PROCESS4;
337
                    /* fallthrough */
338
3.61k
      case 3:       PROCESS1;
339
                    /* fallthrough */
340
5.03k
      case 2:       PROCESS1;
341
                    /* fallthrough */
342
5.59k
      case 1:       PROCESS1;
343
                    /* fallthrough */
344
7.99k
      case 0:       return XXH32_avalanche(h32);
345
8.70k
    }
346
0
    assert(0);
347
0
    return h32;   /* reaching this point is deemed impossible */
348
8.70k
}
349
350
351
FORCE_INLINE U32
352
XXH32_endian_align(const void* input, size_t len, U32 seed,
353
                    XXH_endianness endian, XXH_alignment align)
354
4.90k
{
355
4.90k
    const BYTE* p = (const BYTE*)input;
356
4.90k
    const BYTE* bEnd = p + len;
357
4.90k
    U32 h32;
358
359
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
360
    if (p==NULL) {
361
        len=0;
362
        bEnd=p=(const BYTE*)(size_t)16;
363
    }
364
#endif
365
366
4.90k
    if (len>=16) {
367
4.59k
        const BYTE* const limit = bEnd - 15;
368
4.59k
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
369
4.59k
        U32 v2 = seed + PRIME32_2;
370
4.59k
        U32 v3 = seed + 0;
371
4.59k
        U32 v4 = seed - PRIME32_1;
372
373
2.65M
        do {
374
2.65M
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
375
2.65M
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
376
2.65M
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
377
2.65M
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
378
2.65M
        } while (p < limit);
379
380
4.59k
        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
381
4.59k
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
382
4.59k
    } else {
383
314
        h32  = seed + PRIME32_5;
384
314
    }
385
386
4.90k
    h32 += (U32)len;
387
388
4.90k
    return XXH32_finalize(h32, p, len&15, endian, align);
389
4.90k
}
390
391
392
XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
393
4.90k
{
394
#if 0
395
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
396
    XXH32_state_t state;
397
    XXH32_reset(&state, seed);
398
    XXH32_update(&state, input, len);
399
    return XXH32_digest(&state);
400
#else
401
4.90k
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;
402
403
4.90k
    if (XXH_FORCE_ALIGN_CHECK) {
404
0
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
405
0
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
406
0
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
407
0
            else
408
0
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
409
0
    }   }
410
411
4.90k
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
412
4.90k
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
413
1
    else
414
1
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
415
4.90k
#endif
416
4.90k
}
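A minimal usage sketch for the one-shot API above; the buffer and seed are arbitrary examples.

/* Sketch: hashing a buffer in one call. */
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    const char msg[] = "hello, xxhash";
    unsigned const seed = 0;                        /* arbitrary example seed */
    unsigned const h = XXH32(msg, strlen(msg), seed);
    printf("XXH32 = 0x%08X\n", h);
    return 0;
}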
417
418
419
420
/*======   Hash streaming   ======*/
421
422
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
423
0
{
424
0
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
425
0
}
426
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
427
0
{
428
0
    XXH_free(statePtr);
429
0
    return XXH_OK;
430
0
}
431
432
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
433
0
{
434
0
    memcpy(dstState, srcState, sizeof(*dstState));
435
0
}
436
437
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
438
4.63k
{
439
4.63k
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
440
4.63k
    memset(&state, 0, sizeof(state));
441
4.63k
    state.v1 = seed + PRIME32_1 + PRIME32_2;
442
4.63k
    state.v2 = seed + PRIME32_2;
443
4.63k
    state.v3 = seed + 0;
444
4.63k
    state.v4 = seed - PRIME32_1;
445
    /* do not write into reserved, planned to be removed in a future version */
446
4.63k
    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
447
4.63k
    return XXH_OK;
448
4.63k
}
449
450
451
FORCE_INLINE XXH_errorcode
452
XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianness endian)
453
5.26k
{
454
5.26k
    if (input==NULL)
455
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
456
        return XXH_OK;
457
#else
458
0
        return XXH_ERROR;
459
5.26k
#endif
460
461
5.26k
    {   const BYTE* p = (const BYTE*)input;
462
5.26k
        const BYTE* const bEnd = p + len;
463
464
5.26k
        state->total_len_32 += (unsigned)len;
465
5.26k
        state->large_len |= (len>=16) | (state->total_len_32>=16);
466
467
5.26k
        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
468
921
            XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
469
921
            state->memsize += (unsigned)len;
470
921
            return XXH_OK;
471
921
        }
472
473
4.34k
        if (state->memsize) {   /* some data left from previous update */
474
466
            XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
475
466
            {   const U32* p32 = state->mem32;
476
466
                state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
477
466
                state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
478
466
                state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
479
466
                state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian));
480
466
            }
481
466
            p += 16-state->memsize;
482
466
            state->memsize = 0;
483
466
        }
484
485
4.34k
        if (p <= bEnd-16) {
486
4.28k
            const BYTE* const limit = bEnd - 16;
487
4.28k
            U32 v1 = state->v1;
488
4.28k
            U32 v2 = state->v2;
489
4.28k
            U32 v3 = state->v3;
490
4.28k
            U32 v4 = state->v4;
491
492
1.24M
            do {
493
1.24M
                v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
494
1.24M
                v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
495
1.24M
                v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
496
1.24M
                v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
497
1.24M
            } while (p<=limit);
498
499
4.28k
            state->v1 = v1;
500
4.28k
            state->v2 = v2;
501
4.28k
            state->v3 = v3;
502
4.28k
            state->v4 = v4;
503
4.28k
        }
504
505
4.34k
        if (p < bEnd) {
506
2.55k
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
507
2.55k
            state->memsize = (unsigned)(bEnd-p);
508
2.55k
        }
509
4.34k
    }
510
511
0
    return XXH_OK;
512
5.26k
}
513
514
515
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
516
5.26k
{
517
5.26k
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;
518
519
5.26k
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
520
5.26k
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
521
0
    else
522
0
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
523
5.26k
}
524
525
526
FORCE_INLINE U32
527
XXH32_digest_endian (const XXH32_state_t* state, XXH_endianness endian)
528
3.78k
{
529
3.78k
    U32 h32;
530
531
3.78k
    if (state->large_len) {
532
3.41k
        h32 = XXH_rotl32(state->v1, 1)
533
3.41k
            + XXH_rotl32(state->v2, 7)
534
3.41k
            + XXH_rotl32(state->v3, 12)
535
3.41k
            + XXH_rotl32(state->v4, 18);
536
3.41k
    } else {
537
365
        h32 = state->v3 /* == seed */ + PRIME32_5;
538
365
    }
539
540
3.78k
    h32 += state->total_len_32;
541
542
3.78k
    return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned);
543
3.78k
}
544
545
546
XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
547
3.78k
{
548
3.78k
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;
549
550
3.78k
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
551
3.78k
        return XXH32_digest_endian(state_in, XXH_littleEndian);
552
0
    else
553
0
        return XXH32_digest_endian(state_in, XXH_bigEndian);
554
3.78k
}
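A hedged sketch of the streaming API above, hashing input supplied in two chunks; it yields the same value as a single XXH32() call over the concatenated data (the helper name is hypothetical, error handling is minimal).

#include <stddef.h>
#include "xxhash.h"

/* Sketch: streaming usage - create, reset, update per chunk, digest, free. */
static unsigned sketch_streaming(const void* part1, size_t len1,
                                 const void* part2, size_t len2)
{
    XXH32_state_t* const state = XXH32_createState();
    unsigned h = 0;
    if (state != NULL) {
        XXH32_reset(state, 0);                 /* seed 0 */
        XXH32_update(state, part1, len1);
        XXH32_update(state, part2, len2);
        h = XXH32_digest(state);
        XXH32_freeState(state);
    }
    return h;
}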
555
556
557
/*======   Canonical representation   ======*/
558
559
/*! Default XXH result types are basic unsigned 32-bit and 64-bit integers.
560
*   The canonical representation follows the human-readable write convention, i.e. big-endian (most significant digits first).
561
*   These functions allow transformation of a hash result into and from its canonical format.
562
*   This way, hash values can be written into a file or buffer, remaining comparable across different systems.
563
*/
564
565
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
566
0
{
567
0
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
568
0
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
569
0
    memcpy(dst, &hash, sizeof(*dst));
570
0
}
571
572
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
573
0
{
574
0
    return XXH_readBE32(src);
575
0
}
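A short sketch of round-tripping a hash through its canonical (big-endian) form, e.g. to store it in a file header; the helper name is hypothetical.

#include "xxhash.h"

/* Sketch: serialize a hash in canonical byte order and read it back; the
 * stored bytes compare equal across platforms of any endianness. */
static XXH32_hash_t sketch_canonical_roundtrip(XXH32_hash_t h)
{
    XXH32_canonical_t canon;
    XXH32_canonicalFromHash(&canon, h);        /* fixed byte order for storage */
    return XXH32_hashFromCanonical(&canon);    /* == h */
}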
576
577
578
#ifndef XXH_NO_LONG_LONG
579
580
/* *******************************************************************
581
*  64-bit hash functions
582
*********************************************************************/
583
584
/*======   Memory access   ======*/
585
586
#ifndef MEM_MODULE
587
# define MEM_MODULE
588
# if !defined (__VMS) \
589
  && (defined (__cplusplus) \
590
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
591
#   include <stdint.h>
592
    typedef uint64_t U64;
593
# else
594
    /* if compiler doesn't support unsigned long long, replace by another 64-bit type */
595
    typedef unsigned long long U64;
596
# endif
597
#endif
598
599
600
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
601
602
/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
603
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
604
605
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
606
607
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
608
/* currently only defined for gcc and icc */
609
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
610
static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
611
612
#else
613
614
/* portable and safe solution. Generally efficient.
615
 * see : http://stackoverflow.com/a/32095106/646947
616
 */
617
618
static U64 XXH_read64(const void* memPtr)
619
3.88G
{
620
3.88G
    U64 val;
621
3.88G
    memcpy(&val, memPtr, sizeof(val));
622
3.88G
    return val;
623
3.88G
}
624
625
#endif   /* XXH_FORCE_MEMORY_ACCESS */
626
627
#if defined(_MSC_VER)     /* Visual Studio */
628
#  define XXH_swap64 _byteswap_uint64
629
#elif XXH_GCC_VERSION >= 403
630
#  define XXH_swap64 __builtin_bswap64
631
#else
632
static U64 XXH_swap64 (U64 x)
633
0
{
634
0
    return  ((x << 56) & 0xff00000000000000ULL) |
635
0
            ((x << 40) & 0x00ff000000000000ULL) |
636
0
            ((x << 24) & 0x0000ff0000000000ULL) |
637
0
            ((x << 8)  & 0x000000ff00000000ULL) |
638
0
            ((x >> 8)  & 0x00000000ff000000ULL) |
639
0
            ((x >> 24) & 0x0000000000ff0000ULL) |
640
0
            ((x >> 40) & 0x000000000000ff00ULL) |
641
0
            ((x >> 56) & 0x00000000000000ffULL);
642
0
}
643
#endif
644
645
FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianness endian, XXH_alignment align)
646
0
{
647
0
    if (align==XXH_unaligned)
648
0
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
649
0
    else
650
0
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
651
0
}
652
653
FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianness endian)
654
0
{
655
0
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
656
0
}
657
658
static U64 XXH_readBE64(const void* ptr)
659
0
{
660
0
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
661
0
}
662
663
664
/*======   xxh64   ======*/
665
666
static const U64 PRIME64_1 = 11400714785074694791ULL;
667
static const U64 PRIME64_2 = 14029467366897019727ULL;
668
static const U64 PRIME64_3 =  1609587929392839161ULL;
669
static const U64 PRIME64_4 =  9650029242287828579ULL;
670
static const U64 PRIME64_5 =  2870177450012600261ULL;
671
672
static U64 XXH64_round(U64 acc, U64 input)
673
3.88G
{
674
3.88G
    acc += input * PRIME64_2;
675
3.88G
    acc  = XXH_rotl64(acc, 31);
676
3.88G
    acc *= PRIME64_1;
677
3.88G
    return acc;
678
3.88G
}
679
680
static U64 XXH64_mergeRound(U64 acc, U64 val)
681
1.14k
{
682
1.14k
    val  = XXH64_round(0, val);
683
1.14k
    acc ^= val;
684
1.14k
    acc  = acc * PRIME64_1 + PRIME64_4;
685
1.14k
    return acc;
686
1.14k
}
687
688
static U64 XXH64_avalanche(U64 h64)
689
325
{
690
325
    h64 ^= h64 >> 33;
691
325
    h64 *= PRIME64_2;
692
325
    h64 ^= h64 >> 29;
693
325
    h64 *= PRIME64_3;
694
325
    h64 ^= h64 >> 32;
695
325
    return h64;
696
325
}
697
698
699
0
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
700
701
static U64
702
XXH64_finalize(U64 h64, const void* ptr, size_t len,
703
               XXH_endianness endian, XXH_alignment align)
704
0
{
705
0
    const BYTE* p = (const BYTE*)ptr;
706
707
0
#define PROCESS1_64            \
708
0
    h64 ^= (*p++) * PRIME64_5; \
709
0
    h64 = XXH_rotl64(h64, 11) * PRIME64_1;
710
711
0
#define PROCESS4_64          \
712
0
    h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
713
0
    p+=4;                    \
714
0
    h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
715
716
0
#define PROCESS8_64 {        \
717
0
    U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
718
0
    p+=8;                    \
719
0
    h64 ^= k1;               \
720
0
    h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
721
0
}
722
723
0
    switch(len&31) {
724
0
      case 24: PROCESS8_64;
725
                    /* fallthrough */
726
0
      case 16: PROCESS8_64;
727
                    /* fallthrough */
728
0
      case  8: PROCESS8_64;
729
0
               return XXH64_avalanche(h64);
730
731
0
      case 28: PROCESS8_64;
732
                    /* fallthrough */
733
0
      case 20: PROCESS8_64;
734
                    /* fallthrough */
735
0
      case 12: PROCESS8_64;
736
                    /* fallthrough */
737
0
      case  4: PROCESS4_64;
738
0
               return XXH64_avalanche(h64);
739
740
0
      case 25: PROCESS8_64;
741
                    /* fallthrough */
742
0
      case 17: PROCESS8_64;
743
                    /* fallthrough */
744
0
      case  9: PROCESS8_64;
745
0
               PROCESS1_64;
746
0
               return XXH64_avalanche(h64);
747
748
0
      case 29: PROCESS8_64;
749
                    /* fallthrough */
750
0
      case 21: PROCESS8_64;
751
                    /* fallthrough */
752
0
      case 13: PROCESS8_64;
753
                    /* fallthrough */
754
0
      case  5: PROCESS4_64;
755
0
               PROCESS1_64;
756
0
               return XXH64_avalanche(h64);
757
758
0
      case 26: PROCESS8_64;
759
                    /* fallthrough */
760
0
      case 18: PROCESS8_64;
761
                    /* fallthrough */
762
0
      case 10: PROCESS8_64;
763
0
               PROCESS1_64;
764
0
               PROCESS1_64;
765
0
               return XXH64_avalanche(h64);
766
767
0
      case 30: PROCESS8_64;
768
                    /* fallthrough */
769
0
      case 22: PROCESS8_64;
770
                    /* fallthrough */
771
0
      case 14: PROCESS8_64;
772
                    /* fallthrough */
773
0
      case  6: PROCESS4_64;
774
0
               PROCESS1_64;
775
0
               PROCESS1_64;
776
0
               return XXH64_avalanche(h64);
777
778
0
      case 27: PROCESS8_64;
779
                    /* fallthrough */
780
0
      case 19: PROCESS8_64;
781
                    /* fallthrough */
782
0
      case 11: PROCESS8_64;
783
0
               PROCESS1_64;
784
0
               PROCESS1_64;
785
0
               PROCESS1_64;
786
0
               return XXH64_avalanche(h64);
787
788
0
      case 31: PROCESS8_64;
789
                    /* fallthrough */
790
0
      case 23: PROCESS8_64;
791
                    /* fallthrough */
792
0
      case 15: PROCESS8_64;
793
                    /* fallthrough */
794
0
      case  7: PROCESS4_64;
795
                    /* fallthrough */
796
0
      case  3: PROCESS1_64;
797
                    /* fallthrough */
798
0
      case  2: PROCESS1_64;
799
                    /* fallthrough */
800
0
      case  1: PROCESS1_64;
801
                    /* fallthrough */
802
0
      case  0: return XXH64_avalanche(h64);
803
0
    }
804
805
    /* impossible to reach */
806
0
    assert(0);
807
0
    return 0;  /* unreachable, but some compilers complain without it */
808
0
}
809
810
FORCE_INLINE U64
811
XXH64_endian_align(const void* input, size_t len, U64 seed,
812
                XXH_endianness endian, XXH_alignment align)
813
0
{
814
0
    const BYTE* p = (const BYTE*)input;
815
0
    const BYTE* bEnd = p + len;
816
0
    U64 h64;
817
818
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
819
    if (p==NULL) {
820
        len=0;
821
        bEnd=p=(const BYTE*)(size_t)32;
822
    }
823
#endif
824
825
0
    if (len>=32) {
826
0
        const BYTE* const limit = bEnd - 32;
827
0
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
828
0
        U64 v2 = seed + PRIME64_2;
829
0
        U64 v3 = seed + 0;
830
0
        U64 v4 = seed - PRIME64_1;
831
832
0
        do {
833
0
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
834
0
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
835
0
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
836
0
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
837
0
        } while (p<=limit);
838
839
0
        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
840
0
        h64 = XXH64_mergeRound(h64, v1);
841
0
        h64 = XXH64_mergeRound(h64, v2);
842
0
        h64 = XXH64_mergeRound(h64, v3);
843
0
        h64 = XXH64_mergeRound(h64, v4);
844
845
0
    } else {
846
0
        h64  = seed + PRIME64_5;
847
0
    }
848
849
0
    h64 += (U64) len;
850
851
0
    return XXH64_finalize(h64, p, len, endian, align);
852
0
}
853
854
855
XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
856
0
{
857
#if 0
858
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
859
    XXH64_state_t state;
860
    XXH64_reset(&state, seed);
861
    XXH64_update(&state, input, len);
862
    return XXH64_digest(&state);
863
#else
864
0
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;
865
866
0
    if (XXH_FORCE_ALIGN_CHECK) {
867
0
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
868
0
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
869
0
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
870
0
            else
871
0
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
872
0
    }   }
873
874
0
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
875
0
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
876
0
    else
877
0
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
878
0
#endif
879
0
}
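The 64-bit API mirrors the 32-bit one; a minimal one-shot sketch (helper name hypothetical):

#include <stddef.h>
#include "xxhash.h"

/* Sketch: one-shot 64-bit hash of a buffer, seed 0. */
static unsigned long long sketch_hash64(const void* data, size_t len)
{
    return XXH64(data, len, 0);
}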
880
881
/*======   Hash Streaming   ======*/
882
883
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
884
0
{
885
0
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
886
0
}
887
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
888
0
{
889
0
    XXH_free(statePtr);
890
0
    return XXH_OK;
891
0
}
892
893
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
894
0
{
895
0
    memcpy(dstState, srcState, sizeof(*dstState));
896
0
}
897
898
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
899
0
{
900
0
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
901
0
    memset(&state, 0, sizeof(state));
902
0
    state.v1 = seed + PRIME64_1 + PRIME64_2;
903
0
    state.v2 = seed + PRIME64_2;
904
0
    state.v3 = seed + 0;
905
0
    state.v4 = seed - PRIME64_1;
906
     /* do not write into reserved, planned to be removed in a future version */
907
0
    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
908
0
    return XXH_OK;
909
0
}
910
911
FORCE_INLINE XXH_errorcode
912
XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianness endian)
913
0
{
914
0
    if (input==NULL)
915
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
916
        return XXH_OK;
917
#else
918
0
        return XXH_ERROR;
919
0
#endif
920
921
0
    {   const BYTE* p = (const BYTE*)input;
922
0
        const BYTE* const bEnd = p + len;
923
924
0
        state->total_len += len;
925
926
0
        if (state->memsize + len < 32) {  /* fill in tmp buffer */
927
0
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
928
0
            state->memsize += (U32)len;
929
0
            return XXH_OK;
930
0
        }
931
932
0
        if (state->memsize) {   /* tmp buffer is full */
933
0
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
934
0
            state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
935
0
            state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
936
0
            state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
937
0
            state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
938
0
            p += 32-state->memsize;
939
0
            state->memsize = 0;
940
0
        }
941
942
0
        if (p+32 <= bEnd) {
943
0
            const BYTE* const limit = bEnd - 32;
944
0
            U64 v1 = state->v1;
945
0
            U64 v2 = state->v2;
946
0
            U64 v3 = state->v3;
947
0
            U64 v4 = state->v4;
948
949
0
            do {
950
0
                v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
951
0
                v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
952
0
                v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
953
0
                v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
954
0
            } while (p<=limit);
955
956
0
            state->v1 = v1;
957
0
            state->v2 = v2;
958
0
            state->v3 = v3;
959
0
            state->v4 = v4;
960
0
        }
961
962
0
        if (p < bEnd) {
963
0
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
964
0
            state->memsize = (unsigned)(bEnd-p);
965
0
        }
966
0
    }
967
968
0
    return XXH_OK;
969
0
}
970
971
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
972
0
{
973
0
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;
974
975
0
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
976
0
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
977
0
    else
978
0
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
979
0
}
980
981
FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianness endian)
982
0
{
983
0
    U64 h64;
984
985
0
    if (state->total_len >= 32) {
986
0
        U64 const v1 = state->v1;
987
0
        U64 const v2 = state->v2;
988
0
        U64 const v3 = state->v3;
989
0
        U64 const v4 = state->v4;
990
991
0
        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
992
0
        h64 = XXH64_mergeRound(h64, v1);
993
0
        h64 = XXH64_mergeRound(h64, v2);
994
0
        h64 = XXH64_mergeRound(h64, v3);
995
0
        h64 = XXH64_mergeRound(h64, v4);
996
0
    } else {
997
0
        h64  = state->v3 /*seed*/ + PRIME64_5;
998
0
    }
999
1000
0
    h64 += (U64) state->total_len;
1001
1002
0
    return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned);
1003
0
}
1004
1005
XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
1006
0
{
1007
0
    XXH_endianness endian_detected = (XXH_endianness)XXH_CPU_LITTLE_ENDIAN;
1008
1009
0
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
1010
0
        return XXH64_digest_endian(state_in, XXH_littleEndian);
1011
0
    else
1012
0
        return XXH64_digest_endian(state_in, XXH_bigEndian);
1013
0
}
1014
1015
1016
/*====== Canonical representation   ======*/
1017
1018
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
1019
0
{
1020
0
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
1021
0
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
1022
0
    memcpy(dst, &hash, sizeof(*dst));
1023
0
}
1024
1025
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
1026
0
{
1027
0
    return XXH_readBE64(src);
1028
0
}
1029
1030
#endif  /* XXH_NO_LONG_LONG */