Coverage Report

Created: 2024-09-08 07:17

/src/rocksdb/util/xxhash.h
Line
Count
Source
1
//  Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2
//  This source code is licensed under both the GPLv2 (found in the
3
//  COPYING file in the root directory) and Apache 2.0 License
4
//  (found in the LICENSE.Apache file in the root directory).
5
6
/* BEGIN RocksDB customizations */
7
#ifndef XXH_STATIC_LINKING_ONLY
8
// Using compiled xxhash.cc
9
#define XXH_STATIC_LINKING_ONLY 1
10
#endif  // !defined(XXH_STATIC_LINKING_ONLY)
11
#ifndef XXH_NAMESPACE
12
#define XXH_NAMESPACE ROCKSDB_
13
#endif  // !defined(XXH_NAMESPACE)
14
15
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) || \
16
     defined(XXH_IMPLEMENTATION)) &&                        \
17
    !defined(XXH_IMPLEM_13a8737387)
18
#if defined(__cplusplus) && (__cplusplus > 202002L)
19
/* C++23 and future versions have std::unreachable() */
20
#include <utility> /* std::unreachable() */
21
#endif
22
#endif
23
/* END RocksDB customizations */
24
25
// clang-format off
26
/*
27
 * xxHash - Extremely Fast Hash algorithm
28
 * Header File
29
 * Copyright (C) 2012-2021 Yann Collet
30
 *
31
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
32
 *
33
 * Redistribution and use in source and binary forms, with or without
34
 * modification, are permitted provided that the following conditions are
35
 * met:
36
 *
37
 *    * Redistributions of source code must retain the above copyright
38
 *      notice, this list of conditions and the following disclaimer.
39
 *    * Redistributions in binary form must reproduce the above
40
 *      copyright notice, this list of conditions and the following disclaimer
41
 *      in the documentation and/or other materials provided with the
42
 *      distribution.
43
 *
44
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
45
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
46
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
47
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
48
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
49
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
50
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
51
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
52
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
53
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
54
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
55
 *
56
 * You can contact the author at:
57
 *   - xxHash homepage: https://www.xxhash.com
58
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
59
 */
60
61
/*!
62
 * @mainpage xxHash
63
 *
64
 * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
65
 * limits.
66
 *
67
 * It is proposed in four flavors, in three families:
68
 * 1. @ref XXH32_family
69
 *   - Classic 32-bit hash function. Simple, compact, and runs on almost all
70
 *     32-bit and 64-bit systems.
71
 * 2. @ref XXH64_family
72
 *   - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
73
 *     64-bit systems (but _not_ 32-bit systems).
74
 * 3. @ref XXH3_family
75
 *   - Modern 64-bit and 128-bit hash function family which features improved
76
 *     strength and performance across the board, especially on smaller data.
77
 *     It benefits greatly from SIMD and 64-bit without requiring it.
78
 *
79
 * Benchmarks
80
 * ---
81
 * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
82
 * The open-source benchmark program is compiled with clang v10.0 using the -O3 flag.
83
 *
84
 * | Hash Name            | ISA ext | Width | Large Data Speed | Small Data Velocity |
85
 * | -------------------- | ------- | ----: | ---------------: | ------------------: |
86
 * | XXH3_64bits()        | @b AVX2 |    64 |        59.4 GB/s |               133.1 |
87
 * | MeowHash             | AES-NI  |   128 |        58.2 GB/s |                52.5 |
88
 * | XXH3_128bits()       | @b AVX2 |   128 |        57.9 GB/s |               118.1 |
89
 * | CLHash               | PCLMUL  |    64 |        37.1 GB/s |                58.1 |
90
 * | XXH3_64bits()        | @b SSE2 |    64 |        31.5 GB/s |               133.1 |
91
 * | XXH3_128bits()       | @b SSE2 |   128 |        29.6 GB/s |               118.1 |
92
 * | RAM sequential read  |         |   N/A |        28.0 GB/s |                 N/A |
93
 * | ahash                | AES-NI  |    64 |        22.5 GB/s |               107.2 |
94
 * | City64               |         |    64 |        22.0 GB/s |                76.6 |
95
 * | T1ha2                |         |    64 |        22.0 GB/s |                99.0 |
96
 * | City128              |         |   128 |        21.7 GB/s |                57.7 |
97
 * | FarmHash             | AES-NI  |    64 |        21.3 GB/s |                71.9 |
98
 * | XXH64()              |         |    64 |        19.4 GB/s |                71.0 |
99
 * | SpookyHash           |         |    64 |        19.3 GB/s |                53.2 |
100
 * | Mum                  |         |    64 |        18.0 GB/s |                67.0 |
101
 * | CRC32C               | SSE4.2  |    32 |        13.0 GB/s |                57.9 |
102
 * | XXH32()              |         |    32 |         9.7 GB/s |                71.9 |
103
 * | City32               |         |    32 |         9.1 GB/s |                66.0 |
104
 * | Blake3*              | @b AVX2 |   256 |         4.4 GB/s |                 8.1 |
105
 * | Murmur3              |         |    32 |         3.9 GB/s |                56.1 |
106
 * | SipHash*             |         |    64 |         3.0 GB/s |                43.2 |
107
 * | Blake3*              | @b SSE2 |   256 |         2.4 GB/s |                 8.1 |
108
 * | HighwayHash          |         |    64 |         1.4 GB/s |                 6.0 |
109
 * | FNV64                |         |    64 |         1.2 GB/s |                62.7 |
110
 * | Blake2*              |         |   256 |         1.1 GB/s |                 5.1 |
111
 * | SHA1*                |         |   160 |         0.8 GB/s |                 5.6 |
112
 * | MD5*                 |         |   128 |         0.6 GB/s |                 7.8 |
113
 * @note
114
 *   - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
115
 *     even though it is mandatory on x64.
116
 *   - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
117
 *     by modern standards.
118
 *   - Small data velocity is a rough average of the algorithm's efficiency for small
119
 *     data. For more accurate information, see the wiki.
120
 *   - More benchmarks and strength tests are found on the wiki:
121
 *         https://github.com/Cyan4973/xxHash/wiki
122
 *
123
 * Usage
124
 * ------
125
 * All xxHash variants use a similar API. Changing the algorithm is a trivial
126
 * substitution.
127
 *
128
 * @pre
129
 *    For functions which take an input and length parameter, the following
130
 *    requirements are assumed:
131
 *    - The range from [`input`, `input + length`) is valid, readable memory.
132
 *      - The only exception is if the `length` is `0`, `input` may be `NULL`.
133
 *    - For C++, the objects must have the *TriviallyCopyable* property, as the
134
 *      functions access bytes directly as if they were an array of `unsigned char`.
135
 *
136
 * @anchor single_shot_example
137
 * **Single Shot**
138
 *
139
 * These functions are stateless functions which hash a contiguous block of memory,
140
 * immediately returning the result. They are the easiest and usually the fastest
141
 * option.
142
 *
143
 * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
144
 *
145
 * @code{.c}
146
 *   #include <string.h>
147
 *   #include "xxhash.h"
148
 *
149
 *   // Example for a function which hashes a null terminated string with XXH32().
150
 *   XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
151
 *   {
152
 *       // NULL pointers are only valid if the length is zero
153
 *       size_t length = (string == NULL) ? 0 : strlen(string);
154
 *       return XXH32(string, length, seed);
155
 *   }
156
 * @endcode
157
 *
158
 * @anchor streaming_example
159
 * **Streaming**
160
 *
161
 * These groups of functions allow incremental hashing of data of unknown size, even
162
 * more than what would fit in a size_t.
163
 *
164
 * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
165
 *
166
 * @code{.c}
167
 *   #include <stdio.h>
168
 *   #include <assert.h>
169
 *   #include "xxhash.h"
170
 *   // Example for a function which hashes a FILE incrementally with XXH3_64bits().
171
 *   XXH64_hash_t hashFile(FILE* f)
172
 *   {
173
 *       // Allocate a state struct. Do not just use malloc() or new.
174
 *       XXH3_state_t* state = XXH3_createState();
175
 *       assert(state != NULL && "Out of memory!");
176
 *       // Reset the state to start a new hashing session.
177
 *       XXH3_64bits_reset(state);
178
 *       char buffer[4096];
179
 *       size_t count;
180
 *       // Read the file in chunks
181
 *       while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
182
 *           // Run update() as many times as necessary to process the data
183
 *           XXH3_64bits_update(state, buffer, count);
184
 *       }
185
 *       // Retrieve the finalized hash. This will not change the state.
186
 *       XXH64_hash_t result = XXH3_64bits_digest(state);
187
 *       // Free the state. Do not use free().
188
 *       XXH3_freeState(state);
189
 *       return result;
190
 *   }
191
 * @endcode
192
 *
193
 * @file xxhash.h
194
 * xxHash prototypes and implementation
195
 */
196
197
#if defined (__cplusplus)
198
extern "C" {
199
#endif
200
201
/* ****************************
202
 *  INLINE mode
203
 ******************************/
204
/*!
205
 * @defgroup public Public API
206
 * Contains details on the public xxHash functions.
207
 * @{
208
 */
209
#ifdef XXH_DOXYGEN
210
/*!
211
 * @brief Exposes the implementation and marks all functions as `inline`.
212
 *
213
 * Use these build macros to inline xxhash into the target unit.
214
 * Inlining improves performance on small inputs, especially when the length is
215
 * expressed as a compile-time constant:
216
 *
217
 *  https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
218
 *
219
 * It also keeps xxHash symbols private to the unit, so they are not exported.
220
 *
221
 * Usage:
222
 * @code{.c}
223
 *     #define XXH_INLINE_ALL
224
 *     #include "xxhash.h"
225
 * @endcode
226
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
227
 */
228
#  define XXH_INLINE_ALL
229
#  undef XXH_INLINE_ALL
230
/*!
231
 * @brief Exposes the implementation without marking functions as inline.
232
 */
233
#  define XXH_PRIVATE_API
234
#  undef XXH_PRIVATE_API
235
/*!
236
 * @brief Emulate a namespace by transparently prefixing all symbols.
237
 *
238
 * If you want to include _and expose_ xxHash functions from within your own
239
 * library, but also want to avoid symbol collisions with other libraries which
240
 * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
241
 * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE
242
 * (therefore, avoid empty or numeric values).
243
 *
244
 * Note that no change is required within the calling program as long as it
245
 * includes `xxhash.h`: Regular symbol names will be automatically translated
246
 * by this header.
247
 */
248
#  define XXH_NAMESPACE /* YOUR NAME HERE */
249
#  undef XXH_NAMESPACE
250
#endif
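/*
 * A minimal sketch of the XXH_NAMESPACE mechanism. The prefix `MYLIB_` and the
 * wrapper function below are purely illustrative, not part of this header:
 *
 * @code{.c}
 *   // mylib_hash.c, compiled together with the xxHash implementation
 *   #define XXH_NAMESPACE MYLIB_          // exported symbol becomes MYLIB_XXH64
 *   #include "xxhash.h"
 *
 *   XXH64_hash_t mylib_hash(const void* data, size_t len)
 *   {
 *       // Callers keep writing XXH64(); the header maps it to MYLIB_XXH64.
 *       return XXH64(data, len, 0);
 *   }
 * @endcode
 *
 * In this file, the RocksDB customization block at the top sets XXH_NAMESPACE
 * to ROCKSDB_, so the compiled symbols become ROCKSDB_XXH3_64bits, etc.
 */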
251
252
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
253
    && !defined(XXH_INLINE_ALL_31684351384)
254
   /* this section should be traversed only once */
255
#  define XXH_INLINE_ALL_31684351384
256
   /* give access to the advanced API, required to compile implementations */
257
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
258
#  define XXH_STATIC_LINKING_ONLY
259
   /* make all functions private */
260
#  undef XXH_PUBLIC_API
261
#  if defined(__GNUC__)
262
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
263
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
264
#    define XXH_PUBLIC_API static inline
265
#  elif defined(_MSC_VER)
266
#    define XXH_PUBLIC_API static __inline
267
#  else
268
     /* note: this version may generate warnings for unused static functions */
269
#    define XXH_PUBLIC_API static
270
#  endif
271
272
   /*
273
    * This part deals with the special case where a unit wants to inline xxHash,
274
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
275
    * such as part of some previously included *.h header file.
276
    * Without further action, the new include would just be ignored,
277
    * and functions would effectively _not_ be inlined (silent failure).
278
    * The following macros solve this situation by prefixing all inlined names,
279
    * avoiding naming collision with previous inclusions.
280
    */
281
   /* Before that, we unconditionally #undef all symbols,
282
    * in case they were already defined with XXH_NAMESPACE.
283
    * They will then be redefined for XXH_INLINE_ALL
284
    */
285
#  undef XXH_versionNumber
286
    /* XXH32 */
287
#  undef XXH32
288
#  undef XXH32_createState
289
#  undef XXH32_freeState
290
#  undef XXH32_reset
291
#  undef XXH32_update
292
#  undef XXH32_digest
293
#  undef XXH32_copyState
294
#  undef XXH32_canonicalFromHash
295
#  undef XXH32_hashFromCanonical
296
    /* XXH64 */
297
#  undef XXH64
298
#  undef XXH64_createState
299
#  undef XXH64_freeState
300
#  undef XXH64_reset
301
#  undef XXH64_update
302
#  undef XXH64_digest
303
#  undef XXH64_copyState
304
#  undef XXH64_canonicalFromHash
305
#  undef XXH64_hashFromCanonical
306
    /* XXH3_64bits */
307
#  undef XXH3_64bits
308
#  undef XXH3_64bits_withSecret
309
#  undef XXH3_64bits_withSeed
310
#  undef XXH3_64bits_withSecretandSeed
311
#  undef XXH3_createState
312
#  undef XXH3_freeState
313
#  undef XXH3_copyState
314
#  undef XXH3_64bits_reset
315
#  undef XXH3_64bits_reset_withSeed
316
#  undef XXH3_64bits_reset_withSecret
317
#  undef XXH3_64bits_update
318
#  undef XXH3_64bits_digest
319
#  undef XXH3_generateSecret
320
    /* XXH3_128bits */
321
#  undef XXH128
322
#  undef XXH3_128bits
323
#  undef XXH3_128bits_withSeed
324
#  undef XXH3_128bits_withSecret
325
#  undef XXH3_128bits_reset
326
#  undef XXH3_128bits_reset_withSeed
327
#  undef XXH3_128bits_reset_withSecret
328
#  undef XXH3_128bits_reset_withSecretandSeed
329
#  undef XXH3_128bits_update
330
#  undef XXH3_128bits_digest
331
#  undef XXH128_isEqual
332
#  undef XXH128_cmp
333
#  undef XXH128_canonicalFromHash
334
#  undef XXH128_hashFromCanonical
335
    /* Finally, free the namespace itself */
336
#  undef XXH_NAMESPACE
337
338
    /* employ the namespace for XXH_INLINE_ALL */
339
#  define XXH_NAMESPACE XXH_INLINE_
340
   /*
341
    * Some identifiers (enums, type names) are not symbols,
342
    * but they must nonetheless be renamed to avoid redeclaration.
343
    * Alternative solution: do not redeclare them.
344
    * However, this requires some #ifdefs, and has a more dispersed impact.
345
    * Meanwhile, renaming can be achieved in a single place.
346
    */
347
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
348
#  define XXH_OK XXH_IPREF(XXH_OK)
349
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
350
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
351
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
352
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
353
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
354
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
355
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
356
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
357
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
358
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
359
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
360
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
361
   /* Ensure the header is parsed again, even if it was previously included */
362
#  undef XXHASH_H_5627135585666179
363
#  undef XXHASH_H_STATIC_13879238742
364
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
365
366
/* ****************************************************************
367
 *  Stable API
368
 *****************************************************************/
369
#ifndef XXHASH_H_5627135585666179
370
#define XXHASH_H_5627135585666179 1
371
372
/*! @brief Marks a global symbol. */
373
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
374
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
375
#    ifdef XXH_EXPORT
376
#      define XXH_PUBLIC_API __declspec(dllexport)
377
#    elif XXH_IMPORT
378
#      define XXH_PUBLIC_API __declspec(dllimport)
379
#    endif
380
#  else
381
#    define XXH_PUBLIC_API   /* do nothing */
382
#  endif
383
#endif
384
385
#ifdef XXH_NAMESPACE
386
3.61M
#  define XXH_CAT(A,B) A##B
387
3.61M
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
388
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
389
/* XXH32 */
390
0
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
391
0
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
392
0
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
393
0
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
394
0
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
395
0
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
396
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
397
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
398
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
399
/* XXH64 */
400
0
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
401
0
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
402
0
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
403
0
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
404
0
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
405
0
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
406
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
407
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
408
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
409
/* XXH3_64bits */
410
2.38M
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
411
1.47k
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
412
0
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
413
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
414
5.86k
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
415
5.86k
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
416
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
417
1.16M
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
418
0
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
419
0
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
420
0
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
421
19.8k
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
422
8.20k
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
423
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
424
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
425
/* XXH3_128bits */
426
0
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
427
4
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
428
25.8k
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
429
0
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
430
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
431
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
432
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
433
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
434
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
435
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
436
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
437
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
438
#  define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
439
0
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
440
0
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
441
#endif
442
443
444
/* *************************************
445
*  Compiler specifics
446
***************************************/
447
448
/* specific declaration modes for Windows */
449
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
450
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
451
#    ifdef XXH_EXPORT
452
#      define XXH_PUBLIC_API __declspec(dllexport)
453
#    elif XXH_IMPORT
454
#      define XXH_PUBLIC_API __declspec(dllimport)
455
#    endif
456
#  else
457
#    define XXH_PUBLIC_API   /* do nothing */
458
#  endif
459
#endif
460
461
#if defined (__GNUC__)
462
# define XXH_CONSTF  __attribute__((const))
463
# define XXH_PUREF   __attribute__((pure))
464
# define XXH_MALLOCF __attribute__((malloc))
465
#else
466
# define XXH_CONSTF  /* disable */
467
# define XXH_PUREF
468
# define XXH_MALLOCF
469
#endif
470
471
/* *************************************
472
*  Version
473
***************************************/
474
0
#define XXH_VERSION_MAJOR    0
475
0
#define XXH_VERSION_MINOR    8
476
0
#define XXH_VERSION_RELEASE  1
477
/*! @brief Version number, encoded as two digits each */
478
0
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
479
480
/*!
481
 * @brief Obtains the xxHash version.
482
 *
483
 * This is mostly useful when xxHash is compiled as a shared library,
484
 * since the returned value comes from the library, as opposed to header file.
485
 *
486
 * @return @ref XXH_VERSION_NUMBER of the invoked library.
487
 */
488
XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
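/*
 * A small illustrative sketch of a version check, useful when xxHash is linked
 * as a shared library; the function name is hypothetical:
 *
 * @code{.c}
 *   #include <stdio.h>
 *   #include "xxhash.h"
 *
 *   void report_xxhash_version(void)
 *   {
 *       unsigned v = XXH_versionNumber();          // value from the linked library
 *       printf("xxHash %u.%u.%u (header %u)\n",
 *              v / (100*100), (v / 100) % 100, v % 100,
 *              (unsigned)XXH_VERSION_NUMBER);      // value from this header
 *   }
 * @endcode
 */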
489
490
491
/* ****************************
492
*  Common basic types
493
******************************/
494
#include <stddef.h>   /* size_t */
495
/*!
496
 * @brief Exit code for the streaming API.
497
 */
498
typedef enum {
499
    XXH_OK = 0, /*!< OK */
500
    XXH_ERROR   /*!< Error */
501
} XXH_errorcode;
502
503
504
/*-**********************************************************************
505
*  32-bit hash
506
************************************************************************/
507
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
508
/*!
509
 * @brief An unsigned 32-bit integer.
510
 *
511
 * Not necessarily defined to `uint32_t` but functionally equivalent.
512
 */
513
typedef uint32_t XXH32_hash_t;
514
515
#elif !defined (__VMS) \
516
  && (defined (__cplusplus) \
517
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
518
#   include <stdint.h>
519
    typedef uint32_t XXH32_hash_t;
520
521
#else
522
#   include <limits.h>
523
#   if UINT_MAX == 0xFFFFFFFFUL
524
      typedef unsigned int XXH32_hash_t;
525
#   elif ULONG_MAX == 0xFFFFFFFFUL
526
      typedef unsigned long XXH32_hash_t;
527
#   else
528
#     error "unsupported platform: need a 32-bit type"
529
#   endif
530
#endif
531
532
/*!
533
 * @}
534
 *
535
 * @defgroup XXH32_family XXH32 family
536
 * @ingroup public
537
 * Contains functions used in the classic 32-bit xxHash algorithm.
538
 *
539
 * @note
540
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
541
 *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
542
 *   and 64-bit systems, and offers true 64/128 bit hash results.
543
 *
544
 * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
545
 * @see @ref XXH32_impl for implementation details
546
 * @{
547
 */
548
549
/*!
550
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
551
 *
552
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
553
 *
554
 * See @ref single_shot_example "Single Shot Example" for an example.
555
 *
556
 * @param input The block of data to be hashed, at least @p length bytes in size.
557
 * @param length The length of @p input, in bytes.
558
 * @param seed The 32-bit seed to alter the hash's output predictably.
559
 *
560
 * @pre
561
 *   The memory between @p input and @p input + @p length must be valid,
562
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
563
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
564
 *
565
 * @return The calculated 32-bit hash value.
566
 *
567
 * @see
568
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
569
 *    Direct equivalents for the other variants of xxHash.
570
 * @see
571
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
572
 */
573
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
574
575
#ifndef XXH_NO_STREAM
576
/*!
577
 * Streaming functions generate the xxHash value from an incremental input.
578
 * This method is slower than single-call functions, due to state management.
579
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
580
 *
581
 * An XXH state must first be allocated using `XXH*_createState()`.
582
 *
583
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
584
 *
585
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
586
 *
587
 * The function returns an error code, with 0 meaning OK, and any other value
588
 * meaning there is an error.
589
 *
590
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
591
 * This function returns the nn-bit hash as an int or long long.
592
 *
593
 * It's still possible to continue inserting input into the hash state after a
594
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
595
 *
596
 * When done, release the state using `XXH*_freeState()`.
597
 *
598
 * @see streaming_example at the top of @ref xxhash.h for an example.
599
 */
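/*
 * A minimal sketch of the workflow described above, using the XXH32 streaming
 * functions declared below; the two-part message is only illustrative:
 *
 * @code{.c}
 *   #include <assert.h>
 *   #include "xxhash.h"
 *
 *   XXH32_hash_t hash_two_parts(const void* p1, size_t n1,
 *                               const void* p2, size_t n2, XXH32_hash_t seed)
 *   {
 *       XXH32_state_t* state = XXH32_createState();   // allocate a state
 *       assert(state != NULL);
 *       XXH32_reset(state, seed);                     // start a new session
 *       XXH32_update(state, p1, n1);                  // feed data in pieces
 *       XXH32_update(state, p2, n2);
 *       XXH32_hash_t h = XXH32_digest(state);         // read the result
 *       XXH32_freeState(state);                       // release the state
 *       return h;
 *   }
 * @endcode
 */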
600
601
/*!
602
 * @typedef struct XXH32_state_s XXH32_state_t
603
 * @brief The opaque state struct for the XXH32 streaming API.
604
 *
605
 * @see XXH32_state_s for details.
606
 */
607
typedef struct XXH32_state_s XXH32_state_t;
608
609
/*!
610
 * @brief Allocates an @ref XXH32_state_t.
611
 *
612
 * Must be freed with XXH32_freeState().
613
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
614
 */
615
XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
616
/*!
617
 * @brief Frees an @ref XXH32_state_t.
618
 *
619
 * Must be allocated with XXH32_createState().
620
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
621
 * @return XXH_OK.
622
 */
623
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
624
/*!
625
 * @brief Copies one @ref XXH32_state_t to another.
626
 *
627
 * @param dst_state The state to copy to.
628
 * @param src_state The state to copy from.
629
 * @pre
630
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
631
 */
632
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
633
634
/*!
635
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
636
 *
637
 * This function resets and seeds a state. Call it before @ref XXH32_update().
638
 *
639
 * @param statePtr The state struct to reset.
640
 * @param seed The 32-bit seed to alter the hash result predictably.
641
 *
642
 * @pre
643
 *   @p statePtr must not be `NULL`.
644
 *
645
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
646
 */
647
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);
648
649
/*!
650
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
651
 *
652
 * Call this to incrementally consume blocks of data.
653
 *
654
 * @param statePtr The state struct to update.
655
 * @param input The block of data to be hashed, at least @p length bytes in size.
656
 * @param length The length of @p input, in bytes.
657
 *
658
 * @pre
659
 *   @p statePtr must not be `NULL`.
660
 * @pre
661
 *   The memory between @p input and @p input + @p length must be valid,
662
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
663
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
664
 *
665
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
666
 */
667
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
668
669
/*!
670
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
671
 *
672
 * @note
673
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
674
 *   digest, and update again.
675
 *
676
 * @param statePtr The state struct to calculate the hash from.
677
 *
678
 * @pre
679
 *  @p statePtr must not be `NULL`.
680
 *
681
 * @return The calculated xxHash32 value from that state.
682
 */
683
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
684
#endif /* !XXH_NO_STREAM */
685
686
/*******   Canonical representation   *******/
687
688
/*
689
 * The default return values from XXH functions are unsigned 32 and 64 bit
690
 * integers.
691
 * This the simplest and fastest format for further post-processing.
692
 *
693
 * However, this leaves open the question of what is the order on the byte level,
694
 * since little and big endian conventions will store the same number differently.
695
 *
696
 * The canonical representation settles this issue by mandating big-endian
697
 * convention, the same convention as human-readable numbers (large digits first).
698
 *
699
 * When writing hash values to storage, sending them over a network, or printing
700
 * them, it's highly recommended to use the canonical representation to ensure
701
 * portability across a wider range of systems, present and future.
702
 *
703
 * The following functions allow transformation of hash values to and from
704
 * canonical format.
705
 */
706
707
/*!
708
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
709
 */
710
typedef struct {
711
    unsigned char digest[4]; /*!< Hash bytes, big endian */
712
} XXH32_canonical_t;
713
714
/*!
715
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
716
 *
717
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
718
 * @param hash The @ref XXH32_hash_t to be converted.
719
 *
720
 * @pre
721
 *   @p dst must not be `NULL`.
722
 */
723
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
724
725
/*!
726
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
727
 *
728
 * @param src The @ref XXH32_canonical_t to convert.
729
 *
730
 * @pre
731
 *   @p src must not be `NULL`.
732
 *
733
 * @return The converted hash.
734
 */
735
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
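/*
 * A short sketch of the canonical round trip: serialize a hash in big-endian
 * form for storage or transmission, then read it back on any platform. The
 * helper names and the fixed 4-byte buffers are illustrative only:
 *
 * @code{.c}
 *   #include <string.h>
 *   #include "xxhash.h"
 *
 *   void store_hash(unsigned char out[4], XXH32_hash_t h)
 *   {
 *       XXH32_canonical_t c;
 *       XXH32_canonicalFromHash(&c, h);       // big-endian byte order
 *       memcpy(out, c.digest, sizeof(c.digest));
 *   }
 *
 *   XXH32_hash_t load_hash(const unsigned char in[4])
 *   {
 *       XXH32_canonical_t c;
 *       memcpy(c.digest, in, sizeof(c.digest));
 *       return XXH32_hashFromCanonical(&c);   // back to a native value
 *   }
 * @endcode
 */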
736
737
738
#ifdef __has_attribute
739
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
740
#else
741
# define XXH_HAS_ATTRIBUTE(x) 0
742
#endif
743
744
/* C-language Attributes are added in C23. */
745
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
746
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
747
#else
748
# define XXH_HAS_C_ATTRIBUTE(x) 0
749
#endif
750
751
#if defined(__cplusplus) && defined(__has_cpp_attribute)
752
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
753
#else
754
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
755
#endif
756
757
/*
758
 * Define the XXH_FALLTHROUGH macro for annotating switch cases with the 'fallthrough' attribute
759
 * introduced in CPP17 and C23.
760
 * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
761
 * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
762
 */
763
#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
764
0
# define XXH_FALLTHROUGH [[fallthrough]]
765
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
766
# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
767
#else
768
# define XXH_FALLTHROUGH /* fallthrough */
769
#endif
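/*
 * A tiny sketch of how such a fallthrough annotation is used; the helper below
 * is illustrative and not taken from this header:
 *
 * @code{.c}
 *   #include <stddef.h>
 *   #include "xxhash.h"
 *
 *   unsigned sum_tail(const unsigned char* p, size_t remaining)
 *   {
 *       unsigned acc = 0;
 *       switch (remaining) {
 *       case 3: acc += p[2];
 *               XXH_FALLTHROUGH;   // intentional fall through to the next case
 *       case 2: acc += p[1];
 *               XXH_FALLTHROUGH;
 *       case 1: acc += p[0];
 *               break;
 *       default: break;
 *       }
 *       return acc;
 *   }
 * @endcode
 */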
770
771
/*
772
 * Define XXH_NOESCAPE for annotated pointers in public API.
773
 * https://clang.llvm.org/docs/AttributeReference.html#noescape
774
 * As of writing this, only supported by clang.
775
 */
776
#if XXH_HAS_ATTRIBUTE(noescape)
777
# define XXH_NOESCAPE __attribute__((noescape))
778
#else
779
# define XXH_NOESCAPE
780
#endif
781
782
783
/*!
784
 * @}
785
 * @ingroup public
786
 * @{
787
 */
788
789
#ifndef XXH_NO_LONG_LONG
790
/*-**********************************************************************
791
*  64-bit hash
792
************************************************************************/
793
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
794
/*!
795
 * @brief An unsigned 64-bit integer.
796
 *
797
 * Not necessarily defined to `uint64_t` but functionally equivalent.
798
 */
799
typedef uint64_t XXH64_hash_t;
800
#elif !defined (__VMS) \
801
  && (defined (__cplusplus) \
802
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
803
#  include <stdint.h>
804
   typedef uint64_t XXH64_hash_t;
805
#else
806
#  include <limits.h>
807
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
808
     /* LP64 ABI says uint64_t is unsigned long */
809
     typedef unsigned long XXH64_hash_t;
810
#  else
811
     /* the following type must have a width of 64-bit */
812
     typedef unsigned long long XXH64_hash_t;
813
#  endif
814
#endif
815
816
/*!
817
 * @}
818
 *
819
 * @defgroup XXH64_family XXH64 family
820
 * @ingroup public
821
 * @{
822
 * Contains functions used in the classic 64-bit xxHash algorithm.
823
 *
824
 * @note
825
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
826
 *   and offers true 64/128 bit hash results.
827
 *   It provides better speed for systems with vector processing capabilities.
828
 */
829
830
/*!
831
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
832
 *
833
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
834
 * systems (see benchmark).
835
 *
836
 * @param input The block of data to be hashed, at least @p length bytes in size.
837
 * @param length The length of @p input, in bytes.
838
 * @param seed The 64-bit seed to alter the hash's output predictably.
839
 *
840
 * @pre
841
 *   The memory between @p input and @p input + @p length must be valid,
842
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
843
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
844
 *
845
 * @return The calculated 64-bit hash.
846
 *
847
 * @see
848
 *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
849
 *    Direct equivalents for the other variants of xxHash.
850
 * @see
851
 *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
852
 */
853
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
854
855
/*******   Streaming   *******/
856
#ifndef XXH_NO_STREAM
857
/*!
858
 * @brief The opaque state struct for the XXH64 streaming API.
859
 *
860
 * @see XXH64_state_s for details.
861
 */
862
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
863
XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);
864
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
865
XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);
866
867
XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed);
868
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
869
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr);
870
#endif /* !XXH_NO_STREAM */
871
/*******   Canonical representation   *******/
872
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
873
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);
874
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);
875
876
#ifndef XXH_NO_XXH3
877
878
/*!
879
 * @}
880
 * ************************************************************************
881
 * @defgroup XXH3_family XXH3 family
882
 * @ingroup public
883
 * @{
884
 *
885
 * XXH3 is a more recent hash algorithm featuring:
886
 *  - Improved speed for both small and large inputs
887
 *  - True 64-bit and 128-bit outputs
888
 *  - SIMD acceleration
889
 *  - Improved 32-bit viability
890
 *
891
 * Speed analysis methodology is explained here:
892
 *
893
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
894
 *
895
 * Compared to XXH64, expect XXH3 to run approximately
896
 * ~2x faster on large inputs and >3x faster on small ones;
897
 * exact differences vary depending on platform.
898
 *
899
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
900
 * but does not require it.
901
 * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
902
 * at competitive speeds, even without vector support. Further details are
903
 * explained in the implementation.
904
 *
905
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
906
 * ZVector and scalar targets. This can be controlled via the @ref XXH_VECTOR
907
 * macro. For the x86 family, an automatic dispatcher is included separately
908
 * in @ref xxh_x86dispatch.c.
909
 *
910
 * XXH3 implementation is portable:
911
 * it has a generic C90 formulation that can be compiled on any platform,
912
 * and all implementations generate exactly the same hash value on all platforms.
913
 * Starting from v0.8.0, it's also labelled "stable", meaning that
914
 * any future version will also generate the same hash value.
915
 *
916
 * XXH3 offers 2 variants, _64bits and _128bits.
917
 *
918
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
919
 * reduces the amount of mixing, resulting in faster speed on small inputs.
920
 * It's also generally simpler to manipulate a scalar return type than a struct.
921
 *
922
 * The API supports one-shot hashing, streaming mode, and custom secrets.
923
 */
924
/*-**********************************************************************
925
*  XXH3 64-bit variant
926
************************************************************************/
927
928
/*!
929
 * @brief 64-bit unseeded variant of XXH3.
930
 *
931
 * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0; however,
932
 * it may have slightly better performance due to constant propagation of the
933
 * defaults.
934
 *
935
 * @see
936
 *    XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms
937
 * @see
938
 *    XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
939
 * @see
940
 *    XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version.
941
 */
942
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
943
944
/*!
945
 * @brief 64-bit seeded variant of XXH3
946
 *
947
 * This variant generates a custom secret on the fly based on default secret
948
 * altered using the `seed` value.
949
 *
950
 * While this operation is decently fast, note that it's not completely free.
951
 *
952
 * @note
953
 *    seed == 0 produces the same results as @ref XXH3_64bits().
954
 *
955
 * @param input The data to hash
956
 * @param length The length
957
 * @param seed The 64-bit seed to alter the state.
958
 */
959
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
960
961
/*!
962
 * The bare minimum size for a custom secret.
963
 *
964
 * @see
965
 *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
966
 *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
967
 */
968
199k
#define XXH3_SECRET_SIZE_MIN 136
969
970
/*!
971
 * @brief 64-bit variant of XXH3 with a custom "secret".
972
 *
973
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
974
 * This makes it more difficult for an external actor to prepare an intentional collision.
975
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
976
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
977
 * Therefore, the secret _must_ look like a bunch of random bytes.
978
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
979
 * Whenever in doubt about the "randomness" of the blob of bytes,
980
 * consider employing "XXH3_generateSecret()" instead (see below).
981
 * It will generate a proper high entropy secret derived from the blob of bytes.
982
 * Another advantage of using XXH3_generateSecret() is that
983
 * it guarantees that all bits within the initial blob of bytes
984
 * will impact every bit of the output.
985
 * This is not necessarily the case when using the blob of bytes directly
986
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
987
 */
988
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
989
990
991
/*******   Streaming   *******/
992
#ifndef XXH_NO_STREAM
993
/*
994
 * Streaming requires state maintenance.
995
 * This operation costs memory and CPU.
996
 * As a consequence, streaming is slower than one-shot hashing.
997
 * For better performance, prefer one-shot functions whenever applicable.
998
 */
999
1000
/*!
1001
 * @brief The state struct for the XXH3 streaming API.
1002
 *
1003
 * @see XXH3_state_s for details.
1004
 */
1005
typedef struct XXH3_state_s XXH3_state_t;
1006
XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
1007
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
1008
XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
1009
1010
/*
1011
 * XXH3_64bits_reset():
1012
 * Initialize with default parameters.
1013
 * digest will be equivalent to `XXH3_64bits()`.
1014
 */
1015
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
1016
/*
1017
 * XXH3_64bits_reset_withSeed():
1018
 * Generate a custom secret from `seed`, and store it into `statePtr`.
1019
 * digest will be equivalent to `XXH3_64bits_withSeed()`.
1020
 */
1021
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
1022
/*!
1023
 * XXH3_64bits_reset_withSecret():
1024
 * `secret` is referenced, it _must outlive_ the hash streaming session.
1025
 * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
1026
 * and the quality of produced hash values depends on secret's entropy
1027
 * (secret's content should look like a bunch of random bytes).
1028
 * When in doubt about the randomness of a candidate `secret`,
1029
 * consider employing `XXH3_generateSecret()` instead (see below).
1030
 */
1031
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
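/*
 * A short sketch of the lifetime rule stated above: the secret is only
 * referenced by the state, so it must stay valid for the whole session. The
 * static buffer and function name below are illustrative:
 *
 * @code{.c}
 *   #include <assert.h>
 *   #include "xxhash.h"
 *
 *   static unsigned char g_secret[XXH3_SECRET_SIZE_MIN];  // outlives any session
 *
 *   XXH64_hash_t hash_with_secret(const void* data, size_t len)
 *   {
 *       XXH3_state_t* st = XXH3_createState();
 *       assert(st != NULL);
 *       XXH3_64bits_reset_withSecret(st, g_secret, sizeof(g_secret));
 *       XXH3_64bits_update(st, data, len);
 *       XXH64_hash_t h = XXH3_64bits_digest(st);
 *       XXH3_freeState(st);
 *       return h;
 *   }
 * @endcode
 *
 * A real secret should first be filled with high-entropy bytes, for example
 * via XXH3_generateSecret() (declared in the static-linking-only section below).
 */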
1032
1033
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
1034
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t  XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
1035
#endif /* !XXH_NO_STREAM */
1036
1037
/* note : canonical representation of XXH3 is the same as XXH64
1038
 * since they both produce XXH64_hash_t values */
1039
1040
1041
/*-**********************************************************************
1042
*  XXH3 128-bit variant
1043
************************************************************************/
1044
1045
/*!
1046
 * @brief The return value from 128-bit hashes.
1047
 *
1048
 * Stored in little endian order, although the fields themselves are in native
1049
 * endianness.
1050
 */
1051
typedef struct {
1052
    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
1053
    XXH64_hash_t high64;  /*!< `value >> 64` */
1054
} XXH128_hash_t;
1055
1056
/*!
1057
 * @brief Unseeded 128-bit variant of XXH3
1058
 *
1059
 * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
1060
 * for shorter inputs.
1061
 *
1062
 * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0; however,
1063
 * it may have slightly better performance due to constant propagation of the
1064
 * defaults.
1065
 *
1066
 * @see
1067
 *    XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms
1068
 * @see
1069
 *    XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
1070
 * @see
1071
 *    XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version.
1072
 */
1073
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
1074
/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */
1075
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
1076
/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */
1077
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
1078
1079
/*******   Streaming   *******/
1080
#ifndef XXH_NO_STREAM
1081
/*
1082
 * Streaming requires state maintenance.
1083
 * This operation costs memory and CPU.
1084
 * As a consequence, streaming is slower than one-shot hashing.
1085
 * For better performance, prefer one-shot functions whenever applicable.
1086
 *
1087
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
1088
 * Use already declared XXH3_createState() and XXH3_freeState().
1089
 *
1090
 * All reset and streaming functions have the same meaning as their 64-bit counterparts.
1091
 */
1092
1093
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
1094
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
1095
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
1096
1097
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
1098
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
1099
#endif /* !XXH_NO_STREAM */
1100
1101
/* The following helper functions make it possible to compare XXH128_hash_t values.
1102
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
1103
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
1104
1105
/*!
1106
 * XXH128_isEqual():
1107
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
1108
 */
1109
XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
1110
1111
/*!
1112
 * @brief Compares two @ref XXH128_hash_t
1113
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
1114
 *
1115
 * @return: >0 if *h128_1  > *h128_2
1116
 *          =0 if *h128_1 == *h128_2
1117
 *          <0 if *h128_1  < *h128_2
1118
 */
1119
XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
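/*
 * A brief sketch of the comparison helpers; the helper names and the hash
 * array below are illustrative:
 *
 * @code{.c}
 *   #include <stdlib.h>
 *   #include "xxhash.h"
 *
 *   int hashes_equal(const void* a, const void* b, size_t len)
 *   {
 *       // Returns 1 if the two buffers have the same 128-bit XXH3 hash.
 *       return XXH128_isEqual(XXH3_128bits(a, len), XXH3_128bits(b, len));
 *   }
 *
 *   void sort_hashes(XXH128_hash_t* hashes, size_t count)
 *   {
 *       // XXH128_cmp() follows the qsort()/bsearch() comparator contract.
 *       qsort(hashes, count, sizeof(hashes[0]), XXH128_cmp);
 *   }
 * @endcode
 */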
1120
1121
1122
/*******   Canonical representation   *******/
1123
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
1124
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
1125
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
1126
1127
1128
#endif  /* !XXH_NO_XXH3 */
1129
#endif  /* XXH_NO_LONG_LONG */
1130
1131
/*!
1132
 * @}
1133
 */
1134
#endif /* XXHASH_H_5627135585666179 */
1135
1136
1137
1138
#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
1139
#define XXHASH_H_STATIC_13879238742
1140
/* ****************************************************************************
1141
 * This section contains declarations which are not guaranteed to remain stable.
1142
 * They may change in future versions, becoming incompatible with a different
1143
 * version of the library.
1144
 * These declarations should only be used with static linking.
1145
 * Never use them in association with dynamic linking!
1146
 ***************************************************************************** */
1147
1148
/*
1149
 * These definitions are only present to allow static allocation
1150
 * of XXH states, on stack or in a struct, for example.
1151
 * Never **ever** access their members directly.
1152
 */
1153
1154
/*!
1155
 * @internal
1156
 * @brief Structure for XXH32 streaming API.
1157
 *
1158
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
1159
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
1160
 * an opaque type. This allows fields to safely be changed.
1161
 *
1162
 * Typedef'd to @ref XXH32_state_t.
1163
 * Do not access the members of this struct directly.
1164
 * @see XXH64_state_s, XXH3_state_s
1165
 */
1166
struct XXH32_state_s {
1167
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
1168
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
1169
   XXH32_hash_t v[4];         /*!< Accumulator lanes */
1170
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
1171
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
1172
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
1173
};   /* typedef'd to XXH32_state_t */
1174
1175
1176
#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */
1177
1178
/*!
1179
 * @internal
1180
 * @brief Structure for XXH64 streaming API.
1181
 *
1182
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
1183
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
1184
 * an opaque type. This allows fields to safely be changed.
1185
 *
1186
 * Typedef'd to @ref XXH64_state_t.
1187
 * Do not access the members of this struct directly.
1188
 * @see XXH32_state_s, XXH3_state_s
1189
 */
1190
struct XXH64_state_s {
1191
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
1192
   XXH64_hash_t v[4];         /*!< Accumulator lanes */
1193
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
1194
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
1195
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyways*/
1196
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
1197
};   /* typedef'd to XXH64_state_t */
1198
1199
#ifndef XXH_NO_XXH3
1200
1201
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
1202
#  include <stdalign.h>
1203
#  define XXH_ALIGN(n)      alignas(n)
1204
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
1205
/* In C++ alignas() is a keyword */
1206
233k
#  define XXH_ALIGN(n)      alignas(n)
1207
#elif defined(__GNUC__)
1208
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
1209
#elif defined(_MSC_VER)
1210
#  define XXH_ALIGN(n)      __declspec(align(n))
1211
#else
1212
#  define XXH_ALIGN(n)   /* disabled */
1213
#endif
1214
1215
/* Old GCC versions only accept the attribute after the type in structures. */
1216
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
1217
    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
1218
    && defined(__GNUC__)
1219
#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
1220
#else
1221
#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
1222
#endif
1223
1224
/*!
1225
 * @brief The size of the internal XXH3 buffer.
1226
 *
1227
 * This is the optimal update size for incremental hashing.
1228
 *
1229
 * @see XXH3_64bits_update(), XXH3_128bits_update().
1230
 */
1231
51.3k
#define XXH3_INTERNALBUFFER_SIZE 256
1232
1233
/*!
1234
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
1235
 *
1236
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
1237
 *
1238
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
1239
 */
1240
#define XXH3_SECRET_DEFAULT_SIZE 192
1241
1242
/*!
1243
 * @internal
1244
 * @brief Structure for XXH3 streaming API.
1245
 *
1246
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
1247
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
1248
 * Otherwise it is an opaque type.
1249
 * Never use this definition in combination with a dynamic library.
1250
 * This allows fields to safely be changed in the future.
1251
 *
1252
 * @note ** This structure has a strict alignment requirement of 64 bytes!! **
1253
 * Do not allocate this with `malloc()` or `new`,
1254
 * it will not be sufficiently aligned.
1255
 * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
1256
 *
1257
 * Typedef'd to @ref XXH3_state_t.
1258
 * Never access the members of this struct directly.
1259
 *
1260
 * @see XXH3_INITSTATE() for stack initialization.
1261
 * @see XXH3_createState(), XXH3_freeState().
1262
 * @see XXH32_state_s, XXH64_state_s
1263
 */
1264
struct XXH3_state_s {
1265
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
1266
       /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
1267
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
1268
       /*!< Used to store a custom secret generated from a seed. */
1269
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
1270
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
1271
   XXH32_hash_t bufferedSize;
1272
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
1273
   XXH32_hash_t useSeed;
1274
       /*!< Reserved field. Needed for padding on 64-bit. */
1275
   size_t nbStripesSoFar;
1276
       /*!< Number of stripes processed. */
1277
   XXH64_hash_t totalLen;
1278
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
1279
   size_t nbStripesPerBlock;
1280
       /*!< Number of stripes per block. */
1281
   size_t secretLimit;
1282
       /*!< Size of @ref customSecret or @ref extSecret */
1283
   XXH64_hash_t seed;
1284
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
1285
   XXH64_hash_t reserved64;
1286
       /*!< Reserved field. */
1287
   const unsigned char* extSecret;
1288
       /*!< Reference to an external secret for the _withSecret variants, NULL
1289
        *   for other variants. */
1290
   /* note: there may be some padding at the end due to alignment on 64 bytes */
1291
}; /* typedef'd to XXH3_state_t */
1292
1293
#undef XXH_ALIGN_MEMBER
1294
1295
/*!
1296
 * @brief Initializes a stack-allocated `XXH3_state_s`.
1297
 *
1298
 * When the @ref XXH3_state_t structure is merely emplaced on stack,
1299
 * it should be initialized with XXH3_INITSTATE() or a memset()
1300
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
1301
 * This init can be omitted if the first reset uses default or _withSecret mode.
1302
 * This operation isn't necessary when the state is created with XXH3_createState().
1303
 * Note that this doesn't prepare the state for a streaming operation,
1304
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
1305
 */
1306
5.86k
#define XXH3_INITSTATE(XXH3_state_ptr)   do { (XXH3_state_ptr)->seed = 0; } while (0)
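/*
 * Example (illustrative sketch, assuming the streaming API is enabled,
 * i.e. XXH_NO_STREAM is not defined). A stack-allocated state must be
 * initialized with XXH3_INITSTATE() before its first seeded reset.
 * `hash_with_seed` is a hypothetical helper name.
 *
 *    XXH64_hash_t hash_with_seed(const void* data, size_t len, XXH64_hash_t seed)
 *    {
 *        XXH3_state_t state;              // stack allocation: the type carries 64-byte alignment
 *        XXH3_INITSTATE(&state);          // required before XXH3_64bits_reset_withSeed()
 *        XXH3_64bits_reset_withSeed(&state, seed);
 *        XXH3_64bits_update(&state, data, len);
 *        return XXH3_64bits_digest(&state);
 *    }
 */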
1307
1308
1309
/*!
1310
 * Simple alias to the pre-selected XXH3_128bits variant.
1311
 */
1312
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
1313
1314
1315
/* ===   Experimental API   === */
1316
/* Symbols defined below must be considered tied to a specific library version. */
1317
1318
/*!
1319
 * XXH3_generateSecret():
1320
 *
1321
 * Derive a high-entropy secret from any user-defined content, named customSeed.
1322
 * The generated secret can be used in combination with `*_withSecret()` functions.
1323
 * The `_withSecret()` variants are useful to provide a higher level of protection
1324
 * than a 64-bit seed, as it becomes much more difficult for an external actor to
1325
 * guess how to impact the calculation logic.
1326
 *
1327
 * The function accepts as input a custom seed of any length and any content,
1328
 * and derives from it a high-entropy secret of length @p secretSize into an
1329
 * already allocated buffer @p secretBuffer.
1330
 *
1331
 * The generated secret can then be used with any `*_withSecret()` variant.
1332
 * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
1333
 * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
1334
 * are part of this list. They all accept a `secret` parameter
1335
 * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
1336
 * _and_ feature very high entropy (consist of random-looking bytes).
1337
 * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
1338
 * be employed to ensure proper quality.
1339
 *
1340
 * @p customSeed can be anything. It can have any size, even small ones,
1341
 * and its content can be anything, even "poor entropy" sources such as a bunch
1342
 * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
1343
 *
1344
 * @pre
1345
 *   - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
1346
 *   - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
1347
 *
1348
 * Example code:
1349
 * @code{.c}
1350
 *    #include <stdio.h>
1351
 *    #include <stdlib.h>
1352
 *    #include <string.h>
1353
 *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
1354
 *    #include "xxhash.h"
1355
 *    // Hashes argv[2] using the entropy from argv[1].
1356
 *    int main(int argc, char* argv[])
1357
 *    {
1358
 *        char secret[XXH3_SECRET_SIZE_MIN];
1359
 *        if (argc != 3) { return 1; }
1360
 *        XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
1361
 *        XXH64_hash_t h = XXH3_64bits_withSecret(
1362
 *             argv[2], strlen(argv[2]),
1363
 *             secret, sizeof(secret)
1364
 *        );
1365
 *        printf("%016llx\n", (unsigned long long) h);
1366
 *    }
1367
 * @endcode
1368
 */
1369
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
1370
1371
/*!
1372
 * @brief Generate the same secret as the _withSeed() variants.
1373
 *
1374
 * The generated secret can be used in combination with
1375
 * `*_withSecret()` and `_withSecretandSeed()` variants.
1376
 *
1377
 * Example C++ `std::string` hash class:
1378
 * @code{.cpp}
1379
 *    #include <string>
1380
 *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
1381
 *    #include "xxhash.h"
1382
 *    // Slow, seeds each time
1383
 *    class HashSlow {
1384
 *        XXH64_hash_t seed;
1385
 *    public:
1386
 *        HashSlow(XXH64_hash_t s) : seed{s} {}
1387
 *        size_t operator()(const std::string& x) const {
1388
 *            return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
1389
 *        }
1390
 *    };
1391
 *    // Fast, caches the seeded secret for future uses.
1392
 *    class HashFast {
1393
 *        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
1394
 *    public:
1395
 *        HashFast(XXH64_hash_t s) {
1396
 *            XXH3_generateSecret_fromSeed(secret, s);
1397
 *        }
1398
 *        size_t operator()(const std::string& x) const {
1399
 *            return size_t{
1400
 *                XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
1401
 *            };
1402
 *        }
1403
 *    };
1404
 * @endcode
1405
 * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
1406
 * @param seed The seed to seed the state.
1407
 */
1408
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
1409
1410
/*!
1411
 * These variants generate hash values using either
1412
 * @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
1413
 * or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX).
1414
 *
1415
 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
1416
 * `_withSeed()` has to generate the secret on the fly for "large" keys.
1417
 * It's fast, but the cost can be noticeable for "not so large" keys (< 1 KB).
1418
 * `_withSecret()` has to generate the masks on the fly for "small" keys,
1419
 * which requires more instructions than _withSeed() variants.
1420
 * Therefore, the _withSecretandSeed() variants combine the best of both worlds.
1421
 *
1422
 * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
1423
 * this variant produces *exactly* the same results as `_withSeed()` variant,
1424
 * hence offering only a pure speed benefit on "large" input,
1425
 * by skipping the need to regenerate the secret for every large input.
1426
 *
1427
 * Another usage scenario is to hash the secret to a 64-bit hash value,
1428
 * for example with XXH3_64bits(), which then becomes the seed,
1429
 * and then employ both the seed and the secret in _withSecretandSeed().
1430
 * On top of speed, an added benefit is that each bit in the secret
1431
 * has a 50% chance to flip each bit in the output, via its impact on the seed.
1432
 *
1433
 * This is not guaranteed when using the secret directly in "small data" scenarios,
1434
 * because only portions of the secret are employed for small data.
1435
 */
1436
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
1437
XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
1438
                              XXH_NOESCAPE const void* secret, size_t secretSize,
1439
                              XXH64_hash_t seed);
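/*
 * Example (illustrative sketch): caching the seeded secret once with
 * XXH3_generateSecret_fromSeed(), then hashing many inputs with the
 * _withSecretandSeed() variant declared above. This yields the same values
 * as XXH3_64bits_withSeed() while skipping secret regeneration for large
 * inputs. `hash_batch`, `bufs`, `lens`, `n` and `out` are hypothetical names.
 *
 *    void hash_batch(const void* const* bufs, const size_t* lens, size_t n,
 *                    XXH64_hash_t seed, XXH64_hash_t* out)
 *    {
 *        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *        size_t i;
 *        XXH3_generateSecret_fromSeed(secret, seed);   // generated once
 *        for (i = 0; i < n; i++) {
 *            out[i] = XXH3_64bits_withSecretandSeed(bufs[i], lens[i],
 *                                                   secret, sizeof(secret), seed);
 *        }
 *    }
 */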
1440
/*! @copydoc XXH3_64bits_withSecretandSeed() */
1441
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
1442
XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
1443
                               XXH_NOESCAPE const void* secret, size_t secretSize,
1444
                               XXH64_hash_t seed64);
1445
#ifndef XXH_NO_STREAM
1446
/*! @copydoc XXH3_64bits_withSecretandSeed() */
1447
XXH_PUBLIC_API XXH_errorcode
1448
XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
1449
                                    XXH_NOESCAPE const void* secret, size_t secretSize,
1450
                                    XXH64_hash_t seed64);
1451
/*! @copydoc XXH3_64bits_withSecretandSeed() */
1452
XXH_PUBLIC_API XXH_errorcode
1453
XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
1454
                                     XXH_NOESCAPE const void* secret, size_t secretSize,
1455
                                     XXH64_hash_t seed64);
1456
#endif /* !XXH_NO_STREAM */
1457
1458
#endif  /* !XXH_NO_XXH3 */
1459
#endif  /* XXH_NO_LONG_LONG */
1460
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
1461
#  define XXH_IMPLEMENTATION
1462
#endif
1463
1464
#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
1465
1466
1467
/* ======================================================================== */
1468
/* ======================================================================== */
1469
/* ======================================================================== */
1470
1471
1472
/*-**********************************************************************
1473
 * xxHash implementation
1474
 *-**********************************************************************
1475
 * xxHash's implementation used to be hosted inside xxhash.c.
1476
 *
1477
 * However, inlining requires the implementation to be visible to the compiler,
1478
 * hence to be included alongside the header.
1479
 * Previously, implementation was hosted inside xxhash.c,
1480
 * which was then #included when inlining was activated.
1481
 * This construction created issues with a few build and install systems,
1482
 * as it required xxhash.c to be stored in /include directory.
1483
 *
1484
 * xxHash implementation is now directly integrated within xxhash.h.
1485
 * As a consequence, xxhash.c is no longer needed in /include.
1486
 *
1487
 * xxhash.c is still available and is still useful.
1488
 * In a "normal" setup, when xxhash is not inlined,
1489
 * xxhash.h only exposes the prototypes and public symbols,
1490
 * while xxhash.c can be built into an object file xxhash.o
1491
 * which can then be linked into the final binary.
1492
 ************************************************************************/
1493
1494
#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
1495
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
1496
#  define XXH_IMPLEM_13a8737387
1497
1498
/* *************************************
1499
*  Tuning parameters
1500
***************************************/
1501
1502
/*!
1503
 * @defgroup tuning Tuning parameters
1504
 * @{
1505
 *
1506
 * Various macros to control xxHash's behavior.
1507
 */
1508
#ifdef XXH_DOXYGEN
1509
/*!
1510
 * @brief Define this to disable 64-bit code.
1511
 *
1512
 * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
1513
 */
1514
#  define XXH_NO_LONG_LONG
1515
#  undef XXH_NO_LONG_LONG /* don't actually */
1516
/*!
1517
 * @brief Controls how unaligned memory is accessed.
1518
 *
1519
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
1520
 * safe and portable.
1521
 *
1522
 * Unfortunately, on some target/compiler combinations, the generated assembly
1523
 * is sub-optimal.
1524
 *
1525
 * The switch below allows selection of a different access method
1526
 * in the search for improved performance.
1527
 *
1528
 * @par Possible options:
1529
 *
1530
 *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
1531
 *   @par
1532
 *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
1533
 *     eliminate the function call and treat it as an unaligned access.
1534
 *
1535
 *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
1536
 *   @par
1537
 *     Depends on compiler extensions and is therefore not portable.
1538
 *     This method is safe _if_ your compiler supports it,
1539
 *     and *generally* as fast or faster than `memcpy`.
1540
 *
1541
 *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
1542
 *  @par
1543
 *     Casts directly and dereferences. This method doesn't depend on the
1544
 *     compiler, but it violates the C standard as it directly dereferences an
1545
 *     unaligned pointer. It can generate buggy code on targets which do not
1546
 *     support unaligned memory accesses, but in some circumstances, it's the
1547
 *     only known way to get the most performance.
1548
 *
1549
 *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
1550
 *  @par
1551
 *     Also portable. This can generate the best code on old compilers which don't
1552
 *     inline small `memcpy()` calls, and it might also be faster on big-endian
1553
 *     systems which lack a native byteswap instruction. However, some compilers
1554
 *     will emit literal byteshifts even if the target supports unaligned access.
1555
 *  .
1556
 *
1557
 * @warning
1558
 *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
1559
 *   care, as what works on one compiler/platform/optimization level may cause
1560
 *   another to read garbage data or even crash.
1561
 *
1562
 * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
1563
 *
1564
 * Prefer these methods in priority order (0 > 3 > 1 > 2)
1565
 */
1566
#  define XXH_FORCE_MEMORY_ACCESS 0
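/*
 * Example (illustrative sketch): these tuning macros are intended to be set by
 * the build, before xxhash.h is included. A hypothetical build line and an
 * equivalent wrapper header:
 *
 *    cc -O2 -DXXH_FORCE_MEMORY_ACCESS=3 -DXXH_FORCE_ALIGN_CHECK=0 -c xxhash.c
 *
 *    // wrapper.h
 *    #define XXH_FORCE_MEMORY_ACCESS 3   // byteshift loads (method 3)
 *    #include "xxhash.h"
 */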
1567
1568
/*!
1569
 * @def XXH_SIZE_OPT
1570
 * @brief Controls how much xxHash optimizes for size.
1571
 *
1572
 * xxHash, when compiled, tends to result in a rather large binary size. This
1573
 * is mostly due to heavy usage of forced inlining and constant folding of the
1574
 * @ref XXH3_family to increase performance.
1575
 *
1576
 * However, some developers prefer size over speed. This option can
1577
 * significantly reduce the size of the generated code. When using the `-Os`
1578
 * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
1579
 * otherwise it is defined to 0.
1580
 *
1581
 * Most of these size optimizations can be controlled manually.
1582
 *
1583
 * This is a number from 0-2.
1584
 *  - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
1585
 *    comes first.
1586
 *  - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
1587
 *    conservative and disables hacks that increase code size. It implies the
1588
 *    options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
1589
 *    and @ref XXH3_NEON_LANES == 8 if they are not already defined.
1590
 *  - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
1591
 *    Performance may cry. For example, the single shot functions just use the
1592
 *    streaming API.
1593
 */
1594
#  define XXH_SIZE_OPT 0
1595
1596
/*!
1597
 * @def XXH_FORCE_ALIGN_CHECK
1598
 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
1599
 * and XXH64() only).
1600
 *
1601
 * This is an important performance trick for architectures without decent
1602
 * unaligned memory access performance.
1603
 *
1604
 * It checks for input alignment, and when conditions are met, uses a "fast
1605
 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
1606
 * faster_ read speed.
1607
 *
1608
 * The check costs one initial branch per hash, which is generally negligible,
1609
 * but not zero.
1610
 *
1611
 * Moreover, it's not useful to generate an additional code path if memory
1612
 * access uses the same instruction for both aligned and unaligned
1613
 * addresses (e.g. x86 and aarch64).
1614
 *
1615
 * In these cases, the alignment check can be removed by setting this macro to 0.
1616
 * Then the code will always use unaligned memory access.
1617
 * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips
1618
 * which are platforms known to offer good unaligned memory access performance.
1619
 *
1620
 * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
1621
 *
1622
 * This option does not affect XXH3 (only XXH32 and XXH64).
1623
 */
1624
#  define XXH_FORCE_ALIGN_CHECK 0
1625
1626
/*!
1627
 * @def XXH_NO_INLINE_HINTS
1628
 * @brief When non-zero, sets all functions to `static`.
1629
 *
1630
 * By default, xxHash tries to force the compiler to inline almost all internal
1631
 * functions.
1632
 *
1633
 * This can usually improve performance due to reduced jumping and improved
1634
 * constant folding, but significantly increases the size of the binary which
1635
 * might not be favorable.
1636
 *
1637
 * Additionally, sometimes the forced inlining can be detrimental to performance,
1638
 * depending on the architecture.
1639
 *
1640
 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
1641
 * compiler full control on whether to inline or not.
1642
 *
1643
 * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
1644
 * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
1645
 */
1646
#  define XXH_NO_INLINE_HINTS 0
1647
1648
/*!
1649
 * @def XXH32_ENDJMP
1650
 * @brief Whether to use a jump for `XXH32_finalize`.
1651
 *
1652
 * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
1653
 * This is generally preferable for performance,
1654
 * but depending on the exact architecture, a jmp may be preferable.
1655
 *
1656
 * This setting only makes a difference, if at all, for very small inputs.
1657
 */
1658
#  define XXH32_ENDJMP 0
1659
1660
/*!
1661
 * @internal
1662
 * @brief Redefines old internal names.
1663
 *
1664
 * For compatibility with code that uses xxHash's internals before the names
1665
 * were changed to improve namespacing. There is no other reason to use this.
1666
 */
1667
#  define XXH_OLD_NAMES
1668
#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
1669
1670
/*!
1671
 * @def XXH_NO_STREAM
1672
 * @brief Disables the streaming API.
1673
 *
1674
 * When xxHash is not inlined and the streaming functions are not used, disabling
1675
 * the streaming functions can improve code size significantly, especially with
1676
 * the @ref XXH3_family which tends to make constant folded copies of itself.
1677
 */
1678
#  define XXH_NO_STREAM
1679
#  undef XXH_NO_STREAM /* don't actually */
1680
#endif /* XXH_DOXYGEN */
1681
/*!
1682
 * @}
1683
 */
1684
1685
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
1686
   /* prefer __packed__ structures (method 1) for GCC
1687
    * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy
1688
    * which for some reason does unaligned loads. */
1689
#  if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
1690
#    define XXH_FORCE_MEMORY_ACCESS 1
1691
#  endif
1692
#endif
1693
1694
#ifndef XXH_SIZE_OPT
1695
   /* default to 1 for -Os or -Oz */
1696
#  if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
1697
#    define XXH_SIZE_OPT 1
1698
#  else
1699
#    define XXH_SIZE_OPT 0
1700
#  endif
1701
#endif
1702
1703
#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
1704
   /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
1705
#  if XXH_SIZE_OPT >= 1 || \
1706
      defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
1707
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)    || defined(_M_ARM) \
1708
   || defined(__loongarch64) /* visual */
1709
0
#    define XXH_FORCE_ALIGN_CHECK 0
1710
#  else
1711
#    define XXH_FORCE_ALIGN_CHECK 1
1712
#  endif
1713
#endif
1714
1715
#ifndef XXH_NO_INLINE_HINTS
1716
#  if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__)  /* -O0, -fno-inline */
1717
#    define XXH_NO_INLINE_HINTS 1
1718
#  else
1719
#    define XXH_NO_INLINE_HINTS 0
1720
#  endif
1721
#endif
1722
1723
#ifndef XXH32_ENDJMP
1724
/* generally preferable for performance */
1725
0
#  define XXH32_ENDJMP 0
1726
#endif
1727
1728
/*!
1729
 * @defgroup impl Implementation
1730
 * @{
1731
 */
1732
1733
1734
/* *************************************
1735
*  Includes & Memory related functions
1736
***************************************/
1737
#if defined(XXH_NO_STREAM)
1738
/* nothing */
1739
#elif defined(XXH_NO_STDLIB)
1740
1741
/* When requesting to disable any mention of stdlib,
1742
 * the library loses the ability to invoke malloc() / free().
1743
 * In practice, it means that functions like `XXH*_createState()`
1744
 * will always fail, and return NULL.
1745
 * This flag is useful in situations where
1746
 * xxhash.h is integrated into some kernel, embedded or limited environment
1747
 * without access to dynamic allocation.
1748
 */
1749
1750
static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
1751
static void XXH_free(void* p) { (void)p; }
1752
1753
#else
1754
1755
/*
1756
 * Modify the local functions below should you wish to use
1757
 * different memory routines for malloc() and free()
1758
 */
1759
#include <stdlib.h>
1760
1761
/*!
1762
 * @internal
1763
 * @brief Modify this function to use a different routine than malloc().
1764
 */
1765
5.86k
static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
1766
1767
/*!
1768
 * @internal
1769
 * @brief Modify this function to use a different routine than free().
1770
 */
1771
5.86k
static void XXH_free(void* p) { free(p); }
1772
1773
#endif  /* XXH_NO_STDLIB */
1774
1775
#include <string.h>
1776
1777
/*!
1778
 * @internal
1779
 * @brief Modify this function to use a different routine than memcpy().
1780
 */
1781
static void* XXH_memcpy(void* dest, const void* src, size_t size)
1782
61.3k
{
1783
61.3k
    return memcpy(dest,src,size);
1784
61.3k
}
1785
1786
#include <limits.h>   /* ULLONG_MAX */
1787
1788
1789
/* *************************************
1790
*  Compiler Specific Options
1791
***************************************/
1792
#ifdef _MSC_VER /* Visual Studio warning fix */
1793
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
1794
#endif
1795
1796
#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
1797
#  if defined(__GNUC__) || defined(__clang__)
1798
#    define XXH_FORCE_INLINE static __attribute__((unused))
1799
#  else
1800
#    define XXH_FORCE_INLINE static
1801
#  endif
1802
#  define XXH_NO_INLINE static
1803
/* enable inlining hints */
1804
#elif defined(__GNUC__) || defined(__clang__)
1805
#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
1806
#  define XXH_NO_INLINE static __attribute__((noinline))
1807
#elif defined(_MSC_VER)  /* Visual Studio */
1808
#  define XXH_FORCE_INLINE static __forceinline
1809
#  define XXH_NO_INLINE static __declspec(noinline)
1810
#elif defined (__cplusplus) \
1811
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
1812
#  define XXH_FORCE_INLINE static inline
1813
#  define XXH_NO_INLINE static
1814
#else
1815
#  define XXH_FORCE_INLINE static
1816
#  define XXH_NO_INLINE static
1817
#endif
1818
1819
1820
1821
/* *************************************
1822
*  Debug
1823
***************************************/
1824
/*!
1825
 * @ingroup tuning
1826
 * @def XXH_DEBUGLEVEL
1827
 * @brief Sets the debugging level.
1828
 *
1829
 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
1830
 * compiler's command line options. The value must be a number.
1831
 */
1832
#ifndef XXH_DEBUGLEVEL
1833
#  ifdef DEBUGLEVEL /* backwards compat */
1834
#    define XXH_DEBUGLEVEL DEBUGLEVEL
1835
#  else
1836
#    define XXH_DEBUGLEVEL 0
1837
#  endif
1838
#endif
1839
1840
#if (XXH_DEBUGLEVEL>=1)
1841
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
1842
#  define XXH_ASSERT(c)   assert(c)
1843
#else
1844
43.4M
#  define XXH_ASSERT(c)   XXH_ASSUME(c)
1845
#endif
1846
1847
/* note: use after variable declarations */
1848
#ifndef XXH_STATIC_ASSERT
1849
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
1850
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
1851
#  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
1852
242k
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
1853
#  else
1854
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
1855
#  endif
1856
242k
#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
1857
#endif
1858
1859
/*!
1860
 * @internal
1861
 * @def XXH_COMPILER_GUARD(var)
1862
 * @brief Used to prevent unwanted optimizations for @p var.
1863
 *
1864
 * It uses an empty GCC inline assembly statement with a register constraint
1865
 * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
1866
 * on x86) and marks it as modified.
1867
 *
1868
 * This is used in a few places to avoid unwanted autovectorization (e.g.
1869
 * XXH32_round()). All vectorization we want is explicit via intrinsics,
1870
 * and _usually_ isn't wanted elsewhere.
1871
 *
1872
 * We also use it to prevent unwanted constant folding for AArch64 in
1873
 * XXH3_initCustomSecret_scalar().
1874
 */
1875
#if defined(__GNUC__) || defined(__clang__)
1876
614k
#  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
1877
#else
1878
#  define XXH_COMPILER_GUARD(var) ((void)0)
1879
#endif
1880
1881
#if defined(__GNUC__) || defined(__clang__)
1882
#  define XXH_COMPILER_GUARD_W(var) __asm__ __volatile__("" : "+w" (var))
1883
#else
1884
#  define XXH_COMPILER_GUARD_W(var) ((void)0)
1885
#endif
1886
1887
/* *************************************
1888
*  Basic Types
1889
***************************************/
1890
#if !defined (__VMS) \
1891
 && (defined (__cplusplus) \
1892
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
1893
# include <stdint.h>
1894
  typedef uint8_t xxh_u8;
1895
#else
1896
  typedef unsigned char xxh_u8;
1897
#endif
1898
typedef XXH32_hash_t xxh_u32;
1899
1900
#ifdef XXH_OLD_NAMES
1901
#  define BYTE xxh_u8
1902
#  define U8   xxh_u8
1903
#  define U32  xxh_u32
1904
#endif
1905
1906
/* ***   Memory access   *** */
1907
1908
/*!
1909
 * @internal
1910
 * @fn xxh_u32 XXH_read32(const void* ptr)
1911
 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
1912
 *
1913
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1914
 *
1915
 * @param ptr The pointer to read from.
1916
 * @return The 32-bit native endian integer from the bytes at @p ptr.
1917
 */
1918
1919
/*!
1920
 * @internal
1921
 * @fn xxh_u32 XXH_readLE32(const void* ptr)
1922
 * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
1923
 *
1924
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1925
 *
1926
 * @param ptr The pointer to read from.
1927
 * @return The 32-bit little endian integer from the bytes at @p ptr.
1928
 */
1929
1930
/*!
1931
 * @internal
1932
 * @fn xxh_u32 XXH_readBE32(const void* ptr)
1933
 * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
1934
 *
1935
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1936
 *
1937
 * @param ptr The pointer to read from.
1938
 * @return The 32-bit big endian integer from the bytes at @p ptr.
1939
 */
1940
1941
/*!
1942
 * @internal
1943
 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
1944
 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
1945
 *
1946
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1947
 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
1948
 * always @ref XXH_alignment::XXH_unaligned.
1949
 *
1950
 * @param ptr The pointer to read from.
1951
 * @param align Whether @p ptr is aligned.
1952
 * @pre
1953
 *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
1954
 *   aligned.
1955
 * @return The 32-bit little endian integer from the bytes at @p ptr.
1956
 */
1957
1958
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1959
/*
1960
 * Manual byteshift. Best for old compilers which don't inline memcpy.
1961
 * We actually directly use XXH_readLE32 and XXH_readBE32.
1962
 */
1963
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
1964
1965
/*
1966
 * Force direct memory access. Only works on CPUs which support unaligned memory
1967
 * access in hardware.
1968
 */
1969
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
1970
1971
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
1972
1973
/*
1974
 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
1975
 * documentation claimed that it only increased the alignment, but actually it
1976
 * can decrease it on gcc, clang, and icc:
1977
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
1978
 * https://gcc.godbolt.org/z/xYez1j67Y.
1979
 */
1980
#ifdef XXH_OLD_NAMES
1981
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
1982
#endif
1983
static xxh_u32 XXH_read32(const void* ptr)
1984
3.64k
{
1985
3.64k
    typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
1986
3.64k
    return *((const xxh_unalign32*)ptr);
1987
3.64k
}
1988
1989
#else
1990
1991
/*
1992
 * Portable and safe solution. Generally efficient.
1993
 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
1994
 */
1995
static xxh_u32 XXH_read32(const void* memPtr)
1996
{
1997
    xxh_u32 val;
1998
    XXH_memcpy(&val, memPtr, sizeof(val));
1999
    return val;
2000
}
2001
2002
#endif   /* XXH_FORCE_MEMORY_ACCESS */
2003
2004
2005
/* ***   Endianness   *** */
2006
2007
/*!
2008
 * @ingroup tuning
2009
 * @def XXH_CPU_LITTLE_ENDIAN
2010
 * @brief Whether the target is little endian.
2011
 *
2012
 * Defined to 1 if the target is little endian, or 0 if it is big endian.
2013
 * It can be defined externally, for example on the compiler command line.
2014
 *
2015
 * If it is not defined,
2016
 * a runtime check (which is usually constant folded) is used instead.
2017
 *
2018
 * @note
2019
 *   This is not necessarily defined to an integer constant.
2020
 *
2021
 * @see XXH_isLittleEndian() for the runtime check.
2022
 */
2023
#ifndef XXH_CPU_LITTLE_ENDIAN
2024
/*
2025
 * Try to detect endianness automatically, to avoid the nonstandard behavior
2026
 * in `XXH_isLittleEndian()`
2027
 */
2028
#  if defined(_WIN32) /* Windows is always little endian */ \
2029
     || defined(__LITTLE_ENDIAN__) \
2030
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
2031
31.8M
#    define XXH_CPU_LITTLE_ENDIAN 1
2032
#  elif defined(__BIG_ENDIAN__) \
2033
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
2034
#    define XXH_CPU_LITTLE_ENDIAN 0
2035
#  else
2036
/*!
2037
 * @internal
2038
 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
2039
 *
2040
 * Most compilers will constant fold this.
2041
 */
2042
static int XXH_isLittleEndian(void)
2043
{
2044
    /*
2045
     * Portable and well-defined behavior.
2046
     * Don't use static: it is detrimental to performance.
2047
     */
2048
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
2049
    return one.c[0];
2050
}
2051
#   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
2052
#  endif
2053
#endif
2054
2055
2056
2057
2058
/* ****************************************
2059
*  Compiler-specific Functions and Macros
2060
******************************************/
2061
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
2062
2063
#ifdef __has_builtin
2064
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
2065
#else
2066
#  define XXH_HAS_BUILTIN(x) 0
2067
#endif
2068
2069
2070
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L)
2071
/* C23 and future versions have standard "unreachable()" */
2072
#  include <stddef.h>
2073
#  define XXH_UNREACHABLE() unreachable()
2074
2075
#elif defined(__cplusplus) && (__cplusplus > 202002L)
2076
#  define XXH_UNREACHABLE() std::unreachable()
2077
2078
#elif XXH_HAS_BUILTIN(__builtin_unreachable)
2079
0
#  define XXH_UNREACHABLE() __builtin_unreachable()
2080
2081
#elif defined(_MSC_VER)
2082
#  define XXH_UNREACHABLE() __assume(0)
2083
2084
#else
2085
#  define XXH_UNREACHABLE()
2086
#endif
2087
2088
43.4M
#define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
2089
2090
/*!
2091
 * @internal
2092
 * @def XXH_rotl32(x,r)
2093
 * @brief 32-bit rotate left.
2094
 *
2095
 * @param x The 32-bit integer to be rotated.
2096
 * @param r The number of bits to rotate.
2097
 * @pre
2098
 *   @p r > 0 && @p r < 32
2099
 * @note
2100
 *   @p x and @p r may be evaluated multiple times.
2101
 * @return The rotated result.
2102
 */
2103
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
2104
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
2105
0
#  define XXH_rotl32 __builtin_rotateleft32
2106
3.64k
#  define XXH_rotl64 __builtin_rotateleft64
2107
/* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
2108
#elif defined(_MSC_VER)
2109
#  define XXH_rotl32(x,r) _rotl(x,r)
2110
#  define XXH_rotl64(x,r) _rotl64(x,r)
2111
#else
2112
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
2113
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
2114
#endif
2115
2116
/*!
2117
 * @internal
2118
 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
2119
 * @brief A 32-bit byteswap.
2120
 *
2121
 * @param x The 32-bit integer to byteswap.
2122
 * @return @p x, byteswapped.
2123
 */
2124
#if defined(_MSC_VER)     /* Visual Studio */
2125
#  define XXH_swap32 _byteswap_ulong
2126
#elif XXH_GCC_VERSION >= 403
2127
#  define XXH_swap32 __builtin_bswap32
2128
#else
2129
static xxh_u32 XXH_swap32 (xxh_u32 x)
2130
1.82k
{
2131
1.82k
    return  ((x << 24) & 0xff000000 ) |
2132
1.82k
            ((x <<  8) & 0x00ff0000 ) |
2133
1.82k
            ((x >>  8) & 0x0000ff00 ) |
2134
1.82k
            ((x >> 24) & 0x000000ff );
2135
1.82k
}
2136
#endif
2137
2138
2139
/* ***************************
2140
*  Memory reads
2141
*****************************/
2142
2143
/*!
2144
 * @internal
2145
 * @brief Enum to indicate whether a pointer is aligned.
2146
 */
2147
typedef enum {
2148
    XXH_aligned,  /*!< Aligned */
2149
    XXH_unaligned /*!< Possibly unaligned */
2150
} XXH_alignment;
2151
2152
/*
2153
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
2154
 *
2155
 * This is ideal for older compilers which don't inline memcpy.
2156
 */
2157
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2158
2159
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
2160
{
2161
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2162
    return bytePtr[0]
2163
         | ((xxh_u32)bytePtr[1] << 8)
2164
         | ((xxh_u32)bytePtr[2] << 16)
2165
         | ((xxh_u32)bytePtr[3] << 24);
2166
}
2167
2168
XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
2169
{
2170
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2171
    return bytePtr[3]
2172
         | ((xxh_u32)bytePtr[2] << 8)
2173
         | ((xxh_u32)bytePtr[1] << 16)
2174
         | ((xxh_u32)bytePtr[0] << 24);
2175
}
2176
2177
#else
2178
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
2179
3.64k
{
2180
3.64k
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
2181
3.64k
}
2182
2183
static xxh_u32 XXH_readBE32(const void* ptr)
2184
0
{
2185
0
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
2186
0
}
2187
#endif
2188
2189
XXH_FORCE_INLINE xxh_u32
2190
XXH_readLE32_align(const void* ptr, XXH_alignment align)
2191
0
{
2192
0
    if (align==XXH_unaligned) {
2193
0
        return XXH_readLE32(ptr);
2194
0
    } else {
2195
0
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
2196
0
    }
2197
0
}
2198
2199
2200
/* *************************************
2201
*  Misc
2202
***************************************/
2203
/*! @ingroup public */
2204
0
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
2205
2206
2207
/* *******************************************************************
2208
*  32-bit hash functions
2209
*********************************************************************/
2210
/*!
2211
 * @}
2212
 * @defgroup XXH32_impl XXH32 implementation
2213
 * @ingroup impl
2214
 *
2215
 * Details on the XXH32 implementation.
2216
 * @{
2217
 */
2218
 /* #define instead of static const, to be used as initializers */
2219
2.71M
#define XXH_PRIME32_1  0x9E3779B1U  /*!< 0b10011110001101110111100110110001 */
2220
1.39M
#define XXH_PRIME32_2  0x85EBCA77U  /*!< 0b10000101111010111100101001110111 */
2221
1.39M
#define XXH_PRIME32_3  0xC2B2AE3DU  /*!< 0b11000010101100101010111000111101 */
2222
0
#define XXH_PRIME32_4  0x27D4EB2FU  /*!< 0b00100111110101001110101100101111 */
2223
0
#define XXH_PRIME32_5  0x165667B1U  /*!< 0b00010110010101100110011110110001 */
2224
2225
#ifdef XXH_OLD_NAMES
2226
#  define PRIME32_1 XXH_PRIME32_1
2227
#  define PRIME32_2 XXH_PRIME32_2
2228
#  define PRIME32_3 XXH_PRIME32_3
2229
#  define PRIME32_4 XXH_PRIME32_4
2230
#  define PRIME32_5 XXH_PRIME32_5
2231
#endif
2232
2233
/*!
2234
 * @internal
2235
 * @brief Normal stripe processing routine.
2236
 *
2237
 * This shuffles the bits so that any bit from @p input impacts several bits in
2238
 * @p acc.
2239
 *
2240
 * @param acc The accumulator lane.
2241
 * @param input The stripe of input to mix.
2242
 * @return The mixed accumulator lane.
2243
 */
2244
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
2245
0
{
2246
0
    acc += input * XXH_PRIME32_2;
2247
0
    acc  = XXH_rotl32(acc, 13);
2248
0
    acc *= XXH_PRIME32_1;
2249
0
#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
2250
    /*
2251
     * UGLY HACK:
2252
     * A compiler fence is the only thing that prevents GCC and Clang from
2253
     * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
2254
     * reason) without globally disabling SSE4.1.
2255
     *
2256
     * The reason we want to avoid vectorization is because despite working on
2257
     * 4 integers at a time, there are multiple factors slowing XXH32 down on
2258
     * SSE4:
2259
     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
2260
     *   newer chips!) making it slightly slower to multiply four integers at
2261
     *   once compared to four integers independently. Even when pmulld was
2262
     *   fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
2263
     *   just to multiply unless doing a long operation.
2264
     *
2265
     * - Four instructions are required to rotate,
2266
     *      movqda tmp,  v // not required with VEX encoding
2267
     *      pslld  tmp, 13 // tmp <<= 13
2268
     *      psrld  v,   19 // x >>= 19
2269
     *      por    v,  tmp // x |= tmp
2270
     *   compared to one for scalar:
2271
     *      roll   v, 13    // reliably fast across the board
2272
     *      shldl  v, v, 13 // Sandy Bridge and later prefer this for some reason
2273
     *
2274
     * - Instruction level parallelism is actually more beneficial here because
2275
     *   the SIMD actually serializes this operation: While v1 is rotating, v2
2276
     *   can load data, while v3 can multiply. SSE forces them to operate
2277
     *   together.
2278
     *
2279
     * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
2280
     * and it is pointless writing a NEON implementation that is basically the
2281
     * same speed as scalar for XXH32.
2282
     */
2283
0
    XXH_COMPILER_GUARD(acc);
2284
0
#endif
2285
0
    return acc;
2286
0
}
2287
2288
/*!
2289
 * @internal
2290
 * @brief Mixes all bits to finalize the hash.
2291
 *
2292
 * The final mix ensures that all input bits have a chance to impact any bit in
2293
 * the output digest, resulting in an unbiased distribution.
2294
 *
2295
 * @param hash The hash to avalanche.
2296
 * @return The avalanched hash.
2297
 */
2298
static xxh_u32 XXH32_avalanche(xxh_u32 hash)
2299
0
{
2300
0
    hash ^= hash >> 15;
2301
0
    hash *= XXH_PRIME32_2;
2302
0
    hash ^= hash >> 13;
2303
0
    hash *= XXH_PRIME32_3;
2304
0
    hash ^= hash >> 16;
2305
0
    return hash;
2306
0
}
2307
2308
0
#define XXH_get32bits(p) XXH_readLE32_align(p, align)
2309
2310
/*!
2311
 * @internal
2312
 * @brief Processes the last 0-15 bytes of @p ptr.
2313
 *
2314
 * There may be up to 15 bytes remaining to consume from the input.
2315
 * This final stage will digest them to ensure that all input bytes are present
2316
 * in the final mix.
2317
 *
2318
 * @param hash The hash to finalize.
2319
 * @param ptr The pointer to the remaining input.
2320
 * @param len The remaining length, modulo 16.
2321
 * @param align Whether @p ptr is aligned.
2322
 * @return The finalized hash.
2323
 * @see XXH64_finalize().
2324
 */
2325
static XXH_PUREF xxh_u32
2326
XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
2327
0
{
2328
0
#define XXH_PROCESS1 do {                             \
2329
0
    hash += (*ptr++) * XXH_PRIME32_5;                 \
2330
0
    hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1;      \
2331
0
} while (0)
2332
2333
0
#define XXH_PROCESS4 do {                             \
2334
0
    hash += XXH_get32bits(ptr) * XXH_PRIME32_3;       \
2335
0
    ptr += 4;                                         \
2336
0
    hash  = XXH_rotl32(hash, 17) * XXH_PRIME32_4;     \
2337
0
} while (0)
2338
2339
0
    if (ptr==nullptr) XXH_ASSERT(len == 0)
2340
2341
    /* Compact rerolled version; generally faster */
2342
0
    if (!XXH32_ENDJMP) {
2343
0
        len &= 15;
2344
0
        while (len >= 4) {
2345
0
            XXH_PROCESS4;
2346
0
            len -= 4;
2347
0
        }
2348
0
        while (len > 0) {
2349
0
            XXH_PROCESS1;
2350
0
            --len;
2351
0
        }
2352
0
        return XXH32_avalanche(hash);
2353
0
    } else {
2354
0
         switch(len&15) /* or switch(bEnd - p) */ {
2355
0
           case 12:      XXH_PROCESS4;
2356
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2357
0
           case 8:       XXH_PROCESS4;
2358
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2359
0
           case 4:       XXH_PROCESS4;
2360
0
                         return XXH32_avalanche(hash);
2361
2362
0
           case 13:      XXH_PROCESS4;
2363
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2364
0
           case 9:       XXH_PROCESS4;
2365
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2366
0
           case 5:       XXH_PROCESS4;
2367
0
                         XXH_PROCESS1;
2368
0
                         return XXH32_avalanche(hash);
2369
2370
0
           case 14:      XXH_PROCESS4;
2371
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2372
0
           case 10:      XXH_PROCESS4;
2373
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2374
0
           case 6:       XXH_PROCESS4;
2375
0
                         XXH_PROCESS1;
2376
0
                         XXH_PROCESS1;
2377
0
                         return XXH32_avalanche(hash);
2378
2379
0
           case 15:      XXH_PROCESS4;
2380
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2381
0
           case 11:      XXH_PROCESS4;
2382
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2383
0
           case 7:       XXH_PROCESS4;
2384
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2385
0
           case 3:       XXH_PROCESS1;
2386
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2387
0
           case 2:       XXH_PROCESS1;
2388
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2389
0
           case 1:       XXH_PROCESS1;
2390
0
                         XXH_FALLTHROUGH;  /* fallthrough */
2391
0
           case 0:       return XXH32_avalanche(hash);
2392
0
        }
2393
0
        XXH_ASSERT(0)
2394
0
        return hash;   /* reaching this point is deemed impossible */
2395
0
    }
2396
0
}
2397
2398
#ifdef XXH_OLD_NAMES
2399
#  define PROCESS1 XXH_PROCESS1
2400
#  define PROCESS4 XXH_PROCESS4
2401
#else
2402
#  undef XXH_PROCESS1
2403
#  undef XXH_PROCESS4
2404
#endif
2405
2406
/*!
2407
 * @internal
2408
 * @brief The implementation for @ref XXH32().
2409
 *
2410
 * @param input , len , seed Directly passed from @ref XXH32().
2411
 * @param align Whether @p input is aligned.
2412
 * @return The calculated hash.
2413
 */
2414
XXH_FORCE_INLINE XXH_PUREF xxh_u32
2415
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
2416
0
{
2417
0
    xxh_u32 h32;
2418
2419
0
    if (input==nullptr) XXH_ASSERT(len == 0)
2420
2421
0
    if (len>=16) {
2422
0
        const xxh_u8* const bEnd = input + len;
2423
0
        const xxh_u8* const limit = bEnd - 15;
2424
0
        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2425
0
        xxh_u32 v2 = seed + XXH_PRIME32_2;
2426
0
        xxh_u32 v3 = seed + 0;
2427
0
        xxh_u32 v4 = seed - XXH_PRIME32_1;
2428
2429
0
        do {
2430
0
            v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
2431
0
            v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
2432
0
            v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
2433
0
            v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
2434
0
        } while (input < limit);
2435
2436
0
        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
2437
0
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
2438
0
    } else {
2439
0
        h32  = seed + XXH_PRIME32_5;
2440
0
    }
2441
2442
0
    h32 += (xxh_u32)len;
2443
2444
0
    return XXH32_finalize(h32, input, len&15, align);
2445
0
}
2446
2447
/*! @ingroup XXH32_family */
2448
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
2449
0
{
2450
#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
2451
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2452
    XXH32_state_t state;
2453
    XXH32_reset(&state, seed);
2454
    XXH32_update(&state, (const xxh_u8*)input, len);
2455
    return XXH32_digest(&state);
2456
#else
2457
0
    if (XXH_FORCE_ALIGN_CHECK) {
2458
0
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
2459
0
            return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2460
0
    }   }
2461
2462
0
    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2463
0
#endif
2464
0
}
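/*
 * Example (illustrative sketch): one-shot use of the public XXH32() entry
 * point defined above. `msg` is a hypothetical input buffer.
 *
 *    const char msg[] = "hello";
 *    XXH32_hash_t h = XXH32(msg, sizeof(msg) - 1, 0);   // seed = 0
 */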
2465
2466
2467
2468
/*******   Hash streaming   *******/
2469
#ifndef XXH_NO_STREAM
2470
/*! @ingroup XXH32_family */
2471
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
2472
0
{
2473
0
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
2474
0
}
2475
/*! @ingroup XXH32_family */
2476
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
2477
0
{
2478
0
    XXH_free(statePtr);
2479
0
    return XXH_OK;
2480
0
}
2481
2482
/*! @ingroup XXH32_family */
2483
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
2484
0
{
2485
0
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
2486
0
}
2487
2488
/*! @ingroup XXH32_family */
2489
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
2490
0
{
2491
0
    XXH_ASSERT(statePtr != NULL)
2492
0
    memset(statePtr, 0, sizeof(*statePtr));
2493
0
    statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2494
0
    statePtr->v[1] = seed + XXH_PRIME32_2;
2495
0
    statePtr->v[2] = seed + 0;
2496
0
    statePtr->v[3] = seed - XXH_PRIME32_1;
2497
0
    return XXH_OK;
2498
0
}
2499
2500
2501
/*! @ingroup XXH32_family */
2502
XXH_PUBLIC_API XXH_errorcode
2503
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
2504
0
{
2505
0
    if (input==nullptr) {
2506
0
        XXH_ASSERT(len == 0)
2507
0
        return XXH_OK;
2508
0
    }
2509
2510
0
    {   const xxh_u8* p = (const xxh_u8*)input;
2511
0
        const xxh_u8* const bEnd = p + len;
2512
2513
0
        state->total_len_32 += (XXH32_hash_t)len;
2514
0
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
2515
2516
0
        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
2517
0
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
2518
0
            state->memsize += (XXH32_hash_t)len;
2519
0
            return XXH_OK;
2520
0
        }
2521
2522
0
        if (state->memsize) {   /* some data left from previous update */
2523
0
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
2524
0
            {   const xxh_u32* p32 = state->mem32;
2525
0
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
2526
0
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
2527
0
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
2528
0
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
2529
0
            }
2530
0
            p += 16-state->memsize;
2531
0
            state->memsize = 0;
2532
0
        }
2533
2534
0
        if (p <= bEnd-16) {
2535
0
            const xxh_u8* const limit = bEnd - 16;
2536
2537
0
            do {
2538
0
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
2539
0
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
2540
0
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
2541
0
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
2542
0
            } while (p<=limit);
2543
2544
0
        }
2545
2546
0
        if (p < bEnd) {
2547
0
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
2548
0
            state->memsize = (unsigned)(bEnd-p);
2549
0
        }
2550
0
    }
2551
2552
0
    return XXH_OK;
2553
0
}
2554
2555
2556
/*! @ingroup XXH32_family */
2557
XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
2558
0
{
2559
0
    xxh_u32 h32;
2560
2561
0
    if (state->large_len) {
2562
0
        h32 = XXH_rotl32(state->v[0], 1)
2563
0
            + XXH_rotl32(state->v[1], 7)
2564
0
            + XXH_rotl32(state->v[2], 12)
2565
0
            + XXH_rotl32(state->v[3], 18);
2566
0
    } else {
2567
0
        h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
2568
0
    }
2569
2570
0
    h32 += state->total_len_32;
2571
2572
0
    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
2573
0
}
2574
#endif /* !XXH_NO_STREAM */
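/*
 * Example (illustrative sketch): streaming use of the XXH32 functions defined
 * above, assuming XXH_NO_STREAM is not defined. `next_chunk` is a hypothetical
 * routine producing (buf, len) pairs; error handling is omitted.
 *
 *    XXH32_state_t* st = XXH32_createState();
 *    const void* buf; size_t len;
 *    XXH32_reset(st, 0);                        // seed = 0
 *    while (next_chunk(&buf, &len))
 *        XXH32_update(st, buf, len);            // feed data incrementally
 *    {   XXH32_hash_t h = XXH32_digest(st);     // equals the one-shot XXH32() result
 *        (void)h;
 *    }
 *    XXH32_freeState(st);
 */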
2575
2576
/*******   Canonical representation   *******/
2577
2578
/*!
2579
 * @ingroup XXH32_family
2580
 * The default return values from XXH functions are unsigned 32 and 64 bit
2581
 * integers.
2582
 *
2583
 * The canonical representation uses big endian convention, the same convention
2584
 * as human-readable numbers (large digits first).
2585
 *
2586
 * This way, hash values can be written into a file or buffer, remaining
2587
 * comparable across different systems.
2588
 *
2589
 * The following functions allow transformation of hash values to and from their
2590
 * canonical format.
2591
 */
2592
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
2593
0
{
2594
0
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
2595
0
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
2596
0
    XXH_memcpy(dst, &hash, sizeof(*dst));
2597
0
}
2598
/*! @ingroup XXH32_family */
2599
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
2600
0
{
2601
0
    return XXH_readBE32(src);
2602
0
}
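/*
 * Example (illustrative sketch): round-tripping a hash through its canonical
 * (big endian) form with the two functions above. `h` is a hypothetical
 * previously computed XXH32_hash_t; the canonical bytes are what should be
 * stored or transmitted.
 *
 *    XXH32_canonical_t canon;
 *    XXH32_canonicalFromHash(&canon, h);        // big endian byte order, portable
 *    XXH32_hash_t restored = XXH32_hashFromCanonical(&canon);
 *    // restored == h on any platform
 */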
2603
2604
2605
#ifndef XXH_NO_LONG_LONG
2606
2607
/* *******************************************************************
2608
*  64-bit hash functions
2609
*********************************************************************/
2610
/*!
2611
 * @}
2612
 * @ingroup impl
2613
 * @{
2614
 */
2615
/*******   Memory access   *******/
2616
2617
typedef XXH64_hash_t xxh_u64;
2618
2619
#ifdef XXH_OLD_NAMES
2620
#  define U64 xxh_u64
2621
#endif
2622
2623
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2624
/*
2625
 * Manual byteshift. Best for old compilers which don't inline memcpy.
2626
 * We actually directly use XXH_readLE64 and XXH_readBE64.
2627
 */
2628
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
2629
2630
/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
2631
static xxh_u64 XXH_read64(const void* memPtr)
2632
{
2633
    return *(const xxh_u64*) memPtr;
2634
}
2635
2636
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
2637
2638
/*
2639
 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
2640
 * documentation claimed that it only increased the alignment, but actually it
2641
 * can decrease it on gcc, clang, and icc:
2642
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
2643
 * https://gcc.godbolt.org/z/xYez1j67Y.
2644
 */
2645
#ifdef XXH_OLD_NAMES
2646
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
2647
#endif
2648
static xxh_u64 XXH_read64(const void* ptr)
2649
31.8M
{
2650
31.8M
    typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
2651
31.8M
    return *((const xxh_unalign64*)ptr);
2652
31.8M
}
2653
2654
#else
2655
2656
/*
2657
 * Portable and safe solution. Generally efficient.
2658
 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
2659
 */
2660
static xxh_u64 XXH_read64(const void* memPtr)
2661
{
2662
    xxh_u64 val;
2663
    XXH_memcpy(&val, memPtr, sizeof(val));
2664
    return val;
2665
}
2666
2667
#endif   /* XXH_FORCE_MEMORY_ACCESS */
2668
2669
#if defined(_MSC_VER)     /* Visual Studio */
2670
#  define XXH_swap64 _byteswap_uint64
2671
#elif XXH_GCC_VERSION >= 403
2672
#  define XXH_swap64 __builtin_bswap64
2673
#else
2674
static xxh_u64 XXH_swap64(xxh_u64 x)
2675
1.03M
{
2676
1.03M
    return  ((x << 56) & 0xff00000000000000ULL) |
2677
1.03M
            ((x << 40) & 0x00ff000000000000ULL) |
2678
1.03M
            ((x << 24) & 0x0000ff0000000000ULL) |
2679
1.03M
            ((x << 8)  & 0x000000ff00000000ULL) |
2680
1.03M
            ((x >> 8)  & 0x00000000ff000000ULL) |
2681
1.03M
            ((x >> 24) & 0x0000000000ff0000ULL) |
2682
1.03M
            ((x >> 40) & 0x000000000000ff00ULL) |
2683
1.03M
            ((x >> 56) & 0x00000000000000ffULL);
2684
1.03M
}
2685
#endif
2686
2687
2688
/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
2689
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2690
2691
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
2692
{
2693
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2694
    return bytePtr[0]
2695
         | ((xxh_u64)bytePtr[1] << 8)
2696
         | ((xxh_u64)bytePtr[2] << 16)
2697
         | ((xxh_u64)bytePtr[3] << 24)
2698
         | ((xxh_u64)bytePtr[4] << 32)
2699
         | ((xxh_u64)bytePtr[5] << 40)
2700
         | ((xxh_u64)bytePtr[6] << 48)
2701
         | ((xxh_u64)bytePtr[7] << 56);
2702
}
2703
2704
XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
2705
{
2706
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2707
    return bytePtr[7]
2708
         | ((xxh_u64)bytePtr[6] << 8)
2709
         | ((xxh_u64)bytePtr[5] << 16)
2710
         | ((xxh_u64)bytePtr[4] << 24)
2711
         | ((xxh_u64)bytePtr[3] << 32)
2712
         | ((xxh_u64)bytePtr[2] << 40)
2713
         | ((xxh_u64)bytePtr[1] << 48)
2714
         | ((xxh_u64)bytePtr[0] << 56);
2715
}
2716
2717
#else
2718
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
2719
31.8M
{
2720
18.4E
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
2721
31.8M
}
2722
2723
static xxh_u64 XXH_readBE64(const void* ptr)
2724
0
{
2725
0
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
2726
0
}
2727
#endif
2728
2729
XXH_FORCE_INLINE xxh_u64
2730
XXH_readLE64_align(const void* ptr, XXH_alignment align)
2731
0
{
2732
0
    if (align==XXH_unaligned)
2733
0
        return XXH_readLE64(ptr);
2734
0
    else
2735
0
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
2736
0
}
2737
2738
2739
/*******   xxh64   *******/
2740
/*!
2741
 * @}
2742
 * @defgroup XXH64_impl XXH64 implementation
2743
 * @ingroup impl
2744
 *
2745
 * Details on the XXH64 implementation.
2746
 * @{
2747
 */
2748
/* #define rather than static const, to be used as initializers */
2749
2.80M
#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
2750
1.41M
#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
2751
1.39M
#define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
2752
1.41M
#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
2753
1.39M
#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
2754
2755
#ifdef XXH_OLD_NAMES
2756
#  define PRIME64_1 XXH_PRIME64_1
2757
#  define PRIME64_2 XXH_PRIME64_2
2758
#  define PRIME64_3 XXH_PRIME64_3
2759
#  define PRIME64_4 XXH_PRIME64_4
2760
#  define PRIME64_5 XXH_PRIME64_5
2761
#endif
2762
2763
/*! @copydoc XXH32_round */
2764
static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
2765
0
{
2766
0
    acc += input * XXH_PRIME64_2;
2767
0
    acc  = XXH_rotl64(acc, 31);
2768
0
    acc *= XXH_PRIME64_1;
2769
0
    return acc;
2770
0
}
2771
2772
static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
2773
0
{
2774
0
    val  = XXH64_round(0, val);
2775
0
    acc ^= val;
2776
0
    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
2777
0
    return acc;
2778
0
}
2779
2780
/*! @copydoc XXH32_avalanche */
2781
static xxh_u64 XXH64_avalanche(xxh_u64 hash)
2782
0
{
2783
0
    hash ^= hash >> 33;
2784
0
    hash *= XXH_PRIME64_2;
2785
0
    hash ^= hash >> 29;
2786
0
    hash *= XXH_PRIME64_3;
2787
0
    hash ^= hash >> 32;
2788
0
    return hash;
2789
0
}
2790
2791
2792
0
#define XXH_get64bits(p) XXH_readLE64_align(p, align)
2793
2794
/*!
2795
 * @internal
2796
 * @brief Processes the last 0-31 bytes of @p ptr.
2797
 *
2798
 * There may be up to 31 bytes remaining to consume from the input.
2799
 * This final stage will digest them to ensure that all input bytes are present
2800
 * in the final mix.
2801
 *
2802
 * @param hash The hash to finalize.
2803
 * @param ptr The pointer to the remaining input.
2804
 * @param len The remaining length, modulo 32.
2805
 * @param align Whether @p ptr is aligned.
2806
 * @return The finalized hash
2807
 * @see XXH32_finalize().
2808
 */
2809
static XXH_PUREF xxh_u64
2810
XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
2811
0
{
2812
0
    if (ptr==nullptr) XXH_ASSERT(len == 0)
2813
0
    len &= 31;
2814
0
    while (len >= 8) {
2815
0
        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
2816
0
        ptr += 8;
2817
0
        hash ^= k1;
2818
0
        hash  = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
2819
0
        len -= 8;
2820
0
    }
2821
0
    if (len >= 4) {
2822
0
        hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
2823
0
        ptr += 4;
2824
0
        hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
2825
0
        len -= 4;
2826
0
    }
2827
0
    while (len > 0) {
2828
0
        hash ^= (*ptr++) * XXH_PRIME64_5;
2829
0
        hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
2830
0
        --len;
2831
0
    }
2832
0
    return  XXH64_avalanche(hash);
2833
0
}
2834
2835
#ifdef XXH_OLD_NAMES
2836
#  define PROCESS1_64 XXH_PROCESS1_64
2837
#  define PROCESS4_64 XXH_PROCESS4_64
2838
#  define PROCESS8_64 XXH_PROCESS8_64
2839
#else
2840
#  undef XXH_PROCESS1_64
2841
#  undef XXH_PROCESS4_64
2842
#  undef XXH_PROCESS8_64
2843
#endif
2844
2845
/*!
2846
 * @internal
2847
 * @brief The implementation for @ref XXH64().
2848
 *
2849
 * @param input , len , seed Directly passed from @ref XXH64().
2850
 * @param align Whether @p input is aligned.
2851
 * @return The calculated hash.
2852
 */
2853
XXH_FORCE_INLINE XXH_PUREF xxh_u64
2854
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
2855
0
{
2856
0
    xxh_u64 h64;
2857
0
    if (input==nullptr) XXH_ASSERT(len == 0)
2858
2859
0
    if (len>=32) {
2860
0
        const xxh_u8* const bEnd = input + len;
2861
0
        const xxh_u8* const limit = bEnd - 31;
2862
0
        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2863
0
        xxh_u64 v2 = seed + XXH_PRIME64_2;
2864
0
        xxh_u64 v3 = seed + 0;
2865
0
        xxh_u64 v4 = seed - XXH_PRIME64_1;
2866
2867
0
        do {
2868
0
            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
2869
0
            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
2870
0
            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
2871
0
            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
2872
0
        } while (input<limit);
2873
2874
0
        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
2875
0
        h64 = XXH64_mergeRound(h64, v1);
2876
0
        h64 = XXH64_mergeRound(h64, v2);
2877
0
        h64 = XXH64_mergeRound(h64, v3);
2878
0
        h64 = XXH64_mergeRound(h64, v4);
2879
2880
0
    } else {
2881
0
        h64  = seed + XXH_PRIME64_5;
2882
0
    }
2883
2884
0
    h64 += (xxh_u64) len;
2885
2886
0
    return XXH64_finalize(h64, input, len, align);
2887
0
}
2888
2889
2890
/*! @ingroup XXH64_family */
2891
XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
2892
0
{
2893
#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
2894
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2895
    XXH64_state_t state;
2896
    XXH64_reset(&state, seed);
2897
    XXH64_update(&state, (const xxh_u8*)input, len);
2898
    return XXH64_digest(&state);
2899
#else
2900
0
    if (XXH_FORCE_ALIGN_CHECK) {
2901
0
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
2902
0
            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2903
0
    }   }
2904
2905
0
    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2906
2907
0
#endif
2908
0
}
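A minimal one-shot usage sketch (editor's addition, not part of the instrumented source): it assumes the usual XXH_NAMESPACE aliasing so that the plain XXH64 name resolves to the ROCKSDB_-prefixed symbol, and the include path is illustrative only.

    #include <stdio.h>
    #include "util/xxhash.h"   /* illustrative include path */

    int main(void)
    {
        const char data[] = "hello world";
        /* one-shot hash of the buffer with seed 0 */
        XXH64_hash_t const h = XXH64(data, sizeof(data) - 1, 0);
        printf("XXH64 = %016llx\n", (unsigned long long)h);
        return 0;
    }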
2909
2910
/*******   Hash Streaming   *******/
2911
#ifndef XXH_NO_STREAM
2912
/*! @ingroup XXH64_family*/
2913
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
2914
0
{
2915
0
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
2916
0
}
2917
/*! @ingroup XXH64_family */
2918
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
2919
0
{
2920
0
    XXH_free(statePtr);
2921
0
    return XXH_OK;
2922
0
}
2923
2924
/*! @ingroup XXH64_family */
2925
XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
2926
0
{
2927
0
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
2928
0
}
2929
2930
/*! @ingroup XXH64_family */
2931
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
2932
0
{
2933
0
    XXH_ASSERT(statePtr != NULL)
2934
0
    memset(statePtr, 0, sizeof(*statePtr));
2935
0
    statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2936
0
    statePtr->v[1] = seed + XXH_PRIME64_2;
2937
0
    statePtr->v[2] = seed + 0;
2938
0
    statePtr->v[3] = seed - XXH_PRIME64_1;
2939
0
    return XXH_OK;
2940
0
}
2941
2942
/*! @ingroup XXH64_family */
2943
XXH_PUBLIC_API XXH_errorcode
2944
XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
2945
0
{
2946
0
    if (input==nullptr) {
2947
0
        XXH_ASSERT(len == 0)
2948
0
        return XXH_OK;
2949
0
    }
2950
2951
0
    {   const xxh_u8* p = (const xxh_u8*)input;
2952
0
        const xxh_u8* const bEnd = p + len;
2953
2954
0
        state->total_len += len;
2955
2956
0
        if (state->memsize + len < 32) {  /* fill in tmp buffer */
2957
0
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
2958
0
            state->memsize += (xxh_u32)len;
2959
0
            return XXH_OK;
2960
0
        }
2961
2962
0
        if (state->memsize) {   /* tmp buffer is full */
2963
0
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
2964
0
            state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
2965
0
            state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
2966
0
            state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
2967
0
            state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
2968
0
            p += 32 - state->memsize;
2969
0
            state->memsize = 0;
2970
0
        }
2971
2972
0
        if (p+32 <= bEnd) {
2973
0
            const xxh_u8* const limit = bEnd - 32;
2974
2975
0
            do {
2976
0
                state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
2977
0
                state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
2978
0
                state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
2979
0
                state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
2980
0
            } while (p<=limit);
2981
2982
0
        }
2983
2984
0
        if (p < bEnd) {
2985
0
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
2986
0
            state->memsize = (unsigned)(bEnd-p);
2987
0
        }
2988
0
    }
2989
2990
0
    return XXH_OK;
2991
0
}
2992
2993
2994
/*! @ingroup XXH64_family */
2995
XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
2996
0
{
2997
0
    xxh_u64 h64;
2998
2999
0
    if (state->total_len >= 32) {
3000
0
        h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
3001
0
        h64 = XXH64_mergeRound(h64, state->v[0]);
3002
0
        h64 = XXH64_mergeRound(h64, state->v[1]);
3003
0
        h64 = XXH64_mergeRound(h64, state->v[2]);
3004
0
        h64 = XXH64_mergeRound(h64, state->v[3]);
3005
0
    } else {
3006
0
        h64  = state->v[2] /*seed*/ + XXH_PRIME64_5;
3007
0
    }
3008
3009
0
    h64 += (xxh_u64) state->total_len;
3010
3011
0
    return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
3012
0
}
3013
#endif /* !XXH_NO_STREAM */
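For input that arrives in chunks, the streaming API above can be driven as in this hedged sketch (editor's addition; the chunk-array interface is hypothetical, and errors from reset/update are ignored for brevity):

    XXH64_hash_t hash_chunks(const void* const* chunks, const size_t* sizes,
                             size_t nbChunks, XXH64_hash_t seed)
    {
        XXH64_state_t* const state = XXH64_createState();
        XXH64_hash_t hash = 0;
        if (state != NULL) {
            size_t i;
            (void)XXH64_reset(state, seed);
            for (i = 0; i < nbChunks; i++) {
                (void)XXH64_update(state, chunks[i], sizes[i]);
            }
            hash = XXH64_digest(state);    /* digest() leaves the state usable */
            (void)XXH64_freeState(state);
        }
        return hash;
    }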
3014
3015
/******* Canonical representation   *******/
3016
3017
/*! @ingroup XXH64_family */
3018
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
3019
0
{
3020
0
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
3021
0
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
3022
0
    XXH_memcpy(dst, &hash, sizeof(*dst));
3023
0
}
3024
3025
/*! @ingroup XXH64_family */
3026
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
3027
0
{
3028
0
    return XXH_readBE64(src);
3029
0
}
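The canonical form is the fixed, big-endian serialization of the hash value, so storing and reloading it is lossless on any platform; a short sketch (editor's addition, illustrative function name):

    void roundtrip_example(XXH64_hash_t h)
    {
        XXH64_canonical_t canon;              /* portable big-endian layout */
        XXH64_canonicalFromHash(&canon, h);
        /* ... the bytes of canon may be written to disk or the network ... */
        {   XXH64_hash_t const back = XXH64_hashFromCanonical(&canon);
            (void)back;                       /* back == h on every platform */
        }
    }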
3030
3031
#ifndef XXH_NO_XXH3
3032
3033
/* *********************************************************************
3034
*  XXH3
3035
*  New generation hash designed for speed on small keys and vectorization
3036
************************************************************************ */
3037
/*!
3038
 * @}
3039
 * @defgroup XXH3_impl XXH3 implementation
3040
 * @ingroup impl
3041
 * @{
3042
 */
3043
3044
/* ===   Compiler specifics   === */
3045
3046
#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
3047
#  define XXH_RESTRICT /* disable */
3048
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
3049
#  define XXH_RESTRICT   restrict
3050
#else
3051
/* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
3052
#  define XXH_RESTRICT   /* disable */
3053
#endif
3054
3055
#if (defined(__GNUC__) && (__GNUC__ >= 3))  \
3056
  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
3057
  || defined(__clang__)
3058
1.03M
#    define XXH_likely(x) __builtin_expect(x, 1)
3059
#    define XXH_unlikely(x) __builtin_expect(x, 0)
3060
#else
3061
#    define XXH_likely(x) (x)
3062
#    define XXH_unlikely(x) (x)
3063
#endif
3064
3065
#if defined(__GNUC__) || defined(__clang__)
3066
#  if defined(__ARM_FEATURE_SVE)
3067
#    include <arm_sve.h>
3068
#  elif defined(__ARM_NEON__) || defined(__ARM_NEON) \
3069
   || defined(__aarch64__)  || defined(_M_ARM) \
3070
   || defined(_M_ARM64)     || defined(_M_ARM64EC)
3071
#    define inline __inline__  /* circumvent a clang bug */
3072
#    include <arm_neon.h>
3073
#    undef inline
3074
#  elif defined(__AVX2__)
3075
#    include <immintrin.h>
3076
#  elif defined(__SSE2__)
3077
#    include <emmintrin.h>
3078
#  endif
3079
#endif
3080
3081
#if defined(_MSC_VER)
3082
#  include <intrin.h>
3083
#endif
3084
3085
/*
3086
 * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
3087
 * remaining a true 64-bit/128-bit hash function.
3088
 *
3089
 * This is done by prioritizing a subset of 64-bit operations that can be
3090
 * emulated without too many steps on the average 32-bit machine.
3091
 *
3092
 * For example, these two lines seem similar, and run equally fast on 64-bit:
3093
 *
3094
 *   xxh_u64 x;
3095
 *   x ^= (x >> 47); // good
3096
 *   x ^= (x >> 13); // bad
3097
 *
3098
 * However, to a 32-bit machine, there is a major difference.
3099
 *
3100
 * x ^= (x >> 47) looks like this:
3101
 *
3102
 *   x.lo ^= (x.hi >> (47 - 32));
3103
 *
3104
 * while x ^= (x >> 13) looks like this:
3105
 *
3106
 *   // note: funnel shifts are not usually cheap.
3107
 *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
3108
 *   x.hi ^= (x.hi >> 13);
3109
 *
3110
 * The first one is significantly faster than the second, simply because the
3111
 * shift is larger than 32. This means:
3112
 *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
3113
 *    32 bits in the shift.
3114
 *  - The shift result will always fit in the lower 32 bits, and therefore,
3115
 *    we can ignore the upper 32 bits in the xor.
3116
 *
3117
 * Thanks to this optimization, XXH3 only requires these features to be efficient:
3118
 *
3119
 *  - Usable unaligned access
3120
 *  - A 32-bit or 64-bit ALU
3121
 *      - If 32-bit, a decent ADC instruction
3122
 *  - A 32 or 64-bit multiply with a 64-bit result
3123
 *  - For the 128-bit variant, a decent byteswap helps short inputs.
3124
 *
3125
 * The first two are already required by XXH32, and almost all 32-bit and 64-bit
3126
 * platforms which can run XXH32 can run XXH3 efficiently.
3127
 *
3128
 * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
3129
 * notable exception.
3130
 *
3131
 * First of all, Thumb-1 lacks support for the UMULL instruction which
3132
 * performs the important long multiply. This means numerous __aeabi_lmul
3133
 * calls.
3134
 *
3135
 * Second of all, the 8 functional registers are just not enough.
3136
 * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
3137
 * Lo registers, and this shuffling results in thousands more MOVs than A32.
3138
 *
3139
 * A32 and T32 don't have this limitation. They can access all 14 registers,
3140
 * do a 32->64 multiply with UMULL, and the flexible operand allowing free
3141
 * shifts is helpful, too.
3142
 *
3143
 * Therefore, we do a quick sanity check.
3144
 *
3145
 * If compiling Thumb-1 for a target which supports ARM instructions, we will
3146
 * emit a warning, as it is not a "sane" platform to compile for.
3147
 *
3148
 * Usually, if this happens, it is because of an accident and you probably need
3149
 * to specify -march, as you likely meant to compile for a newer architecture.
3150
 *
3151
 * Credit: large sections of the vectorial and asm source code paths
3152
 *         have been contributed by @easyaspi314
3153
 */
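To make the cost argument above concrete, the following sketch (editor's addition; the struct and function names are illustrative) spells out how the two xorshifts decompose when a 64-bit value is emulated as two 32-bit halves:

    #include <stdint.h>

    typedef struct { uint32_t lo, hi; } u64_as_2x32;

    /* x ^= x >> 47: the shifted bits all come from the high half, so a single
     * 32-bit shift and a single 32-bit XOR suffice; the high half is untouched. */
    static void xorshift47(u64_as_2x32* x)
    {
        x->lo ^= x->hi >> (47 - 32);
    }

    /* x ^= x >> 13: the shift straddles the halves, so the low half needs a
     * funnel shift (two shifts plus an OR) and both halves are modified. */
    static void xorshift13(u64_as_2x32* x)
    {
        x->lo ^= (x->lo >> 13) | (x->hi << (32 - 13));
        x->hi ^= (x->hi >> 13);
    }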
3154
#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
3155
#   warning "XXH3 is highly inefficient without ARM or Thumb-2."
3156
#endif
3157
3158
/* ==========================================
3159
 * Vectorization detection
3160
 * ========================================== */
3161
3162
#ifdef XXH_DOXYGEN
3163
/*!
3164
 * @ingroup tuning
3165
 * @brief Overrides the vectorization implementation chosen for XXH3.
3166
 *
3167
 * Can be defined to 0 to disable SIMD or any of the values mentioned in
3168
 * @ref XXH_VECTOR_TYPE.
3169
 *
3170
 * If this is not defined, it uses predefined macros to determine the best
3171
 * implementation.
3172
 */
3173
#  define XXH_VECTOR XXH_SCALAR
3174
/*!
3175
 * @ingroup tuning
3176
 * @brief Possible values for @ref XXH_VECTOR.
3177
 *
3178
 * Note that these are actually implemented as macros.
3179
 *
3180
 * If this is not defined, it is detected automatically.
3181
 * @ref XXH_X86DISPATCH overrides this.
3182
 */
3183
enum XXH_VECTOR_TYPE /* fake enum */ {
3184
    XXH_SCALAR = 0,  /*!< Portable scalar version */
3185
    XXH_SSE2   = 1,  /*!<
3186
                      * SSE2 for Pentium 4, Opteron, all x86_64.
3187
                      *
3188
                      * @note SSE2 is also guaranteed on Windows 10, macOS, and
3189
                      * Android x86.
3190
                      */
3191
    XXH_AVX2   = 2,  /*!< AVX2 for Haswell and Bulldozer */
3192
    XXH_AVX512 = 3,  /*!< AVX512 for Skylake and Icelake */
3193
    XXH_NEON   = 4,  /*!< NEON for most ARMv7-A and all AArch64 */
3194
    XXH_VSX    = 5,  /*!< VSX and ZVector for POWER8/z13 (64-bit) */
3195
    XXH_SVE    = 6,  /*!< SVE for some ARMv8-A and ARMv9-A */
3196
};
3197
/*!
3198
 * @ingroup tuning
3199
 * @brief Selects the minimum alignment for XXH3's accumulators.
3200
 *
3201
 * When using SIMD, this should match the alignment required for said vector
3202
 * type, so, for example, 32 for AVX2.
3203
 *
3204
 * Default: Auto detected.
3205
 */
3206
#  define XXH_ACC_ALIGN 8
3207
#endif
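Both XXH_VECTOR and XXH_ACC_ALIGN documented above are ordinary preprocessor knobs; a hedged configuration sketch (editor's addition; the include path is illustrative, and the define must be visible to the translation unit that compiles the implementation, e.g. via the build system):

    /* Force the portable scalar code path (XXH_SCALAR == 0), e.g. for debugging. */
    #define XXH_VECTOR 0        /* equivalently: -DXXH_VECTOR=0 on the command line */
    #include "util/xxhash.h"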
3208
3209
/* Actual definition */
3210
#ifndef XXH_DOXYGEN
3211
#  define XXH_SCALAR 0
3212
#  define XXH_SSE2   1
3213
#  define XXH_AVX2   2
3214
#  define XXH_AVX512 3
3215
#  define XXH_NEON   4
3216
#  define XXH_VSX    5
3217
#  define XXH_SVE    6
3218
#endif
3219
3220
#ifndef XXH_VECTOR    /* can be defined on command line */
3221
#  if defined(__ARM_FEATURE_SVE)
3222
#    define XXH_VECTOR XXH_SVE
3223
#  elif ( \
3224
        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
3225
     || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
3226
   ) && ( \
3227
        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
3228
    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
3229
   )
3230
#    define XXH_VECTOR XXH_NEON
3231
#  elif defined(__AVX512F__)
3232
#    define XXH_VECTOR XXH_AVX512
3233
#  elif defined(__AVX2__)
3234
#    define XXH_VECTOR XXH_AVX2
3235
#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
3236
#    define XXH_VECTOR XXH_SSE2
3237
#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
3238
     || (defined(__s390x__) && defined(__VEC__)) \
3239
     && defined(__GNUC__) /* TODO: IBM XL */
3240
#    define XXH_VECTOR XXH_VSX
3241
#  else
3242
#    define XXH_VECTOR XXH_SCALAR
3243
#  endif
3244
#endif
3245
3246
/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
3247
#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
3248
#  ifdef _MSC_VER
3249
#    pragma warning(once : 4606)
3250
#  else
3251
#    warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
3252
#  endif
3253
#  undef XXH_VECTOR
3254
#  define XXH_VECTOR XXH_SCALAR
3255
#endif
3256
3257
/*
3258
 * Controls the alignment of the accumulator,
3259
 * for compatibility with aligned vector loads, which are usually faster.
3260
 */
3261
#ifndef XXH_ACC_ALIGN
3262
#  if defined(XXH_X86DISPATCH)
3263
#     define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
3264
#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
3265
#     define XXH_ACC_ALIGN 8
3266
#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
3267
#     define XXH_ACC_ALIGN 16
3268
#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
3269
#     define XXH_ACC_ALIGN 32
3270
#  elif XXH_VECTOR == XXH_NEON  /* neon */
3271
#     define XXH_ACC_ALIGN 16
3272
#  elif XXH_VECTOR == XXH_VSX   /* vsx */
3273
#     define XXH_ACC_ALIGN 16
3274
#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
3275
#     define XXH_ACC_ALIGN 64
3276
#  elif XXH_VECTOR == XXH_SVE   /* sve */
3277
#     define XXH_ACC_ALIGN 64
3278
#  endif
3279
#endif
3280
3281
#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
3282
    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
3283
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
3284
#elif XXH_VECTOR == XXH_SVE
3285
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
3286
#else
3287
#  define XXH_SEC_ALIGN 8
3288
#endif
3289
3290
/*
3291
 * UGLY HACK:
3292
 * GCC usually generates the best code with -O3 for xxHash.
3293
 *
3294
 * However, when targeting AVX2, it is overzealous in its unrolling resulting
3295
 * in code roughly 3/4 the speed of Clang.
3296
 *
3297
 * There are other issues, such as GCC splitting _mm256_loadu_si256 into
3298
 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
3299
 * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
3300
 *
3301
 * That is why when compiling the AVX2 version, it is recommended to use either
3302
 *   -O2 -mavx2 -march=haswell
3303
 * or
3304
 *   -O2 -mavx2 -mno-avx256-split-unaligned-load
3305
 * for decent performance, or to use Clang instead.
3306
 *
3307
 * Fortunately, we can control the first one with a pragma that forces GCC into
3308
 * -O2, but the other one we can't control without "failed to inline always
3309
 * inline function due to target mismatch" warnings.
3310
 */
3311
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
3312
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
3313
  && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
3314
#  pragma GCC push_options
3315
#  pragma GCC optimize("-O2")
3316
#endif
3317
3318
3319
#if XXH_VECTOR == XXH_NEON
3320
/*
3321
 * NEON's setup for vmlal_u32 is a little more complicated than it is on
3322
 * SSE2, AVX2, and VSX.
3323
 *
3324
 * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
3325
 *
3326
 * To do the same operation, the 128-bit 'Q' register needs to be split into
3327
 * two 64-bit 'D' registers, performing this operation::
3328
 *
3329
 *   [                a                 |                 b                ]
3330
 *            |              '---------. .--------'                |
3331
 *            |                         x                          |
3332
 *            |              .---------' '--------.                |
3333
 *   [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[    a >> 32     |     b >> 32    ]
3334
 *
3335
 * Due to significant changes in aarch64, the fastest method for aarch64 is
3336
 * completely different than the fastest method for ARMv7-A.
3337
 *
3338
 * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
3339
 * D11 will modify the high half of Q5. This is similar to how modifying AH
3340
 * will only affect bits 8-15 of AX on x86.
3341
 *
3342
 * VZIP takes two registers, and puts even lanes in one register and odd lanes
3343
 * in the other.
3344
 *
3345
 * On ARMv7-A, this strangely modifies both parameters in place instead of
3346
 * taking the usual 3-operand form.
3347
 *
3348
 * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
3349
 * lower and upper halves of the Q register to end up with the high and low
3350
 * halves where we want - all in one instruction.
3351
 *
3352
 *   vzip.32   d10, d11       @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
3353
 *
3354
 * Unfortunately we need inline assembly for this: Instructions modifying two
3355
 * registers at once are not possible in GCC or Clang's IR, and they have to
3356
 * create a copy.
3357
 *
3358
 * aarch64 requires a different approach.
3359
 *
3360
 * In order to make it easier to write a decent compiler for aarch64, many
3361
 * quirks were removed, such as conditional execution.
3362
 *
3363
 * NEON was also affected by this.
3364
 *
3365
 * aarch64 cannot access the high bits of a Q-form register, and writes to a
3366
 * D-form register zero the high bits, similar to how writes to W-form scalar
3367
 * registers (or DWORD registers on x86_64) work.
3368
 *
3369
 * The formerly free vget_high intrinsics now require a vext (with a few
3370
 * exceptions)
3371
 *
3372
 * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
3373
 * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
3374
 * operand.
3375
 *
3376
 * The equivalent of the VZIP.32 on the lower and upper halves would be this
3377
 * mess:
3378
 *
3379
 *   ext     v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
3380
 *   zip1    v1.2s, v0.2s, v2.2s     // v1 = { v0[0], v2[0] }
3381
 *   zip2    v0.2s, v0.2s, v1.2s     // v0 = { v0[1], v2[1] }
3382
 *
3383
 * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
3384
 *
3385
 *   shrn    v1.2s, v0.2d, #32  // v1 = (uint32x2_t)(v0 >> 32);
3386
 *   xtn     v0.2s, v0.2d       // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
3387
 *
3388
 * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
3389
 */
3390
3391
/*!
3392
 * Function-like macro:
3393
 * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
3394
 * {
3395
 *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
3396
 *     outHi = (uint32x2_t)(in >> 32);
3397
 *     in = UNDEFINED;
3398
 * }
3399
 */
3400
# if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
3401
   && (defined(__GNUC__) || defined(__clang__)) \
3402
   && (defined(__arm__) || defined(__thumb__) || defined(_M_ARM))
3403
#  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                              \
3404
    do {                                                                                    \
3405
      /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
3406
      /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */     \
3407
      /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
3408
      __asm__("vzip.32  %e0, %f0" : "+w" (in));                                             \
3409
      (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in));                                   \
3410
      (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                                   \
3411
   } while (0)
3412
# else
3413
#  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
3414
    do {                                                                                  \
3415
      (outLo) = vmovn_u64    (in);                                                        \
3416
      (outHi) = vshrn_n_u64  ((in), 32);                                                  \
3417
    } while (0)
3418
# endif
3419
3420
/*!
3421
 * @internal
3422
 * @brief `vld1q_u64` but faster and alignment-safe.
3423
 *
3424
 * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only
3425
 * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86).
3426
 *
3427
 * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it
3428
 * prohibits load-store optimizations. Therefore, a direct dereference is used.
3429
 *
3430
 * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe
3431
 * unaligned load.
3432
 */
3433
#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
3434
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
3435
{
3436
    return *(uint64x2_t const*)ptr;
3437
}
3438
#else
3439
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
3440
{
3441
    return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
3442
}
3443
#endif
3444
/*!
3445
 * @ingroup tuning
3446
 * @brief Controls the NEON to scalar ratio for XXH3
3447
 *
3448
 * On AArch64 when not optimizing for size, XXH3 will run 6 lanes using NEON and
3449
 * 2 lanes on scalar by default (except on Apple platforms, as Apple CPUs benefit
3450
 * from only using NEON).
3451
 *
3452
 * This can be set to 2, 4, 6, or 8. ARMv7 will default to all 8 NEON lanes, as the
3453
 * emulated 64-bit arithmetic is too slow.
3454
 *
3455
 * Modern ARM CPUs are _very_ sensitive to how their pipelines are used.
3456
 *
3457
 * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but it can't
3458
 * have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions,
3459
 * you are only using 2/3 of the CPU bandwidth.
3460
 *
3461
 * This is even more noticeable on the more advanced cores like the A76 which
3462
 * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
3463
 *
3464
 * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the
3465
 * remaining lanes will use scalar instructions. This improves the bandwidth
3466
 * and also gives the integer pipelines something to do besides twiddling loop
3467
 * counters and pointers.
3468
 *
3469
 * This change benefits CPUs with large micro-op buffers without negatively affecting
3470
 * most other CPUs:
3471
 *
3472
 *  | Chipset               | Dispatch type       | NEON only | 6:2 hybrid | Diff. |
3473
 *  |:----------------------|:--------------------|----------:|-----------:|------:|
3474
 *  | Snapdragon 730 (A76)  | 2 NEON/8 micro-ops  |  8.8 GB/s |  10.1 GB/s |  ~16% |
3475
 *  | Snapdragon 835 (A73)  | 2 NEON/3 micro-ops  |  5.1 GB/s |   5.3 GB/s |   ~5% |
3476
 *  | Marvell PXA1928 (A53) | In-order dual-issue |  1.9 GB/s |   1.9 GB/s |    0% |
3477
 *  | Apple M1              | 4 NEON/8 micro-ops  | 37.3 GB/s |  36.1 GB/s |  ~-3% |
3478
 *
3479
 * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
3480
 *
3481
 * @see XXH3_accumulate_512_neon()
3482
 */
3483
# ifndef XXH3_NEON_LANES
3484
#  if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
3485
   && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
3486
#   define XXH3_NEON_LANES 6
3487
#  else
3488
#   define XXH3_NEON_LANES XXH_ACC_NB
3489
#  endif
3490
# endif
3491
#endif  /* XXH_VECTOR == XXH_NEON */
3492
3493
/*
3494
 * VSX and Z Vector helpers.
3495
 *
3496
 * This is very messy, and any pull requests to clean this up are welcome.
3497
 *
3498
 * There are a lot of problems with supporting VSX and s390x, due to
3499
 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
3500
 */
3501
#if XXH_VECTOR == XXH_VSX
3502
/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
3503
 * and `pixel`. This is a problem for obvious reasons.
3504
 *
3505
 * These keywords are unnecessary; the spec literally says they are
3506
 * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
3507
 * after including the header.
3508
 *
3509
 * We use pragma push_macro/pop_macro to keep the namespace clean. */
3510
#  pragma push_macro("bool")
3511
#  pragma push_macro("vector")
3512
#  pragma push_macro("pixel")
3513
/* silence potential macro redefined warnings */
3514
#  undef bool
3515
#  undef vector
3516
#  undef pixel
3517
3518
#  if defined(__s390x__)
3519
#    include <s390intrin.h>
3520
#  else
3521
#    include <altivec.h>
3522
#  endif
3523
3524
/* Restore the original macro values, if applicable. */
3525
#  pragma pop_macro("pixel")
3526
#  pragma pop_macro("vector")
3527
#  pragma pop_macro("bool")
3528
3529
typedef __vector unsigned long long xxh_u64x2;
3530
typedef __vector unsigned char xxh_u8x16;
3531
typedef __vector unsigned xxh_u32x4;
3532
3533
# ifndef XXH_VSX_BE
3534
#  if defined(__BIG_ENDIAN__) \
3535
  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
3536
#    define XXH_VSX_BE 1
3537
#  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
3538
#    warning "-maltivec=be is not recommended. Please use native endianness."
3539
#    define XXH_VSX_BE 1
3540
#  else
3541
#    define XXH_VSX_BE 0
3542
#  endif
3543
# endif /* !defined(XXH_VSX_BE) */
3544
3545
# if XXH_VSX_BE
3546
#  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
3547
#    define XXH_vec_revb vec_revb
3548
#  else
3549
/*!
3550
 * A polyfill for POWER9's vec_revb().
3551
 */
3552
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
3553
{
3554
    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
3555
                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
3556
    return vec_perm(val, val, vByteSwap);
3557
}
3558
#  endif
3559
# endif /* XXH_VSX_BE */
3560
3561
/*!
3562
 * Performs an unaligned vector load and byte swaps it on big endian.
3563
 */
3564
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
3565
{
3566
    xxh_u64x2 ret;
3567
    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
3568
# if XXH_VSX_BE
3569
    ret = XXH_vec_revb(ret);
3570
# endif
3571
    return ret;
3572
}
3573
3574
/*
3575
 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
3576
 *
3577
 * These intrinsics weren't added until GCC 8, despite existing for a while,
3578
 * and they are endian dependent. Also, their meanings swap depending on the version.
3579
 * */
3580
# if defined(__s390x__)
3581
 /* s390x is always big endian, no issue on this platform */
3582
#  define XXH_vec_mulo vec_mulo
3583
#  define XXH_vec_mule vec_mule
3584
# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
3585
/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
3586
 /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
3587
#  define XXH_vec_mulo __builtin_altivec_vmulouw
3588
#  define XXH_vec_mule __builtin_altivec_vmuleuw
3589
# else
3590
/* gcc needs inline assembly */
3591
/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
3592
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
3593
{
3594
    xxh_u64x2 result;
3595
    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3596
    return result;
3597
}
3598
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
3599
{
3600
    xxh_u64x2 result;
3601
    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3602
    return result;
3603
}
3604
# endif /* XXH_vec_mulo, XXH_vec_mule */
3605
#endif /* XXH_VECTOR == XXH_VSX */
3606
3607
#if XXH_VECTOR == XXH_SVE
3608
#define ACCRND(acc, offset) \
3609
do { \
3610
    svuint64_t input_vec = svld1_u64(mask, xinput + offset);         \
3611
    svuint64_t secret_vec = svld1_u64(mask, xsecret + offset);       \
3612
    svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec);     \
3613
    svuint64_t swapped = svtbl_u64(input_vec, kSwap);                \
3614
    svuint64_t mixed_lo = svextw_u64_x(mask, mixed);                 \
3615
    svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32);            \
3616
    svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
3617
    acc = svadd_u64_x(mask, acc, mul);                               \
3618
} while (0)
3619
#endif /* XXH_VECTOR == XXH_SVE */
3620
3621
3622
/* prefetch
3623
 * can be disabled by defining the XXH_NO_PREFETCH build macro */
3624
#if defined(XXH_NO_PREFETCH)
3625
#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
3626
#else
3627
#  if XXH_SIZE_OPT >= 1
3628
#    define XXH_PREFETCH(ptr) (void)(ptr)
3629
#  elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
3630
#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
3631
#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
3632
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
3633
22.7M
#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
3634
#  else
3635
#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
3636
#  endif
3637
#endif  /* XXH_NO_PREFETCH */
3638
3639
3640
/* ==========================================
3641
 * XXH3 default settings
3642
 * ========================================== */
3643
3644
1.16M
#define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */
3645
3646
#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
3647
#  error "default keyset is not large enough"
3648
#endif
3649
3650
/*! Pseudorandom secret taken directly from FARSH. */
3651
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
3652
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
3653
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
3654
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
3655
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
3656
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
3657
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
3658
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
3659
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
3660
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
3661
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
3662
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
3663
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
3664
};
3665
3666
3667
#ifdef XXH_OLD_NAMES
3668
#  define kSecret XXH3_kSecret
3669
#endif
3670
3671
#ifdef XXH_DOXYGEN
3672
/*!
3673
 * @brief Calculates a 32-bit to 64-bit long multiply.
3674
 *
3675
 * Implemented as a macro.
3676
 *
3677
 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
3678
 * need to (but it shouldn't need to anyways, it is about 7 instructions to do
3679
 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
3680
 * use that instead of the normal method.
3681
 *
3682
 * If you are compiling for platforms like Thumb-1 and don't have a better option,
3683
 * you may also want to write your own long multiply routine here.
3684
 *
3685
 * @param x, y Numbers to be multiplied
3686
 * @return 64-bit product of the low 32 bits of @p x and @p y.
3687
 */
3688
XXH_FORCE_INLINE xxh_u64
3689
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
3690
{
3691
   return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
3692
}
3693
#elif defined(_MSC_VER) && defined(_M_IX86)
3694
#    define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
3695
#else
3696
/*
3697
 * Downcast + upcast is usually better than masking on older compilers like
3698
 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
3699
 *
3700
 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
3701
 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
3702
 */
3703
0
#    define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
3704
#endif
3705
3706
/*!
3707
 * @brief Calculates a 64->128-bit long multiply.
3708
 *
3709
 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
3710
 * version.
3711
 *
3712
 * @param lhs , rhs The 64-bit integers to be multiplied
3713
 * @return The 128-bit result represented in an @ref XXH128_hash_t.
3714
 */
3715
static XXH128_hash_t
3716
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
3717
7.84M
{
3718
    /*
3719
     * GCC/Clang __uint128_t method.
3720
     *
3721
     * On most 64-bit targets, GCC and Clang define a __uint128_t type.
3722
     * This is usually the best way as it usually uses a native long 64-bit
3723
     * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
3724
     *
3725
     * Usually.
3726
     *
3727
     * Even on 32-bit platforms, Clang (and emscripten) define this type
3728
     * despite not having the arithmetic for it. This results in a laggy
3729
     * compiler builtin call which calculates a full 128-bit multiply.
3730
     * In that case it is best to use the portable one.
3731
     * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
3732
     */
3733
7.84M
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
3734
7.84M
    && defined(__SIZEOF_INT128__) \
3735
7.84M
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
3736
3737
7.84M
    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
3738
7.84M
    XXH128_hash_t r128;
3739
7.84M
    r128.low64  = (xxh_u64)(product);
3740
7.84M
    r128.high64 = (xxh_u64)(product >> 64);
3741
7.84M
    return r128;
3742
3743
    /*
3744
     * MSVC for x64's _umul128 method.
3745
     *
3746
     * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
3747
     *
3748
     * This compiles to single operand MUL on x64.
3749
     */
3750
#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
3751
3752
#ifndef _MSC_VER
3753
#   pragma intrinsic(_umul128)
3754
#endif
3755
    xxh_u64 product_high;
3756
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
3757
    XXH128_hash_t r128;
3758
    r128.low64  = product_low;
3759
    r128.high64 = product_high;
3760
    return r128;
3761
3762
    /*
3763
     * MSVC for ARM64's __umulh method.
3764
     *
3765
     * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
3766
     */
3767
#elif defined(_M_ARM64) || defined(_M_ARM64EC)
3768
3769
#ifndef _MSC_VER
3770
#   pragma intrinsic(__umulh)
3771
#endif
3772
    XXH128_hash_t r128;
3773
    r128.low64  = lhs * rhs;
3774
    r128.high64 = __umulh(lhs, rhs);
3775
    return r128;
3776
3777
#else
3778
    /*
3779
     * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
3780
     *
3781
     * This is a fast and simple grade school multiply, which is shown below
3782
     * with base 10 arithmetic instead of base 0x100000000.
3783
     *
3784
     *           9 3 // D2 lhs = 93
3785
     *         x 7 5 // D2 rhs = 75
3786
     *     ----------
3787
     *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
3788
     *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
3789
     *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
3790
     *     + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
3791
     *     ---------
3792
     *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
3793
     *     + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
3794
     *     ---------
3795
     *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
3796
     *
3797
     * The reasons for adding the products like this are:
3798
     *  1. It avoids manual carry tracking. Just like how
3799
     *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
3800
     *     This avoids a lot of complexity.
3801
     *
3802
     *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
3803
     *     instruction available in ARM's Digital Signal Processing extension
3804
     *     in 32-bit ARMv6 and later, which is shown below:
3805
     *
3806
     *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
3807
     *         {
3808
     *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
3809
     *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
3810
     *             *RdHi = (xxh_u32)(product >> 32);
3811
     *         }
3812
     *
3813
     *     This instruction was designed for efficient long multiplication, and
3814
     *     allows this to be calculated in only 4 instructions at speeds
3815
     *     comparable to some 64-bit ALUs.
3816
     *
3817
     *  3. It isn't terrible on other platforms. Usually this will be a couple
3818
     *     of 32-bit ADD/ADCs.
3819
     */
3820
3821
    /* First calculate all of the cross products. */
3822
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
3823
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
3824
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
3825
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32,        rhs >> 32);
3826
3827
    /* Now add the products together. These will never overflow. */
3828
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
3829
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
3830
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
3831
3832
    XXH128_hash_t r128;
3833
    r128.low64  = lower;
3834
    r128.high64 = upper;
3835
    return r128;
3836
#endif
3837
7.84M
}
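The portable branch above is a textbook grade-school multiply; as a sanity sketch (editor's addition; requires a GCC/Clang target that provides unsigned __int128), the same arithmetic can be checked against the compiler's native 128-bit product:

    #include <assert.h>
    #include <stdint.h>

    static void check_mult64to128(uint64_t lhs, uint64_t rhs)
    {
        /* cross products, exactly as in the portable path above */
        uint64_t const lo_lo = (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF);
        uint64_t const hi_lo = (lhs >> 32)        * (rhs & 0xFFFFFFFF);
        uint64_t const lo_hi = (lhs & 0xFFFFFFFF) * (rhs >> 32);
        uint64_t const hi_hi = (lhs >> 32)        * (rhs >> 32);

        /* carry-free accumulation of the cross products */
        uint64_t const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
        uint64_t const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
        uint64_t const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

        unsigned __int128 const ref = (unsigned __int128)lhs * rhs;
        assert(lower == (uint64_t)ref);          /* low 64 bits match */
        assert(upper == (uint64_t)(ref >> 64));  /* high 64 bits match */
    }

    int main(void)
    {
        check_mult64to128(93, 75);   /* the base-10 example from the comment above */
        check_mult64to128(0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL);
        check_mult64to128(0x9E3779B185EBCA87ULL, 0xC2B2AE3D27D4EB4FULL);
        return 0;
    }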
3838
3839
/*!
3840
 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
3841
 *
3842
 * The reason for the separate function is to prevent passing too many structs
3843
 * around by value. This will hopefully inline the multiply, but we don't force it.
3844
 *
3845
 * @param lhs , rhs The 64-bit integers to multiply
3846
 * @return The low 64 bits of the product XOR'd by the high 64 bits.
3847
 * @see XXH_mult64to128()
3848
 */
3849
static xxh_u64
3850
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
3851
7.84M
{
3852
7.84M
    XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
3853
7.84M
    return product.low64 ^ product.high64;
3854
7.84M
}
3855
3856
/*! Seems to produce slightly better code on GCC for some reason. */
3857
XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
3858
5.28M
{
3859
5.28M
    XXH_ASSERT(0 <= shift && shift < 64)
3860
5.28M
    return v64 ^ (v64 >> shift);
3861
5.28M
}
3862
3863
/*
3864
 * This is a fast avalanche stage,
3865
 * suitable when input bits are already partially mixed
3866
 */
3867
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
3868
2.64M
{
3869
2.64M
    h64 = XXH_xorshift64(h64, 37);
3870
2.64M
    h64 *= 0x165667919E3779F9ULL;
3871
2.64M
    h64 = XXH_xorshift64(h64, 32);
3872
2.64M
    return h64;
3873
2.64M
}
3874
3875
/*
3876
 * This is a stronger avalanche,
3877
 * inspired by Pelle Evensen's rrmxmx
3878
 * preferable when input has not been previously mixed
3879
 */
3880
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
3881
1.82k
{
3882
    /* this mix is inspired by Pelle Evensen's rrmxmx */
3883
1.82k
    h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
3884
1.82k
    h64 *= 0x9FB21C651E98DF25ULL;
3885
1.82k
    h64 ^= (h64 >> 35) + len ;
3886
1.82k
    h64 *= 0x9FB21C651E98DF25ULL;
3887
1.82k
    return XXH_xorshift64(h64, 28);
3888
1.82k
}
3889
3890
3891
/* ==========================================
3892
 * Short keys
3893
 * ==========================================
3894
 * One of the shortcomings of XXH32 and XXH64 was that their performance was
3895
 * sub-optimal on short lengths. It used an iterative algorithm which strongly
3896
 * favored lengths that were a multiple of 4 or 8.
3897
 *
3898
 * Instead of iterating over individual inputs, we use a set of single shot
3899
 * functions which piece together a range of lengths and operate in constant time.
3900
 *
3901
 * Additionally, the number of multiplies has been significantly reduced. This
3902
 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
3903
 *
3904
 * Depending on the platform, this may or may not be faster than XXH32, but it
3905
 * is almost guaranteed to be faster than XXH64.
3906
 */
3907
3908
/*
3909
 * At very short lengths, there isn't enough input to fully hide secrets, or use
3910
 * the entire secret.
3911
 *
3912
 * There is also only a limited amount of mixing we can do before significantly
3913
 * impacting performance.
3914
 *
3915
 * Therefore, we use different sections of the secret and always mix two secret
3916
 * samples with an XOR. This should have no effect on performance on the
3917
 * seedless or withSeed variants because everything _should_ be constant folded
3918
 * by modern compilers.
3919
 *
3920
 * The XOR mixing hides individual parts of the secret and increases entropy.
3921
 *
3922
 * This adds an extra layer of strength for custom secrets.
3923
 */
3924
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
3925
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3926
0
{
3927
0
    XXH_ASSERT(input != NULL)
3928
0
    XXH_ASSERT(1 <= len && len <= 3)
3929
0
    XXH_ASSERT(secret != NULL)
3930
    /*
3931
     * len = 1: combined = { input[0], 0x01, input[0], input[0] }
3932
     * len = 2: combined = { input[1], 0x02, input[0], input[1] }
3933
     * len = 3: combined = { input[2], 0x03, input[0], input[1] }
3934
     */
3935
0
    {   xxh_u8  const c1 = input[0];
3936
0
        xxh_u8  const c2 = input[len >> 1];
3937
0
        xxh_u8  const c3 = input[len - 1];
3938
0
        xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
3939
0
                               | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
3940
0
        xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
3941
0
        xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
3942
0
        return XXH64_avalanche(keyed);
3943
0
    }
3944
0
}
3945
3946
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
3947
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3948
1.82k
{
3949
1.82k
    XXH_ASSERT(input != NULL)
3950
1.82k
    XXH_ASSERT(secret != NULL)
3951
1.82k
    XXH_ASSERT(4 <= len && len <= 8)
3952
1.82k
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
3953
1.82k
    {   xxh_u32 const input1 = XXH_readLE32(input);
3954
1.82k
        xxh_u32 const input2 = XXH_readLE32(input + len - 4);
3955
1.82k
        xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
3956
1.82k
        xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
3957
1.82k
        xxh_u64 const keyed = input64 ^ bitflip;
3958
1.82k
        return XXH3_rrmxmx(keyed, len);
3959
1.82k
    }
3960
1.82k
}
3961
3962
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
3963
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3964
1.03M
{
3965
1.03M
    XXH_ASSERT(input != NULL)
3966
1.03M
    XXH_ASSERT(secret != NULL)
3967
1.03M
    XXH_ASSERT(9 <= len && len <= 16)
3968
1.03M
    {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
3969
1.03M
        xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
3970
1.03M
        xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
3971
1.03M
        xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
3972
1.03M
        xxh_u64 const acc = len
3973
1.03M
                          + XXH_swap64(input_lo) + input_hi
3974
1.03M
                          + XXH3_mul128_fold64(input_lo, input_hi);
3975
1.03M
        return XXH3_avalanche(acc);
3976
1.03M
    }
3977
1.03M
}
3978
3979
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
3980
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3981
1.03M
{
3982
1.03M
    XXH_ASSERT(len <= 16)
3983
1.03M
    {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
3984
1.82k
        if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
3985
0
        if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
3986
0
        return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
3987
0
    }
3988
0
}
3989
3990
/*
3991
 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
3992
 * multiplication by zero, affecting hashes of lengths 17 to 240.
3993
 *
3994
 * However, they are very unlikely.
3995
 *
3996
 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
3997
 * unseeded non-cryptographic hashes, it does not attempt to defend itself
3998
 * against specially crafted inputs, only random inputs.
3999
 *
4000
 * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
4001
 * cancelling out the secret is taken an arbitrary number of times (addressed
4002
 * in XXH3_accumulate_512), this collision is very unlikely with random inputs
4003
 * and/or proper seeding:
4004
 *
4005
 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
4006
 * function that is only called up to 16 times per hash with up to 240 bytes of
4007
 * input.
4008
 *
4009
 * This is not too bad for a non-cryptographic hash function, especially with
4010
 * only 64 bit outputs.
4011
 *
4012
 * The 128-bit variant (which trades some speed for strength) is NOT affected
4013
 * by this, although it is always a good idea to use a proper seed if you care
4014
 * about strength.
4015
 */
4016
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
4017
                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
4018
5.88M
{
4019
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
4020
  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
4021
  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
4022
    /*
4023
     * UGLY HACK:
4024
     * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
4025
     * slower code.
4026
     *
4027
     * By forcing seed64 into a register, we disrupt the cost model and
4028
     * cause it to scalarize. See `XXH32_round()`
4029
     *
4030
     * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
4031
     * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
4032
     * GCC 9.2, despite both emitting scalar code.
4033
     *
4034
     * GCC generates much better scalar code than Clang for the rest of XXH3,
4035
     * which is why finding a more optimal codepath is an interest.
4036
     */
4037
    XXH_COMPILER_GUARD(seed64);
4038
#endif
4039
5.88M
    {   xxh_u64 const input_lo = XXH_readLE64(input);
4040
5.88M
        xxh_u64 const input_hi = XXH_readLE64(input+8);
4041
5.88M
        return XXH3_mul128_fold64(
4042
5.88M
            input_lo ^ (XXH_readLE64(secret)   + seed64),
4043
5.88M
            input_hi ^ (XXH_readLE64(secret+8) - seed64)
4044
5.88M
        );
4045
5.88M
    }
4046
5.88M
}
4047
4048
/* For mid range keys, XXH3 uses a Mum-hash variant. */
4049
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
4050
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
4051
                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4052
                     XXH64_hash_t seed)
4053
925k
{
4054
925k
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN) (void)secretSize;
4055
925k
    XXH_ASSERT(16 < len && len <= 128)
4056
4057
925k
    {   xxh_u64 acc = len * XXH_PRIME64_1, acc_end;
4058
#if XXH_SIZE_OPT >= 1
4059
        /* Smaller and cleaner, but slightly slower. */
4060
        unsigned int i = (unsigned int)(len - 1) / 32;
4061
        do {
4062
            acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
4063
            acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
4064
        } while (i-- != 0);
4065
        acc_end = 0;
4066
#else
4067
925k
        acc += XXH3_mix16B(input+0, secret+0, seed);
4068
925k
        acc_end = XXH3_mix16B(input+len-16, secret+16, seed);
4069
925k
        if (len > 32) {
4070
475k
            acc += XXH3_mix16B(input+16, secret+32, seed);
4071
475k
            acc_end += XXH3_mix16B(input+len-32, secret+48, seed);
4072
475k
            if (len > 64) {
4073
210k
                acc += XXH3_mix16B(input+32, secret+64, seed);
4074
210k
                acc_end += XXH3_mix16B(input+len-48, secret+80, seed);
4075
4076
210k
                if (len > 96) {
4077
71.7k
                    acc += XXH3_mix16B(input+48, secret+96, seed);
4078
71.7k
                    acc_end += XXH3_mix16B(input+len-64, secret+112, seed);
4079
71.7k
                }
4080
210k
            }
4081
475k
        }
4082
925k
#endif
4083
925k
        return XXH3_avalanche(acc + acc_end);
4084
925k
    }
4085
925k
}
4086
4087
433k
#define XXH3_MIDSIZE_MAX 240
4088
4089
XXH_NO_INLINE XXH_PUREF XXH64_hash_t
4090
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
4091
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4092
                      XXH64_hash_t seed)
4093
199k
{
4094
199k
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN) (void)secretSize;
4095
199k
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX)
4096
4097
614k
    #define XXH3_MIDSIZE_STARTOFFSET 3
4098
199k
    #define XXH3_MIDSIZE_LASTOFFSET  17
4099
4100
199k
    {   xxh_u64 acc = len * XXH_PRIME64_1;
4101
199k
        xxh_u64 acc_end;
4102
199k
        unsigned int const nbRounds = (unsigned int)len / 16;
4103
199k
        unsigned int i;
4104
199k
        XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX)
4105
1.79M
        for (i=0; i<8; i++) {
4106
1.59M
            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
4107
1.59M
        }
4108
        /* last bytes */
4109
199k
        acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
4110
199k
        XXH_ASSERT(nbRounds >= 8)
4111
199k
        acc = XXH3_avalanche(acc);
4112
#if defined(__clang__)                                /* Clang */ \
4113
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
4114
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
4115
        /*
4116
         * UGLY HACK:
4117
         * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
4118
         * Everywhere else, it uses scalar code.
4119
         *
4120
         * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
4121
         * would still be slower than UMAAL (see XXH_mult64to128).
4122
         *
4123
         * Unfortunately, Clang doesn't handle the long multiplies properly and
4124
         * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
4125
         * scalarized into an ugly mess of VMOV.32 instructions.
4126
         *
4127
         * This mess is difficult to avoid without turning autovectorization
4128
         * off completely, but the issues are usually relatively minor and/or
4129
         * not worth fixing.
4130
         *
4131
         * This loop is the easiest to fix, as unlike XXH32, this pragma
4132
         * _actually works_ because it is a loop vectorization instead of an
4133
         * SLP vectorization.
4134
         */
4135
        #pragma clang loop vectorize(disable)
4136
#endif
4137
814k
        for (i=8 ; i < nbRounds; i++) {
4138
            /*
4139
             * Prevents Clang from unrolling the acc loop and interleaving it with this one.
4140
             */
4141
614k
            XXH_COMPILER_GUARD(acc);
4142
614k
            acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
4143
614k
        }
4144
199k
        return XXH3_avalanche(acc + acc_end);
4145
199k
    }
4146
199k
}
4147
4148
4149
/* =======     Long Keys     ======= */
4150
4151
99.5M
#define XXH_STRIPE_LEN 64
4152
24.1M
#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
4153
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
4154
4155
#ifdef XXH_OLD_NAMES
4156
#  define STRIPE_LEN XXH_STRIPE_LEN
4157
#  define ACC_NB XXH_ACC_NB
4158
#endif
4159
4160
#ifndef XXH_PREFETCH_DIST
4161
#  ifdef __clang__
4162
#    define XXH_PREFETCH_DIST 320
4163
#  else
4164
#    if (XXH_VECTOR == XXH_AVX512)
4165
#      define XXH_PREFETCH_DIST 512
4166
#    else
4167
#      define XXH_PREFETCH_DIST 384
4168
#    endif
4169
#  endif  /* __clang__ */
4170
#endif  /* XXH_PREFETCH_DIST */
4171
4172
/*
4173
 * These macros are to generate an XXH3_accumulate() function.
4174
 * The two arguments select the name suffix and target attribute.
4175
 *
4176
 * The name of this symbol is XXH3_accumulate_<name>() and it calls
4177
 * XXH3_accumulate_512_<name>().
4178
 *
4179
 * It may be useful to hand implement this function if the compiler fails to
4180
 * optimize the inline function.
4181
 */
4182
#define XXH3_ACCUMULATE_TEMPLATE(name)                      \
4183
void                                                        \
4184
XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc,           \
4185
                       const xxh_u8* XXH_RESTRICT input,    \
4186
                       const xxh_u8* XXH_RESTRICT secret,   \
4187
1.58M
                       size_t nbStripes)                    \
4188
1.58M
{                                                           \
4189
1.58M
    size_t n;                                               \
4190
24.3M
    for (n = 0; n < nbStripes; n++ ) {                      \
4191
22.7M
        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  \
4192
22.7M
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);               \
4193
22.7M
        XXH3_accumulate_512_##name(                         \
4194
22.7M
                 acc,                                       \
4195
22.7M
                 in,                                        \
4196
22.7M
                 secret + n*XXH_SECRET_CONSUME_RATE);       \
4197
22.7M
    }                                                       \
4198
1.58M
}
xxhash.cc:XXH3_accumulate_avx2(unsigned long*, unsigned char const*, unsigned char const*, unsigned long)
Unexecuted instantiation: xxhash.cc:XXH3_accumulate_scalar(unsigned long*, unsigned char const*, unsigned char const*, unsigned long)
4199
4200
4201
XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
4202
0
{
4203
0
    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
4204
0
    XXH_memcpy(dst, &v64, sizeof(v64));
4205
0
}
4206
4207
/* Several intrinsic functions below are supposed to accept __int64 as argument,
4208
 * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
4209
 * However, several environments do not define __int64 type,
4210
 * requiring a workaround.
4211
 */
4212
#if !defined (__VMS) \
4213
  && (defined (__cplusplus) \
4214
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
4215
    typedef int64_t xxh_i64;
4216
#else
4217
    /* the following type must have a width of 64-bit */
4218
    typedef long long xxh_i64;
4219
#endif
4220
4221
4222
/*
4223
 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
4224
 *
4225
 * It is a hardened version of UMAC, based on FARSH's implementation.
4226
 *
4227
 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
4228
 * implementations, and it is ridiculously fast.
4229
 *
4230
 * We harden it by mixing the original input into the accumulators as well as the product.
4231
 *
4232
 * This means that in the (relatively likely) case of a multiply by zero, the
4233
 * original input is preserved.
4234
 *
4235
 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
4236
 * cross-pollination, as otherwise the upper and lower halves would be
4237
 * essentially independent.
4238
 *
4239
 * This doesn't matter on 64-bit hashes since they all get merged together in
4240
 * the end, so we skip the extra step.
4241
 *
4242
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
4243
 */
4244
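In scalar terms, one lane of the accumulate step described above reduces to the short sketch below. It mirrors XXH3_scalarRound further down in this file; the helper and function names in the sketch are illustrative, not part of the library.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative little-endian 64-bit read (stands in for XXH_readLE64);
     * assumes a little-endian host for brevity. */
    static uint64_t readLE64_sketch(const uint8_t* p)
    {
        uint64_t v; memcpy(&v, p, sizeof v); return v;
    }

    /* One scalar lane of an accumulate-512 round, as described above
     * (a sketch mirroring XXH3_scalarRound; acc has 8 lanes of 64 bits). */
    static void accumulate_lane_sketch(uint64_t* acc, const uint8_t* input,
                                       const uint8_t* secret, size_t lane)
    {
        uint64_t const data_val = readLE64_sketch(input + lane * 8);
        uint64_t const data_key = data_val ^ readLE64_sketch(secret + lane * 8);
        acc[lane ^ 1] += data_val;                                    /* keep the raw input, lanes swapped */
        acc[lane]     += (data_key & 0xFFFFFFFF) * (data_key >> 32);  /* 32x32 -> 64 product */
    }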
4245
#if (XXH_VECTOR == XXH_AVX512) \
4246
     || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
4247
4248
#ifndef XXH_TARGET_AVX512
4249
# define XXH_TARGET_AVX512  /* disable attribute target */
4250
#endif
4251
4252
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4253
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
4254
                     const void* XXH_RESTRICT input,
4255
                     const void* XXH_RESTRICT secret)
4256
{
4257
    __m512i* const xacc = (__m512i *) acc;
4258
    XXH_ASSERT((((size_t)acc) & 63) == 0)
4259
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
4260
4261
    {
4262
        /* data_vec    = input[0]; */
4263
        __m512i const data_vec    = _mm512_loadu_si512   (input);
4264
        /* key_vec     = secret[0]; */
4265
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
4266
        /* data_key    = data_vec ^ key_vec; */
4267
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
4268
        /* data_key_lo = data_key >> 32; */
4269
        __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32);
4270
        /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4271
        __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
4272
        /* xacc[0] += swap(data_vec); */
4273
        __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
4274
        __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
4275
        /* xacc[0] += product; */
4276
        *xacc = _mm512_add_epi64(product, sum);
4277
    }
4278
}
4279
XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
4280
4281
/*
4282
 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
4283
 *
4284
 * Multiplication isn't perfect, as explained by Google in HighwayHash:
4285
 *
4286
 *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
4287
 *  // varying degrees. In descending order of goodness, bytes
4288
 *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
4289
 *  // As expected, the upper and lower bytes are much worse.
4290
 *
4291
 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
4292
 *
4293
 * Since our algorithm uses a pseudorandom secret to add some variance into the
4294
 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
4295
 *
4296
 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
4297
 * extraction.
4298
 *
4299
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
4300
 */
4301
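In scalar terms, the scramble described above applies the per-lane transform sketched below; it mirrors XXH3_scalarScrambleRound further down in this file (the constant is XXH_PRIME32_1, the function name is illustrative).

    #include <stdint.h>

    /* One scalar lane of the scramble step: xorshift, mix in the secret,
     * then multiply by a 32-bit prime (a sketch, not the vectorized code). */
    static uint64_t scramble_lane_sketch(uint64_t acc64, uint64_t key64)
    {
        acc64 ^= acc64 >> 47;      /* fold the high bits down */
        acc64 ^= key64;            /* mix in 8 bytes of the secret */
        acc64 *= 2654435761U;      /* XXH_PRIME32_1 */
        return acc64;
    }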
4302
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4303
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4304
{
4305
    XXH_ASSERT((((size_t)acc) & 63) == 0)
4306
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
4307
    {   __m512i* const xacc = (__m512i*) acc;
4308
        const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
4309
4310
        /* xacc[0] ^= (xacc[0] >> 47) */
4311
        __m512i const acc_vec     = *xacc;
4312
        __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
4313
        /* xacc[0] ^= secret; */
4314
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
4315
        __m512i const data_key    = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);
4316
4317
        /* xacc[0] *= XXH_PRIME32_1; */
4318
        __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32);
4319
        __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
4320
        __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
4321
        *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
4322
    }
4323
}
4324
4325
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4326
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4327
{
4328
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
4329
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
4330
    XXH_ASSERT(((size_t)customSecret & 63) == 0)
4331
    (void)(&XXH_writeLE64);
4332
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
4333
        __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
4334
        __m512i const seed     = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);
4335
4336
        const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
4337
              __m512i* const dest = (      __m512i*) customSecret;
4338
        int i;
4339
        XXH_ASSERT(((size_t)src & 63) == 0) /* control alignment */
4340
        XXH_ASSERT(((size_t)dest & 63) == 0)
4341
        for (i=0; i < nbRounds; ++i) {
4342
            dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
4343
    }   }
4344
}
4345
4346
#endif
4347
4348
#if (XXH_VECTOR == XXH_AVX2) \
4349
    || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
4350
4351
#ifndef XXH_TARGET_AVX2
4352
# define XXH_TARGET_AVX2  /* disable attribute target */
4353
#endif
4354
4355
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
4356
XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
4357
                    const void* XXH_RESTRICT input,
4358
                    const void* XXH_RESTRICT secret)
4359
23.0M
{
4360
23.0M
    XXH_ASSERT((((size_t)acc) & 31) == 0)
4361
23.0M
    {   __m256i* const xacc    =       (__m256i *) acc;
4362
        /* Unaligned. This is mainly for pointer arithmetic, and because
4363
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4364
23.0M
        const         __m256i* const xinput  = (const __m256i *) input;
4365
        /* Unaligned. This is mainly for pointer arithmetic, and because
4366
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4367
23.0M
        const         __m256i* const xsecret = (const __m256i *) secret;
4368
4369
23.0M
        size_t i;
4370
69.0M
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
4371
            /* data_vec    = xinput[i]; */
4372
46.0M
            __m256i const data_vec    = _mm256_loadu_si256    (xinput+i);
4373
            /* key_vec     = xsecret[i]; */
4374
46.0M
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
4375
            /* data_key    = data_vec ^ key_vec; */
4376
46.0M
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
4377
            /* data_key_lo = data_key >> 32; */
4378
46.0M
            __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
4379
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4380
46.0M
            __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
4381
            /* xacc[i] += swap(data_vec); */
4382
46.0M
            __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
4383
46.0M
            __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
4384
            /* xacc[i] += product; */
4385
46.0M
            xacc[i] = _mm256_add_epi64(product, sum);
4386
46.0M
    }   }
4387
23.0M
}
4388
XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
4389
4390
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
4391
XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4392
1.32M
{
4393
1.32M
    XXH_ASSERT((((size_t)acc) & 31) == 0)
4394
1.32M
    {   __m256i* const xacc = (__m256i*) acc;
4395
        /* Unaligned. This is mainly for pointer arithmetic, and because
4396
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4397
1.32M
        const         __m256i* const xsecret = (const __m256i *) secret;
4398
1.32M
        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
4399
4400
1.32M
        size_t i;
4401
3.97M
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
4402
            /* xacc[i] ^= (xacc[i] >> 47) */
4403
2.65M
            __m256i const acc_vec     = xacc[i];
4404
2.65M
            __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
4405
2.65M
            __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
4406
            /* xacc[i] ^= xsecret[i]; */
4407
2.65M
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
4408
2.65M
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
4409
4410
            /* xacc[i] *= XXH_PRIME32_1; */
4411
2.65M
            __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
4412
2.65M
            __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
4413
2.65M
            __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
4414
2.65M
            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
4415
2.65M
        }
4416
1.32M
    }
4417
1.32M
}
4418
4419
XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4420
0
{
4421
0
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
4422
0
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
4423
0
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
4424
0
    (void)(&XXH_writeLE64);
4425
0
    XXH_PREFETCH(customSecret);
4426
0
    {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
4427
4428
0
        const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
4429
0
              __m256i*       dest = (      __m256i*) customSecret;
4430
4431
0
#       if defined(__GNUC__) || defined(__clang__)
4432
        /*
4433
         * On GCC & Clang, marking 'dest' as modified will cause the compiler to:
4434
         *   - not extract the secret from SSE registers in the internal loop
4435
         *   - use less common registers, and avoid pushing these regs onto the stack
4436
         */
4437
0
        XXH_COMPILER_GUARD(dest);
4438
0
#       endif
4439
        XXH_ASSERT(((size_t)src & 31) == 0) /* control alignment */
4440
0
        XXH_ASSERT(((size_t)dest & 31) == 0)
4441
4442
        /* GCC -O2 needs the loop unrolled manually */
4443
0
        dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
4444
0
        dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
4445
0
        dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
4446
0
        dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
4447
0
        dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
4448
0
        dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
4449
0
    }
4450
0
}
4451
4452
#endif
4453
4454
/* x86dispatch always generates SSE2 */
4455
#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
4456
4457
#ifndef XXH_TARGET_SSE2
4458
# define XXH_TARGET_SSE2  /* disable attribute target */
4459
#endif
4460
4461
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
4462
XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
4463
                    const void* XXH_RESTRICT input,
4464
                    const void* XXH_RESTRICT secret)
4465
{
4466
    /* SSE2 is just a half-scale version of the AVX2 version. */
4467
    XXH_ASSERT((((size_t)acc) & 15) == 0)
4468
    {   __m128i* const xacc    =       (__m128i *) acc;
4469
        /* Unaligned. This is mainly for pointer arithmetic, and because
4470
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
4471
        const         __m128i* const xinput  = (const __m128i *) input;
4472
        /* Unaligned. This is mainly for pointer arithmetic, and because
4473
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
4474
        const         __m128i* const xsecret = (const __m128i *) secret;
4475
4476
        size_t i;
4477
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
4478
            /* data_vec    = xinput[i]; */
4479
            __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
4480
            /* key_vec     = xsecret[i]; */
4481
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
4482
            /* data_key    = data_vec ^ key_vec; */
4483
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
4484
            /* data_key_lo = data_key >> 32; */
4485
            __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
4486
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4487
            __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
4488
            /* xacc[i] += swap(data_vec); */
4489
            __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
4490
            __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
4491
            /* xacc[i] += product; */
4492
            xacc[i] = _mm_add_epi64(product, sum);
4493
    }   }
4494
}
4495
XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
4496
4497
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
4498
XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4499
{
4500
    XXH_ASSERT((((size_t)acc) & 15) == 0)
4501
    {   __m128i* const xacc = (__m128i*) acc;
4502
        /* Unaligned. This is mainly for pointer arithmetic, and because
4503
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
4504
        const         __m128i* const xsecret = (const __m128i *) secret;
4505
        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
4506
4507
        size_t i;
4508
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
4509
            /* xacc[i] ^= (xacc[i] >> 47) */
4510
            __m128i const acc_vec     = xacc[i];
4511
            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
4512
            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
4513
            /* xacc[i] ^= xsecret[i]; */
4514
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
4515
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
4516
4517
            /* xacc[i] *= XXH_PRIME32_1; */
4518
            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
4519
            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
4520
            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
4521
            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
4522
        }
4523
    }
4524
}
4525
4526
XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4527
{
4528
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4529
    (void)(&XXH_writeLE64);
4530
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
4531
4532
#       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
4533
        /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
4534
        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
4535
        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
4536
#       else
4537
        __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
4538
#       endif
4539
        int i;
4540
4541
        const void* const src16 = XXH3_kSecret;
4542
        __m128i* dst16 = (__m128i*) customSecret;
4543
#       if defined(__GNUC__) || defined(__clang__)
4544
        /*
4545
         * On GCC & Clang, marking 'dest' as modified will cause the compiler to:
4546
         *   - not extract the secret from SSE registers in the internal loop
4547
         *   - use less common registers, and avoid pushing these regs onto the stack
4548
         */
4549
        XXH_COMPILER_GUARD(dst16);
4550
#       endif
4551
        XXH_ASSERT(((size_t)src16 & 15) == 0) /* control alignment */
4552
        XXH_ASSERT(((size_t)dst16 & 15) == 0)
4553
4554
        for (i=0; i < nbRounds; ++i) {
4555
            dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
4556
    }   }
4557
}
4558
4559
#endif
4560
4561
#if (XXH_VECTOR == XXH_NEON)
4562
4563
/* forward declarations for the scalar routines */
4564
XXH_FORCE_INLINE void
4565
XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
4566
                 void const* XXH_RESTRICT secret, size_t lane);
4567
4568
XXH_FORCE_INLINE void
4569
XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
4570
                         void const* XXH_RESTRICT secret, size_t lane);
4571
4572
/*!
4573
 * @internal
4574
 * @brief The bulk processing loop for NEON.
4575
 *
4576
 * The NEON code path is actually partially scalar when running on AArch64. This
4577
 * is to optimize the pipelining and can give up to a 15% speedup depending on the
4578
 * CPU, and it also mitigates some GCC codegen issues.
4579
 *
4580
 * @see XXH3_NEON_LANES for configuring this and details about this optimization.
4581
 */
4582
XXH_FORCE_INLINE void
4583
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
4584
                    const void* XXH_RESTRICT input,
4585
                    const void* XXH_RESTRICT secret)
4586
{
4587
    XXH_ASSERT((((size_t)acc) & 15) == 0)
4588
    XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
4589
    {
4590
        uint64x2_t* const xacc = (uint64x2_t *) acc;
4591
        /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
4592
        uint8_t const* const xinput = (const uint8_t *) input;
4593
        uint8_t const* const xsecret  = (const uint8_t *) secret;
4594
4595
        size_t i;
4596
        /* AArch64 uses both scalar and neon at the same time */
4597
        for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
4598
            XXH3_scalarRound(acc, input, secret, i);
4599
        }
4600
        i = 0;
4601
        for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
4602
            uint64x2_t acc_vec1 = xacc[i];
4603
            /* data_vec = xinput[i]; */
4604
            uint64x2_t data_vec1 = XXH_vld1q_u64(xinput  + (i * 16));
4605
            /* key_vec  = xsecret[i];  */
4606
            uint64x2_t key_vec1  = XXH_vld1q_u64(xsecret + (i * 16));
4607
            /* acc_vec_2 = swap(data_vec) */
4608
            uint64x2_t acc_vec_21 = vextq_u64(data_vec1, data_vec1, 1);
4609
            /* data_key = data_vec ^ key_vec; */
4610
            uint64x2_t data_key1 = veorq_u64(data_vec1, key_vec1);
4611
4612
            uint64x2_t acc_vec2 = xacc[i+1];
4613
            /* data_vec = xinput[i]; */
4614
            uint64x2_t data_vec2 = XXH_vld1q_u64(xinput  + ((i+1) * 16));
4615
            /* key_vec  = xsecret[i];  */
4616
            uint64x2_t key_vec2  = XXH_vld1q_u64(xsecret + ((i+1) * 16));
4617
            /* acc_vec_2 = swap(data_vec) */
4618
            uint64x2_t acc_vec_22 = vextq_u64(data_vec2, data_vec2, 1);
4619
            /* data_key = data_vec ^ key_vec; */
4620
            uint64x2_t data_key2 = veorq_u64(data_vec2, key_vec2);
4621
4622
            /* data_key_lo = {(data_key1 & 0xFFFFFFFF), (data_key2 & 0xFFFFFFFF)};
4623
             * data_key_hi = {(data_key1 >> 32), (data_key2 >> 32)};
4624
             */
4625
            uint32x4x2_t zipped = vuzpq_u32(vreinterpretq_u32_u64(data_key1), vreinterpretq_u32_u64(data_key2));
4626
            uint32x4_t data_key_lo = zipped.val[0];
4627
            uint32x4_t data_key_hi = zipped.val[1];
4628
4629
            /* acc_vec_2 += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
4630
            acc_vec_21 = vmlal_u32 (acc_vec_21, vget_low_u32(data_key_lo), vget_low_u32(data_key_hi));
4631
            XXH_COMPILER_GUARD_W(acc_vec_21);
4632
            /* xacc[i] += acc_vec_2; */
4633
            acc_vec1 = vaddq_u64 (acc_vec1, acc_vec_21);
4634
            xacc[i] = acc_vec1;
4635
            /* acc_vec_2 += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
4636
            acc_vec_22 = vmlal_u32 (acc_vec_22, vget_high_u32(data_key_lo), vget_high_u32(data_key_hi));
4637
            XXH_COMPILER_GUARD_W(acc_vec_22);
4638
            /* xacc[i] += acc_vec_2; */
4639
            acc_vec2 = vaddq_u64 (acc_vec2, acc_vec_22);
4640
            xacc[i+1] = acc_vec2;
4641
        }
4642
        for (; i < XXH3_NEON_LANES / 2; i++) {
4643
            uint64x2_t acc_vec = xacc[i];
4644
            /* data_vec = xinput[i]; */
4645
            uint64x2_t data_vec = XXH_vld1q_u64(xinput  + (i * 16));
4646
            /* key_vec  = xsecret[i];  */
4647
            uint64x2_t key_vec  = XXH_vld1q_u64(xsecret + (i * 16));
4648
            uint64x2_t data_key;
4649
            uint32x2_t data_key_lo, data_key_hi;
4650
            /* acc_vec_2 = swap(data_vec) */
4651
            uint64x2_t acc_vec_2 = vextq_u64(data_vec, data_vec, 1);
4652
            /* data_key = data_vec ^ key_vec; */
4653
            data_key = veorq_u64(data_vec, key_vec);
4654
            /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
4655
             * data_key_hi = (uint32x2_t) (data_key >> 32);
4656
             * data_key = UNDEFINED; */
4657
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4658
            /* acc_vec_2 += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
4659
            acc_vec_2 = vmlal_u32 (acc_vec_2, data_key_lo, data_key_hi);
4660
            XXH_COMPILER_GUARD_W(acc_vec_2);
4661
            /* xacc[i] += acc_vec_2; */
4662
            acc_vec = vaddq_u64 (acc_vec, acc_vec_2);
4663
            xacc[i] = acc_vec;
4664
        }
4665
4666
    }
4667
}
4668
XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
4669
4670
XXH_FORCE_INLINE void
4671
XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4672
{
4673
    XXH_ASSERT((((size_t)acc) & 15) == 0)
4674
4675
    {   uint64x2_t* xacc       = (uint64x2_t*) acc;
4676
        uint8_t const* xsecret = (uint8_t const*) secret;
4677
        uint32x2_t prime       = vdup_n_u32 (XXH_PRIME32_1);
4678
4679
        size_t i;
4680
        /* AArch64 uses both scalar and neon at the same time */
4681
        for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
4682
            XXH3_scalarScrambleRound(acc, secret, i);
4683
        }
4684
        for (i=0; i < XXH3_NEON_LANES / 2; i++) {
4685
            /* xacc[i] ^= (xacc[i] >> 47); */
4686
            uint64x2_t acc_vec  = xacc[i];
4687
            uint64x2_t shifted  = vshrq_n_u64   (acc_vec, 47);
4688
            uint64x2_t data_vec = veorq_u64     (acc_vec, shifted);
4689
4690
            /* xacc[i] ^= xsecret[i]; */
4691
            uint64x2_t key_vec  = XXH_vld1q_u64 (xsecret + (i * 16));
4692
            uint64x2_t data_key = veorq_u64     (data_vec, key_vec);
4693
4694
            /* xacc[i] *= XXH_PRIME32_1 */
4695
            uint32x2_t data_key_lo, data_key_hi;
4696
            /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
4697
             * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
4698
             * xacc[i] = UNDEFINED; */
4699
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4700
            {   /*
4701
                 * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
4702
                 *
4703
                 * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
4704
                 * incorrectly "optimize" this:
4705
                 *   tmp     = vmul_u32(vmovn_u64(a), vmovn_u64(b));
4706
                 *   shifted = vshll_n_u32(tmp, 32);
4707
                 * to this:
4708
                 *   tmp     = "vmulq_u64"(a, b); // no such thing!
4709
                 *   shifted = vshlq_n_u64(tmp, 32);
4710
                 *
4711
                 * However, unlike SSE, Clang lacks a 64-bit multiply routine
4712
                 * for NEON, and it scalarizes two 64-bit multiplies instead.
4713
                 *
4714
                 * vmull_u32 has the same timing as vmul_u32, and it avoids
4715
                 * this bug completely.
4716
                 * See https://bugs.llvm.org/show_bug.cgi?id=39967
4717
                 */
4718
                uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
4719
                /* xacc[i] = prod_hi << 32; */
4720
                prod_hi = vshlq_n_u64(prod_hi, 32);
4721
                /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
4722
                xacc[i] = vmlal_u32(prod_hi, data_key_lo, prime);
4723
            }
4724
        }
4725
    }
4726
}
4727
4728
#endif
4729
4730
#if (XXH_VECTOR == XXH_VSX)
4731
4732
XXH_FORCE_INLINE void
4733
XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
4734
                    const void* XXH_RESTRICT input,
4735
                    const void* XXH_RESTRICT secret)
4736
{
4737
    /* presumed aligned */
4738
    unsigned int* const xacc = (unsigned int*) acc;
4739
    xxh_u64x2 const* const xinput   = (xxh_u64x2 const*) input;   /* no alignment restriction */
4740
    xxh_u64x2 const* const xsecret  = (xxh_u64x2 const*) secret;    /* no alignment restriction */
4741
    xxh_u64x2 const v32 = { 32, 32 };
4742
    size_t i;
4743
    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4744
        /* data_vec = xinput[i]; */
4745
        xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
4746
        /* key_vec = xsecret[i]; */
4747
        xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
4748
        xxh_u64x2 const data_key = data_vec ^ key_vec;
4749
        /* shuffled = (data_key << 32) | (data_key >> 32); */
4750
        xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
4751
        /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
4752
        xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
4753
        /* acc_vec = xacc[i]; */
4754
        xxh_u64x2 acc_vec        = (xxh_u64x2)vec_xl(0, xacc + 4 * i);
4755
        acc_vec += product;
4756
4757
        /* swap high and low halves */
4758
#ifdef __s390x__
4759
        acc_vec += vec_permi(data_vec, data_vec, 2);
4760
#else
4761
        acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
4762
#endif
4763
        /* xacc[i] = acc_vec; */
4764
        vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 * i);
4765
    }
4766
}
4767
XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
4768
4769
XXH_FORCE_INLINE void
4770
XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4771
{
4772
    XXH_ASSERT((((size_t)acc) & 15) == 0)
4773
4774
    {         xxh_u64x2* const xacc    =       (xxh_u64x2*) acc;
4775
        const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
4776
        /* constants */
4777
        xxh_u64x2 const v32  = { 32, 32 };
4778
        xxh_u64x2 const v47 = { 47, 47 };
4779
        xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
4780
        size_t i;
4781
        for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4782
            /* xacc[i] ^= (xacc[i] >> 47); */
4783
            xxh_u64x2 const acc_vec  = xacc[i];
4784
            xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
4785
4786
            /* xacc[i] ^= xsecret[i]; */
4787
            xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
4788
            xxh_u64x2 const data_key = data_vec ^ key_vec;
4789
4790
            /* xacc[i] *= XXH_PRIME32_1 */
4791
            /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF);  */
4792
            xxh_u64x2 const prod_even  = XXH_vec_mule((xxh_u32x4)data_key, prime);
4793
            /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32);  */
4794
            xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
4795
            xacc[i] = prod_odd + (prod_even << v32);
4796
    }   }
4797
}
4798
4799
#endif
4800
4801
#if (XXH_VECTOR == XXH_SVE)
4802
4803
XXH_FORCE_INLINE void
4804
XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
4805
                   const void* XXH_RESTRICT input,
4806
                   const void* XXH_RESTRICT secret)
4807
{
4808
    uint64_t *xacc = (uint64_t *)acc;
4809
    const uint64_t *xinput = (const uint64_t *)(const void *)input;
4810
    const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
4811
    svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
4812
    uint64_t element_count = svcntd();
4813
    if (element_count >= 8) {
4814
        svbool_t mask = svptrue_pat_b64(SV_VL8);
4815
        svuint64_t vacc = svld1_u64(mask, xacc);
4816
        ACCRND(vacc, 0);
4817
        svst1_u64(mask, xacc, vacc);
4818
    } else if (element_count == 2) {   /* sve128 */
4819
        svbool_t mask = svptrue_pat_b64(SV_VL2);
4820
        svuint64_t acc0 = svld1_u64(mask, xacc + 0);
4821
        svuint64_t acc1 = svld1_u64(mask, xacc + 2);
4822
        svuint64_t acc2 = svld1_u64(mask, xacc + 4);
4823
        svuint64_t acc3 = svld1_u64(mask, xacc + 6);
4824
        ACCRND(acc0, 0);
4825
        ACCRND(acc1, 2);
4826
        ACCRND(acc2, 4);
4827
        ACCRND(acc3, 6);
4828
        svst1_u64(mask, xacc + 0, acc0);
4829
        svst1_u64(mask, xacc + 2, acc1);
4830
        svst1_u64(mask, xacc + 4, acc2);
4831
        svst1_u64(mask, xacc + 6, acc3);
4832
    } else {
4833
        svbool_t mask = svptrue_pat_b64(SV_VL4);
4834
        svuint64_t acc0 = svld1_u64(mask, xacc + 0);
4835
        svuint64_t acc1 = svld1_u64(mask, xacc + 4);
4836
        ACCRND(acc0, 0);
4837
        ACCRND(acc1, 4);
4838
        svst1_u64(mask, xacc + 0, acc0);
4839
        svst1_u64(mask, xacc + 4, acc1);
4840
    }
4841
}
4842
4843
XXH_FORCE_INLINE void
4844
XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
4845
               const xxh_u8* XXH_RESTRICT input,
4846
               const xxh_u8* XXH_RESTRICT secret,
4847
               size_t nbStripes)
4848
{
4849
    if (nbStripes != 0) {
4850
        uint64_t *xacc = (uint64_t *)acc;
4851
        const uint64_t *xinput = (const uint64_t *)(const void *)input;
4852
        const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
4853
        svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
4854
        uint64_t element_count = svcntd();
4855
        if (element_count >= 8) {
4856
            svbool_t mask = svptrue_pat_b64(SV_VL8);
4857
            svuint64_t vacc = svld1_u64(mask, xacc + 0);
4858
            do {
4859
                /* svprfd(svbool_t, void *, enum svfprop); */
4860
                svprfd(mask, xinput + 128, SV_PLDL1STRM);
4861
                ACCRND(vacc, 0);
4862
                xinput += 8;
4863
                xsecret += 1;
4864
                nbStripes--;
4865
           } while (nbStripes != 0);
4866
4867
           svst1_u64(mask, xacc + 0, vacc);
4868
        } else if (element_count == 2) { /* sve128 */
4869
            svbool_t mask = svptrue_pat_b64(SV_VL2);
4870
            svuint64_t acc0 = svld1_u64(mask, xacc + 0);
4871
            svuint64_t acc1 = svld1_u64(mask, xacc + 2);
4872
            svuint64_t acc2 = svld1_u64(mask, xacc + 4);
4873
            svuint64_t acc3 = svld1_u64(mask, xacc + 6);
4874
            do {
4875
                svprfd(mask, xinput + 128, SV_PLDL1STRM);
4876
                ACCRND(acc0, 0);
4877
                ACCRND(acc1, 2);
4878
                ACCRND(acc2, 4);
4879
                ACCRND(acc3, 6);
4880
                xinput += 8;
4881
                xsecret += 1;
4882
                nbStripes--;
4883
           } while (nbStripes != 0);
4884
4885
           svst1_u64(mask, xacc + 0, acc0);
4886
           svst1_u64(mask, xacc + 2, acc1);
4887
           svst1_u64(mask, xacc + 4, acc2);
4888
           svst1_u64(mask, xacc + 6, acc3);
4889
        } else {
4890
            svbool_t mask = svptrue_pat_b64(SV_VL4);
4891
            svuint64_t acc0 = svld1_u64(mask, xacc + 0);
4892
            svuint64_t acc1 = svld1_u64(mask, xacc + 4);
4893
            do {
4894
                svprfd(mask, xinput + 128, SV_PLDL1STRM);
4895
                ACCRND(acc0, 0);
4896
                ACCRND(acc1, 4);
4897
                xinput += 8;
4898
                xsecret += 1;
4899
                nbStripes--;
4900
           } while (nbStripes != 0);
4901
4902
           svst1_u64(mask, xacc + 0, acc0);
4903
           svst1_u64(mask, xacc + 4, acc1);
4904
       }
4905
    }
4906
}
4907
4908
#endif
4909
4910
/* scalar variants - universal */
4911
4912
/*!
4913
 * @internal
4914
 * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
4915
 *
4916
 * This is extracted to its own function because the NEON path uses a combination
4917
 * of NEON and scalar.
4918
 */
4919
XXH_FORCE_INLINE void
4920
XXH3_scalarRound(void* XXH_RESTRICT acc,
4921
                 void const* XXH_RESTRICT input,
4922
                 void const* XXH_RESTRICT secret,
4923
                 size_t lane)
4924
0
{
4925
0
    xxh_u64* xacc = (xxh_u64*) acc;
4926
0
    xxh_u8 const* xinput  = (xxh_u8 const*) input;
4927
0
    xxh_u8 const* xsecret = (xxh_u8 const*) secret;
4928
0
    XXH_ASSERT(lane < XXH_ACC_NB)
4929
0
    XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0)
4930
0
    {
4931
0
        xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
4932
0
        xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
4933
0
        xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
4934
0
        xacc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
4935
0
    }
4936
0
}
4937
4938
/*!
4939
 * @internal
4940
 * @brief Processes a 64 byte block of data using the scalar path.
4941
 */
4942
XXH_FORCE_INLINE void
4943
XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
4944
                     const void* XXH_RESTRICT input,
4945
                     const void* XXH_RESTRICT secret)
4946
0
{
4947
0
    size_t i;
4948
0
    /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
4949
0
#if defined(__GNUC__) && !defined(__clang__) \
4950
0
  && (defined(__arm__) || defined(__thumb2__)) \
4951
0
  && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
4952
0
  && XXH_SIZE_OPT <= 0
4953
0
#  pragma GCC unroll 8
4954
0
#endif
4955
0
    for (i=0; i < XXH_ACC_NB; i++) {
4956
0
        XXH3_scalarRound(acc, input, secret, i);
4957
0
    }
4958
0
}
4959
XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
4960
4961
/*!
4962
 * @internal
4963
 * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
4964
 *
4965
 * This is extracted to its own function because the NEON path uses a combination
4966
 * of NEON and scalar.
4967
 */
4968
XXH_FORCE_INLINE void
4969
XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
4970
                         void const* XXH_RESTRICT secret,
4971
                         size_t lane)
4972
0
{
4973
0
    xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
4974
0
    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
4975
0
    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0)
4976
0
    XXH_ASSERT(lane < XXH_ACC_NB)
4977
0
    {
4978
0
        xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
4979
0
        xxh_u64 acc64 = xacc[lane];
4980
0
        acc64 = XXH_xorshift64(acc64, 47);
4981
0
        acc64 ^= key64;
4982
0
        acc64 *= XXH_PRIME32_1;
4983
0
        xacc[lane] = acc64;
4984
0
    }
4985
0
}
4986
4987
/*!
4988
 * @internal
4989
 * @brief Scrambles the accumulators after a large chunk has been read
4990
 */
4991
XXH_FORCE_INLINE void
4992
XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4993
0
{
4994
0
    size_t i;
4995
0
    for (i=0; i < XXH_ACC_NB; i++) {
4996
0
        XXH3_scalarScrambleRound(acc, secret, i);
4997
0
    }
4998
0
}
4999
5000
XXH_FORCE_INLINE void
5001
XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5002
0
{
5003
0
    /*
5004
0
     * We need a separate pointer for the hack below,
5005
0
     * which requires a non-const pointer.
5006
0
     * Any decent compiler will optimize this out otherwise.
5007
0
     */
5008
0
    const xxh_u8* kSecretPtr = XXH3_kSecret;
5009
0
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
5010
0
5011
0
#if defined(__clang__) && defined(__aarch64__)
5012
0
    /*
5013
0
     * UGLY HACK:
5014
0
     * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
5015
0
     * placed sequentially, in order, at the top of the unrolled loop.
5016
0
     *
5017
0
     * While MOVK is great for generating constants (2 cycles for a 64-bit
5018
0
     * constant compared to 4 cycles for LDR), it fights for bandwidth with
5019
0
     * the arithmetic instructions.
5020
0
     *
5021
0
     *   I   L   S
5022
0
     * MOVK
5023
0
     * MOVK
5024
0
     * MOVK
5025
0
     * MOVK
5026
0
     * ADD
5027
0
     * SUB      STR
5028
0
     *          STR
5029
0
     * By forcing loads from memory (as the asm line causes Clang to assume
5030
0
     * that XXH3_kSecretPtr has been changed), the pipelines are used more
5031
0
     * efficiently:
5032
0
     *   I   L   S
5033
0
     *      LDR
5034
0
     *  ADD LDR
5035
0
     *  SUB     STR
5036
0
     *          STR
5037
0
     *
5038
0
     * See XXH3_NEON_LANES for details on the pipeline.
5039
0
     *
5040
0
     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
5041
0
     *   without hack: 2654.4 MB/s
5042
0
     *   with hack:    3202.9 MB/s
5043
0
     */
5044
0
    XXH_COMPILER_GUARD(kSecretPtr);
5045
0
#endif
5046
0
    /*
5047
0
     * Note: in debug mode, this overrides the asm optimization
5048
0
     * and Clang will emit MOVK chains again.
5049
0
     */
5050
0
    XXH_ASSERT(kSecretPtr == XXH3_kSecret)
5051
0
5052
0
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
5053
0
        int i;
5054
0
        for (i=0; i < nbRounds; i++) {
5055
0
            /*
5056
0
             * The asm hack causes Clang to assume that kSecretPtr aliases with
5057
0
             * customSecret, and on aarch64, this prevented LDP from merging two
5058
0
             * loads together for free. Putting the loads together before the stores
5059
0
             * properly generates LDP.
5060
0
             */
5061
0
            xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
5062
0
            xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
5063
0
            XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
5064
0
            XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
5065
0
    }   }
5066
0
}
5067
5068
5069
typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t);
5070
typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
5071
typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
5072
5073
5074
#if (XXH_VECTOR == XXH_AVX512)
5075
5076
#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
5077
#define XXH3_accumulate     XXH3_accumulate_avx512
5078
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx512
5079
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
5080
5081
#elif (XXH_VECTOR == XXH_AVX2)
5082
5083
233k
#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
5084
247k
#define XXH3_accumulate     XXH3_accumulate_avx2
5085
247k
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx2
5086
0
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
5087
5088
#elif (XXH_VECTOR == XXH_SSE2)
5089
5090
#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
5091
#define XXH3_accumulate     XXH3_accumulate_sse2
5092
#define XXH3_scrambleAcc    XXH3_scrambleAcc_sse2
5093
#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
5094
5095
#elif (XXH_VECTOR == XXH_NEON)
5096
5097
#define XXH3_accumulate_512 XXH3_accumulate_512_neon
5098
#define XXH3_accumulate     XXH3_accumulate_neon
5099
#define XXH3_scrambleAcc    XXH3_scrambleAcc_neon
5100
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5101
5102
#elif (XXH_VECTOR == XXH_VSX)
5103
5104
#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
5105
#define XXH3_accumulate     XXH3_accumulate_vsx
5106
#define XXH3_scrambleAcc    XXH3_scrambleAcc_vsx
5107
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5108
5109
#elif (XXH_VECTOR == XXH_SVE)
5110
#define XXH3_accumulate_512 XXH3_accumulate_512_sve
5111
#define XXH3_accumulate     XXH3_accumulate_sve
5112
#define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
5113
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5114
5115
#else /* scalar */
5116
5117
#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
5118
#define XXH3_accumulate     XXH3_accumulate_scalar
5119
#define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
5120
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5121
5122
#endif
5123
5124
#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
5125
#  undef XXH3_initCustomSecret
5126
#  define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5127
#endif
5128
5129
XXH_FORCE_INLINE void
5130
XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
5131
                      const xxh_u8* XXH_RESTRICT input, size_t len,
5132
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5133
                            XXH3_f_accumulate f_acc,
5134
                            XXH3_f_scrambleAcc f_scramble)
5135
226k
{
5136
226k
    size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
5137
226k
    size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
5138
226k
    size_t const nb_blocks = (len - 1) / block_len;
5139
5140
226k
    size_t n;
5141
5142
226k
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN)
5143
5144
1.32M
    for (n = 0; n < nb_blocks; n++) {
5145
1.10M
        f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
5146
1.10M
        f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
5147
1.10M
    }
5148
5149
    /* last partial block */
5150
226k
    XXH_ASSERT(len > XXH_STRIPE_LEN)
5151
226k
    {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
5152
226k
        XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE))
5153
226k
        f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);
5154
5155
        /* last stripe */
5156
226k
        {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
5157
233k
#define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
5158
226k
            XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
5159
226k
    }   }
5160
226k
}
5161
5162
XXH_FORCE_INLINE xxh_u64
5163
XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
5164
932k
{
5165
932k
    return XXH3_mul128_fold64(
5166
932k
               acc[0] ^ XXH_readLE64(secret),
5167
932k
               acc[1] ^ XXH_readLE64(secret+8) );
5168
932k
}
5169
5170
static XXH64_hash_t
5171
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
5172
233k
{
5173
233k
    xxh_u64 result64 = start;
5174
233k
    size_t i = 0;
5175
5176
1.16M
    for (i = 0; i < 4; i++) {
5177
932k
        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
5178
#if defined(__clang__)                                /* Clang */ \
5179
    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
5180
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
5181
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
5182
        /*
5183
         * UGLY HACK:
5184
         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
5185
         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
5186
         * XXH3_64bits, len == 256, Snapdragon 835:
5187
         *   without hack: 2063.7 MB/s
5188
         *   with hack:    2560.7 MB/s
5189
         */
5190
        XXH_COMPILER_GUARD(result64);
5191
#endif
5192
932k
    }
5193
5194
233k
    return XXH3_avalanche(result64);
5195
233k
}
5196
5197
226k
#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
5198
226k
                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
5199
5200
XXH_FORCE_INLINE XXH64_hash_t
5201
XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
5202
                           const void* XXH_RESTRICT secret, size_t secretSize,
5203
                           XXH3_f_accumulate f_acc,
5204
                           XXH3_f_scrambleAcc f_scramble)
5205
226k
{
5206
226k
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
5207
5208
226k
    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
5209
5210
    /* converge into final hash */
5211
226k
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
5212
    /* do not align on 8, so that the secret is different from the accumulator */
5213
233k
#define XXH_SECRET_MERGEACCS_START 11
5214
226k
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START)
5215
226k
    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
5216
226k
}
5217
5218
/*
5219
 * It's important for performance to transmit the secret's size (when it's static)
5220
 * so that the compiler can properly optimize the vectorized loop.
5221
 * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
5222
 */
5223
XXH_FORCE_INLINE XXH64_hash_t
5224
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
5225
                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
5226
0
{
5227
0
    (void)seed64;
5228
0
    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
5229
0
}
5230
5231
/*
5232
 * It's preferable for performance that XXH3_hashLong is not inlined,
5233
 * as it results in a smaller function for small data, which is easier on the instruction cache.
5234
 * Note that inside this no_inline function, we do inline the internal loop,
5235
 * and provide a statically defined secret size to allow optimization of the vector loop.
5236
 */
5237
XXH_NO_INLINE XXH_PUREF XXH64_hash_t
5238
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
5239
                          XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
5240
226k
{
5241
226k
    (void)seed64; (void)secret; (void)secretLen;
5242
226k
    return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
5243
226k
}
5244
5245
/*
5246
 * XXH3_hashLong_64b_withSeed():
5247
 * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
5248
 * and then use this key for long mode hashing.
5249
 *
5250
 * This operation is decently fast but nonetheless costs a little bit of time.
5251
 * Try to avoid it whenever possible (typically when seed==0).
5252
 *
5253
 * It's important for performance that XXH3_hashLong is not inlined. Not sure
5254
 * why (uop cache maybe?), but the difference is large and easily measurable.
5255
 */
5256
XXH_FORCE_INLINE XXH64_hash_t
5257
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
5258
                                    XXH64_hash_t seed,
5259
                                    XXH3_f_accumulate f_acc,
5260
                                    XXH3_f_scrambleAcc f_scramble,
5261
                                    XXH3_f_initCustomSecret f_initSec)
5262
0
{
5263
0
#if XXH_SIZE_OPT <= 0
5264
0
    if (seed == 0)
5265
0
        return XXH3_hashLong_64b_internal(input, len,
5266
0
                                          XXH3_kSecret, sizeof(XXH3_kSecret),
5267
0
                                          f_acc, f_scramble);
5268
0
#endif
5269
0
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
5270
0
        f_initSec(secret, seed);
5271
0
        return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
5272
0
                                          f_acc, f_scramble);
5273
0
    }
5274
0
}
5275
5276
/*
5277
 * It's important for performance that XXH3_hashLong is not inlined.
5278
 */
5279
XXH_NO_INLINE XXH64_hash_t
5280
XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
5281
                           XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
5282
0
{
5283
0
    (void)secret; (void)secretLen;
5284
0
    return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
5285
0
                XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
5286
0
}
5287
5288
5289
typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
5290
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
5291
5292
XXH_FORCE_INLINE XXH64_hash_t
5293
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
5294
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
5295
                     XXH3_hashLong64_f f_hashLong)
5296
2.38M
{
5297
2.38M
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN)
5298
    /*
5299
     * If an action is to be taken when the `secretLen` condition is not respected,
5300
     * it should be done here.
5301
     * For now, it's a contract pre-condition.
5302
     * Adding a check and a branch here would cost performance at every hash.
5303
 * Also, note that the function signature doesn't offer room to return an error.
5304
     */
5305
2.38M
    if (len <= 16)
5306
1.03M
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
5307
1.35M
    if (len <= 128)
5308
925k
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5309
425k
    if (len <= XXH3_MIDSIZE_MAX)
5310
199k
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5311
226k
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
5312
425k
}
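The dispatch in XXH3_64bits_internal above picks a specialized routine by input size: up to 16 bytes, 17-128 bytes, 129-240 bytes (XXH3_MIDSIZE_MAX), and a non-inlined long-input path beyond that. A hedged illustration of which path a one-shot call takes, assuming XXH3_MIDSIZE_MAX is 240 as in upstream xxHash and that buf points to enough readable bytes; the helper name is illustrative:

#include "xxhash.h"   /* with XXH_NAMESPACE set, names are macro-aliased to prefixed symbols */

void dispatch_examples(const unsigned char* buf /* >= 4096 readable bytes */)
{
    (void)XXH3_64bits(buf,   16);   /* -> XXH3_len_0to16_64b           */
    (void)XXH3_64bits(buf,  100);   /* -> XXH3_len_17to128_64b         */
    (void)XXH3_64bits(buf,  200);   /* -> XXH3_len_129to240_64b        */
    (void)XXH3_64bits(buf, 4096);   /* -> f_hashLong (long-input path) */
}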
5313
5314
5315
/* ===   Public entry point   === */
5316
5317
/*! @ingroup XXH3_family */
5318
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
5319
2.38M
{
5320
2.38M
    return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
5321
2.38M
}
5322
5323
/*! @ingroup XXH3_family */
5324
XXH_PUBLIC_API XXH64_hash_t
5325
XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
5326
1.47k
{
5327
1.47k
    return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
5328
1.47k
}
5329
5330
/*! @ingroup XXH3_family */
5331
XXH_PUBLIC_API XXH64_hash_t
5332
XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
5333
0
{
5334
0
    return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
5335
0
}
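As the comment before XXH3_hashLong_64b_withSeed explains, a non-zero seed forces a custom secret to be derived before long-input hashing, while seed==0 short-circuits to the default secret. A minimal sketch of the resulting equivalence (helper name illustrative):

#include <stddef.h>
#include "xxhash.h"

/* Expected to return 1: seed==0 uses the same default-secret computation as
 * XXH3_64bits(), so prefer the unseeded entry point when no seed is needed. */
static int seed_zero_matches_default(const void* buf, size_t len)
{
    return XXH3_64bits_withSeed(buf, len, 0) == XXH3_64bits(buf, len);
}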
5336
5337
XXH_PUBLIC_API XXH64_hash_t
5338
XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
5339
0
{
5340
0
    if (length <= XXH3_MIDSIZE_MAX)
5341
0
        return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), nullptr);
5342
0
    return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
5343
0
}
5344
5345
5346
/* ===   XXH3 streaming   === */
5347
#ifndef XXH_NO_STREAM
5348
/*
5349
 * Allocates a pointer that is always aligned to align.
5350
 *
5351
 * This must be freed with `XXH_alignedFree()`.
5352
 *
5353
 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
5354
 * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2,
5355
 * or, on 32-bit, the 16 byte aligned loads in SSE2 and NEON.
5356
 *
5357
 * This underalignment previously caused a rather obvious crash which went
5358
 * completely unnoticed due to XXH3_createState() not actually being tested.
5359
 * Credit to RedSpah for noticing this bug.
5360
 *
5361
 * The alignment is done manually: Functions like posix_memalign or _mm_malloc
5362
 * are avoided: To maintain portability, we would have to write a fallback
5363
 * like this anyways, and besides, testing for the existence of library
5364
 * functions without relying on external build tools is impossible.
5365
 *
5366
 * The method is simple: Overallocate, manually align, and store the offset
5367
 * to the original behind the returned pointer.
5368
 *
5369
 * Align must be a power of 2 and 8 <= align <= 128.
5370
 */
5371
static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
5372
5.86k
{
5373
    XXH_ASSERT(align <= 128 && align >= 8) /* range check */
5374
    XXH_ASSERT((align & (align-1)) == 0)   /* power of 2 */
5375
    XXH_ASSERT(s != 0 && s < (s + align))  /* empty/overflow */
5376
5.86k
    {   /* Overallocate to make room for manual realignment and an offset byte */
5377
5.86k
        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
5378
5.86k
        if (base != nullptr) {
5379
            /*
5380
             * Get the offset needed to align this pointer.
5381
             *
5382
             * Even if the returned pointer is aligned, there will always be
5383
             * at least one byte to store the offset to the original pointer.
5384
             */
5385
5.86k
            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
5386
            /* Add the offset for the now-aligned pointer */
5387
5.86k
            xxh_u8* ptr = base + offset;
5388
5389
5.86k
            XXH_ASSERT((size_t)ptr % align == 0)
5390
5391
            /* Store the offset immediately before the returned pointer. */
5392
5.86k
            ptr[-1] = (xxh_u8)offset;
5393
5.86k
            return ptr;
5394
5.86k
        }
5395
0
        return nullptr;
5396
5.86k
    }
5397
5.86k
}
5398
/*
5399
 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
5400
 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
5401
 */
5402
static void XXH_alignedFree(void* p)
5403
5.86k
{
5404
5.86k
    if (p != nullptr) {
5405
5.86k
        xxh_u8* ptr = (xxh_u8*)p;
5406
        /* Get the offset byte we added in XXH_alignedMalloc. */
5407
5.86k
        xxh_u8 offset = ptr[-1];
5408
        /* Free the original malloc'd pointer */
5409
5.86k
        xxh_u8* base = ptr - offset;
5410
5.86k
        XXH_free(base);
5411
5.86k
    }
5412
5.86k
}
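A standalone sketch of the same overallocate-and-store-offset scheme implemented by XXH_alignedMalloc()/XXH_alignedFree() above, written against plain malloc/free; the helper names are hypothetical, not part of the library:

#include <stdlib.h>

/* Overallocate by `align`, advance to the next aligned address (always at least
 * one byte forward), and stash that advance in the byte just before the returned
 * pointer so the matching free can recover the original allocation. */
static void* sketch_aligned_malloc(size_t s, size_t align)   /* align: power of 2, 8..128 */
{
    unsigned char* base = (unsigned char*)malloc(s + align);
    if (base == NULL) return NULL;
    {   size_t const offset = align - ((size_t)base & (align - 1));
        unsigned char* ptr = base + offset;
        ptr[-1] = (unsigned char)offset;
        return ptr;
    }
}

static void sketch_aligned_free(void* p)
{
    if (p != NULL) {
        unsigned char* ptr = (unsigned char*)p;
        free(ptr - ptr[-1]);   /* undo the stored offset to reach the malloc'd base */
    }
}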
5413
/*! @ingroup XXH3_family */
5414
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
5415
5.86k
{
5416
5.86k
    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
5417
5.86k
    if (state==nullptr) return nullptr;
5418
5.86k
    XXH3_INITSTATE(state);
5419
5.86k
    return state;
5420
5.86k
}
5421
5422
/*! @ingroup XXH3_family */
5423
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
5424
5.86k
{
5425
5.86k
    XXH_alignedFree(statePtr);
5426
5.86k
    return XXH_OK;
5427
5.86k
}
5428
5429
/*! @ingroup XXH3_family */
5430
XXH_PUBLIC_API void
5431
XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
5432
0
{
5433
0
    XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
5434
0
}
5435
5436
static void
5437
XXH3_reset_internal(XXH3_state_t* statePtr,
5438
                    XXH64_hash_t seed,
5439
                    const void* secret, size_t secretSize)
5440
1.16M
{
5441
1.16M
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
5442
1.16M
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
5443
1.16M
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart)
5444
1.16M
    XXH_ASSERT(statePtr != NULL)
5445
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
5446
1.16M
    memset((char*)statePtr + initStart, 0, initLength);
5447
1.16M
    statePtr->acc[0] = XXH_PRIME32_3;
5448
1.16M
    statePtr->acc[1] = XXH_PRIME64_1;
5449
1.16M
    statePtr->acc[2] = XXH_PRIME64_2;
5450
1.16M
    statePtr->acc[3] = XXH_PRIME64_3;
5451
1.16M
    statePtr->acc[4] = XXH_PRIME64_4;
5452
1.16M
    statePtr->acc[5] = XXH_PRIME32_2;
5453
1.16M
    statePtr->acc[6] = XXH_PRIME64_5;
5454
1.16M
    statePtr->acc[7] = XXH_PRIME32_1;
5455
1.16M
    statePtr->seed = seed;
5456
1.16M
    statePtr->useSeed = (seed != 0);
5457
1.16M
    statePtr->extSecret = (const unsigned char*)secret;
5458
1.16M
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN)
5459
1.16M
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
5460
1.16M
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
5461
1.16M
}
5462
5463
/*! @ingroup XXH3_family */
5464
XXH_PUBLIC_API XXH_errorcode
5465
XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
5466
1.16M
{
5467
1.16M
    if (statePtr == nullptr) return XXH_ERROR;
5468
1.16M
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
5469
1.16M
    return XXH_OK;
5470
1.16M
}
5471
5472
/*! @ingroup XXH3_family */
5473
XXH_PUBLIC_API XXH_errorcode
5474
XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
5475
0
{
5476
0
    if (statePtr == nullptr) return XXH_ERROR;
5477
0
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
5478
0
    if (secret == nullptr) return XXH_ERROR;
5479
0
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
5480
0
    return XXH_OK;
5481
0
}
5482
5483
/*! @ingroup XXH3_family */
5484
XXH_PUBLIC_API XXH_errorcode
5485
XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
5486
0
{
5487
0
    if (statePtr == nullptr) return XXH_ERROR;
5488
0
    if (seed==0) return XXH3_64bits_reset(statePtr);
5489
0
    if ((seed != statePtr->seed) || (statePtr->extSecret != nullptr))
5490
0
        XXH3_initCustomSecret(statePtr->customSecret, seed);
5491
0
    XXH3_reset_internal(statePtr, seed, nullptr, XXH_SECRET_DEFAULT_SIZE);
5492
0
    return XXH_OK;
5493
0
}
5494
5495
/*! @ingroup XXH3_family */
5496
XXH_PUBLIC_API XXH_errorcode
5497
XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
5498
0
{
5499
0
    if (statePtr == nullptr) return XXH_ERROR;
5500
0
    if (secret == nullptr) return XXH_ERROR;
5501
0
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
5502
0
    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
5503
0
    statePtr->useSeed = 1; /* always, even if seed64==0 */
5504
0
    return XXH_OK;
5505
0
}
5506
5507
/* Note : when XXH3_consumeStripes() is invoked,
5508
 * there must be a guarantee that at least one more byte will be consumed from input
5509
 * so that the function can blindly consume all stripes using the "normal" secret segment */
5510
XXH_FORCE_INLINE void
5511
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
5512
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
5513
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
5514
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
5515
                    XXH3_f_accumulate f_acc,
5516
                    XXH3_f_scrambleAcc f_scramble)
5517
15.0k
{
5518
    XXH_ASSERT(nbStripes <= nbStripesPerBlock)  /* can handle max 1 scramble per invocation */
5519
15.0k
    XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock)
5520
15.0k
    if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
5521
        /* need a scrambling operation */
5522
3.91k
        size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
5523
3.91k
        size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
5524
3.91k
        f_acc(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock);
5525
3.91k
        f_scramble(acc, secret + secretLimit);
5526
3.91k
        f_acc(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock);
5527
3.91k
        *nbStripesSoFarPtr = nbStripesAfterBlock;
5528
11.1k
    } else {
5529
11.1k
        f_acc(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes);
5530
11.1k
        *nbStripesSoFarPtr += nbStripes;
5531
11.1k
    }
5532
15.0k
}
5533
5534
#ifndef XXH3_STREAM_USE_STACK
5535
# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
5536
#   define XXH3_STREAM_USE_STACK 1
5537
# endif
5538
#endif
5539
/*
5540
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
5541
 */
5542
XXH_FORCE_INLINE XXH_errorcode
5543
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
5544
            const xxh_u8* XXH_RESTRICT input, size_t len,
5545
            XXH3_f_accumulate f_acc,
5546
            XXH3_f_scrambleAcc f_scramble)
5547
19.8k
{
5548
19.8k
    if (input==nullptr) {
5549
0
        XXH_ASSERT(len == 0)
5550
0
        return XXH_OK;
5551
0
    }
5552
5553
19.8k
    XXH_ASSERT(state != NULL)
5554
19.8k
    {   const xxh_u8* const bEnd = input + len;
5555
19.8k
        const unsigned char* const secret = (state->extSecret == nullptr) ? state->customSecret : state->extSecret;
5556
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
5557
        /* For some reason, gcc and MSVC seem to suffer greatly
5558
         * when operating on accumulators directly in the state.
5559
         * Operating on stack space seems to enable proper optimization.
5560
         * clang, on the other hand, doesn't seem to need this trick. */
5561
        XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
5562
#else
5563
19.8k
        xxh_u64* XXH_RESTRICT const acc = state->acc;
5564
19.8k
#endif
5565
19.8k
        state->totalLen += len;
5566
19.8k
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE)
5567
5568
        /* small input : just fill in tmp buffer */
5569
19.8k
        if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
5570
4.14k
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
5571
4.14k
            state->bufferedSize += (XXH32_hash_t)len;
5572
4.14k
            return XXH_OK;
5573
4.14k
        }
5574
5575
        /* total input is now > XXH3_INTERNALBUFFER_SIZE */
5576
15.6k
        #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
5577
15.6k
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */
5578
5579
        /*
5580
         * Internal buffer is partially filled (always, except at beginning)
5581
         * Complete it, then consume it.
5582
         */
5583
15.6k
        if (state->bufferedSize) {
5584
9.61k
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
5585
9.61k
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
5586
9.61k
            input += loadSize;
5587
9.61k
            XXH3_consumeStripes(acc,
5588
9.61k
                               &state->nbStripesSoFar, state->nbStripesPerBlock,
5589
9.61k
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
5590
9.61k
                                secret, state->secretLimit,
5591
9.61k
                                f_acc, f_scramble);
5592
9.61k
            state->bufferedSize = 0;
5593
9.61k
        }
5594
15.6k
        XXH_ASSERT(input < bEnd)
5595
5596
        /* large input to consume : ingest per full block */
5597
15.6k
        if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
5598
13.0k
            size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
5599
13.0k
            XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar)
5600
            /* join to current block's end */
5601
13.0k
            {   size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
5602
13.0k
                XXH_ASSERT(nbStripesToEnd <= nbStripes)
5603
13.0k
                f_acc(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd);
5604
13.0k
                f_scramble(acc, secret + state->secretLimit);
5605
13.0k
                state->nbStripesSoFar = 0;
5606
13.0k
                input += nbStripesToEnd * XXH_STRIPE_LEN;
5607
13.0k
                nbStripes -= nbStripesToEnd;
5608
13.0k
            }
5609
            /* consume per entire blocks */
5610
220k
            while(nbStripes >= state->nbStripesPerBlock) {
5611
207k
                f_acc(acc, input, secret, state->nbStripesPerBlock);
5612
207k
                f_scramble(acc, secret + state->secretLimit);
5613
207k
                input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
5614
207k
                nbStripes -= state->nbStripesPerBlock;
5615
207k
            }
5616
            /* consume last partial block */
5617
13.0k
            f_acc(acc, input, secret, nbStripes);
5618
13.0k
            input += nbStripes * XXH_STRIPE_LEN;
5619
            XXH_ASSERT(input < bEnd)  /* at least some bytes left */
5620
13.0k
            state->nbStripesSoFar = nbStripes;
5621
            /* buffer predecessor of last partial stripe */
5622
13.0k
            XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
5623
13.0k
            XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN)
5624
13.0k
        } else {
5625
            /* content to consume <= block size */
5626
            /* Consume input by a multiple of internal buffer size */
5627
2.65k
            if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
5628
2.00k
                const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
5629
3.79k
                do {
5630
3.79k
                    XXH3_consumeStripes(acc,
5631
3.79k
                                       &state->nbStripesSoFar, state->nbStripesPerBlock,
5632
3.79k
                                        input, XXH3_INTERNALBUFFER_STRIPES,
5633
3.79k
                                        secret, state->secretLimit,
5634
3.79k
                                        f_acc, f_scramble);
5635
3.79k
                    input += XXH3_INTERNALBUFFER_SIZE;
5636
3.79k
                } while (input<limit);
5637
                /* buffer predecessor of last partial stripe */
5638
2.00k
                XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
5639
2.00k
            }
5640
2.65k
        }
5641
5642
        /* Some remaining input (always) : buffer it */
5643
15.6k
        XXH_ASSERT(input < bEnd)
5644
15.6k
        XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE)
5645
15.6k
        XXH_ASSERT(state->bufferedSize == 0)
5646
15.6k
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
5647
15.6k
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
5648
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
5649
        /* save stack accumulators into state */
5650
        memcpy(state->acc, acc, sizeof(acc));
5651
#endif
5652
15.6k
    }
5653
5654
0
    return XXH_OK;
5655
15.6k
}
5656
5657
/*! @ingroup XXH3_family */
5658
XXH_PUBLIC_API XXH_errorcode
5659
XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
5660
19.8k
{
5661
19.8k
    return XXH3_update(state, (const xxh_u8*)input, len,
5662
19.8k
                       XXH3_accumulate, XXH3_scrambleAcc);
5663
19.8k
}
5664
5665
5666
XXH_FORCE_INLINE void
5667
XXH3_digest_long (XXH64_hash_t* acc,
5668
                  const XXH3_state_t* state,
5669
                  const unsigned char* secret)
5670
6.73k
{
5671
    /*
5672
     * Digest on a local copy. This way, the state remains unaltered, and it can
5673
     * continue ingesting more input afterwards.
5674
     */
5675
6.73k
    XXH_memcpy(acc, state->acc, sizeof(state->acc));
5676
6.73k
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
5677
1.66k
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
5678
1.66k
        size_t nbStripesSoFar = state->nbStripesSoFar;
5679
1.66k
        XXH3_consumeStripes(acc,
5680
1.66k
                           &nbStripesSoFar, state->nbStripesPerBlock,
5681
1.66k
                            state->buffer, nbStripes,
5682
1.66k
                            secret, state->secretLimit,
5683
1.66k
                            XXH3_accumulate, XXH3_scrambleAcc);
5684
        /* last stripe */
5685
1.66k
        XXH3_accumulate_512(acc,
5686
1.66k
                            state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
5687
1.66k
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
5688
5.06k
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
5689
5.06k
        xxh_u8 lastStripe[XXH_STRIPE_LEN];
5690
5.06k
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
5691
        XXH_ASSERT(state->bufferedSize > 0)  /* there is always some input buffered */
5692
5.06k
        XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
5693
5.06k
        XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
5694
5.06k
        XXH3_accumulate_512(acc,
5695
5.06k
                            lastStripe,
5696
5.06k
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
5697
5.06k
    }
5698
6.73k
}
5699
5700
/*! @ingroup XXH3_family */
5701
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
5702
8.20k
{
5703
8.20k
    const unsigned char* const secret = (state->extSecret == nullptr) ? state->customSecret : state->extSecret;
5704
8.20k
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
5705
6.73k
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
5706
6.73k
        XXH3_digest_long(acc, state, secret);
5707
6.73k
        return XXH3_mergeAccs(acc,
5708
6.73k
                              secret + XXH_SECRET_MERGEACCS_START,
5709
6.73k
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
5710
6.73k
    }
5711
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
5712
1.47k
    if (state->useSeed)
5713
0
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
5714
1.47k
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
5715
1.47k
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
5716
1.47k
}
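A hedged usage sketch of the 64-bit streaming API above (create, reset, update repeatedly, digest, free). Since XXH3_64bits_digest() works on a local copy of the accumulators, it can also be called mid-stream without disturbing later updates. Helper name and error handling are illustrative:

#include <stddef.h>
#include "xxhash.h"

XXH64_hash_t hash_two_chunks(const void* p1, size_t n1, const void* p2, size_t n2)
{
    XXH64_hash_t h = 0;
    XXH3_state_t* const st = XXH3_createState();
    if (st == NULL) return 0;   /* allocation failure */
    if (XXH3_64bits_reset(st) == XXH_OK
     && XXH3_64bits_update(st, p1, n1) == XXH_OK
     && XXH3_64bits_update(st, p2, n2) == XXH_OK) {
        h = XXH3_64bits_digest(st);   /* same value as a one-shot hash of p1 followed by p2 */
    }
    XXH3_freeState(st);
    return h;
}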
5717
#endif /* !XXH_NO_STREAM */
5718
5719
5720
/* ==========================================
5721
 * XXH3 128 bits (a.k.a XXH128)
5722
 * ==========================================
5723
 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
5724
 * even without counting the significantly larger output size.
5725
 *
5726
 * For example, extra steps are taken to avoid the seed-dependent collisions
5727
 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
5728
 *
5729
 * This strength naturally comes at the cost of some speed, especially on short
5730
 * lengths. Note that longer hashes are about as fast as the 64-bit version
5731
 * due to it using only a slight modification of the 64-bit loop.
5732
 *
5733
 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
5734
 * fast for a _128-bit_ hash on 32-bit (it usually outperforms XXH64).
5735
 */
5736
5737
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
5738
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5739
0
{
5740
    /* A doubled version of 1to3_64b with different constants. */
5741
0
    XXH_ASSERT(input != NULL)
5742
0
    XXH_ASSERT(1 <= len && len <= 3)
5743
0
    XXH_ASSERT(secret != NULL)
5744
    /*
5745
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
5746
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
5747
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
5748
     */
5749
0
    {   xxh_u8 const c1 = input[0];
5750
0
        xxh_u8 const c2 = input[len >> 1];
5751
0
        xxh_u8 const c3 = input[len - 1];
5752
0
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
5753
0
                                | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
5754
0
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
5755
0
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
5756
0
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
5757
0
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
5758
0
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
5759
0
        XXH128_hash_t h128;
5760
0
        h128.low64  = XXH64_avalanche(keyed_lo);
5761
0
        h128.high64 = XXH64_avalanche(keyed_hi);
5762
0
        return h128;
5763
0
    }
5764
0
}
5765
5766
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
5767
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5768
0
{
5769
0
    XXH_ASSERT(input != NULL)
5770
0
    XXH_ASSERT(secret != NULL)
5771
0
    XXH_ASSERT(4 <= len && len <= 8)
5772
0
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
5773
0
    {   xxh_u32 const input_lo = XXH_readLE32(input);
5774
0
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
5775
0
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
5776
0
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
5777
0
        xxh_u64 const keyed = input_64 ^ bitflip;
5778
5779
        /* Shift len to the left to ensure it is even; this avoids even multiplies. */
5780
0
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
5781
5782
0
        m128.high64 += (m128.low64 << 1);
5783
0
        m128.low64  ^= (m128.high64 >> 3);
5784
5785
0
        m128.low64   = XXH_xorshift64(m128.low64, 35);
5786
0
        m128.low64  *= 0x9FB21C651E98DF25ULL;
5787
0
        m128.low64   = XXH_xorshift64(m128.low64, 28);
5788
0
        m128.high64  = XXH3_avalanche(m128.high64);
5789
0
        return m128;
5790
0
    }
5791
0
}
5792
5793
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
5794
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5795
0
{
5796
0
    XXH_ASSERT(input != NULL)
5797
0
    XXH_ASSERT(secret != NULL)
5798
0
    XXH_ASSERT(9 <= len && len <= 16)
5799
0
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
5800
0
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
5801
0
        xxh_u64 const input_lo = XXH_readLE64(input);
5802
0
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
5803
0
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
5804
        /*
5805
         * Put len in the middle of m128 to ensure that the length gets mixed to
5806
         * both the low and high bits in the 128x64 multiply below.
5807
         */
5808
0
        m128.low64 += (xxh_u64)(len - 1) << 54;
5809
0
        input_hi   ^= bitfliph;
5810
        /*
5811
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
5812
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
5813
         * the high 64 bits of m128.
5814
         *
5815
         * The best approach to this operation is different on 32-bit and 64-bit.
5816
         */
5817
0
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
5818
            /*
5819
             * 32-bit optimized version, which is more readable.
5820
             *
5821
             * On 32-bit, it removes an ADC and delays a dependency between the two
5822
             * halves of m128.high64, but it generates an extra mask on 64-bit.
5823
             */
5824
0
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
5825
0
        } else {
5826
            /*
5827
             * 64-bit optimized (albeit more confusing) version.
5828
             *
5829
             * Uses some properties of addition and multiplication to remove the mask:
5830
             *
5831
             * Let:
5832
             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
5833
             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
5834
             *    c = XXH_PRIME32_2
5835
             *
5836
             *    a + (b * c)
5837
             * Inverse Property: x + y - x == y
5838
             *    a + (b * (1 + c - 1))
5839
             * Distributive Property: x * (y + z) == (x * y) + (x * z)
5840
             *    a + (b * 1) + (b * (c - 1))
5841
             * Identity Property: x * 1 == x
5842
             *    a + b + (b * (c - 1))
5843
             *
5844
             * Substitute a, b, and c:
5845
             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5846
             *
5847
             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
5848
             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5849
             */
5850
0
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
5851
0
        }
5852
        /* m128 ^= XXH_swap64(m128 >> 64); */
5853
0
        m128.low64  ^= XXH_swap64(m128.high64);
5854
5855
0
        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
5856
0
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
5857
0
            h128.high64 += m128.high64 * XXH_PRIME64_2;
5858
5859
0
            h128.low64   = XXH3_avalanche(h128.low64);
5860
0
            h128.high64  = XXH3_avalanche(h128.high64);
5861
0
            return h128;
5862
0
    }   }
5863
0
}
5864
5865
/*
5866
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
5867
 */
5868
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
5869
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5870
0
{
5871
0
    XXH_ASSERT(len <= 16)
5872
0
    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
5873
0
        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
5874
0
        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
5875
0
        {   XXH128_hash_t h128;
5876
0
            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
5877
0
            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
5878
0
            h128.low64 = XXH64_avalanche(seed ^ bitflipl);
5879
0
            h128.high64 = XXH64_avalanche( seed ^ bitfliph);
5880
0
            return h128;
5881
0
    }   }
5882
0
}
5883
5884
/*
5885
 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
5886
 */
5887
XXH_FORCE_INLINE XXH128_hash_t
5888
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
5889
              const xxh_u8* secret, XXH64_hash_t seed)
5890
51.6k
{
5891
51.6k
    acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
5892
51.6k
    acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
5893
51.6k
    acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
5894
51.6k
    acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
5895
51.6k
    return acc;
5896
51.6k
}
5897
5898
5899
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
5900
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5901
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5902
                      XXH64_hash_t seed)
5903
25.8k
{
5904
25.8k
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN) (void)secretSize;
5905
25.8k
    XXH_ASSERT(16 < len && len <= 128)
5906
5907
25.8k
    {   XXH128_hash_t acc;
5908
25.8k
        acc.low64 = len * XXH_PRIME64_1;
5909
25.8k
        acc.high64 = 0;
5910
5911
#if XXH_SIZE_OPT >= 1
5912
        {
5913
            /* Smaller, but slightly slower. */
5914
            unsigned int i = (unsigned int)(len - 1) / 32;
5915
            do {
5916
                acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
5917
            } while (i-- != 0);
5918
        }
5919
#else
5920
25.8k
        if (len > 32) {
5921
25.8k
            if (len > 64) {
5922
0
                if (len > 96) {
5923
0
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
5924
0
                }
5925
0
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
5926
0
            }
5927
25.8k
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
5928
25.8k
        }
5929
25.8k
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
5930
25.8k
#endif
5931
25.8k
        {   XXH128_hash_t h128;
5932
25.8k
            h128.low64  = acc.low64 + acc.high64;
5933
25.8k
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
5934
25.8k
                        + (acc.high64   * XXH_PRIME64_4)
5935
25.8k
                        + ((len - seed) * XXH_PRIME64_2);
5936
25.8k
            h128.low64  = XXH3_avalanche(h128.low64);
5937
25.8k
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5938
25.8k
            return h128;
5939
25.8k
        }
5940
25.8k
    }
5941
25.8k
}
5942
5943
XXH_NO_INLINE XXH_PUREF XXH128_hash_t
5944
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5945
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5946
                       XXH64_hash_t seed)
5947
4
{
5948
4
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN) (void)secretSize;
5949
4
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX)
5950
5951
4
    {   XXH128_hash_t acc;
5952
4
        unsigned i;
5953
4
        acc.low64 = len * XXH_PRIME64_1;
5954
4
        acc.high64 = 0;
5955
        /*
5956
         *  We set `i` to offset + 32. We do this so that the unchanged
5957
         * `len` can be used as the upper bound. This reaches a sweet spot
5958
         * where both x86 and aarch64 get simple address generation and good codegen
5959
         * for the loop.
5960
         */
5961
20
        for (i = 32; i < 160; i += 32) {
5962
16
            acc = XXH128_mix32B(acc,
5963
16
                                input  + i - 32,
5964
16
                                input  + i - 16,
5965
16
                                secret + i - 32,
5966
16
                                seed);
5967
16
        }
5968
4
        acc.low64 = XXH3_avalanche(acc.low64);
5969
4
        acc.high64 = XXH3_avalanche(acc.high64);
5970
        /*
5971
         * NB: `i <= len` will duplicate the last 32 bytes if
5972
         * len % 32 was zero. This is an unfortunate necessity to keep
5973
         * the hash result stable.
5974
         */
5975
8
        for (i=160; i <= len; i += 32) {
5976
4
            acc = XXH128_mix32B(acc,
5977
4
                                input + i - 32,
5978
4
                                input + i - 16,
5979
4
                                secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
5980
4
                                seed);
5981
4
        }
5982
        /* last bytes */
5983
4
        acc = XXH128_mix32B(acc,
5984
4
                            input + len - 16,
5985
4
                            input + len - 32,
5986
4
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
5987
4
                            (XXH64_hash_t)0 - seed);
5988
5989
4
        {   XXH128_hash_t h128;
5990
4
            h128.low64  = acc.low64 + acc.high64;
5991
4
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
5992
4
                        + (acc.high64   * XXH_PRIME64_4)
5993
4
                        + ((len - seed) * XXH_PRIME64_2);
5994
4
            h128.low64  = XXH3_avalanche(h128.low64);
5995
4
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5996
4
            return h128;
5997
4
        }
5998
4
    }
5999
4
}
6000
6001
XXH_FORCE_INLINE XXH128_hash_t
6002
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
6003
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
6004
                            XXH3_f_accumulate f_acc,
6005
                            XXH3_f_scrambleAcc f_scramble)
6006
0
{
6007
0
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
6008
6009
0
    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
6010
6011
    /* converge into final hash */
6012
0
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
6013
0
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START)
6014
0
    {   XXH128_hash_t h128;
6015
0
        h128.low64  = XXH3_mergeAccs(acc,
6016
0
                                     secret + XXH_SECRET_MERGEACCS_START,
6017
0
                                     (xxh_u64)len * XXH_PRIME64_1);
6018
0
        h128.high64 = XXH3_mergeAccs(acc,
6019
0
                                     secret + secretSize
6020
0
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
6021
0
                                     ~((xxh_u64)len * XXH_PRIME64_2));
6022
0
        return h128;
6023
0
    }
6024
0
}
6025
6026
/*
6027
 * It's important for performance that XXH3_hashLong() is not inlined.
6028
 */
6029
XXH_NO_INLINE XXH_PUREF XXH128_hash_t
6030
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
6031
                           XXH64_hash_t seed64,
6032
                           const void* XXH_RESTRICT secret, size_t secretLen)
6033
0
{
6034
0
    (void)seed64; (void)secret; (void)secretLen;
6035
0
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
6036
0
                                       XXH3_accumulate, XXH3_scrambleAcc);
6037
0
}
6038
6039
/*
6040
 * It's important for performance to pass @p secretLen (when it's static)
6041
 * to the compiler, so that it can properly optimize the vectorized loop.
6042
 */
6043
XXH_FORCE_INLINE XXH128_hash_t
6044
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
6045
                              XXH64_hash_t seed64,
6046
                              const void* XXH_RESTRICT secret, size_t secretLen)
6047
0
{
6048
0
    (void)seed64;
6049
0
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
6050
0
                                       XXH3_accumulate, XXH3_scrambleAcc);
6051
0
}
6052
6053
XXH_FORCE_INLINE XXH128_hash_t
6054
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
6055
                                XXH64_hash_t seed64,
6056
                                XXH3_f_accumulate f_acc,
6057
                                XXH3_f_scrambleAcc f_scramble,
6058
                                XXH3_f_initCustomSecret f_initSec)
6059
0
{
6060
0
    if (seed64 == 0)
6061
0
        return XXH3_hashLong_128b_internal(input, len,
6062
0
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
6063
0
                                           f_acc, f_scramble);
6064
0
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
6065
0
        f_initSec(secret, seed64);
6066
0
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
6067
0
                                           f_acc, f_scramble);
6068
0
    }
6069
0
}
6070
6071
/*
6072
 * It's important for performance that XXH3_hashLong is not inlined.
6073
 */
6074
XXH_NO_INLINE XXH128_hash_t
6075
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
6076
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
6077
0
{
6078
0
    (void)secret; (void)secretLen;
6079
0
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
6080
0
                XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
6081
0
}
6082
6083
typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
6084
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);
6085
6086
XXH_FORCE_INLINE XXH128_hash_t
6087
XXH3_128bits_internal(const void* input, size_t len,
6088
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
6089
                      XXH3_hashLong128_f f_hl128)
6090
25.8k
{
6091
25.8k
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN)
6092
    /*
6093
     * If an action is to be taken if `secret` conditions are not respected,
6094
     * it should be done here.
6095
     * For now, it's a contract pre-condition.
6096
     * Adding a check and a branch here would cost performance at every hash.
6097
     */
6098
25.8k
    if (len <= 16)
6099
0
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
6100
25.8k
    if (len <= 128)
6101
25.8k
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
6102
4
    if (len <= XXH3_MIDSIZE_MAX)
6103
4
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
6104
0
    return f_hl128(input, len, seed64, secret, secretLen);
6105
4
}
6106
6107
6108
/* ===   Public XXH128 API   === */
6109
6110
/*! @ingroup XXH3_family */
6111
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
6112
4
{
6113
4
    return XXH3_128bits_internal(input, len, 0,
6114
4
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
6115
4
                                 XXH3_hashLong_128b_default);
6116
4
}
6117
6118
/*! @ingroup XXH3_family */
6119
XXH_PUBLIC_API XXH128_hash_t
6120
XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
6121
0
{
6122
0
    return XXH3_128bits_internal(input, len, 0,
6123
0
                                 (const xxh_u8*)secret, secretSize,
6124
0
                                 XXH3_hashLong_128b_withSecret);
6125
0
}
6126
6127
/*! @ingroup XXH3_family */
6128
XXH_PUBLIC_API XXH128_hash_t
6129
XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
6130
25.8k
{
6131
25.8k
    return XXH3_128bits_internal(input, len, seed,
6132
25.8k
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
6133
25.8k
                                 XXH3_hashLong_128b_withSeed);
6134
25.8k
}
6135
6136
/*! @ingroup XXH3_family */
6137
XXH_PUBLIC_API XXH128_hash_t
6138
XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
6139
0
{
6140
0
    if (len <= XXH3_MIDSIZE_MAX)
6141
0
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), nullptr);
6142
0
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
6143
0
}
6144
6145
/*! @ingroup XXH3_family */
6146
XXH_PUBLIC_API XXH128_hash_t
6147
XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
6148
0
{
6149
0
    return XXH3_128bits_withSeed(input, len, seed);
6150
0
}
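A small sketch combining the one-shot 128-bit entry points above with XXH128_isEqual() (defined further below); the helper name is illustrative:

#include <stddef.h>
#include "xxhash.h"

/* Returns 1 when both buffers hash to the same 128-bit value. */
int same_xxh128(const void* a, size_t na, const void* b, size_t nb)
{
    XXH128_hash_t const ha = XXH3_128bits(a, na);
    XXH128_hash_t const hb = XXH3_128bits(b, nb);
    return XXH128_isEqual(ha, hb);
}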
6151
6152
6153
/* ===   XXH3 128-bit streaming   === */
6154
#ifndef XXH_NO_STREAM
6155
/*
6156
 * All initialization and update functions are identical to the 64-bit streaming variant.
6157
 * The only difference is the finalization routine.
6158
 */
6159
6160
/*! @ingroup XXH3_family */
6161
XXH_PUBLIC_API XXH_errorcode
6162
XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
6163
0
{
6164
0
    return XXH3_64bits_reset(statePtr);
6165
0
}
6166
6167
/*! @ingroup XXH3_family */
6168
XXH_PUBLIC_API XXH_errorcode
6169
XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
6170
0
{
6171
0
    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
6172
0
}
6173
6174
/*! @ingroup XXH3_family */
6175
XXH_PUBLIC_API XXH_errorcode
6176
XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
6177
0
{
6178
0
    return XXH3_64bits_reset_withSeed(statePtr, seed);
6179
0
}
6180
6181
/*! @ingroup XXH3_family */
6182
XXH_PUBLIC_API XXH_errorcode
6183
XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
6184
0
{
6185
0
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
6186
0
}
6187
6188
/*! @ingroup XXH3_family */
6189
XXH_PUBLIC_API XXH_errorcode
6190
XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
6191
0
{
6192
0
    return XXH3_update(state, (const xxh_u8*)input, len,
6193
0
                       XXH3_accumulate, XXH3_scrambleAcc);
6194
0
}
6195
6196
/*! @ingroup XXH3_family */
6197
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
6198
0
{
6199
0
    const unsigned char* const secret = (state->extSecret == nullptr) ? state->customSecret : state->extSecret;
6200
0
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
6201
0
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
6202
0
        XXH3_digest_long(acc, state, secret);
6203
0
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START)
6204
0
        {   XXH128_hash_t h128;
6205
0
            h128.low64  = XXH3_mergeAccs(acc,
6206
0
                                         secret + XXH_SECRET_MERGEACCS_START,
6207
0
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
6208
0
            h128.high64 = XXH3_mergeAccs(acc,
6209
0
                                         secret + state->secretLimit + XXH_STRIPE_LEN
6210
0
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
6211
0
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
6212
0
            return h128;
6213
0
        }
6214
0
    }
6215
    /* len <= XXH3_MIDSIZE_MAX : short code */
6216
0
    if (state->seed)
6217
0
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
6218
0
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
6219
0
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
6220
0
}
6221
#endif /* !XXH_NO_STREAM */
6222
/* 128-bit utility functions */
6223
6224
#include <string.h>   /* memcmp, memcpy */
6225
6226
/* return : 1 if equal, 0 if different */
6227
/*! @ingroup XXH3_family */
6228
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
6229
0
{
6230
    /* note : XXH128_hash_t is compact, it has no padding byte */
6231
0
    return !(memcmp(&h1, &h2, sizeof(h1)));
6232
0
}
6233
6234
/* This prototype is compatible with stdlib's qsort().
6235
 * @return : >0 if *h128_1  > *h128_2
6236
 *           <0 if *h128_1  < *h128_2
6237
 *           =0 if *h128_1 == *h128_2  */
6238
/*! @ingroup XXH3_family */
6239
XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
6240
0
{
6241
0
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
6242
0
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
6243
0
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
6244
    /* note : bets that, in most cases, hash values are different */
6245
0
    if (hcmp) return hcmp;
6246
0
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
6247
0
}
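Since XXH128_cmp() is qsort()-compatible, as noted above, an array of 128-bit hashes can be ordered with it directly; a minimal sketch (helper name illustrative):

#include <stdlib.h>
#include "xxhash.h"

static void sort_hashes(XXH128_hash_t* hashes, size_t n)
{
    /* Total order: high64 first, then low64, as implemented by XXH128_cmp(). */
    qsort(hashes, n, sizeof(hashes[0]), XXH128_cmp);
}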
6248
6249
6250
/*======   Canonical representation   ======*/
6251
/*! @ingroup XXH3_family */
6252
XXH_PUBLIC_API void
6253
XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
6254
0
{
6255
0
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
6256
0
    if (XXH_CPU_LITTLE_ENDIAN) {
6257
0
        hash.high64 = XXH_swap64(hash.high64);
6258
0
        hash.low64  = XXH_swap64(hash.low64);
6259
0
    }
6260
0
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
6261
0
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
6262
0
}
6263
6264
/*! @ingroup XXH3_family */
6265
XXH_PUBLIC_API XXH128_hash_t
6266
XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
6267
0
{
6268
0
    XXH128_hash_t h;
6269
0
    h.high64 = XXH_readBE64(src);
6270
0
    h.low64  = XXH_readBE64(src->digest + 8);
6271
0
    return h;
6272
0
}
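A hedged sketch of round-tripping a 128-bit hash through its canonical (big-endian) form, e.g. for storage or transmission; helper names are illustrative:

#include <string.h>
#include "xxhash.h"

void canonical_roundtrip(XXH128_hash_t h, unsigned char out[16])
{
    XXH128_canonical_t canon;
    XXH128_canonicalFromHash(&canon, h);              /* endianness-independent byte order */
    memcpy(out, canon.digest, sizeof(canon.digest));  /* 16 bytes, high word first */

    {   XXH128_canonical_t parsed;
        XXH128_hash_t recovered;
        memcpy(parsed.digest, out, sizeof(parsed.digest));
        recovered = XXH128_hashFromCanonical(&parsed);
        (void)recovered;   /* recovered.low64 == h.low64 && recovered.high64 == h.high64 */
    }
}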
6273
6274
6275
6276
/* ==========================================
6277
 * Secret generators
6278
 * ==========================================
6279
 */
6280
0
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
6281
6282
XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
6283
0
{
6284
0
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
6285
0
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
6286
0
}
6287
6288
/*! @ingroup XXH3_family */
6289
XXH_PUBLIC_API XXH_errorcode
6290
XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
6291
0
{
6292
#if (XXH_DEBUGLEVEL >= 1)
6293
    XXH_ASSERT(secretBuffer != NULL)
6294
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN)
6295
#else
6296
    /* production mode, assert() is disabled */
6297
0
    if (secretBuffer == nullptr) return XXH_ERROR;
6298
0
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
6299
0
#endif
6300
6301
0
    if (customSeedSize == 0) {
6302
0
        customSeed = XXH3_kSecret;
6303
0
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
6304
0
    }
6305
#if (XXH_DEBUGLEVEL >= 1)
6306
    XXH_ASSERT(customSeed != NULL)
6307
#else
6308
0
    if (customSeed == nullptr) return XXH_ERROR;
6309
0
#endif
6310
6311
    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
6312
0
    {   size_t pos = 0;
6313
0
        while (pos < secretSize) {
6314
0
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
6315
0
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
6316
0
            pos += toCopy;
6317
0
    }   }
6318
6319
0
    {   size_t const nbSeg16 = secretSize / 16;
6320
0
        size_t n;
6321
0
        XXH128_canonical_t scrambler;
6322
0
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
6323
0
        for (n=0; n<nbSeg16; n++) {
6324
0
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
6325
0
            XXH3_combine16((char*)secretBuffer + n*16, h128);
6326
0
        }
6327
        /* last segment */
6328
0
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
6329
0
    }
6330
0
    return XXH_OK;
6331
0
}
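A hedged usage sketch of XXH3_generateSecret() above: stretch an arbitrary-length, possibly low-entropy seed into a full secret, then hash with it. The buffer size is the minimum the API accepts; the helper name is illustrative:

#include <stddef.h>
#include "xxhash.h"

XXH64_hash_t hash_with_derived_secret(const void* data, size_t len,
                                      const void* seedMaterial, size_t seedSize)
{
    unsigned char secret[XXH3_SECRET_SIZE_MIN];   /* smallest accepted secret size */
    if (XXH3_generateSecret(secret, sizeof(secret), seedMaterial, seedSize) != XXH_OK)
        return 0;   /* illustrative error handling */
    return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
}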
6332
6333
/*! @ingroup XXH3_family */
6334
XXH_PUBLIC_API void
6335
XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
6336
0
{
6337
0
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
6338
0
    XXH3_initCustomSecret(secret, seed);
6339
0
    XXH_ASSERT(secretBuffer != NULL)
6340
0
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
6341
0
}
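Similarly, XXH3_generateSecret_fromSeed() expands a plain 64-bit seed into a default-size secret once, so the derivation cost is not paid on every hash. A minimal sketch, assuming XXH3_SECRET_DEFAULT_SIZE (192 bytes) is visible, as it is when XXH_STATIC_LINKING_ONLY is defined:

#include "xxhash.h"

void make_secret_from_seed(unsigned char out[XXH3_SECRET_DEFAULT_SIZE], XXH64_hash_t seed)
{
    XXH3_generateSecret_fromSeed(out, seed);
    /* `out` is now a valid secret for the *_withSecret() and *_withSecretandSeed() entry points. */
}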
6342
6343
6344
6345
/* Pop our optimization override from above */
6346
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
6347
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
6348
  && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
6349
#  pragma GCC pop_options
6350
#endif
6351
6352
#endif  /* XXH_NO_LONG_LONG */
6353
6354
#endif  /* XXH_NO_XXH3 */
6355
6356
/*!
6357
 * @}
6358
 */
6359
#endif  /* XXH_IMPLEMENTATION */
6360
6361
6362
#if defined (__cplusplus)
6363
}
6364
#endif