/src/alembic/lib/Alembic/Util/SpookyV2.h
//-*****************************************************************************
//
// Copyright (c) 2013-2015,
// Sony Pictures Imageworks Inc. and
// Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//-*****************************************************************************

//
// SpookyHash: a 128-bit noncryptographic hash function
// By Bob Jenkins, public domain
//   Oct 31 2010: alpha, framework + SpookyHash::Mix appears right
//   Oct 31 2011: alpha again, Mix only good to 2^^69 but rest appears right
//   Dec 31 2011: beta, improved Mix, tested it for 2-bit deltas
//   Feb  2 2012: production, same bits as beta
//   Feb  5 2012: adjusted definitions of uint* to be more portable
//   Mar 30 2012: 3 bytes/cycle, not 4. Alpha was 4 but wasn't thorough enough.
//   August 5 2012: SpookyV2 (different results)
//
// Up to 3 bytes/cycle for long messages. Reasonably fast for short messages.
// All 1 or 2 bit deltas achieve avalanche within 1% bias per output bit.
//
// This was developed for and tested on 64-bit x86-compatible processors.
// It assumes the processor is little-endian. There is a macro
// controlling whether unaligned reads are allowed (by default they are).
// This should be an equally good hash on big-endian machines, but it will
// compute different results on them than on little-endian machines.
//
// Google's CityHash has similar specs to SpookyHash, and CityHash is faster
// on new Intel boxes. MD4 and MD5 also have similar specs, but they are orders
// of magnitude slower. CRCs are two or more times slower, but unlike
// SpookyHash, they have nice math for combining the CRCs of pieces to form
// the CRCs of wholes. There are also cryptographic hashes, but those are even
// slower than MD5.
//

#ifndef Alembic_Util_SpookyV2_h
#define Alembic_Util_SpookyV2_h

#include <Alembic/Util/Export.h>
#include <Alembic/Util/PlainOldDataType.h>

namespace Alembic {
namespace Util {
namespace ALEMBIC_VERSION_NS {

class ALEMBIC_EXPORT SpookyHash
{
public:
    //
    // SpookyHash: hash a single message in one call, produce 128-bit output
    //
    static void Hash128(
        const void *message,  // message to hash
        size_t length,        // length of message in bytes
        uint64_t *hash1,      // in/out: in seed 1, out hash value 1
        uint64_t *hash2);     // in/out: in seed 2, out hash value 2

    //
    // Hash64: hash a single message in one call, return 64-bit output
    //
    static uint64_t Hash64(
        const void *message,  // message to hash
        size_t length,        // length of message in bytes
        uint64_t seed)        // seed
    {
        uint64_t hash1 = seed;
        Hash128(message, length, &hash1, &seed);
        return hash1;
    }

    //
    // Hash32: hash a single message in one call, produce 32-bit output
    //
    static uint32_t Hash32(
        const void *message,  // message to hash
        size_t length,        // length of message in bytes
        uint32_t seed)        // seed
    {
        uint64_t hash1 = seed, hash2 = seed;
        Hash128(message, length, &hash1, &hash2);
        return (uint32_t)hash1;
    }
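
    //
    // Example (illustrative sketch, not part of the original header): one-shot
    // hashing with the convenience entry points above. The buffer and seed
    // values below are hypothetical.
    //
    //     const char buf[] = "example message";
    //     uint64_t h1 = 1, h2 = 2;                        // seeds in, hash out
    //     SpookyHash::Hash128(buf, sizeof(buf), &h1, &h2);
    //     uint64_t h64 = SpookyHash::Hash64(buf, sizeof(buf), 1);
    //     uint32_t h32 = SpookyHash::Hash32(buf, sizeof(buf), 1);
    //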

    //
    // Init: initialize the context of a SpookyHash
    //
    void Init(
        uint64_t seed1,       // any 64-bit value will do, including 0
        uint64_t seed2);      // different seeds produce independent hashes

    //
    // Update: add a piece of a message to a SpookyHash state
    //
    void Update(
        const void *message,  // message fragment
        size_t length);       // length of message fragment in bytes


    //
    // Final: compute the hash for the current SpookyHash state
    //
    // This does not modify the state; you can keep updating it afterward
    //
    // The result is the same as if SpookyHash() had been called with
    // all the pieces concatenated into one message.
    //
    void Final(
        uint64_t *hash1,      // out only: first 64 bits of hash value.
        uint64_t *hash2);     // out only: second 64 bits of hash value.
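
    //
    // Example (illustrative sketch): incremental hashing with Init/Update/Final.
    // The fragment buffers below are hypothetical; Final() reports the same
    // result as hashing the concatenated fragments in a single Hash128 call.
    //
    //     SpookyHash state;
    //     state.Init(1, 2);                  // two 64-bit seeds
    //     state.Update(partA, lengthA);      // any number of fragments
    //     state.Update(partB, lengthB);
    //     uint64_t h1, h2;
    //     state.Final(&h1, &h2);             // state can keep being updated
    //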

    //
    // left rotate a 64-bit value by k bits
    //
    static inline uint64_t Rot64(uint64_t x, int k)
    {
        return (x << k) | (x >> (64 - k));
    }

    //
    // This is used if the input is 96 bytes long or longer.
    //
    // The internal state is fully overwritten every 96 bytes.
    // Every input bit appears to cause at least 128 bits of entropy
    // before 96 other bytes are combined, when run forward or backward
    //   For every input bit,
    //   Two inputs differing in just that input bit
    //   Where "differ" means xor or subtraction
    //   And the base value is random
    //   When run forward or backwards one Mix
    // I tried 3 pairs of each; they all differed by at least 212 bits.
    //
    static inline void Mix(
        const uint64_t *data,
        uint64_t &s0, uint64_t &s1, uint64_t &s2, uint64_t &s3,
        uint64_t &s4, uint64_t &s5, uint64_t &s6, uint64_t &s7,
        uint64_t &s8, uint64_t &s9, uint64_t &s10,uint64_t &s11)
    {
        s0 += data[0];   s2 ^= s10;  s11 ^= s0;  s0 = Rot64(s0,11);   s11 += s1;
        s1 += data[1];   s3 ^= s11;  s0 ^= s1;   s1 = Rot64(s1,32);   s0 += s2;
        s2 += data[2];   s4 ^= s0;   s1 ^= s2;   s2 = Rot64(s2,43);   s1 += s3;
        s3 += data[3];   s5 ^= s1;   s2 ^= s3;   s3 = Rot64(s3,31);   s2 += s4;
        s4 += data[4];   s6 ^= s2;   s3 ^= s4;   s4 = Rot64(s4,17);   s3 += s5;
        s5 += data[5];   s7 ^= s3;   s4 ^= s5;   s5 = Rot64(s5,28);   s4 += s6;
        s6 += data[6];   s8 ^= s4;   s5 ^= s6;   s6 = Rot64(s6,39);   s5 += s7;
        s7 += data[7];   s9 ^= s5;   s6 ^= s7;   s7 = Rot64(s7,57);   s6 += s8;
        s8 += data[8];   s10 ^= s6;  s7 ^= s8;   s8 = Rot64(s8,55);   s7 += s9;
        s9 += data[9];   s11 ^= s7;  s8 ^= s9;   s9 = Rot64(s9,54);   s8 += s10;
        s10 += data[10]; s0 ^= s8;   s9 ^= s10;  s10 = Rot64(s10,22); s9 += s11;
        s11 += data[11]; s1 ^= s9;   s10 ^= s11; s11 = Rot64(s11,46); s10 += s0;
    }
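
    //
    // Sketch (assumption, not the actual implementation): the long-message
    // path conceptually drives Mix over consecutive sc_blockSize (96-byte)
    // blocks of input, twelve uint64_t words at a time:
    //
    //     const uint64_t *block = static_cast<const uint64_t *>(message);
    //     while (remaining >= sc_blockSize)
    //     {
    //         Mix(block, s0,s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11);
    //         block += sc_numVars;
    //         remaining -= sc_blockSize;
    //     }
    //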

    //
    // Mix all 12 inputs together so that h0, h1 are a hash of them all.
    //
    // For two inputs differing in just the input bits
    // Where "differ" means xor or subtraction
    // And the base value is random, or a counting value starting at that bit
    // The final result will have each bit of h0, h1 flip
    // For every input bit,
    // with probability 50 +- .3%
    // For every pair of input bits,
    // with probability 50 +- 3%
    //
    // This does not rely on the last Mix() call having already mixed some.
    // Two iterations was almost good enough for a 64-bit result, but a
    // 128-bit result is reported, so End() does three iterations.
    //
    static inline void EndPartial(
        uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3,
        uint64_t &h4, uint64_t &h5, uint64_t &h6, uint64_t &h7,
        uint64_t &h8, uint64_t &h9, uint64_t &h10,uint64_t &h11)
    {
        h11+= h1;   h2 ^= h11;  h1 = Rot64(h1,44);
        h0 += h2;   h3 ^= h0;   h2 = Rot64(h2,15);
        h1 += h3;   h4 ^= h1;   h3 = Rot64(h3,34);
        h2 += h4;   h5 ^= h2;   h4 = Rot64(h4,21);
        h3 += h5;   h6 ^= h3;   h5 = Rot64(h5,38);
        h4 += h6;   h7 ^= h4;   h6 = Rot64(h6,33);
        h5 += h7;   h8 ^= h5;   h7 = Rot64(h7,10);
        h6 += h8;   h9 ^= h6;   h8 = Rot64(h8,13);
        h7 += h9;   h10^= h7;   h9 = Rot64(h9,38);
        h8 += h10;  h11^= h8;   h10= Rot64(h10,53);
        h9 += h11;  h0 ^= h9;   h11= Rot64(h11,42);
        h10+= h0;   h1 ^= h10;  h0 = Rot64(h0,54);
    }

    static inline void End(
        const uint64_t *data,
        uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3,
        uint64_t &h4, uint64_t &h5, uint64_t &h6, uint64_t &h7,
        uint64_t &h8, uint64_t &h9, uint64_t &h10,uint64_t &h11)
    {
        h0 += data[0];   h1 += data[1];   h2 += data[2];   h3 += data[3];
        h4 += data[4];   h5 += data[5];   h6 += data[6];   h7 += data[7];
        h8 += data[8];   h9 += data[9];   h10 += data[10]; h11 += data[11];
        EndPartial(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
        EndPartial(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
        EndPartial(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
    }

    //
    // The goal is for each bit of the input to expand into 128 bits of
    // apparent entropy before it is fully overwritten.
    // n trials both set and cleared at least m bits of h0 h1 h2 h3
    //   n: 2   m: 29
    //   n: 3   m: 46
    //   n: 4   m: 57
    //   n: 5   m: 107
    //   n: 6   m: 146
    //   n: 7   m: 152
    // when run forwards or backwards
    // for all 1-bit and 2-bit diffs
    // with diffs defined by either xor or subtraction
    // with a base of all zeros plus a counter, or plus another bit, or random
    //
    static inline void ShortMix(
        uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3)
    {
        h2 = Rot64(h2,50);  h2 += h3;  h0 ^= h2;
        h3 = Rot64(h3,52);  h3 += h0;  h1 ^= h3;
        h0 = Rot64(h0,30);  h0 += h1;  h2 ^= h0;
        h1 = Rot64(h1,41);  h1 += h2;  h3 ^= h1;
        h2 = Rot64(h2,54);  h2 += h3;  h0 ^= h2;
        h3 = Rot64(h3,48);  h3 += h0;  h1 ^= h3;
        h0 = Rot64(h0,38);  h0 += h1;  h2 ^= h0;
        h1 = Rot64(h1,37);  h1 += h2;  h3 ^= h1;
        h2 = Rot64(h2,62);  h2 += h3;  h0 ^= h2;
        h3 = Rot64(h3,34);  h3 += h0;  h1 ^= h3;
        h0 = Rot64(h0,5);   h0 += h1;  h2 ^= h0;
        h1 = Rot64(h1,36);  h1 += h2;  h3 ^= h1;
    }

    //
    // Mix all 4 inputs together so that h0, h1 are a hash of them all.
    //
    // For two inputs differing in just the input bits
    // Where "differ" means xor or subtraction
    // And the base value is random, or a counting value starting at that bit
    // The final result will have each bit of h0, h1 flip
    // For every input bit,
    // with probability 50 +- .3% (it is probably better than that)
    // For every pair of input bits,
    // with probability 50 +- .75% (the worst case is approximately that)
    //
    static inline void ShortEnd(
        uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3)
    {
        h3 ^= h2;  h2 = Rot64(h2,15);  h3 += h2;
        h0 ^= h3;  h3 = Rot64(h3,52);  h0 += h3;
        h1 ^= h0;  h0 = Rot64(h0,26);  h1 += h0;
        h2 ^= h1;  h1 = Rot64(h1,51);  h2 += h1;
        h3 ^= h2;  h2 = Rot64(h2,28);  h3 += h2;
        h0 ^= h3;  h3 = Rot64(h3,9);   h0 += h3;
        h1 ^= h0;  h0 = Rot64(h0,47);  h1 += h0;
        h2 ^= h1;  h1 = Rot64(h1,54);  h2 += h1;
        h3 ^= h2;  h2 = Rot64(h2,32);  h3 += h2;
        h0 ^= h3;  h3 = Rot64(h3,25);  h0 += h3;
        h1 ^= h0;  h0 = Rot64(h0,63);  h1 += h0;
    }

private:

    //
    // Short is used for messages under 192 bytes in length
    // Short has a low startup cost, the normal mode is good for long
    // keys, the cost crossover is at about 192 bytes. The two modes were
    // held to the same quality bar.
    //
    static void Short(
        const void *message,  // message (array of bytes, not necessarily aligned)
        size_t length,        // length of message (in bytes)
        uint64_t *hash1,      // in/out: in the seed, out the hash value
        uint64_t *hash2);     // in/out: in the seed, out the hash value

    // number of uint64's in internal state
    static const size_t sc_numVars = 12;

    // size of the internal state
    static const size_t sc_blockSize = sc_numVars*8;

    // size of buffer of unhashed data, in bytes
    static const size_t sc_bufSize = 2*sc_blockSize;

    //
    // sc_const: a constant which:
    //  * is not zero
    //  * is odd
    //  * is a not-very-regular mix of 1's and 0's
    //  * does not need any other special mathematical properties
    //
    static const uint64_t sc_const = 0xdeadbeefdeadbeefLL;

    uint64_t m_data[2*sc_numVars];  // unhashed data, for partial messages
    uint64_t m_state[sc_numVars];   // internal state of the hash
    size_t m_length;                // total length of the input so far
    uint8_t m_remainder;            // length of unhashed data stashed in m_data
};


} // End namespace ALEMBIC_VERSION_NS

using namespace ALEMBIC_VERSION_NS;

} // End namespace Util
} // End namespace Alembic

#endif