/src/gdal/ogr/ogrsf_frmts/geojson/libjson/linkhash.c
Line  | Count  | Source  | 
1  |  | /*  | 
2  |  |  * $Id: linkhash.c,v 1.4 2006/01/26 02:16:28 mclark Exp $  | 
3  |  |  *  | 
4  |  |  * Copyright (c) 2004, 2005 Metaparadigm Pte. Ltd.  | 
5  |  |  * Michael Clark <michael@metaparadigm.com>  | 
6  |  |  * Copyright (c) 2009 Hewlett-Packard Development Company, L.P.  | 
7  |  |  *  | 
8  |  |  * This library is free software; you can redistribute it and/or modify  | 
9  |  |  * it under the terms of the MIT license. See COPYING for details.  | 
10  |  |  *  | 
11  |  |  */  | 
12  |  |  | 
13  |  | #include "config.h"  | 
14  |  |  | 
15  |  | #include "cpl_port.h"  | 
16  |  |  | 
17  |  | #include <assert.h>  | 
18  |  | #include <limits.h>  | 
19  |  | #include <stdarg.h>  | 
20  |  | #include <stddef.h>  | 
21  |  | #include <stdio.h>  | 
22  |  | #include <stdlib.h>  | 
23  |  | #include <string.h>  | 
24  |  |  | 
25  |  | #ifdef HAVE_ENDIAN_H  | 
26  |  | #include <endian.h> /* attempt to define endianness */  | 
27  |  | #endif  | 
28  |  |  | 
29  |  | #if defined(_MSC_VER) || defined(__MINGW32__)  | 
30  |  | #define WIN32_LEAN_AND_MEAN  | 
31  |  | #include <windows.h> /* Get InterlockedCompareExchange */  | 
32  |  | #endif  | 
33  |  |  | 
34  |  | #include "linkhash.h"  | 
35  |  | #include "random_seed.h"  | 
36  |  |  | 
37  |  | /* hash functions */  | 
38  |  | static unsigned long lh_char_hash(const void *k);  | 
39  |  | static unsigned long lh_perllike_str_hash(const void *k);  | 
40  |  | static lh_hash_fn *char_hash_fn = lh_char_hash;  | 
41  |  |  | 
42  |  | /* comparison functions */  | 
43  |  | int lh_char_equal(const void *k1, const void *k2);  | 
44  |  | int lh_ptr_equal(const void *k1, const void *k2);  | 
45  |  |  | 
46  |  | int json_global_set_string_hash(const int h)  | 
47  | 0  | { | 
48  | 0  |   switch (h)  | 
49  | 0  |   { | 
50  | 0  |   case JSON_C_STR_HASH_DFLT: char_hash_fn = lh_char_hash; break;  | 
51  | 0  |   case JSON_C_STR_HASH_PERLLIKE: char_hash_fn = lh_perllike_str_hash; break;  | 
52  | 0  |   default: return -1;  | 
53  | 0  |   }  | 
54  | 0  |   return 0;  | 
55  | 0  | }  | 
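For orientation, here is a minimal caller-side sketch of the setter above. It assumes the headers shipped with this copy expose the JSON_C_STR_HASH_* constants (they are used unqualified in this file, presumably via linkhash.h); the declaration of json_global_set_string_hash() is written out locally because its public header is not shown here. Note the switch only affects tables created afterwards, since lh_kchar_table_new() captures char_hash_fn at creation time.

#include <stdio.h>
#include "linkhash.h" /* assumed to provide the JSON_C_STR_HASH_* constants used above */

/* Local declaration; in upstream json-c this is part of the public API. */
int json_global_set_string_hash(const int h);

int main(void)
{
    /* Select the Perl-like string hash for all tables created from now on. */
    if (json_global_set_string_hash(JSON_C_STR_HASH_PERLLIKE) != 0)
        fprintf(stderr, "unknown hash id\n");

    /* Restore the default (lookup3-based) string hash. */
    json_global_set_string_hash(JSON_C_STR_HASH_DFLT);
    return 0;
}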
56  |  |  | 
57  |  | static unsigned long lh_ptr_hash(const void *k)  | 
58  | 0  | { | 
59  |  |   /* CAW: refactored to be 64bit nice */  | 
60  | 0  |   return (unsigned long)((((ptrdiff_t)k * LH_PRIME) >> 4) & ULONG_MAX);  | 
61  | 0  | }  | 
62  |  |  | 
63  |  | int lh_ptr_equal(const void *k1, const void *k2)  | 
64  | 0  | { | 
65  | 0  |   return (k1 == k2);  | 
66  | 0  | }  | 
67  |  |  | 
68  |  | /*  | 
69  |  |  * hashlittle from lookup3.c, by Bob Jenkins, May 2006, Public Domain.  | 
70  |  |  * http://burtleburtle.net/bob/c/lookup3.c  | 
71  |  |  * minor modifications to make functions static so no symbols are exported  | 
72  |  |  * minor modifications to compile with -Werror  | 
73  |  |  */  | 
74  |  |  | 
75  |  | /*  | 
76  |  | -------------------------------------------------------------------------------  | 
77  |  | lookup3.c, by Bob Jenkins, May 2006, Public Domain.  | 
78  |  |  | 
79  |  | These are functions for producing 32-bit hashes for hash table lookup.  | 
80  |  | hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()  | 
81  |  | are externally useful functions.  Routines to test the hash are included  | 
82  |  | if SELF_TEST is defined.  You can use this free for any purpose.  It's in  | 
83  |  | the public domain.  It has no warranty.  | 
84  |  |  | 
85  |  | You probably want to use hashlittle().  hashlittle() and hashbig()  | 
86  |  | hash byte arrays.  hashlittle() is faster than hashbig() on  | 
87  |  | little-endian machines.  Intel and AMD are little-endian machines.  | 
88  |  | On second thought, you probably want hashlittle2(), which is identical to  | 
89  |  | hashlittle() except it returns two 32-bit hashes for the price of one.  | 
90  |  | You could implement hashbig2() if you wanted but I haven't bothered here.  | 
91  |  |  | 
92  |  | If you want to find a hash of, say, exactly 7 integers, do  | 
93  |  |   a = i1;  b = i2;  c = i3;  | 
94  |  |   mix(a,b,c);  | 
95  |  |   a += i4; b += i5; c += i6;  | 
96  |  |   mix(a,b,c);  | 
97  |  |   a += i7;  | 
98  |  |   final(a,b,c);  | 
99  |  | then use c as the hash value.  If you have a variable length array of  | 
100  |  | 4-byte integers to hash, use hashword().  If you have a byte array (like  | 
101  |  | a character string), use hashlittle().  If you have several byte arrays, or  | 
102  |  | a mix of things, see the comments above hashlittle().  | 
103  |  |  | 
104  |  | Why is this so big?  I read 12 bytes at a time into 3 4-byte integers,  | 
105  |  | then mix those integers.  This is fast (you can do a lot more thorough  | 
106  |  | mixing with 12*3 instructions on 3 integers than you can with 3 instructions  | 
107  |  | on 1 byte), but shoehorning those bytes into integers efficiently is messy.  | 
108  |  | -------------------------------------------------------------------------------  | 
109  |  | */  | 
110  |  |  | 
111  |  | /*  | 
112  |  |  * My best guess at if you are big-endian or little-endian.  This may  | 
113  |  |  * need adjustment.  | 
114  |  |  */  | 
115  |  | #if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN) || \  | 
116  |  |     (defined(i386) || defined(__i386__) || defined(__i486__) || defined(__i586__) ||          \  | 
117  |  |      defined(__i686__) || defined(vax) || defined(MIPSEL))  | 
118  | 0  | #define HASH_LITTLE_ENDIAN 1  | 
119  |  | #define HASH_BIG_ENDIAN 0  | 
120  |  | #elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && __BYTE_ORDER == __BIG_ENDIAN) || \  | 
121  |  |     (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))  | 
122  |  | #define HASH_LITTLE_ENDIAN 0  | 
123  |  | #define HASH_BIG_ENDIAN 1  | 
124  |  | #else  | 
125  |  | #define HASH_LITTLE_ENDIAN 0  | 
126  |  | #define HASH_BIG_ENDIAN 0  | 
127  |  | #endif  | 
128  |  |  | 
129  |  | #define hashsize(n) ((uint32_t)1 << (n))  | 
130  |  | #define hashmask(n) (hashsize(n) - 1)  | 
131  | 0  | #define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))  | 
132  |  |  | 
133  |  | /*  | 
134  |  | -------------------------------------------------------------------------------  | 
135  |  | mix -- mix 3 32-bit values reversibly.  | 
136  |  |  | 
137  |  | This is reversible, so any information in (a,b,c) before mix() is  | 
138  |  | still in (a,b,c) after mix().  | 
139  |  |  | 
140  |  | If four pairs of (a,b,c) inputs are run through mix(), or through  | 
141  |  | mix() in reverse, there are at least 32 bits of the output that  | 
142  |  | are sometimes the same for one pair and different for another pair.  | 
143  |  | This was tested for:  | 
144  |  | * pairs that differed by one bit, by two bits, in any combination  | 
145  |  |   of top bits of (a,b,c), or in any combination of bottom bits of  | 
146  |  |   (a,b,c).  | 
147  |  | * "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed  | 
148  |  |   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as  | 
149  |  |   is commonly produced by subtraction) look like a single 1-bit  | 
150  |  |   difference.  | 
151  |  | * the base values were pseudorandom, all zero but one bit set, or  | 
152  |  |   all zero plus a counter that starts at zero.  | 
153  |  |  | 
154  |  | Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that  | 
155  |  | satisfy this are  | 
156  |  |     4  6  8 16 19  4  | 
157  |  |     9 15  3 18 27 15  | 
158  |  |    14  9  3  7 17  3  | 
159  |  | Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing  | 
160  |  | for "differ" defined as + with a one-bit base and a two-bit delta.  I  | 
161  |  | used http://burtleburtle.net/bob/hash/avalanche.html to choose  | 
162  |  | the operations, constants, and arrangements of the variables.  | 
163  |  |  | 
164  |  | This does not achieve avalanche.  There are input bits of (a,b,c)  | 
165  |  | that fail to affect some output bits of (a,b,c), especially of a.  The  | 
166  |  | most thoroughly mixed value is c, but it doesn't really even achieve  | 
167  |  | avalanche in c.  | 
168  |  |  | 
169  |  | This allows some parallelism.  Read-after-writes are good at doubling  | 
170  |  | the number of bits affected, so the goal of mixing pulls in the opposite  | 
171  |  | direction as the goal of parallelism.  I did what I could.  Rotates  | 
172  |  | seem to cost as much as shifts on every machine I could lay my hands  | 
173  |  | on, and rotates are much kinder to the top and bottom bits, so I used  | 
174  |  | rotates.  | 
175  |  | -------------------------------------------------------------------------------  | 
176  |  | */  | 
177  |  | /* clang-format off */  | 
178  | 0  | #define mix(a,b,c) \  | 
179  | 0  | { \ | 
180  | 0  |   a -= c;  a ^= rot(c, 4);  c += b; \  | 
181  | 0  |   b -= a;  b ^= rot(a, 6);  a += c; \  | 
182  | 0  |   c -= b;  c ^= rot(b, 8);  b += a; \  | 
183  | 0  |   a -= c;  a ^= rot(c,16);  c += b; \  | 
184  | 0  |   b -= a;  b ^= rot(a,19);  a += c; \  | 
185  | 0  |   c -= b;  c ^= rot(b, 4);  b += a; \  | 
186  | 0  | }  | 
187  |  | /* clang-format on */  | 
188  |  |  | 
189  |  | /*  | 
190  |  | -------------------------------------------------------------------------------  | 
191  |  | final -- final mixing of 3 32-bit values (a,b,c) into c  | 
192  |  |  | 
193  |  | Pairs of (a,b,c) values differing in only a few bits will usually  | 
194  |  | produce values of c that look totally different.  This was tested for  | 
195  |  | * pairs that differed by one bit, by two bits, in any combination  | 
196  |  |   of top bits of (a,b,c), or in any combination of bottom bits of  | 
197  |  |   (a,b,c).  | 
198  |  | * "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed  | 
199  |  |   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as  | 
200  |  |   is commonly produced by subtraction) look like a single 1-bit  | 
201  |  |   difference.  | 
202  |  | * the base values were pseudorandom, all zero but one bit set, or  | 
203  |  |   all zero plus a counter that starts at zero.  | 
204  |  |  | 
205  |  | These constants passed:  | 
206  |  |  14 11 25 16 4 14 24  | 
207  |  |  12 14 25 16 4 14 24  | 
208  |  | and these came close:  | 
209  |  |   4  8 15 26 3 22 24  | 
210  |  |  10  8 15 26 3 22 24  | 
211  |  |  11  8 15 26 3 22 24  | 
212  |  | -------------------------------------------------------------------------------  | 
213  |  | */  | 
214  |  | /* clang-format off */  | 
215  | 0  | #define final(a,b,c) \  | 
216  | 0  | { \ | 
217  | 0  |   c ^= b; c -= rot(b,14); \  | 
218  | 0  |   a ^= c; a -= rot(c,11); \  | 
219  | 0  |   b ^= a; b -= rot(a,25); \  | 
220  | 0  |   c ^= b; c -= rot(b,16); \  | 
221  | 0  |   a ^= c; a -= rot(c,4);  \  | 
222  | 0  |   b ^= a; b -= rot(a,14); \  | 
223  | 0  |   c ^= b; c -= rot(b,24); \  | 
224  | 0  | }  | 
225  |  | /* clang-format on */  | 
226  |  |  | 
227  |  | /*  | 
228  |  | -------------------------------------------------------------------------------  | 
229  |  | hashlittle() -- hash a variable-length key into a 32-bit value  | 
230  |  |   k       : the key (the unaligned variable-length array of bytes)  | 
231  |  |   length  : the length of the key, counting by bytes  | 
232  |  |   initval : can be any 4-byte value  | 
233  |  | Returns a 32-bit value.  Every bit of the key affects every bit of  | 
234  |  | the return value.  Two keys differing by one or two bits will have  | 
235  |  | totally different hash values.  | 
236  |  |  | 
237  |  | The best hash table sizes are powers of 2.  There is no need to do  | 
238  |  | mod a prime (mod is sooo slow!).  If you need less than 32 bits,  | 
239  |  | use a bitmask.  For example, if you need only 10 bits, do  | 
240  |  |   h = (h & hashmask(10));  | 
241  |  | In which case, the hash table should have hashsize(10) elements.  | 
242  |  |  | 
243  |  | If you are hashing n strings (uint8_t **)k, do it like this:  | 
244  |  |   for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);  | 
245  |  |  | 
246  |  | By Bob Jenkins, 2006.  bob_jenkins@burtleburtle.net.  You may use this  | 
247  |  | code any way you wish, private, educational, or commercial.  It's free.  | 
248  |  |  | 
249  |  | Use for hash table lookup, or anything where one collision in 2^^32 is  | 
250  |  | acceptable.  Do NOT use for cryptographic purposes.  | 
251  |  | -------------------------------------------------------------------------------  | 
252  |  | */  | 
253  |  |  | 
254  |  | /* clang-format off */  | 
255  |  |  | 
256  |  | CPL_NOSANITIZE_UNSIGNED_INT_OVERFLOW  | 
257  |  | static uint32_t hashlittle(const void *key, size_t length, uint32_t initval)  | 
258  | 0  | { | 
259  | 0  |   uint32_t a,b,c; /* internal state */  | 
260  | 0  |   union  | 
261  | 0  |   { | 
262  | 0  |     const void *ptr;  | 
263  | 0  |     size_t i;  | 
264  | 0  |   } u; /* needed for Mac Powerbook G4 */  | 
265  |  |  | 
266  |  |   /* Set up the internal state */  | 
267  | 0  |   a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;  | 
268  |  |  | 
269  | 0  |   u.ptr = key;  | 
270  | 0  |   if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { | 
271  | 0  |     const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */  | 
272  |  |  | 
273  |  |     /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */  | 
274  | 0  |     while (length > 12)  | 
275  | 0  |     { | 
276  | 0  |       a += k[0];  | 
277  | 0  |       b += k[1];  | 
278  | 0  |       c += k[2];  | 
279  | 0  |       mix(a,b,c);  | 
280  | 0  |       length -= 12;  | 
281  | 0  |       k += 3;  | 
282  | 0  |     }  | 
283  |  |  | 
284  |  |     /*----------------------------- handle the last (probably partial) block */  | 
285  |  |     /*  | 
286  |  |      * "k[2]&0xffffff" actually reads beyond the end of the string, but  | 
287  |  |      * then masks off the part it's not allowed to read.  Because the  | 
288  |  |      * string is aligned, the masked-off tail is in the same word as the  | 
289  |  |      * rest of the string.  Every machine with memory protection I've seen  | 
290  |  |      * does it on word boundaries, so is OK with this.  But VALGRIND will  | 
291  |  |      * still catch it and complain.  The masking trick does make the hash  | 
292  |  |      * noticeably faster for short strings (like English words).  | 
293  |  |      * AddressSanitizer is similarly picky about overrunning  | 
294  |  |      * the buffer. (http://clang.llvm.org/docs/AddressSanitizer.html)  | 
295  |  |      */  | 
296  |  | #ifdef VALGRIND  | 
297  |  | #define PRECISE_MEMORY_ACCESS 1  | 
298  |  | #elif defined(__SANITIZE_ADDRESS__) /* GCC's ASAN */  | 
299  |  | #define PRECISE_MEMORY_ACCESS 1  | 
300  |  | #elif defined(__SANITIZE_HWADDRESS__) /* GCC's HWASAN */  | 
301  |  | #define PRECISE_MEMORY_ACCESS 1  | 
302  |  | #elif defined(__has_feature)  | 
303  |  | #if __has_feature(address_sanitizer) /* Clang's ASAN */  | 
304  |  | #define PRECISE_MEMORY_ACCESS 1  | 
305  |  | #elif __has_feature(hwaddress_sanitizer) /* Clang's HWASAN */  | 
306  |  | #define PRECISE_MEMORY_ACCESS 1  | 
307  |  | #endif  | 
308  | 0  | #endif  | 
309  | 0  | #ifndef PRECISE_MEMORY_ACCESS  | 
310  |  |  | 
311  | 0  |     switch(length)  | 
312  | 0  |     { | 
313  | 0  |     case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;  | 
314  | 0  |     case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;  | 
315  | 0  |     case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;  | 
316  | 0  |     case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;  | 
317  | 0  |     case 8 : b+=k[1]; a+=k[0]; break;  | 
318  | 0  |     case 7 : b+=k[1]&0xffffff; a+=k[0]; break;  | 
319  | 0  |     case 6 : b+=k[1]&0xffff; a+=k[0]; break;  | 
320  | 0  |     case 5 : b+=k[1]&0xff; a+=k[0]; break;  | 
321  | 0  |     case 4 : a+=k[0]; break;  | 
322  | 0  |     case 3 : a+=k[0]&0xffffff; break;  | 
323  | 0  |     case 2 : a+=k[0]&0xffff; break;  | 
324  | 0  |     case 1 : a+=k[0]&0xff; break;  | 
325  | 0  |     case 0 : return c; /* zero length strings require no mixing */  | 
326  | 0  |     }  | 
327  |  |  | 
328  |  | #else /* make valgrind happy */  | 
329  |  |  | 
330  |  |     const uint8_t  *k8 = (const uint8_t *)k;  | 
331  |  |     switch(length)  | 
332  |  |     { | 
333  |  |     case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;  | 
334  |  |     case 11: c+=((uint32_t)k8[10])<<16;  /* fall through */  | 
335  |  |     case 10: c+=((uint32_t)k8[9])<<8;    /* fall through */  | 
336  |  |     case 9 : c+=k8[8];                   /* fall through */  | 
337  |  |     case 8 : b+=k[1]; a+=k[0]; break;  | 
338  |  |     case 7 : b+=((uint32_t)k8[6])<<16;   /* fall through */  | 
339  |  |     case 6 : b+=((uint32_t)k8[5])<<8;    /* fall through */  | 
340  |  |     case 5 : b+=k8[4];                   /* fall through */  | 
341  |  |     case 4 : a+=k[0]; break;  | 
342  |  |     case 3 : a+=((uint32_t)k8[2])<<16;   /* fall through */  | 
343  |  |     case 2 : a+=((uint32_t)k8[1])<<8;    /* fall through */  | 
344  |  |     case 1 : a+=k8[0]; break;  | 
345  |  |     case 0 : return c;  | 
346  |  |     }  | 
347  |  |  | 
348  |  | #endif /* !valgrind */  | 
349  |  |  | 
350  | 0  |   }  | 
351  | 0  |   else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0))  | 
352  | 0  |   { | 
353  | 0  |     const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */  | 
354  | 0  |     const uint8_t  *k8;  | 
355  |  |  | 
356  |  |     /*--------------- all but last block: aligned reads and different mixing */  | 
357  | 0  |     while (length > 12)  | 
358  | 0  |     { | 
359  | 0  |       a += k[0] + (((uint32_t)k[1])<<16);  | 
360  | 0  |       b += k[2] + (((uint32_t)k[3])<<16);  | 
361  | 0  |       c += k[4] + (((uint32_t)k[5])<<16);  | 
362  | 0  |       mix(a,b,c);  | 
363  | 0  |       length -= 12;  | 
364  | 0  |       k += 6;  | 
365  | 0  |     }  | 
366  |  |  | 
367  |  |     /*----------------------------- handle the last (probably partial) block */  | 
368  | 0  |     k8 = (const uint8_t *)k;  | 
369  | 0  |     switch(length)  | 
370  | 0  |     { | 
371  | 0  |     case 12: c+=k[4]+(((uint32_t)k[5])<<16);  | 
372  | 0  |        b+=k[2]+(((uint32_t)k[3])<<16);  | 
373  | 0  |        a+=k[0]+(((uint32_t)k[1])<<16);  | 
374  | 0  |        break;  | 
375  | 0  |     case 11: c+=((uint32_t)k8[10])<<16;     /* fall through */  | 
376  | 0  |     case 10: c+=k[4];  | 
377  | 0  |        b+=k[2]+(((uint32_t)k[3])<<16);  | 
378  | 0  |        a+=k[0]+(((uint32_t)k[1])<<16);  | 
379  | 0  |        break;  | 
380  | 0  |     case 9 : c+=k8[8];                      /* fall through */  | 
381  | 0  |     case 8 : b+=k[2]+(((uint32_t)k[3])<<16);  | 
382  | 0  |        a+=k[0]+(((uint32_t)k[1])<<16);  | 
383  | 0  |        break;  | 
384  | 0  |     case 7 : b+=((uint32_t)k8[6])<<16;      /* fall through */  | 
385  | 0  |     case 6 : b+=k[2];  | 
386  | 0  |        a+=k[0]+(((uint32_t)k[1])<<16);  | 
387  | 0  |        break;  | 
388  | 0  |     case 5 : b+=k8[4];                      /* fall through */  | 
389  | 0  |     case 4 : a+=k[0]+(((uint32_t)k[1])<<16);  | 
390  | 0  |        break;  | 
391  | 0  |     case 3 : a+=((uint32_t)k8[2])<<16;      /* fall through */  | 
392  | 0  |     case 2 : a+=k[0];  | 
393  | 0  |        break;  | 
394  | 0  |     case 1 : a+=k8[0];  | 
395  | 0  |        break;  | 
396  | 0  |     case 0 : return c;                     /* zero length requires no mixing */  | 
397  | 0  |     }  | 
398  |  |  | 
399  | 0  |   }  | 
400  | 0  |   else  | 
401  | 0  |   { | 
402  |  |     /* need to read the key one byte at a time */  | 
403  | 0  |     const uint8_t *k = (const uint8_t *)key;  | 
404  |  |  | 
405  |  |     /*--------------- all but the last block: affect some 32 bits of (a,b,c) */  | 
406  | 0  |     while (length > 12)  | 
407  | 0  |     { | 
408  | 0  |       a += k[0];  | 
409  | 0  |       a += ((uint32_t)k[1])<<8;  | 
410  | 0  |       a += ((uint32_t)k[2])<<16;  | 
411  | 0  |       a += ((uint32_t)k[3])<<24;  | 
412  | 0  |       b += k[4];  | 
413  | 0  |       b += ((uint32_t)k[5])<<8;  | 
414  | 0  |       b += ((uint32_t)k[6])<<16;  | 
415  | 0  |       b += ((uint32_t)k[7])<<24;  | 
416  | 0  |       c += k[8];  | 
417  | 0  |       c += ((uint32_t)k[9])<<8;  | 
418  | 0  |       c += ((uint32_t)k[10])<<16;  | 
419  | 0  |       c += ((uint32_t)k[11])<<24;  | 
420  | 0  |       mix(a,b,c);  | 
421  | 0  |       length -= 12;  | 
422  | 0  |       k += 12;  | 
423  | 0  |     }  | 
424  |  |  | 
425  |  |     /*-------------------------------- last block: affect all 32 bits of (c) */  | 
426  | 0  |     switch(length) /* all the case statements fall through */  | 
427  | 0  |     { | 
428  | 0  |     case 12: c+=((uint32_t)k[11])<<24; /* FALLTHRU */  | 
429  | 0  |     case 11: c+=((uint32_t)k[10])<<16; /* FALLTHRU */  | 
430  | 0  |     case 10: c+=((uint32_t)k[9])<<8; /* FALLTHRU */  | 
431  | 0  |     case 9 : c+=k[8]; /* FALLTHRU */  | 
432  | 0  |     case 8 : b+=((uint32_t)k[7])<<24; /* FALLTHRU */  | 
433  | 0  |     case 7 : b+=((uint32_t)k[6])<<16; /* FALLTHRU */  | 
434  | 0  |     case 6 : b+=((uint32_t)k[5])<<8; /* FALLTHRU */  | 
435  | 0  |     case 5 : b+=k[4]; /* FALLTHRU */  | 
436  | 0  |     case 4 : a+=((uint32_t)k[3])<<24; /* FALLTHRU */  | 
437  | 0  |     case 3 : a+=((uint32_t)k[2])<<16; /* FALLTHRU */  | 
438  | 0  |     case 2 : a+=((uint32_t)k[1])<<8; /* FALLTHRU */  | 
439  | 0  |     case 1 : a+=k[0];  | 
440  | 0  |        break;  | 
441  | 0  |     case 0 : return c;  | 
442  | 0  |     }  | 
443  | 0  |   }  | 
444  |  |  | 
445  | 0  |   final(a,b,c);  | 
446  | 0  |   return c;  | 
447  | 0  | }  | 
448  |  | /* clang-format on */  | 
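Since hashlittle() is static to this file, the sketch below is a hypothetical helper inside the same translation unit. It simply restates the usage pattern described in the comment block above the function: hash a byte array, then mask the result down to a power-of-two table index with hashmask().

/* Hypothetical helper, same translation unit: bucket index for a C string
   in a table of hashsize(10) == 1024 slots. */
static uint32_t example_bucket_for(const char *s, uint32_t seed)
{
    uint32_t h = hashlittle(s, strlen(s), seed); /* seed may be any 32-bit value */
    return h & hashmask(10);
}

/* Chaining several byte arrays, exactly as the comment suggests:
   for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h); */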
449  |  |  | 
450  |  | /* a simple hash function similar to what perl does for strings.  | 
451  |  |  * for good results, the string should not be excessively large.  | 
452  |  |  */  | 
453  |  | static unsigned long lh_perllike_str_hash(const void *k)  | 
454  | 0  | { | 
455  | 0  |   const char *rkey = (const char *)k;  | 
456  | 0  |   unsigned hashval = 1;  | 
457  | 0  |  | 
458  | 0  |   while (*rkey)  | 
459  | 0  |     hashval = hashval * 33 + *rkey++;  | 
460  | 0  |  | 
461  | 0  |   return hashval;  | 
462  | 0  | }  | 
463  |  |  | 
464  |  | static unsigned long lh_char_hash(const void *k)  | 
465  | 0  | { | 
466  |  | #if defined _MSC_VER || defined __MINGW32__  | 
467  |  | #define RANDOM_SEED_TYPE LONG  | 
468  |  | #else  | 
469  | 0  | #define RANDOM_SEED_TYPE int  | 
470  | 0  | #endif  | 
471  | 0  |   static volatile RANDOM_SEED_TYPE random_seed = -1;  | 
472  | 0  |  | 
473  | 0  |   if (random_seed == -1)  | 
474  | 0  |   { | 
475  | 0  |     RANDOM_SEED_TYPE seed;  | 
476  |  |     /* we can't use -1 as it is the uninitialized sentinel */  | 
477  | 0  |     while ((seed = json_c_get_random_seed()) == -1) {} | 
478  |  | #if SIZEOF_INT == 8 && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8  | 
479  |  | #define USE_SYNC_COMPARE_AND_SWAP 1  | 
480  |  | #endif  | 
481  | 0  | #if SIZEOF_INT == 4 && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4  | 
482  | 0  | #define USE_SYNC_COMPARE_AND_SWAP 1  | 
483  | 0  | #endif  | 
484  |  | #if SIZEOF_INT == 2 && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2  | 
485  |  | #define USE_SYNC_COMPARE_AND_SWAP 1  | 
486  |  | #endif  | 
487  | 0  | #if defined USE_SYNC_COMPARE_AND_SWAP  | 
488  | 0  |     (void)__sync_val_compare_and_swap(&random_seed, -1, seed);  | 
489  |  | #elif defined _MSC_VER || defined __MINGW32__  | 
490  |  |     InterlockedCompareExchange(&random_seed, seed, -1);  | 
491  |  | #else  | 
492  |  |     //#warning "racy random seed initialization if used by multiple threads"  | 
493  |  |     random_seed = seed; /* potentially racy */  | 
494  |  | #endif  | 
495  | 0  |   }  | 
496  | 0  |  | 
497  | 0  |   return hashlittle((const char *)k, strlen((const char *)k), random_seed);  | 
498  | 0  | }  | 
499  |  |  | 
500  |  | int lh_char_equal(const void *k1, const void *k2)  | 
501  | 0  | { | 
502  | 0  |   return (strcmp((const char *)k1, (const char *)k2) == 0);  | 
503  | 0  | }  | 
504  |  |  | 
505  |  | struct lh_table *lh_table_new(int size, lh_entry_free_fn *free_fn, lh_hash_fn *hash_fn,  | 
506  |  |                               lh_equal_fn *equal_fn)  | 
507  | 0  | { | 
508  | 0  |   int i;  | 
509  | 0  |   struct lh_table *t;  | 
510  |  |  | 
511  |  |   /* Allocate space for elements to avoid divisions by zero. */  | 
512  | 0  |   assert(size > 0);  | 
513  | 0  |   t = (struct lh_table *)calloc(1, sizeof(struct lh_table));  | 
514  | 0  |   if (!t)  | 
515  | 0  |     return NULL;  | 
516  |  |  | 
517  | 0  |   t->count = 0;  | 
518  | 0  |   t->size = size;  | 
519  | 0  |   t->table = (struct lh_entry *)calloc(size, sizeof(struct lh_entry));  | 
520  | 0  |   if (!t->table)  | 
521  | 0  |   { | 
522  | 0  |     free(t);  | 
523  | 0  |     return NULL;  | 
524  | 0  |   }  | 
525  | 0  |   t->free_fn = free_fn;  | 
526  | 0  |   t->hash_fn = hash_fn;  | 
527  | 0  |   t->equal_fn = equal_fn;  | 
528  | 0  |   for (i = 0; i < size; i++)  | 
529  | 0  |     t->table[i].k = LH_EMPTY;  | 
530  | 0  |   return t;  | 
531  | 0  | }  | 
532  |  |  | 
533  |  | struct lh_table *lh_kchar_table_new(int size, lh_entry_free_fn *free_fn)  | 
534  | 0  | { | 
535  | 0  |   return lh_table_new(size, free_fn, char_hash_fn, lh_char_equal);  | 
536  | 0  | }  | 
537  |  |  | 
538  |  | struct lh_table *lh_kptr_table_new(int size, lh_entry_free_fn *free_fn)  | 
539  | 0  | { | 
540  | 0  |   return lh_table_new(size, free_fn, lh_ptr_hash, lh_ptr_equal);  | 
541  | 0  | }  | 
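The two constructors above are the usual entry points into this hash table. A minimal usage sketch of the API defined in this file follows (error handling trimmed; with a NULL free_fn the table does not own its keys or values, so the string literals below are fine):

#include <stdio.h>
#include "linkhash.h"

static void linkhash_demo(void)
{
    struct lh_table *t = lh_kchar_table_new(16, NULL); /* string-keyed table */
    void *v = NULL;
    if (!t)
        return;

    lh_table_insert(t, "answer", "42");
    if (lh_table_lookup_ex(t, "answer", &v))
        printf("answer -> %s (count=%d)\n", (const char *)v, lh_table_length(t));

    lh_table_delete(t, "answer");
    lh_table_free(t);
}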
542  |  |  | 
543  |  | int lh_table_resize(struct lh_table *t, int new_size)  | 
544  | 0  | { | 
545  | 0  |   struct lh_table *new_t;  | 
546  | 0  |   struct lh_entry *ent;  | 
547  | 0  |  | 
548  | 0  |   new_t = lh_table_new(new_size, NULL, t->hash_fn, t->equal_fn);  | 
549  | 0  |   if (new_t == NULL)  | 
550  | 0  |     return -1;  | 
551  |  |  | 
552  | 0  |   for (ent = t->head; ent != NULL; ent = ent->next)  | 
553  | 0  |   { | 
554  | 0  |     unsigned long h = lh_get_hash(new_t, ent->k);  | 
555  | 0  |     unsigned int opts = 0;  | 
556  | 0  |     if (ent->k_is_constant)  | 
557  | 0  |       opts = JSON_C_OBJECT_KEY_IS_CONSTANT;  | 
558  | 0  |     if (lh_table_insert_w_hash(new_t, ent->k, ent->v, h, opts) != 0)  | 
559  | 0  |     { | 
560  | 0  |       lh_table_free(new_t);  | 
561  | 0  |       return -1;  | 
562  | 0  |     }  | 
563  | 0  |   }  | 
564  | 0  |   free(t->table);  | 
565  | 0  |   t->table = new_t->table;  | 
566  | 0  |   t->size = new_size;  | 
567  | 0  |   t->head = new_t->head;  | 
568  | 0  |   t->tail = new_t->tail;  | 
569  | 0  |   free(new_t);  | 
570  | 0  |  | 
571  | 0  |   return 0;  | 
572  | 0  | }  | 
573  |  |  | 
574  |  | void lh_table_free(struct lh_table *t)  | 
575  | 0  | { | 
576  | 0  |   struct lh_entry *c;  | 
577  | 0  |   if (t->free_fn)  | 
578  | 0  |   { | 
579  | 0  |     for (c = t->head; c != NULL; c = c->next)  | 
580  | 0  |       t->free_fn(c);  | 
581  | 0  |   }  | 
582  | 0  |   free(t->table);  | 
583  | 0  |   free(t);  | 
584  | 0  | }  | 
585  |  |  | 
586  |  | int lh_table_insert_w_hash(struct lh_table *t, const void *k, const void *v, const unsigned long h,  | 
587  |  |                            const unsigned opts)  | 
588  | 0  | { | 
589  | 0  |   unsigned long n;  | 
590  | 0  |  | 
591  | 0  |   if (t->count >= t->size * LH_LOAD_FACTOR)  | 
592  | 0  |   { | 
593  |  |     /* Avoid signed integer overflow with large tables. */  | 
594  | 0  |     int new_size = (t->size > INT_MAX / 2) ? INT_MAX : (t->size * 2);  | 
595  | 0  |     if (t->size == INT_MAX || lh_table_resize(t, new_size) != 0)  | 
596  | 0  |       return -1;  | 
597  | 0  |   }  | 
598  |  |  | 
599  | 0  |   n = h % t->size;  | 
600  | 0  |  | 
601  | 0  |   while (1)  | 
602  | 0  |   { | 
603  | 0  |     if (t->table[n].k == LH_EMPTY || t->table[n].k == LH_FREED)  | 
604  | 0  |       break;  | 
605  | 0  |     if ((int)++n == t->size)  | 
606  | 0  |       n = 0;  | 
607  | 0  |   }  | 
608  | 0  |  | 
609  | 0  |   t->table[n].k = k;  | 
610  | 0  |   t->table[n].k_is_constant = (opts & JSON_C_OBJECT_KEY_IS_CONSTANT);  | 
611  | 0  |   t->table[n].v = v;  | 
612  | 0  |   t->count++;  | 
613  | 0  |  | 
614  | 0  |   if (t->head == NULL)  | 
615  | 0  |   { | 
616  | 0  |     t->head = t->tail = &t->table[n];  | 
617  | 0  |     t->table[n].next = t->table[n].prev = NULL;  | 
618  | 0  |   }  | 
619  | 0  |   else  | 
620  | 0  |   { | 
621  | 0  |     t->tail->next = &t->table[n];  | 
622  | 0  |     t->table[n].prev = t->tail;  | 
623  | 0  |     t->table[n].next = NULL;  | 
624  | 0  |     t->tail = &t->table[n];  | 
625  | 0  |   }  | 
626  | 0  |  | 
627  | 0  |   return 0;  | 
628  | 0  | }  | 
629  |  | int lh_table_insert(struct lh_table *t, const void *k, const void *v)  | 
630  | 0  | { | 
631  | 0  |   return lh_table_insert_w_hash(t, k, v, lh_get_hash(t, k), 0);  | 
632  | 0  | }  | 
633  |  |  | 
634  |  | struct lh_entry *lh_table_lookup_entry_w_hash(struct lh_table *t, const void *k,  | 
635  |  |                                               const unsigned long h)  | 
636  | 0  | { | 
637  | 0  |   unsigned long n = h % t->size;  | 
638  | 0  |   int count = 0;  | 
639  | 0  |  | 
640  | 0  |   while (count < t->size)  | 
641  | 0  |   { | 
642  | 0  |     if (t->table[n].k == LH_EMPTY)  | 
643  | 0  |       return NULL;  | 
644  | 0  |     if (t->table[n].k != LH_FREED && t->equal_fn(t->table[n].k, k))  | 
645  | 0  |       return &t->table[n];  | 
646  | 0  |     if ((int)++n == t->size)  | 
647  | 0  |       n = 0;  | 
648  | 0  |     count++;  | 
649  | 0  |   }  | 
650  | 0  |   return NULL;  | 
651  | 0  | }  | 
652  |  |  | 
653  |  | struct lh_entry *lh_table_lookup_entry(struct lh_table *t, const void *k)  | 
654  | 0  | { | 
655  | 0  |   return lh_table_lookup_entry_w_hash(t, k, lh_get_hash(t, k));  | 
656  | 0  | }  | 
657  |  |  | 
658  |  | json_bool lh_table_lookup_ex(struct lh_table *t, const void *k, void **v)  | 
659  | 0  | { | 
660  | 0  |   struct lh_entry *e = lh_table_lookup_entry(t, k);  | 
661  | 0  |   if (e != NULL)  | 
662  | 0  |   { | 
663  | 0  |     if (v != NULL)  | 
664  | 0  |       *v = lh_entry_v(e);  | 
665  | 0  |     return 1; /* key found */  | 
666  | 0  |   }  | 
667  | 0  |   if (v != NULL)  | 
668  | 0  |     *v = NULL;  | 
669  | 0  |   return 0; /* key not found */  | 
670  | 0  | }  | 
671  |  |  | 
672  |  | int lh_table_delete_entry(struct lh_table *t, struct lh_entry *e)  | 
673  | 0  | { | 
674  |  |   /* CAW: fixed to be 64bit nice, still need the crazy negative case... */  | 
675  | 0  |   ptrdiff_t n = (ptrdiff_t)(e - t->table);  | 
676  |  |  | 
677  |  |   /* CAW: this is bad, really bad, maybe stack goes other direction on this machine... */  | 
678  | 0  |   if (n < 0)  | 
679  | 0  |   { | 
680  | 0  |     return -2;  | 
681  | 0  |   }  | 
682  |  |  | 
683  | 0  |   if (t->table[n].k == LH_EMPTY || t->table[n].k == LH_FREED)  | 
684  | 0  |     return -1;  | 
685  | 0  |   t->count--;  | 
686  | 0  |   if (t->free_fn)  | 
687  | 0  |     t->free_fn(e);  | 
688  | 0  |   t->table[n].v = NULL;  | 
689  | 0  |   t->table[n].k = LH_FREED;  | 
690  | 0  |   if (t->tail == &t->table[n] && t->head == &t->table[n])  | 
691  | 0  |   { | 
692  | 0  |     t->head = t->tail = NULL;  | 
693  | 0  |   }  | 
694  | 0  |   else if (t->head == &t->table[n])  | 
695  | 0  |   { | 
696  | 0  |     t->head->next->prev = NULL;  | 
697  | 0  |     t->head = t->head->next;  | 
698  | 0  |   }  | 
699  | 0  |   else if (t->tail == &t->table[n])  | 
700  | 0  |   { | 
701  | 0  |     t->tail->prev->next = NULL;  | 
702  | 0  |     t->tail = t->tail->prev;  | 
703  | 0  |   }  | 
704  | 0  |   else  | 
705  | 0  |   { | 
706  | 0  |     t->table[n].prev->next = t->table[n].next;  | 
707  | 0  |     t->table[n].next->prev = t->table[n].prev;  | 
708  | 0  |   }  | 
709  | 0  |   t->table[n].next = t->table[n].prev = NULL;  | 
710  | 0  |   return 0;  | 
711  | 0  | }  | 
712  |  |  | 
713  |  | int lh_table_delete(struct lh_table *t, const void *k)  | 
714  | 0  | { | 
715  | 0  |   struct lh_entry *e = lh_table_lookup_entry(t, k);  | 
716  | 0  |   if (!e)  | 
717  | 0  |     return -1;  | 
718  | 0  |   return lh_table_delete_entry(t, e);  | 
719  | 0  | }  | 
720  |  |  | 
721  |  | int lh_table_length(struct lh_table *t)  | 
722  | 0  | { | 
723  | 0  |   return t->count;  | 
724  | 0  | }  |