Coverage Report

Created: 2025-07-01 06:50

/src/openvswitch/lib/hash.c
Every executable line in this file recorded an execution count of 0, i.e. the file is entirely uncovered. The portable hash_bytes128() branch carries no counts at all because it is compiled out on this build (which, per the #if below, evidently targeted x86_64 or aarch64). The source follows.
/*
 * Copyright (c) 2008, 2009, 2010, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include "hash.h"
#include <string.h>
#include "unaligned.h"

/* Returns the hash of 'a', 'b', and 'c'. */
uint32_t
hash_3words(uint32_t a, uint32_t b, uint32_t c)
{
    /* The second argument to hash_finish() is the input size in bytes:
     * three 32-bit words make 12. */
    return hash_finish(hash_add(hash_add(hash_add(a, 0), b), c), 12);
}

/* Returns the hash of the 'n' bytes at 'p', starting from 'basis'. */
uint32_t
hash_bytes(const void *p_, size_t n, uint32_t basis)
{
    const uint8_t *p = p_;
    size_t orig_n = n;
    uint32_t hash;

    hash = basis;
    while (n >= 4) {
        hash = hash_add(hash,
                        get_unaligned_u32(ALIGNED_CAST(const uint32_t *, p)));
        n -= 4;
        p += 4;
    }

    if (n) {
        uint32_t tmp = 0;

        memcpy(&tmp, p, n);
        hash = hash_add(hash, tmp);
    }

    return hash_finish(hash, orig_n);
}
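
As an aside, a minimal sketch of how hash_bytes() is typically called; the struct and its fields are hypothetical, not from OVS:

    #include "hash.h"

    /* Hypothetical fixed-size key.  Because hash_bytes() reads raw
     * bytes, any struct padding must be zeroed for stable results. */
    struct flow_key {
        uint32_t src_ip;
        uint32_t dst_ip;
        uint16_t src_port;
        uint16_t dst_port;
    };

    static uint32_t
    flow_key_hash(const struct flow_key *key, uint32_t basis)
    {
        return hash_bytes(key, sizeof *key, basis);
    }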

/* Returns the hash of 'x', starting from 'basis'. */
uint32_t
hash_double(double x, uint32_t basis)
{
    uint32_t value[2];
    BUILD_ASSERT_DECL(sizeof x == sizeof value);

    memcpy(value, &x, sizeof value);
    return hash_3words(value[0], value[1], basis);
}

/* Out-of-line versions of the corresponding inline functions in hash.h. */
uint32_t
hash_words__(const uint32_t *p, size_t n_words, uint32_t basis)
{
    return hash_words_inline(p, n_words, basis);
}

uint32_t
hash_words64__(const uint64_t *p, size_t n_words, uint32_t basis)
{
    return hash_words64_inline(p, n_words, basis);
}

#if !(defined(__x86_64__)) && !(defined(__aarch64__))
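/* Portable implementation of the 128-bit hash; the constants and mixing
 * steps follow the 32-bit (x86_128) variant of MurmurHash3. */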
void
hash_bytes128(const void *p_, size_t len, uint32_t basis, ovs_u128 *out)
{
    const uint32_t c1 = 0x239b961b;
    const uint32_t c2 = 0xab0e9789;
    const uint32_t c3 = 0x38b34ae5;
    const uint32_t c4 = 0xa1e38b93;
    const uint8_t *tail, *data = (const uint8_t *)p_;
    const uint32_t *blocks = (const uint32_t *)p_;
    const int nblocks = len / 16;
    uint32_t h1 = basis;
    uint32_t h2 = basis;
    uint32_t h3 = basis;
    uint32_t h4 = basis;

    /* Body */
    for (int i = 0; i < nblocks; i++) {
        uint32_t k1 = get_unaligned_u32(&blocks[i * 4 + 0]);
        uint32_t k2 = get_unaligned_u32(&blocks[i * 4 + 1]);
        uint32_t k3 = get_unaligned_u32(&blocks[i * 4 + 2]);
        uint32_t k4 = get_unaligned_u32(&blocks[i * 4 + 3]);

        k1 *= c1;
        k1 = hash_rot(k1, 15);
        k1 *= c2;
        h1 ^= k1;

        h1 = hash_rot(h1, 19);
        h1 += h2;
        h1 = h1 * 5 + 0x561ccd1b;

        k2 *= c2;
        k2 = hash_rot(k2, 16);
        k2 *= c3;
        h2 ^= k2;

        h2 = hash_rot(h2, 17);
        h2 += h3;
        h2 = h2 * 5 + 0x0bcaa747;

        k3 *= c3;
        k3 = hash_rot(k3, 17);
        k3 *= c4;
        h3 ^= k3;

        h3 = hash_rot(h3, 15);
        h3 += h4;
        h3 = h3 * 5 + 0x96cd1c35;

        k4 *= c4;
        k4 = hash_rot(k4, 18);
        k4 *= c1;
        h4 ^= k4;

        h4 = hash_rot(h4, 13);
        h4 += h1;
        h4 = h4 * 5 + 0x32ac3b17;
    }

    /* Tail */
    uint32_t k1, k2, k3, k4;
    k1 = k2 = k3 = k4 = 0;
    tail = data + nblocks * 16;
    switch (len & 15) {
    case 15:
        k4 ^= tail[14] << 16;
        /* fall through */
    case 14:
        k4 ^= tail[13] << 8;
        /* fall through */
    case 13:
        k4 ^= tail[12] << 0;
        k4 *= c4;
        k4 = hash_rot(k4, 18);
        k4 *= c1;
        h4 ^= k4;
        /* fall through */

    case 12:
        k3 ^= tail[11] << 24;
        /* fall through */
    case 11:
        k3 ^= tail[10] << 16;
        /* fall through */
    case 10:
        k3 ^= tail[9] << 8;
        /* fall through */
    case 9:
        k3 ^= tail[8] << 0;
        k3 *= c3;
        k3 = hash_rot(k3, 17);
        k3 *= c4;
        h3 ^= k3;
        /* fall through */

    case 8:
        k2 ^= tail[7] << 24;
        /* fall through */
    case 7:
        k2 ^= tail[6] << 16;
        /* fall through */
    case 6:
        k2 ^= tail[5] << 8;
        /* fall through */
    case 5:
        k2 ^= tail[4] << 0;
        k2 *= c2;
        k2 = hash_rot(k2, 16);
        k2 *= c3;
        h2 ^= k2;
        /* fall through */

    case 4:
        k1 ^= tail[3] << 24;
        /* fall through */
    case 3:
        k1 ^= tail[2] << 16;
        /* fall through */
    case 2:
        k1 ^= tail[1] << 8;
        /* fall through */
    case 1:
        k1 ^= tail[0] << 0;
        k1 *= c1;
        k1 = hash_rot(k1, 15);
        k1 *= c2;
        h1 ^= k1;
    };

    /* Finalization */
    h1 ^= len;
    h2 ^= len;
    h3 ^= len;
    h4 ^= len;

    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;

    h1 = mhash_finish(h1);
    h2 = mhash_finish(h2);
    h3 = mhash_finish(h3);
    h4 = mhash_finish(h4);

    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;

    out->u32[0] = h1;
    out->u32[1] = h2;
    out->u32[2] = h3;
    out->u32[3] = h4;
}

#else /* __x86_64__ or __aarch64__ */

static inline uint64_t
hash_rot64(uint64_t x, int8_t r)
{
    return (x << r) | (x >> (64 - r));
}
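
/* 64-bit finalization mixer, as in MurmurHash3. */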
static inline uint64_t
fmix64(uint64_t k)
{
    k ^= k >> 33;
    k *= 0xff51afd7ed558ccdULL;
    k ^= k >> 33;
    k *= 0xc4ceb9fe1a85ec53ULL;
    k ^= k >> 33;

    return k;
}
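
/* 128-bit hash for 64-bit targets; follows the 64-bit (x64_128) variant
 * of MurmurHash3. */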
void
hash_bytes128(const void *p_, size_t len, uint32_t basis, ovs_u128 *out)
{
    const uint64_t c1 = 0x87c37b91114253d5ULL;
    const uint64_t c2 = 0x4cf5ad432745937fULL;
    const uint8_t *tail, *data = (const uint8_t *)p_;
    const uint64_t *blocks = (const uint64_t *)p_;
    const int nblocks = len / 16;
    uint64_t h1 = basis;
    uint64_t h2 = basis;
    uint64_t k1, k2;

    /* Body */
    for (int i = 0; i < nblocks; i++) {
        k1 = get_unaligned_u64(&blocks[i * 2 + 0]);
        k2 = get_unaligned_u64(&blocks[i * 2 + 1]);

        k1 *= c1;
        k1 = hash_rot64(k1, 31);
        k1 *= c2;
        h1 ^= k1;

        h1 = hash_rot64(h1, 27);
        h1 += h2;
        h1 = h1 * 5 + 0x52dce729;

        k2 *= c2;
        k2 = hash_rot64(k2, 33);
        k2 *= c1;
        h2 ^= k2;

        h2 = hash_rot64(h2, 31);
        h2 += h1;
        h2 = h2 * 5 + 0x38495ab5;
    }

    /* Tail */
    k1 = 0;
    k2 = 0;
    tail = data + nblocks * 16;
    switch (len & 15) {
    case 15:
        k2 ^= ((uint64_t) tail[14]) << 48;
        /* fall through */
    case 14:
        k2 ^= ((uint64_t) tail[13]) << 40;
        /* fall through */
    case 13:
        k2 ^= ((uint64_t) tail[12]) << 32;
        /* fall through */
    case 12:
        k2 ^= ((uint64_t) tail[11]) << 24;
        /* fall through */
    case 11:
        k2 ^= ((uint64_t) tail[10]) << 16;
        /* fall through */
    case 10:
        k2 ^= ((uint64_t) tail[9]) << 8;
        /* fall through */
    case 9:
        k2 ^= ((uint64_t) tail[8]) << 0;
        k2 *= c2;
        k2 = hash_rot64(k2, 33);
        k2 *= c1;
        h2 ^= k2;
        /* fall through */
    case 8:
        k1 ^= ((uint64_t) tail[7]) << 56;
        /* fall through */
    case 7:
        k1 ^= ((uint64_t) tail[6]) << 48;
        /* fall through */
    case 6:
        k1 ^= ((uint64_t) tail[5]) << 40;
        /* fall through */
    case 5:
        k1 ^= ((uint64_t) tail[4]) << 32;
        /* fall through */
    case 4:
        k1 ^= ((uint64_t) tail[3]) << 24;
        /* fall through */
    case 3:
        k1 ^= ((uint64_t) tail[2]) << 16;
        /* fall through */
    case 2:
        k1 ^= ((uint64_t) tail[1]) << 8;
        /* fall through */
    case 1:
        k1 ^= ((uint64_t) tail[0]) << 0;
        k1 *= c1;
        k1 = hash_rot64(k1, 31);
        k1 *= c2;
        h1 ^= k1;
    };

    /* Finalization */
    h1 ^= len;
    h2 ^= len;
    h1 += h2;
    h2 += h1;
    h1 = fmix64(h1);
    h2 = fmix64(h2);
    h1 += h2;
    h2 += h1;

    out->u64.lo = h1;
    out->u64.hi = h2;
}
#endif /* __x86_64__ or __aarch64__ */
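
As a usage illustration (not part of the coverage data), here is a minimal, hypothetical caller of hash_bytes128(). It assumes an OVS build environment where hash.h pulls in the ovs_u128 type, whose u32[] member is used just as the portable branch above uses it; the input bytes are made up:

    #include <config.h>
    #include "hash.h"
    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char buf[] = "some packet bytes";   /* made-up input */
        ovs_u128 digest;

        /* Hash the bytes of 'buf' (without the trailing NUL), basis 0. */
        hash_bytes128(buf, strlen(buf), 0, &digest);

        /* Print the four 32-bit words of the 128-bit digest. */
        printf("%08"PRIx32" %08"PRIx32" %08"PRIx32" %08"PRIx32"\n",
               digest.u32[0], digest.u32[1], digest.u32[2], digest.u32[3]);
        return 0;
    }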