Coverage Report

Created: 2025-12-04 06:33

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/openssl35/crypto/hashtable/hashtable.c
Line
Count
Source
1
/*
2
 * Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License 2.0 (the "License").  You may not use
5
 * this file except in compliance with the License.  You can obtain a copy
6
 * in the file LICENSE in the source distribution or at
7
 * https://www.openssl.org/source/license.html
8
 *
9
 *
10
 *
11
 * Notes On hash table design and layout
12
 * This hashtable uses a hopscotch algorithm to do indexing.  The data structure
13
 * looks as follows:
14
 *
15
 *   hash          +--------------+
16
 *   value+------->+ HT_VALUE     |
17
 *      +          +--------------+
18
 *  +-------+
19
 *  |       |
20
 *  +---------------------------------------------------------+
21
 *  |       |       |       |       |                         |
22
 *  | entry | entry | entry | entry |                         |
23
 *  |       |       |       |       |                         |
24
 *  +---------------------------------------------------------+
25
 *  |                               |                         |
26
 *  |                               |                         |
27
 *  +---------------------------------------------------------+
28
 *  |              +                             +            +
29
 *  |        neighborhood[0]               neighborhood[1]    |
30
 *  |                                                         |
31
 *  |                                                         |
32
 *  +---------------------------------------------------------+
33
 *                              |
34
 *                              +
35
 *                         neighborhoods
36
 *
37
 * On lookup/insert/delete, the item's key is hashed to a 64 bit value
38
 * and the result is masked to provide an index into the neighborhoods
39
 * table.  Once a neighborhood is determined, an in-order search is done
40
 * of the elements in the neighborhood's index entries for a matching hash
41
 * value, if found, the corresponding HT_VALUE is used for the respective
42
 * operation.  The number of entries in a neighborhood is determined at build
43
 * time based on the cacheline size of the target CPU.  The intent is for a
44
 * neighborhood to have all entries in the neighborhood fit into a single cache
45
 * line to speed up lookups.  If all entries in a neighborhood are in use at the
46
 * time of an insert, the table is expanded and rehashed.
47
 *
48
 * The lockless-reads hash table is based on the same design but does not
49
 * allow growing and deletion. Thus subsequent neighborhoods are always
50
 * searched for a match until an empty entry is found.
51
 */
52
53
#include <string.h>
54
#include <internal/rcu.h>
55
#include <internal/hashtable.h>
56
#include <internal/hashfunc.h>
57
#include <openssl/rand.h>
58
59
/*
60
 * gcc defines __SANITIZE_THREAD__
61
 * but clang uses the feature attributes api
62
 * map the latter to the former
63
 */
64
#if defined(__clang__) && defined(__has_feature)
65
# if __has_feature(thread_sanitizer)
66
#  define __SANITIZE_THREADS__
67
# endif
68
#endif
69
70
#ifdef __SANITIZE_THREADS__
71
# include <sanitizer/tsan_interface.h>
72
#endif
73
74
#include "internal/numbers.h"
75
/*
76
 * When we do a lookup/insert/delete, there is a high likelihood
77
 * that we will iterate over at least part of the neighborhood list
78
 * As such, because we design a neighborhood entry to fit into a single
79
 * cache line it is advantageous, when supported to fetch the entire
80
 * structure for faster lookups
81
 */
82
#if defined(__GNUC__) || defined(__CLANG__)
83
20.4M
# define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries)
84
51.9M
# define PREFETCH(x) __builtin_prefetch(x)
85
#else
86
# define PREFETCH_NEIGHBORHOOD(x)
87
# define PREFETCH(x)
88
#endif
89
90
/*
91
 * Define our neighborhood list length
92
 * Note: It should always be a power of 2
93
 */
94
477
#define DEFAULT_NEIGH_LEN_LOG 4
95
477
#define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG)
96
97
/*
98
 * For now assume cache line size is 64 bytes
99
 */
100
27.3M
#define CACHE_LINE_BYTES 64
101
#define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES
102
103
27.3M
#define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st))
104
/*
105
 * Defines our chains of values
106
 */
107
struct ht_internal_value_st {
108
    HT_VALUE value;
109
    HT *ht;
110
};
111
112
struct ht_neighborhood_entry_st {
113
    uint64_t hash;
114
    struct ht_internal_value_st *value;
115
};
116
117
struct ht_neighborhood_st {
118
    struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN];
119
};
120
121
/*
122
 * Updates to data in this struct
123
 * require an rcu sync after modification
124
 * prior to free
125
 */
126
struct ht_mutable_data_st {
127
    struct ht_neighborhood_st *neighborhoods;
128
    void *neighborhood_ptr_to_free;
129
    uint64_t neighborhood_mask;
130
};
131
132
/*
133
 * Private data may be updated on the write
134
 * side only, and so do not require rcu sync
135
 */
136
struct ht_write_private_data_st {
137
    size_t neighborhood_len;
138
    size_t value_count;
139
    int need_sync;
140
};
141
142
struct ht_internal_st {
143
    HT_CONFIG config;
144
    CRYPTO_RCU_LOCK *lock;
145
    CRYPTO_RWLOCK *atomic_lock;
146
    struct ht_mutable_data_st *md;
147
    struct ht_write_private_data_st wpd;
148
};
149
150
static void free_value(struct ht_internal_value_st *v);
151
152
static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len,
153
                                                              void **freeptr)
154
58.0k
{
155
58.0k
    struct ht_neighborhood_st *ret;
156
157
58.0k
    ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len,
158
58.0k
                                CACHE_LINE_BYTES, freeptr);
159
160
    /* fall back to regular malloc */
161
58.0k
    if (ret == NULL) {
162
0
        ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len);
163
0
        if (ret == NULL)
164
0
            return NULL;
165
0
    }
166
58.0k
    memset(ret, 0, sizeof(struct ht_neighborhood_st) * len);
167
58.0k
    return ret;
168
58.0k
}
169
170
static void internal_free_nop(HT_VALUE *v)
171
33.7k
{
172
33.7k
    return;
173
33.7k
}
174
175
HT *ossl_ht_new(const HT_CONFIG *conf)
176
242
{
177
242
    HT *new = OPENSSL_zalloc(sizeof(*new));
178
179
242
    if (new == NULL)
180
0
        return NULL;
181
182
242
    new->atomic_lock = CRYPTO_THREAD_lock_new();
183
242
    if (new->atomic_lock == NULL)
184
0
        goto err;
185
186
242
    memcpy(&new->config, conf, sizeof(*conf));
187
188
242
    if (new->config.init_neighborhoods != 0) {
189
236
        new->wpd.neighborhood_len = new->config.init_neighborhoods;
190
        /* round up to the next power of 2 */
191
236
        new->wpd.neighborhood_len--;
192
236
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1;
193
236
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2;
194
236
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4;
195
236
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8;
196
236
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16;
197
236
        new->wpd.neighborhood_len++;
198
236
    } else {
199
6
        new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;
200
6
    }
201
202
242
    if (new->config.ht_free_fn == NULL)
203
236
        new->config.ht_free_fn = internal_free_nop;
204
205
242
    new->md = OPENSSL_zalloc(sizeof(*new->md));
206
242
    if (new->md == NULL)
207
0
        goto err;
208
209
242
    new->md->neighborhoods =
210
242
        alloc_new_neighborhood_list(new->wpd.neighborhood_len,
211
242
                                    &new->md->neighborhood_ptr_to_free);
212
242
    if (new->md->neighborhoods == NULL)
213
0
        goto err;
214
242
    new->md->neighborhood_mask = new->wpd.neighborhood_len - 1;
215
216
242
    new->lock = ossl_rcu_lock_new(1, conf->ctx);
217
242
    if (new->lock == NULL)
218
0
        goto err;
219
220
242
    if (new->config.ht_hash_fn == NULL)
221
242
        new->config.ht_hash_fn = ossl_fnv1a_hash;
222
223
242
    return new;
224
225
0
err:
226
0
    CRYPTO_THREAD_lock_free(new->atomic_lock);
227
0
    ossl_rcu_lock_free(new->lock);
228
0
    if (new->md != NULL)
229
0
        OPENSSL_free(new->md->neighborhood_ptr_to_free);
230
0
    OPENSSL_free(new->md);
231
0
    OPENSSL_free(new);
232
0
    return NULL;
233
242
}
234
235
void ossl_ht_read_lock(HT *htable)
236
26
{
237
26
    ossl_rcu_read_lock(htable->lock);
238
26
}
239
240
void ossl_ht_read_unlock(HT *htable)
241
41
{
242
41
    ossl_rcu_read_unlock(htable->lock);
243
41
}
244
245
void ossl_ht_write_lock(HT *htable)
246
288
{
247
288
    ossl_rcu_write_lock(htable->lock);
248
288
    htable->wpd.need_sync = 0;
249
288
}
250
251
void ossl_ht_write_unlock(HT *htable)
252
288
{
253
288
    int need_sync = htable->wpd.need_sync;
254
255
288
    htable->wpd.need_sync = 0;
256
288
    ossl_rcu_write_unlock(htable->lock);
257
288
    if (need_sync)
258
182
        ossl_synchronize_rcu(htable->lock);
259
288
}
260
261
static void free_oldmd(void *arg)
262
28.9k
{
263
28.9k
    struct ht_mutable_data_st *oldmd = arg;
264
28.9k
    size_t i, j;
265
28.9k
    size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1;
266
28.9k
    struct ht_internal_value_st *v;
267
268
365k
    for (i = 0; i < neighborhood_len; i++) {
269
336k
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]);
270
1.68M
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
271
1.34M
            if (oldmd->neighborhoods[i].entries[j].value != NULL) {
272
35.1k
                v = oldmd->neighborhoods[i].entries[j].value;
273
35.1k
                v->ht->config.ht_free_fn((HT_VALUE *)v);
274
35.1k
                free_value(v);
275
35.1k
            }
276
1.34M
        }
277
336k
    }
278
279
28.9k
    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
280
28.9k
    OPENSSL_free(oldmd);
281
28.9k
}
282
283
static int ossl_ht_flush_internal(HT *h)
284
157
{
285
157
    struct ht_mutable_data_st *newmd = NULL;
286
157
    struct ht_mutable_data_st *oldmd = NULL;
287
288
157
    newmd = OPENSSL_zalloc(sizeof(*newmd));
289
157
    if (newmd == NULL)
290
0
        return 0;
291
292
157
    newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN,
293
157
                                                       &newmd->neighborhood_ptr_to_free);
294
157
    if (newmd->neighborhoods == NULL) {
295
0
        OPENSSL_free(newmd);
296
0
        return 0;
297
0
    }
298
299
157
    newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1;
300
301
    /* Swap the old and new mutable data sets */
302
157
    oldmd = ossl_rcu_deref(&h->md);
303
157
    ossl_rcu_assign_ptr(&h->md, &newmd);
304
305
    /* Set the number of entries to 0 */
306
157
    h->wpd.value_count = 0;
307
157
    h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;
308
309
157
    ossl_rcu_call(h->lock, free_oldmd, oldmd);
310
157
    h->wpd.need_sync = 1;
311
157
    return 1;
312
157
}
313
314
int ossl_ht_flush(HT *h)
315
4
{
316
4
    return ossl_ht_flush_internal(h);
317
4
}
318
319
void ossl_ht_free(HT *h)
320
154
{
321
154
    if (h == NULL)
322
0
        return;
323
324
154
    ossl_ht_write_lock(h);
325
154
    ossl_ht_flush_internal(h);
326
154
    ossl_ht_write_unlock(h);
327
    /* Freeing the lock does a final sync for us */
328
154
    CRYPTO_THREAD_lock_free(h->atomic_lock);
329
154
    ossl_rcu_lock_free(h->lock);
330
154
    OPENSSL_free(h->md->neighborhood_ptr_to_free);
331
154
    OPENSSL_free(h->md);
332
154
    OPENSSL_free(h);
333
154
    return;
334
154
}
335
336
size_t ossl_ht_count(HT *h)
337
0
{
338
0
    size_t count;
339
340
0
    count = h->wpd.value_count;
341
0
    return count;
342
0
}
343
344
void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg),
345
                           void *arg)
346
68
{
347
68
    size_t i, j;
348
68
    struct ht_mutable_data_st *md;
349
350
68
    md = ossl_rcu_deref(&h->md);
351
8.58k
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
352
8.52k
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
353
42.5k
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
354
34.0k
            if (md->neighborhoods[i].entries[j].value != NULL) {
355
521
                if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg))
356
11
                    goto out;
357
521
            }
358
34.0k
        }
359
8.52k
    }
360
68
out:
361
68
    return;
362
68
}
363
364
HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len,
365
                                     int (*filter)(HT_VALUE *obj, void *arg),
366
                                     void *arg)
367
58
{
368
58
    struct ht_mutable_data_st *md;
369
58
    HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST)
370
58
                                         + (sizeof(HT_VALUE *) * max_len));
371
58
    size_t i, j;
372
58
    struct ht_internal_value_st *v;
373
374
58
    if (list == NULL)
375
0
        return NULL;
376
377
    /*
378
     * The list array lives just beyond the end of
379
     * the struct
380
     */
381
58
    list->list = (HT_VALUE **)(list + 1);
382
383
58
    md = ossl_rcu_deref(&h->md);
384
8.10k
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
385
8.05k
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i+1]);
386
40.2k
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
387
32.2k
            v = md->neighborhoods[i].entries[j].value;
388
32.2k
            if (v != NULL && filter((HT_VALUE *)v, arg)) {
389
7
                list->list[list->list_len++] = (HT_VALUE *)v;
390
7
                if (list->list_len == max_len)
391
7
                    goto out;
392
7
            }
393
32.2k
        }
394
8.05k
    }
395
58
out:
396
58
    return list;
397
58
}
398
399
void ossl_ht_value_list_free(HT_VALUE_LIST *list)
400
58
{
401
58
    OPENSSL_free(list);
402
58
}
403
404
static int compare_hash(uint64_t hash1, uint64_t hash2)
405
33.8M
{
406
33.8M
    return (hash1 == hash2);
407
33.8M
}
408
409
static void free_old_neigh_table(void *arg)
410
17
{
411
17
    struct ht_mutable_data_st *oldmd = arg;
412
413
17
    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
414
17
    OPENSSL_free(oldmd);
415
17
}
416
417
/*
418
 * Increase hash table bucket list
419
 * must be called with write_lock held
420
 */
421
static int grow_hashtable(HT *h, size_t oldsize)
422
13
{
423
13
    struct ht_mutable_data_st *newmd;
424
13
    struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md);
425
13
    int rc = 0;
426
13
    uint64_t oldi, oldj, newi, newj;
427
13
    uint64_t oldhash;
428
13
    struct ht_internal_value_st *oldv;
429
13
    int rehashed;
430
13
    size_t newsize = oldsize * 2;
431
432
13
    if (h->config.lockless_reads)
433
0
        goto out;
434
435
13
    if ((newmd = OPENSSL_zalloc(sizeof(*newmd))) == NULL)
436
0
        goto out;
437
438
    /* bucket list is always a power of 2 */
439
13
    newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2,
440
13
                                                       &newmd->neighborhood_ptr_to_free);
441
13
    if (newmd->neighborhoods == NULL)
442
0
        goto out_free;
443
444
    /* being a power of 2 makes for easy mask computation */
445
13
    newmd->neighborhood_mask = (newsize - 1);
446
447
    /*
448
     * Now we need to start rehashing entries
449
     * Note we don't need to use atomics here as the new
450
     * mutable data hasn't been published
451
     */
452
1.11k
    for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) {
453
1.10k
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]);
454
5.52k
        for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) {
455
4.41k
            oldv = oldmd->neighborhoods[oldi].entries[oldj].value;
456
4.41k
            if (oldv == NULL)
457
4.31k
                continue;
458
100
            oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash;
459
100
            newi = oldhash & newmd->neighborhood_mask;
460
100
            rehashed = 0;
461
160
            for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) {
462
160
                if (newmd->neighborhoods[newi].entries[newj].value == NULL) {
463
100
                    newmd->neighborhoods[newi].entries[newj].value = oldv;
464
100
                    newmd->neighborhoods[newi].entries[newj].hash = oldhash;
465
100
                    rehashed = 1;
466
100
                    break;
467
100
                }
468
160
            }
469
100
            if (rehashed == 0) {
470
                /* we ran out of space in a neighborhood, grow again */
471
0
                OPENSSL_free(newmd->neighborhoods);
472
0
                OPENSSL_free(newmd);
473
0
                return grow_hashtable(h, newsize);
474
0
            }
475
100
        }
476
1.10k
    }
477
    /*
478
     * Now that our entries are all hashed into the new bucket list
479
     * update our bucket_len and target_max_load
480
     */
481
13
    h->wpd.neighborhood_len = newsize;
482
483
    /*
484
     * Now we replace the old mutable data with the new
485
     */
486
13
    ossl_rcu_assign_ptr(&h->md, &newmd);
487
13
    ossl_rcu_call(h->lock, free_old_neigh_table, oldmd);
488
13
    h->wpd.need_sync = 1;
489
    /*
490
     * And we're done
491
     */
492
13
    rc = 1;
493
494
13
out:
495
13
    return rc;
496
0
out_free:
497
0
    OPENSSL_free(newmd->neighborhoods);
498
0
    OPENSSL_free(newmd);
499
0
    goto out;
500
13
}
501
502
static void free_old_ht_value(void *arg)
503
8
{
504
8
    HT_VALUE *h = (HT_VALUE *)arg;
505
506
    /*
507
     * Note, this is only called on replacement,
508
     * the caller is responsible for freeing the
509
     * held data, we just need to free the wrapping
510
     * struct here
511
     */
512
8
    OPENSSL_free(h);
513
8
}
514
515
static ossl_inline int match_key(HT_KEY *a, HT_KEY *b)
516
25.9M
{
517
    /*
518
     * keys match if they are both present, the same size
519
     * and compare equal in memory
520
     */
521
25.9M
    PREFETCH(a->keybuf);
522
25.9M
    PREFETCH(b->keybuf);
523
25.9M
    if (a->keybuf != NULL && b->keybuf != NULL && a->keysize == b->keysize)
524
25.9M
        return !memcmp(a->keybuf, b->keybuf, a->keysize);
525
526
2.25k
    return 1;
527
25.9M
}
528
529
static int ossl_ht_insert_locked(HT *h, uint64_t hash,
530
                                 struct ht_internal_value_st *newval,
531
                                 HT_VALUE **olddata)
532
28.1k
{
533
28.1k
    struct ht_mutable_data_st *md = h->md;
534
28.1k
    uint64_t neigh_idx_start = hash & md->neighborhood_mask;
535
28.1k
    uint64_t neigh_idx = neigh_idx_start;
536
28.1k
    size_t j;
537
28.1k
    uint64_t ihash;
538
28.1k
    HT_VALUE *ival;
539
28.1k
    size_t empty_idx = SIZE_MAX;
540
28.1k
    int lockless_reads = h->config.lockless_reads;
541
542
28.3k
    do {
543
28.3k
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
544
545
42.8k
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
546
42.5k
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
547
42.5k
            if (ival == NULL) {
548
28.2k
                empty_idx = j;
549
                /* lockless_reads implies no deletion, we can break out */
550
28.2k
                if (lockless_reads)
551
28.0k
                    goto not_found;
552
212
                continue;
553
28.2k
            }
554
14.2k
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
555
14.2k
                                    &ihash, h->atomic_lock))
556
0
                return 0;
557
14.2k
            if (compare_hash(hash, ihash) && match_key(&newval->value.key,
558
17
                                                       &ival->key)) {
559
17
                if (olddata == NULL) {
560
                    /* This would insert a duplicate -> fail */
561
11
                    return 0;
562
11
                }
563
                /* Do a replacement */
564
6
                if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash,
565
6
                                         hash, h->atomic_lock))
566
0
                    return 0;
567
6
                *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value;
568
6
                ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value,
569
6
                                    &newval);
570
6
                ossl_rcu_call(h->lock, free_old_ht_value, *olddata);
571
6
                h->wpd.need_sync = 1;
572
6
                return 1;
573
6
            }
574
14.2k
        }
575
340
        if (!lockless_reads)
576
77
            break;
577
        /* Continue search in subsequent neighborhoods */
578
263
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
579
263
    } while (neigh_idx != neigh_idx_start);
580
581
28.0k
 not_found:
582
    /* If we get to here, its just an insert */
583
28.0k
    if (empty_idx == SIZE_MAX)
584
13
        return -1; /* out of space */
585
28.0k
    if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash,
586
28.0k
                             hash, h->atomic_lock))
587
0
        return 0;
588
28.0k
    h->wpd.value_count++;
589
28.0k
    ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value,
590
28.0k
                        &newval);
591
28.0k
    return 1;
592
28.0k
}
593
594
static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key,
595
                                                    void *data,
596
                                                    uintptr_t *type)
597
40.1k
{
598
40.1k
    struct ht_internal_value_st *tmp;
599
40.1k
    size_t nvsize = sizeof(*tmp);
600
601
40.1k
    if (h->config.collision_check == 1)
602
38.7k
        nvsize += key->keysize;
603
604
40.1k
    tmp = OPENSSL_malloc(nvsize);
605
606
40.1k
    if (tmp == NULL)
607
0
        return NULL;
608
609
40.1k
    tmp->ht = h;
610
40.1k
    tmp->value.value = data;
611
40.1k
    tmp->value.type_id = type;
612
40.1k
    tmp->value.key.keybuf = NULL;
613
40.1k
    if (h->config.collision_check) {
614
38.7k
        tmp->value.key.keybuf = (uint8_t *)(tmp + 1);
615
38.7k
        tmp->value.key.keysize = key->keysize;
616
38.7k
        memcpy(tmp->value.key.keybuf, key->keybuf, key->keysize);
617
38.7k
    }
618
619
620
40.1k
    return tmp;
621
40.1k
}
622
623
static void free_value(struct ht_internal_value_st *v)
624
35.2k
{
625
35.2k
    OPENSSL_free(v);
626
35.2k
}
627
628
int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata)
629
40.1k
{
630
40.1k
    struct ht_internal_value_st *newval = NULL;
631
40.1k
    uint64_t hash;
632
40.1k
    int rc = 0;
633
40.1k
    int i;
634
635
40.1k
    if (data->value == NULL)
636
0
        goto out;
637
638
40.1k
    newval = alloc_new_value(h, key, data->value, data->type_id);
639
40.1k
    if (newval == NULL)
640
0
        goto out;
641
642
    /*
643
     * we have to take our lock here to prevent other changes
644
     * to the bucket list
645
     */
646
40.1k
    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);
647
648
40.1k
    for (i = 0;
649
40.1k
         (rc = ossl_ht_insert_locked(h, hash, newval, olddata)) == -1
650
17
         && i < 4;
651
40.1k
         ++i)
652
17
        if (!grow_hashtable(h, h->wpd.neighborhood_len)) {
653
0
            rc = -1;
654
0
            break;
655
0
        }
656
657
40.1k
    if (rc <= 0)
658
14
        free_value(newval);
659
660
40.1k
out:
661
40.1k
    return rc;
662
40.1k
}
663
664
HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key)
665
20.0M
{
666
20.0M
    struct ht_mutable_data_st *md;
667
20.0M
    uint64_t hash;
668
20.0M
    uint64_t neigh_idx_start;
669
20.0M
    uint64_t neigh_idx;
670
20.0M
    struct ht_internal_value_st *ival = NULL;
671
20.0M
    size_t j;
672
20.0M
    uint64_t ehash;
673
20.0M
    int lockless_reads = h->config.lockless_reads;
674
675
20.0M
    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);
676
677
20.0M
    md = ossl_rcu_deref(&h->md);
678
20.0M
    neigh_idx = neigh_idx_start = hash & md->neighborhood_mask;
679
20.0M
    do {
680
20.0M
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
681
25.5M
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
682
25.5M
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
683
25.5M
            if (ival == NULL) {
684
1.90M
                if (lockless_reads)
685
                    /* lockless_reads implies no deletion, we can break out */
686
1.90M
                    return NULL;
687
119
                continue;
688
1.90M
            }
689
23.6M
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
690
23.6M
                                    &ehash, h->atomic_lock))
691
0
                return NULL;
692
23.6M
            if (compare_hash(hash, ehash) && match_key(&ival->value.key, key))
693
18.1M
                return (HT_VALUE *)ival;
694
23.6M
        }
695
4.08k
        if (!lockless_reads)
696
39
            break;
697
        /* Continue search in subsequent neighborhoods */
698
4.04k
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
699
4.04k
    } while (neigh_idx != neigh_idx_start);
700
701
39
    return NULL;
702
20.0M
}
703
704
static void free_old_entry(void *arg)
705
12
{
706
12
    struct ht_internal_value_st *v = arg;
707
708
12
    v->ht->config.ht_free_fn((HT_VALUE *)v);
709
12
    free_value(v);
710
12
}
711
712
int ossl_ht_delete(HT *h, HT_KEY *key)
713
50
{
714
50
    uint64_t hash;
715
50
    uint64_t neigh_idx;
716
50
    size_t j;
717
50
    struct ht_internal_value_st *v = NULL;
718
50
    HT_VALUE *nv = NULL;
719
50
    int rc = 0;
720
721
50
    if (h->config.lockless_reads)
722
0
        return 0;
723
724
50
    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);
725
726
50
    neigh_idx = hash & h->md->neighborhood_mask;
727
50
    PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]);
728
228
    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
729
188
        v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value;
730
188
        if (v == NULL)
731
132
            continue;
732
56
        if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash)
733
10
            && match_key(key, &v->value.key)) {
734
10
            if (!CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash,
735
10
                                     0, h->atomic_lock))
736
0
                break;
737
10
            h->wpd.value_count--;
738
10
            ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value,
739
10
                                &nv);
740
10
            rc = 1;
741
10
            break;
742
10
        }
743
56
    }
744
50
    if (rc == 1) {
745
10
        ossl_rcu_call(h->lock, free_old_entry, v);
746
10
        h->wpd.need_sync = 1;
747
10
    }
748
50
    return rc;
749
50
}