/src/openssl/crypto/hashtable/hashtable.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the Apache License 2.0 (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | * |
9 | | * |
10 | | * |
11 | | * Notes on hash table design and layout |
12 | | * This hash table uses a hopscotch algorithm for indexing. The data structure |
13 | | * looks as follows: |
14 | | * |
15 | | * hash +--------------+ |
16 | | * value+------->+ HT_VALUE | |
17 | | * + +--------------+ |
18 | | * +-------+ |
19 | | * | | |
20 | | * +---------------------------------------------------------+ |
21 | | * | | | | | | |
22 | | * | entry | entry | entry | entry | | |
23 | | * | | | | | | |
24 | | * +---------------------------------------------------------+ |
25 | | * | | | |
26 | | * | | | |
27 | | * +---------------------------------------------------------+ |
28 | | * | + + + |
29 | | * | neighborhood[0] neighborhood[1] | |
30 | | * | | |
31 | | * | | |
32 | | * +---------------------------------------------------------+ |
33 | | * | |
34 | | * + |
35 | | * neighborhoods |
36 | | * |
37 | | * On lookup/insert/delete, the item's key is hashed to a 64-bit value |
38 | | * and the result is masked to provide an index into the neighborhoods |
39 | | * table. Once a neighborhood is determined, an in-order search of its |
40 | | * entries is done for a matching hash value; if one is found, the |
41 | | * corresponding HT_VALUE is used for the respective operation. The |
42 | | * number of entries in a neighborhood is determined at build time based |
43 | | * on the cache line size of the target CPU. The intent is for all the |
44 | | * entries in a neighborhood to fit into a single cache line to speed up |
45 | | * lookups. If all entries in a neighborhood are in use at the time of an |
46 | | * insert, the table is expanded and rehashed. |
47 | | * |
48 | | * A hash table configured for lockless reads uses the same design but |
49 | | * does not allow growing or deletion. Thus subsequent neighborhoods are |
50 | | * always searched for a match until an empty entry is found. |
51 | | */ |
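
As a rough illustration of the indexing described above, a simplified sketch of the lookup core follows, where h and key stand for the table and key arguments; the identifiers are the ones defined later in this file, and the RCU dereference, atomics, prefetching, the optional key comparison and the lockless-reads wraparound into subsequent neighborhoods are all omitted:

    uint64_t hash = h->config.ht_hash_fn(key->keybuf, key->keysize);
    uint64_t idx = hash & h->md->neighborhood_mask;     /* mask selects a neighborhood */
    size_t j;

    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {            /* in-order scan of one cache line */
        struct ht_neighborhood_entry_st *e = &h->md->neighborhoods[idx].entries[j];

        if (e->value != NULL && e->hash == hash)
            return (HT_VALUE *)e->value;
    }
    return NULL;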
52 | | |
53 | | #include <string.h> |
54 | | #include <internal/rcu.h> |
55 | | #include <internal/hashtable.h> |
56 | | #include <internal/hashfunc.h> |
57 | | #include <openssl/rand.h> |
58 | | |
59 | | /* |
60 | | * gcc defines __SANITIZE_THREAD__, |
61 | | * but clang uses the feature attributes API; |
62 | | * map the latter to the former |
63 | | */ |
64 | | #if defined(__clang__) && defined(__has_feature) |
65 | | # if __has_feature(thread_sanitizer) |
66 | | # define __SANITIZE_THREADS__ |
67 | | # endif |
68 | | #endif |
69 | | |
70 | | #ifdef __SANITIZE_THREADS__ |
71 | | # include <sanitizer/tsan_interface.h> |
72 | | #endif |
73 | | |
74 | | #include "internal/numbers.h" |
75 | | /* |
76 | | * When we do a lookup/insert/delete, there is a high likelihood |
77 | | * that we will iterate over at least part of the neighborhood list. |
78 | | * As such, because we design a neighborhood to fit into a single |
79 | | * cache line, it is advantageous, when supported, to prefetch the |
80 | | * entire structure for faster lookups. |
81 | | */ |
82 | | #if defined(__GNUC__) || defined(__clang__) |
83 | 2.63M | # define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries) |
84 | 4.76M | # define PREFETCH(x) __builtin_prefetch(x) |
85 | | #else |
86 | | # define PREFETCH_NEIGHBORHOOD(x) |
87 | | # define PREFETCH(x) |
88 | | #endif |
89 | | |
90 | | /* |
91 | | * Define our neighborhood list length |
92 | | * Note: It should always be a power of 2 |
93 | | */ |
94 | 6 | #define DEFAULT_NEIGH_LEN_LOG 4 |
95 | 6 | #define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG) |
96 | | |
97 | | /* |
98 | | * For now assume cache line size is 64 bytes |
99 | | */ |
100 | 3.02M | #define CACHE_LINE_BYTES 64 |
101 | | #define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES |
102 | | |
103 | 3.02M | #define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st)) |
104 | | /* |
105 | | * Defines our chains of values |
106 | | */ |
107 | | struct ht_internal_value_st { |
108 | | HT_VALUE value; |
109 | | HT *ht; |
110 | | }; |
111 | | |
112 | | struct ht_neighborhood_entry_st { |
113 | | uint64_t hash; |
114 | | struct ht_internal_value_st *value; |
115 | | }; |
116 | | |
117 | | struct ht_neighborhood_st { |
118 | | struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN]; |
119 | | }; |
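
As a worked example of the sizing above, on a typical LP64 target a struct ht_neighborhood_entry_st is 16 bytes (a uint64_t hash plus an 8-byte pointer), so NEIGHBORHOOD_LEN evaluates to 64 / 16 = 4 and one struct ht_neighborhood_st fills exactly one assumed 64-byte cache line.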
120 | | |
121 | | /* |
122 | | * Updates to data in this struct |
123 | | * require an rcu sync after modification |
124 | | * prior to free |
125 | | */ |
126 | | struct ht_mutable_data_st { |
127 | | struct ht_neighborhood_st *neighborhoods; |
128 | | void *neighborhood_ptr_to_free; |
129 | | uint64_t neighborhood_mask; |
130 | | }; |
131 | | |
132 | | /* |
133 | | * Private data may be updated on the write |
134 | | * side only, and so do not require rcu sync |
135 | | */ |
136 | | struct ht_write_private_data_st { |
137 | | size_t neighborhood_len; |
138 | | size_t value_count; |
139 | | int need_sync; |
140 | | }; |
141 | | |
142 | | struct ht_internal_st { |
143 | | HT_CONFIG config; |
144 | | CRYPTO_RCU_LOCK *lock; |
145 | | CRYPTO_RWLOCK *atomic_lock; |
146 | | struct ht_mutable_data_st *md; |
147 | | struct ht_write_private_data_st wpd; |
148 | | }; |
149 | | |
150 | | static void free_value(struct ht_internal_value_st *v); |
151 | | |
152 | | static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len, |
153 | | void **freeptr) |
154 | 6 | { |
155 | 6 | struct ht_neighborhood_st *ret; |
156 | | |
157 | 6 | ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len, |
158 | 6 | CACHE_LINE_BYTES, freeptr); |
159 | | |
160 | | /* fall back to regular malloc */ |
161 | 6 | if (ret == NULL) { |
162 | 0 | ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len); |
163 | 0 | if (ret == NULL) |
164 | 0 | return NULL; |
165 | 0 | } |
166 | 6 | memset(ret, 0, sizeof(struct ht_neighborhood_st) * len); |
167 | 6 | return ret; |
168 | 6 | } |
169 | | |
170 | | static void internal_free_nop(HT_VALUE *v) |
171 | 619 | { |
172 | 619 | return; |
173 | 619 | } |
174 | | |
175 | | HT *ossl_ht_new(const HT_CONFIG *conf) |
176 | 4 | { |
177 | 4 | HT *new = OPENSSL_zalloc(sizeof(*new)); |
178 | | |
179 | 4 | if (new == NULL) |
180 | 0 | return NULL; |
181 | | |
182 | 4 | new->atomic_lock = CRYPTO_THREAD_lock_new(); |
183 | 4 | if (new->atomic_lock == NULL) |
184 | 0 | goto err; |
185 | | |
186 | 4 | memcpy(&new->config, conf, sizeof(*conf)); |
187 | | |
188 | 4 | if (new->config.init_neighborhoods != 0) { |
189 | 4 | new->wpd.neighborhood_len = new->config.init_neighborhoods; |
190 | | /* round up to the next power of 2 */ |
191 | 4 | new->wpd.neighborhood_len--; |
192 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1; |
193 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2; |
194 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4; |
195 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8; |
196 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16; |
197 | 4 | new->wpd.neighborhood_len++; |
198 | 4 | } else { |
199 | 0 | new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN; |
200 | 0 | } |
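
The shift-and-or sequence above rounds init_neighborhoods up to the next power of two by smearing the top set bit downwards. As an illustrative example, a requested value of 20 is decremented to 19 (binary 10011), smeared to 11111 (31) and then incremented to 32; a value that is already a power of two is left unchanged.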
201 | | |
202 | 4 | if (new->config.ht_free_fn == NULL) |
203 | 4 | new->config.ht_free_fn = internal_free_nop; |
204 | | |
205 | 4 | new->md = OPENSSL_zalloc(sizeof(*new->md)); |
206 | 4 | if (new->md == NULL) |
207 | 0 | goto err; |
208 | | |
209 | 4 | new->md->neighborhoods = |
210 | 4 | alloc_new_neighborhood_list(new->wpd.neighborhood_len, |
211 | 4 | &new->md->neighborhood_ptr_to_free); |
212 | 4 | if (new->md->neighborhoods == NULL) |
213 | 0 | goto err; |
214 | 4 | new->md->neighborhood_mask = new->wpd.neighborhood_len - 1; |
215 | | |
216 | 4 | new->lock = ossl_rcu_lock_new(1, conf->ctx); |
217 | 4 | if (new->lock == NULL) |
218 | 0 | goto err; |
219 | | |
220 | 4 | if (new->config.ht_hash_fn == NULL) |
221 | 4 | new->config.ht_hash_fn = ossl_fnv1a_hash; |
222 | | |
223 | 4 | return new; |
224 | | |
225 | 0 | err: |
226 | 0 | CRYPTO_THREAD_lock_free(new->atomic_lock); |
227 | 0 | ossl_rcu_lock_free(new->lock); |
228 | 0 | if (new->md != NULL) |
229 | 0 | OPENSSL_free(new->md->neighborhood_ptr_to_free); |
230 | 0 | OPENSSL_free(new->md); |
231 | 0 | OPENSSL_free(new); |
232 | 0 | return NULL; |
233 | 4 | } |
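
A minimal usage sketch for this constructor. Only HT_CONFIG fields consumed above are set; the remainder are zeroed, so ht_hash_fn and ht_free_fn fall back to the fnv1a hash and the no-op free installed above:

    HT_CONFIG conf = { 0 };
    HT *ht;

    conf.init_neighborhoods = 64;   /* rounded up to a power of two */
    conf.lockless_reads = 1;        /* table will not grow or support delete */
    conf.collision_check = 1;       /* keep a key copy so lookups memcmp() keys */
    ht = ossl_ht_new(&conf);
    if (ht == NULL)
        return 0;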
234 | | |
235 | | void ossl_ht_read_lock(HT *htable) |
236 | 0 | { |
237 | 0 | ossl_rcu_read_lock(htable->lock); |
238 | 0 | } |
239 | | |
240 | | void ossl_ht_read_unlock(HT *htable) |
241 | 0 | { |
242 | 0 | ossl_rcu_read_unlock(htable->lock); |
243 | 0 | } |
244 | | |
245 | | void ossl_ht_write_lock(HT *htable) |
246 | 2 | { |
247 | 2 | ossl_rcu_write_lock(htable->lock); |
248 | 2 | htable->wpd.need_sync = 0; |
249 | 2 | } |
250 | | |
251 | | void ossl_ht_write_unlock(HT *htable) |
252 | 2 | { |
253 | 2 | int need_sync = htable->wpd.need_sync; |
254 | | |
255 | 2 | htable->wpd.need_sync = 0; |
256 | 2 | ossl_rcu_write_unlock(htable->lock); |
257 | 2 | if (need_sync) |
258 | 2 | ossl_synchronize_rcu(htable->lock); |
259 | 2 | } |
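
These two functions support the write-side pattern sketched below: mutating operations set wpd.need_sync when they defer a free via ossl_rcu_call(), and the unlock then runs the comparatively expensive ossl_synchronize_rcu() once for the whole critical section rather than once per operation:

    ossl_ht_write_lock(ht);
    /* ... one or more inserts / deletes / flushes ... */
    ossl_ht_write_unlock(ht);   /* synchronizes here only if a free was deferred */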
260 | | |
261 | | static void free_oldmd(void *arg) |
262 | 2 | { |
263 | 2 | struct ht_mutable_data_st *oldmd = arg; |
264 | 2 | size_t i, j; |
265 | 2 | size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1; |
266 | 2 | struct ht_internal_value_st *v; |
267 | | |
268 | 4.09k | for (i = 0; i < neighborhood_len; i++) { |
269 | 4.09k | PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]); |
270 | 20.4k | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
271 | 16.3k | if (oldmd->neighborhoods[i].entries[j].value != NULL) { |
272 | 619 | v = oldmd->neighborhoods[i].entries[j].value; |
273 | 619 | v->ht->config.ht_free_fn((HT_VALUE *)v); |
274 | 619 | free_value(v); |
275 | 619 | } |
276 | 16.3k | } |
277 | 4.09k | } |
278 | | |
279 | 2 | OPENSSL_free(oldmd->neighborhood_ptr_to_free); |
280 | 2 | OPENSSL_free(oldmd); |
281 | 2 | } |
282 | | |
283 | | static int ossl_ht_flush_internal(HT *h) |
284 | 2 | { |
285 | 2 | struct ht_mutable_data_st *newmd = NULL; |
286 | 2 | struct ht_mutable_data_st *oldmd = NULL; |
287 | | |
288 | 2 | newmd = OPENSSL_zalloc(sizeof(*newmd)); |
289 | 2 | if (newmd == NULL) |
290 | 0 | return 0; |
291 | | |
292 | 2 | newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN, |
293 | 2 | &newmd->neighborhood_ptr_to_free); |
294 | 2 | if (newmd->neighborhoods == NULL) { |
295 | 0 | OPENSSL_free(newmd); |
296 | 0 | return 0; |
297 | 0 | } |
298 | | |
299 | 2 | newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1; |
300 | | |
301 | | /* Swap the old and new mutable data sets */ |
302 | 2 | oldmd = ossl_rcu_deref(&h->md); |
303 | 2 | ossl_rcu_assign_ptr(&h->md, &newmd); |
304 | | |
305 | | /* Set the number of entries to 0 */ |
306 | 2 | h->wpd.value_count = 0; |
307 | 2 | h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN; |
308 | | |
309 | 2 | ossl_rcu_call(h->lock, free_oldmd, oldmd); |
310 | 2 | h->wpd.need_sync = 1; |
311 | 2 | return 1; |
312 | 2 | } |
313 | | |
314 | | int ossl_ht_flush(HT *h) |
315 | 0 | { |
316 | 0 | return ossl_ht_flush_internal(h); |
317 | 0 | } |
318 | | |
319 | | void ossl_ht_free(HT *h) |
320 | 2 | { |
321 | 2 | if (h == NULL) |
322 | 0 | return; |
323 | | |
324 | 2 | ossl_ht_write_lock(h); |
325 | 2 | ossl_ht_flush_internal(h); |
326 | 2 | ossl_ht_write_unlock(h); |
327 | | /* Freeing the lock does a final sync for us */ |
328 | 2 | CRYPTO_THREAD_lock_free(h->atomic_lock); |
329 | 2 | ossl_rcu_lock_free(h->lock); |
330 | 2 | OPENSSL_free(h->md->neighborhood_ptr_to_free); |
331 | 2 | OPENSSL_free(h->md); |
332 | 2 | OPENSSL_free(h); |
333 | 2 | return; |
334 | 2 | } |
335 | | |
336 | | size_t ossl_ht_count(HT *h) |
337 | 0 | { |
338 | 0 | size_t count; |
339 | |
340 | 0 | count = h->wpd.value_count; |
341 | 0 | return count; |
342 | 0 | } |
343 | | |
344 | | void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg), |
345 | | void *arg) |
346 | 0 | { |
347 | 0 | size_t i, j; |
348 | 0 | struct ht_mutable_data_st *md; |
349 | |
350 | 0 | md = ossl_rcu_deref(&h->md); |
351 | 0 | for (i = 0; i < md->neighborhood_mask + 1; i++) { |
352 | 0 | PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]); |
353 | 0 | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
354 | 0 | if (md->neighborhoods[i].entries[j].value != NULL) { |
355 | 0 | if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg)) |
356 | 0 | goto out; |
357 | 0 | } |
358 | 0 | } |
359 | 0 | } |
360 | 0 | out: |
361 | 0 | return; |
362 | 0 | } |
363 | | |
364 | | HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len, |
365 | | int (*filter)(HT_VALUE *obj, void *arg), |
366 | | void *arg) |
367 | 0 | { |
368 | 0 | struct ht_mutable_data_st *md; |
369 | 0 | HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST) |
370 | 0 | + (sizeof(HT_VALUE *) * max_len)); |
371 | 0 | size_t i, j; |
372 | 0 | struct ht_internal_value_st *v; |
373 | |
374 | 0 | if (list == NULL) |
375 | 0 | return NULL; |
376 | | |
377 | | /* |
378 | | * The list array lives just beyond the end of |
379 | | * the struct |
380 | | */ |
381 | 0 | list->list = (HT_VALUE **)(list + 1); |
382 | |
383 | 0 | md = ossl_rcu_deref(&h->md); |
384 | 0 | for (i = 0; i < md->neighborhood_mask + 1; i++) { |
385 | 0 | PREFETCH_NEIGHBORHOOD(md->neighborhoods[i+1]); |
386 | 0 | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
387 | 0 | v = md->neighborhoods[i].entries[j].value; |
388 | 0 | if (v != NULL && filter((HT_VALUE *)v, arg)) { |
389 | 0 | list->list[list->list_len++] = (HT_VALUE *)v; |
390 | 0 | if (list->list_len == max_len) |
391 | 0 | goto out; |
392 | 0 | } |
393 | 0 | } |
394 | 0 | } |
395 | 0 | out: |
396 | 0 | return list; |
397 | 0 | } |
398 | | |
399 | | void ossl_ht_value_list_free(HT_VALUE_LIST *list) |
400 | 0 | { |
401 | 0 | OPENSSL_free(list); |
402 | 0 | } |
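
A short usage sketch for ossl_ht_filter() above; select_all and use() are hypothetical stand-ins for a caller's filter callback and consumer:

    static int select_all(HT_VALUE *obj, void *arg)
    {
        return 1;                   /* hypothetical filter: accept every entry */
    }

    /* at the call site: */
    HT_VALUE_LIST *l = ossl_ht_filter(ht, 32, select_all, NULL);
    size_t i;

    if (l != NULL) {
        for (i = 0; i < l->list_len; i++)
            use(l->list[i]);        /* use() is a hypothetical consumer */
        ossl_ht_value_list_free(l);
    }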
403 | | |
404 | | static int compare_hash(uint64_t hash1, uint64_t hash2) |
405 | 2.76M | { |
406 | 2.76M | return (hash1 == hash2); |
407 | 2.76M | } |
408 | | |
409 | | static void free_old_neigh_table(void *arg) |
410 | 0 | { |
411 | 0 | struct ht_mutable_data_st *oldmd = arg; |
412 | |
413 | 0 | OPENSSL_free(oldmd->neighborhood_ptr_to_free); |
414 | 0 | OPENSSL_free(oldmd); |
415 | 0 | } |
416 | | |
417 | | /* |
418 | | * Increase the hash table bucket list. |
419 | | * Must be called with the write lock held. |
420 | | */ |
421 | | static int grow_hashtable(HT *h, size_t oldsize) |
422 | 0 | { |
423 | 0 | struct ht_mutable_data_st *newmd; |
424 | 0 | struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md); |
425 | 0 | int rc = 0; |
426 | 0 | uint64_t oldi, oldj, newi, newj; |
427 | 0 | uint64_t oldhash; |
428 | 0 | struct ht_internal_value_st *oldv; |
429 | 0 | int rehashed; |
430 | 0 | size_t newsize = oldsize * 2; |
431 | |
432 | 0 | if (h->config.lockless_reads) |
433 | 0 | goto out; |
434 | | |
435 | 0 | if ((newmd = OPENSSL_zalloc(sizeof(*newmd))) == NULL) |
436 | 0 | goto out; |
437 | | |
438 | | /* bucket list is always a power of 2 */ |
439 | 0 | newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2, |
440 | 0 | &newmd->neighborhood_ptr_to_free); |
441 | 0 | if (newmd->neighborhoods == NULL) |
442 | 0 | goto out_free; |
443 | | |
444 | | /* being a power of 2 makes for easy mask computation */ |
445 | 0 | newmd->neighborhood_mask = (newsize - 1); |
446 | | |
447 | | /* |
448 | | * Now we need to start rehashing entries |
449 | | * Note we don't need to use atomics here as the new |
450 | | * mutable data hasn't been published |
451 | | */ |
452 | 0 | for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) { |
453 | 0 | PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]); |
454 | 0 | for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) { |
455 | 0 | oldv = oldmd->neighborhoods[oldi].entries[oldj].value; |
456 | 0 | if (oldv == NULL) |
457 | 0 | continue; |
458 | 0 | oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash; |
459 | 0 | newi = oldhash & newmd->neighborhood_mask; |
460 | 0 | rehashed = 0; |
461 | 0 | for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) { |
462 | 0 | if (newmd->neighborhoods[newi].entries[newj].value == NULL) { |
463 | 0 | newmd->neighborhoods[newi].entries[newj].value = oldv; |
464 | 0 | newmd->neighborhoods[newi].entries[newj].hash = oldhash; |
465 | 0 | rehashed = 1; |
466 | 0 | break; |
467 | 0 | } |
468 | 0 | } |
469 | 0 | if (rehashed == 0) { |
470 | | /* we ran out of space in a neighborhood, grow again */ |
471 | 0 | OPENSSL_free(newmd->neighborhoods); |
472 | 0 | OPENSSL_free(newmd); |
473 | 0 | return grow_hashtable(h, newsize); |
474 | 0 | } |
475 | 0 | } |
476 | 0 | } |
477 | | /* |
478 | | * Now that our entries are all hashed into the new bucket list, |
479 | | * update our neighborhood length |
480 | | */ |
481 | 0 | h->wpd.neighborhood_len = newsize; |
482 | | |
483 | | /* |
484 | | * Now we replace the old mutable data with the new |
485 | | */ |
486 | 0 | ossl_rcu_assign_ptr(&h->md, &newmd); |
487 | 0 | ossl_rcu_call(h->lock, free_old_neigh_table, oldmd); |
488 | 0 | h->wpd.need_sync = 1; |
489 | | /* |
490 | | * And we're done |
491 | | */ |
492 | 0 | rc = 1; |
493 | |
494 | 0 | out: |
495 | 0 | return rc; |
496 | 0 | out_free: |
497 | 0 | OPENSSL_free(newmd->neighborhoods); |
498 | 0 | OPENSSL_free(newmd); |
499 | 0 | goto out; |
500 | 0 | } |
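
Because the neighborhood count stays a power of two, doubling the table only ever moves an entry between two candidate neighborhoods: an entry whose hash is H sat at index H & (oldsize - 1) and is rehashed above to H & (2 * oldsize - 1), which is either the same index or that index plus oldsize, depending on one additional hash bit.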
501 | | |
502 | | static void free_old_ht_value(void *arg) |
503 | 0 | { |
504 | 0 | HT_VALUE *h = (HT_VALUE *)arg; |
505 | | |
506 | | /* |
507 | | * Note, this is only called on replacement, |
508 | | * the caller is responsible for freeing the |
509 | | * held data, we just need to free the wrapping |
510 | | * struct here |
511 | | */ |
512 | 0 | OPENSSL_free(h); |
513 | 0 | } |
514 | | |
515 | | static ossl_inline int match_key(HT_KEY *a, HT_KEY *b) |
516 | 2.38M | { |
517 | | /* |
518 | | * keys match if they are both present, the same size |
519 | | * and compare equal in memory |
520 | | */ |
521 | 2.38M | PREFETCH(a->keybuf); |
522 | 2.38M | PREFETCH(b->keybuf); |
523 | 2.38M | if (a->keybuf != NULL && b->keybuf != NULL && a->keysize == b->keysize) |
524 | 2.38M | return !memcmp(a->keybuf, b->keybuf, a->keysize); |
525 | | |
526 | 0 | return 1; |
527 | 2.38M | } |
528 | | |
529 | | static int ossl_ht_insert_locked(HT *h, uint64_t hash, |
530 | | struct ht_internal_value_st *newval, |
531 | | HT_VALUE **olddata) |
532 | 619 | { |
533 | 619 | struct ht_mutable_data_st *md = h->md; |
534 | 619 | uint64_t neigh_idx_start = hash & md->neighborhood_mask; |
535 | 619 | uint64_t neigh_idx = neigh_idx_start; |
536 | 619 | size_t j; |
537 | 619 | uint64_t ihash; |
538 | 619 | HT_VALUE *ival; |
539 | 619 | size_t empty_idx = SIZE_MAX; |
540 | 619 | int lockless_reads = h->config.lockless_reads; |
541 | | |
542 | 619 | do { |
543 | 619 | PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]); |
544 | | |
545 | 710 | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
546 | 710 | ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value); |
547 | 710 | if (ival == NULL) { |
548 | 619 | empty_idx = j; |
549 | | /* lockless_reads implies no deletion, we can break out */ |
550 | 619 | if (lockless_reads) |
551 | 619 | goto not_found; |
552 | 0 | continue; |
553 | 619 | } |
554 | 91 | if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash, |
555 | 91 | &ihash, h->atomic_lock)) |
556 | 0 | return 0; |
557 | 91 | if (compare_hash(hash, ihash) && match_key(&newval->value.key, |
558 | 0 | &ival->key)) { |
559 | 0 | if (olddata == NULL) { |
560 | | /* This would insert a duplicate -> fail */ |
561 | 0 | return 0; |
562 | 0 | } |
563 | | /* Do a replacement */ |
564 | 0 | if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash, |
565 | 0 | hash, h->atomic_lock)) |
566 | 0 | return 0; |
567 | 0 | *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value; |
568 | 0 | ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value, |
569 | 0 | &newval); |
570 | 0 | ossl_rcu_call(h->lock, free_old_ht_value, *olddata); |
571 | 0 | h->wpd.need_sync = 1; |
572 | 0 | return 1; |
573 | 0 | } |
574 | 91 | } |
575 | 0 | if (!lockless_reads) |
576 | 0 | break; |
577 | | /* Continue search in subsequent neighborhoods */ |
578 | 0 | neigh_idx = (neigh_idx + 1) & md->neighborhood_mask; |
579 | 0 | } while (neigh_idx != neigh_idx_start); |
580 | | |
581 | 619 | not_found: |
582 | | /* If we get to here, its just an insert */ |
583 | 619 | if (empty_idx == SIZE_MAX) |
584 | 0 | return -1; /* out of space */ |
585 | 619 | if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash, |
586 | 619 | hash, h->atomic_lock)) |
587 | 0 | return 0; |
588 | 619 | h->wpd.value_count++; |
589 | 619 | ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value, |
590 | 619 | &newval); |
591 | 619 | return 1; |
592 | 619 | } |
593 | | |
594 | | static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key, |
595 | | void *data, |
596 | | uintptr_t *type) |
597 | 619 | { |
598 | 619 | struct ht_internal_value_st *tmp; |
599 | 619 | size_t nvsize = sizeof(*tmp); |
600 | | |
601 | 619 | if (h->config.collision_check == 1) |
602 | 619 | nvsize += key->keysize; |
603 | | |
604 | 619 | tmp = OPENSSL_malloc(nvsize); |
605 | | |
606 | 619 | if (tmp == NULL) |
607 | 0 | return NULL; |
608 | | |
609 | 619 | tmp->ht = h; |
610 | 619 | tmp->value.value = data; |
611 | 619 | tmp->value.type_id = type; |
612 | 619 | tmp->value.key.keybuf = NULL; |
613 | 619 | if (h->config.collision_check) { |
614 | 619 | tmp->value.key.keybuf = (uint8_t *)(tmp + 1); |
615 | 619 | tmp->value.key.keysize = key->keysize; |
616 | 619 | memcpy(tmp->value.key.keybuf, key->keybuf, key->keysize); |
617 | 619 | } |
618 | | |
619 | | |
620 | 619 | return tmp; |
621 | 619 | } |
622 | | |
623 | | static void free_value(struct ht_internal_value_st *v) |
624 | 619 | { |
625 | 619 | OPENSSL_free(v); |
626 | 619 | } |
627 | | |
628 | | int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata) |
629 | 619 | { |
630 | 619 | struct ht_internal_value_st *newval = NULL; |
631 | 619 | uint64_t hash; |
632 | 619 | int rc = 0; |
633 | 619 | int i; |
634 | | |
635 | 619 | if (data->value == NULL) |
636 | 0 | goto out; |
637 | | |
638 | 619 | newval = alloc_new_value(h, key, data->value, data->type_id); |
639 | 619 | if (newval == NULL) |
640 | 0 | goto out; |
641 | | |
642 | | /* |
643 | | * we have to take our lock here to prevent other changes |
644 | | * to the bucket list |
645 | | */ |
646 | 619 | hash = h->config.ht_hash_fn(key->keybuf, key->keysize); |
647 | | |
648 | 619 | for (i = 0; |
649 | 619 | (rc = ossl_ht_insert_locked(h, hash, newval, olddata)) == -1 |
650 | 619 | && i < 4; |
651 | 619 | ++i) |
652 | 0 | if (!grow_hashtable(h, h->wpd.neighborhood_len)) { |
653 | 0 | rc = -1; |
654 | 0 | break; |
655 | 0 | } |
656 | | |
657 | 619 | if (rc <= 0) |
658 | 0 | free_value(newval); |
659 | | |
660 | 619 | out: |
661 | 619 | return rc; |
662 | 619 | } |
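
A minimal insertion sketch, assuming the caller already holds the write lock (see ossl_ht_write_lock above) and fills HT_KEY and HT_VALUE directly rather than through the helper macros in internal/hashtable.h; name and obj are hypothetical stand-ins for the caller's key bytes and data pointer:

    HT_KEY key = { 0 };
    HT_VALUE val = { 0 };
    int rc;

    key.keybuf = (uint8_t *)name;               /* name/obj are caller-supplied */
    key.keysize = strlen(name);
    val.value = obj;
    rc = ossl_ht_insert(ht, &key, &val, NULL);  /* NULL olddata: duplicates fail */
    if (rc <= 0)
        return 0;               /* 0 on failure/duplicate, -1 if the table cannot grow */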
663 | | |
664 | | HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key) |
665 | 2.62M | { |
666 | 2.62M | struct ht_mutable_data_st *md; |
667 | 2.62M | uint64_t hash; |
668 | 2.62M | uint64_t neigh_idx_start; |
669 | 2.62M | uint64_t neigh_idx; |
670 | 2.62M | struct ht_internal_value_st *ival = NULL; |
671 | 2.62M | size_t j; |
672 | 2.62M | uint64_t ehash; |
673 | 2.62M | int lockless_reads = h->config.lockless_reads; |
674 | | |
675 | 2.62M | hash = h->config.ht_hash_fn(key->keybuf, key->keysize); |
676 | | |
677 | 2.62M | md = ossl_rcu_deref(&h->md); |
678 | 2.62M | neigh_idx = neigh_idx_start = hash & md->neighborhood_mask; |
679 | 2.62M | do { |
680 | 2.62M | PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]); |
681 | 3.00M | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
682 | 3.00M | ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value); |
683 | 3.00M | if (ival == NULL) { |
684 | 244k | if (lockless_reads) |
685 | | /* lockless_reads implies no deletion, we can break out */ |
686 | 244k | return NULL; |
687 | 0 | continue; |
688 | 244k | } |
689 | 2.76M | if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash, |
690 | 2.76M | &ehash, h->atomic_lock)) |
691 | 0 | return NULL; |
692 | 2.76M | if (compare_hash(hash, ehash) && match_key(&ival->value.key, key)) |
693 | 2.38M | return (HT_VALUE *)ival; |
694 | 2.76M | } |
695 | 1 | if (!lockless_reads) |
696 | 0 | break; |
697 | | /* Continue search in subsequent neighborhoods */ |
698 | 1 | neigh_idx = (neigh_idx + 1) & md->neighborhood_mask; |
699 | 1 | } while (neigh_idx != neigh_idx_start); |
700 | | |
701 | 0 | return NULL; |
702 | 2.62M | } |
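
The matching lookup sketch; readers bracket the call with the RCU read lock defined earlier in this file and consume the value before dropping the lock. use() is again a hypothetical consumer:

    HT_VALUE *v;

    ossl_ht_read_lock(ht);
    v = ossl_ht_get(ht, &key);
    if (v != NULL)
        use(v->value);          /* consume while still under the read lock */
    ossl_ht_read_unlock(ht);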
703 | | |
704 | | static void free_old_entry(void *arg) |
705 | 0 | { |
706 | 0 | struct ht_internal_value_st *v = arg; |
707 | |
708 | 0 | v->ht->config.ht_free_fn((HT_VALUE *)v); |
709 | 0 | free_value(v); |
710 | 0 | } |
711 | | |
712 | | int ossl_ht_delete(HT *h, HT_KEY *key) |
713 | 0 | { |
714 | 0 | uint64_t hash; |
715 | 0 | uint64_t neigh_idx; |
716 | 0 | size_t j; |
717 | 0 | struct ht_internal_value_st *v = NULL; |
718 | 0 | HT_VALUE *nv = NULL; |
719 | 0 | int rc = 0; |
720 | |
721 | 0 | if (h->config.lockless_reads) |
722 | 0 | return 0; |
723 | | |
724 | 0 | hash = h->config.ht_hash_fn(key->keybuf, key->keysize); |
725 | |
726 | 0 | neigh_idx = hash & h->md->neighborhood_mask; |
727 | 0 | PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]); |
728 | 0 | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
729 | 0 | v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value; |
730 | 0 | if (v == NULL) |
731 | 0 | continue; |
732 | 0 | if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash) |
733 | 0 | && match_key(key, &v->value.key)) { |
734 | 0 | if (!CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash, |
735 | 0 | 0, h->atomic_lock)) |
736 | 0 | break; |
737 | 0 | h->wpd.value_count--; |
738 | 0 | ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value, |
739 | 0 | &nv); |
740 | 0 | rc = 1; |
741 | 0 | break; |
742 | 0 | } |
743 | 0 | } |
744 | 0 | if (rc == 1) { |
745 | 0 | ossl_rcu_call(h->lock, free_old_entry, v); |
746 | 0 | h->wpd.need_sync = 1; |
747 | 0 | } |
748 | 0 | return rc; |
749 | 0 | } |