/src/openssl/crypto/hashtable/hashtable.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2024 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the Apache License 2.0 (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | * |
9 | | * |
10 | | * |
11 | | * Notes On hash table design and layout |
12 | | * This hashtable uses a hopscotch algorithm to do indexing. The data structure |
13 | | * looks as follows: |
14 | | * |
15 | | * hash +--------------+ |
16 | | * value+------->+ HT_VALUE | |
17 | | * + +--------------+ |
18 | | * +-------+ |
19 | | * | | |
20 | | * +---------------------------------------------------------+ |
21 | | * | | | | | | |
22 | | * | entry | entry | entry | entry | | |
23 | | * | | | | | | |
24 | | * +---------------------------------------------------------+ |
25 | | * | | | |
26 | | * | | | |
27 | | * +---------------------------------------------------------+ |
28 | | * | + + + |
29 | | * | neighborhood[0] neighborhood[1] | |
30 | | * | | |
31 | | * | | |
32 | | * +---------------------------------------------------------+ |
33 | | * | |
34 | | * + |
35 | | * neighborhoods |
36 | | * |
37 | | * On lookup/insert/delete, the item's key is hashed to a 64 bit value |
38 | | * and the result is masked to provide an index into the neighborhoods |
39 | | * table. Once a neighborhood is determined, an in-order search is done |
40 | | * of the entries in that neighborhood for a matching hash value; if one |
41 | | * is found, the corresponding HT_VALUE is used for the respective |
42 | | * operation. The number of entries in a neighborhood is determined at build |
43 | | * time based on the cacheline size of the target CPU. The intent is for |
44 | | * all entries in a neighborhood to fit into a single cache |
45 | | * line to speed up lookups. If all entries in a neighborhood are in use at the |
46 | | * time of an insert, the table is expanded and rehashed. |
47 | | * |
48 | | * A hash table configured for lockless reads uses the same design, but it |
49 | | * does not allow growing or deletion. Thus subsequent neighborhoods are |
50 | | * always searched for a match until an empty entry is found. |
51 | | */ |
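As a rough sketch of the lookup flow described above (illustrative only, ignoring the RCU and atomic accesses the real functions below use; the names mirror the definitions later in this file):

    /* hash the key, mask it to a neighborhood, then scan that neighborhood */
    uint64_t hash = h->config.ht_hash_fn(key->keybuf, key->keysize);
    uint64_t neigh_idx = hash & md->neighborhood_mask;      /* mask == len - 1 */
    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
        struct ht_neighborhood_entry_st *e = &md->neighborhoods[neigh_idx].entries[j];
        if (e->value != NULL && e->hash == hash)
            return (HT_VALUE *)e->value;                     /* found a match */
    }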
52 | | |
53 | | #include <string.h> |
54 | | #include <internal/rcu.h> |
55 | | #include <internal/hashtable.h> |
56 | | #include <openssl/rand.h> |
57 | | |
58 | | /* |
59 | | * gcc defines __SANITIZE_THREAD__ |
60 | | * but clang uses the feature attributes api |
61 | | * map the latter to the former |
62 | | */ |
63 | | #if defined(__clang__) && defined(__has_feature) |
64 | | # if __has_feature(thread_sanitizer) |
65 | | # define __SANITIZE_THREAD__ |
66 | | # endif |
67 | | #endif |
68 | | |
69 | | #ifdef __SANITIZE_THREAD__ |
70 | | # include <sanitizer/tsan_interface.h> |
71 | | #endif |
72 | | |
73 | | #include "internal/numbers.h" |
74 | | /* |
75 | | * When we do a lookup/insert/delete, there is a high likelihood |
76 | | * that we will iterate over at least part of the neighborhood list. |
77 | | * As such, because we design a neighborhood to fit into a single |
78 | | * cache line, it is advantageous, when supported, to prefetch the entire |
79 | | * structure for faster lookups. |
80 | | */ |
81 | | #if defined(__GNUC__) || defined(__clang__) |
82 | 47.4k | # define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries) |
83 | 67.6k | # define PREFETCH(x) __builtin_prefetch(x) |
84 | | #else |
85 | | # define PREFETCH_NEIGHBORHOOD(x) |
86 | | # define PREFETCH(x) |
87 | | #endif |
88 | | |
89 | | static ossl_unused uint64_t fnv1a_hash(uint8_t *key, size_t len) |
90 | 39.2k | { |
91 | 39.2k | uint64_t hash = 0xcbf29ce484222325ULL; |
92 | 39.2k | size_t i; |
93 | | |
94 | 2.54M | for (i = 0; i < len; i++) { |
95 | 2.50M | hash ^= key[i]; |
96 | 2.50M | hash *= 0x00000100000001B3ULL; |
97 | 2.50M | } |
98 | 39.2k | return hash; |
99 | 39.2k | } |
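fnv1a_hash() above is the standard 64 bit FNV-1a construction (offset basis 0xcbf29ce484222325, prime 0x100000001B3) and is installed as the default hash when the caller leaves ht_hash_fn NULL in the config (see ossl_ht_new() below). A minimal sketch of how its output is consumed (the key and table size here are made up for illustration):

    uint8_t keybuf[] = "example-key";
    uint64_t hash = fnv1a_hash(keybuf, sizeof(keybuf) - 1);
    uint64_t idx = hash & (DEFAULT_NEIGH_LEN - 1);  /* one of the 16 default neighborhoods */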
100 | | |
101 | | /* |
102 | | * Define our neighborhood list length |
103 | | * Note: It should always be a power of 2 |
104 | | */ |
105 | 12 | #define DEFAULT_NEIGH_LEN_LOG 4 |
106 | 12 | #define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG) |
107 | | |
108 | | /* |
109 | | * For now assume cache line size is 64 bytes |
110 | | */ |
111 | 84.1k | #define CACHE_LINE_BYTES 64 |
112 | | #define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES |
113 | | |
114 | 84.1k | #define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st)) |
115 | | /* |
116 | | * Defines the wrapper for stored values |
117 | | */ |
118 | | struct ht_internal_value_st { |
119 | | HT_VALUE value; |
120 | | HT *ht; |
121 | | }; |
122 | | |
123 | | struct ht_neighborhood_entry_st { |
124 | | uint64_t hash; |
125 | | struct ht_internal_value_st *value; |
126 | | }; |
127 | | |
128 | | struct ht_neighborhood_st { |
129 | | struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN]; |
130 | | }; |
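On a typical LP64 target each ht_neighborhood_entry_st is 16 bytes (an 8 byte hash plus an 8 byte pointer), so with the assumed 64 byte cache line NEIGHBORHOOD_LEN evaluates to 4 entries per neighborhood. A compile-time sketch of that arithmetic (the LP64 sizes are an assumption, not something this file enforces):

    #include <assert.h>
    static_assert(sizeof(struct ht_neighborhood_entry_st) == 2 * sizeof(uint64_t),
                  "assumes 8 byte pointers (LP64)");
    static_assert(CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st) == 4,
                  "one neighborhood holds 4 entries under these assumptions");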
131 | | |
132 | | /* |
133 | | * Updates to data in this struct |
134 | | * require an rcu sync after modification |
135 | | * prior to free |
136 | | */ |
137 | | struct ht_mutable_data_st { |
138 | | struct ht_neighborhood_st *neighborhoods; |
139 | | void *neighborhood_ptr_to_free; |
140 | | uint64_t neighborhood_mask; |
141 | | }; |
142 | | |
143 | | /* |
144 | | * Private data may be updated on the write |
145 | | * side only, and so does not require an rcu sync |
146 | | */ |
147 | | struct ht_write_private_data_st { |
148 | | size_t neighborhood_len; |
149 | | size_t value_count; |
150 | | int need_sync; |
151 | | }; |
152 | | |
153 | | struct ht_internal_st { |
154 | | HT_CONFIG config; |
155 | | CRYPTO_RCU_LOCK *lock; |
156 | | CRYPTO_RWLOCK *atomic_lock; |
157 | | struct ht_mutable_data_st *md; |
158 | | struct ht_write_private_data_st wpd; |
159 | | }; |
160 | | |
161 | | static void free_value(struct ht_internal_value_st *v); |
162 | | |
163 | | static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len, |
164 | | void **freeptr) |
165 | 8 | { |
166 | 8 | struct ht_neighborhood_st *ret; |
167 | | |
168 | 8 | ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len, |
169 | 8 | CACHE_LINE_BYTES, freeptr); |
170 | | |
171 | | /* fall back to regular malloc */ |
172 | 8 | if (ret == NULL) { |
173 | 0 | ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len); |
174 | 0 | if (ret == NULL) |
175 | 0 | return NULL; |
176 | 0 | } |
177 | 8 | memset(ret, 0, sizeof(struct ht_neighborhood_st) * len); |
178 | 8 | return ret; |
179 | 8 | } |
180 | | |
181 | | static void internal_free_nop(HT_VALUE *v) |
182 | 1.10k | { |
183 | 1.10k | return; |
184 | 1.10k | } |
185 | | |
186 | | HT *ossl_ht_new(const HT_CONFIG *conf) |
187 | 4 | { |
188 | 4 | HT *new = OPENSSL_zalloc(sizeof(*new)); |
189 | | |
190 | 4 | if (new == NULL) |
191 | 0 | return NULL; |
192 | | |
193 | 4 | new->atomic_lock = CRYPTO_THREAD_lock_new(); |
194 | 4 | if (new->atomic_lock == NULL) |
195 | 0 | goto err; |
196 | | |
197 | 4 | memcpy(&new->config, conf, sizeof(*conf)); |
198 | | |
199 | 4 | if (new->config.init_neighborhoods != 0) { |
200 | 4 | new->wpd.neighborhood_len = new->config.init_neighborhoods; |
201 | | /* round up to the next power of 2 */ |
202 | 4 | new->wpd.neighborhood_len--; |
203 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1; |
204 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2; |
205 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4; |
206 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8; |
207 | 4 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16; |
208 | 4 | new->wpd.neighborhood_len++; |
209 | 4 | } else { |
210 | 0 | new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN; |
211 | 0 | } |
212 | | |
213 | 4 | if (new->config.ht_free_fn == NULL) |
214 | 4 | new->config.ht_free_fn = internal_free_nop; |
215 | | |
216 | 4 | new->md = OPENSSL_zalloc(sizeof(*new->md)); |
217 | 4 | if (new->md == NULL) |
218 | 0 | goto err; |
219 | | |
220 | 4 | new->md->neighborhoods = |
221 | 4 | alloc_new_neighborhood_list(new->wpd.neighborhood_len, |
222 | 4 | &new->md->neighborhood_ptr_to_free); |
223 | 4 | if (new->md->neighborhoods == NULL) |
224 | 0 | goto err; |
225 | 4 | new->md->neighborhood_mask = new->wpd.neighborhood_len - 1; |
226 | | |
227 | 4 | new->lock = ossl_rcu_lock_new(1, conf->ctx); |
228 | 4 | if (new->lock == NULL) |
229 | 0 | goto err; |
230 | | |
231 | 4 | if (new->config.ht_hash_fn == NULL) |
232 | 4 | new->config.ht_hash_fn = fnv1a_hash; |
233 | | |
234 | 4 | return new; |
235 | | |
236 | 0 | err: |
237 | 0 | CRYPTO_THREAD_lock_free(new->atomic_lock); |
238 | 0 | ossl_rcu_lock_free(new->lock); |
239 | 0 | if (new->md != NULL) |
240 | 0 | OPENSSL_free(new->md->neighborhood_ptr_to_free); |
241 | 0 | OPENSSL_free(new->md); |
242 | 0 | OPENSSL_free(new); |
243 | 0 | return NULL; |
244 | 4 | } |
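The shift-and-OR sequence in ossl_ht_new() rounds init_neighborhoods up to the next power of two by smearing the highest set bit into every lower bit and then adding one. A worked example with an arbitrary starting value:

    /* rounding 20 up to 32 with the same bit-smearing trick */
    size_t n = 20;
    n--;            /* 19 = 0b10011 */
    n |= n >> 1;    /* 0b11011 */
    n |= n >> 2;    /* 0b11111 */
    n |= n >> 4;    /* no further change for values this small */
    n |= n >> 8;
    n |= n >> 16;
    n++;            /* 32 = 0b100000 */

A power-of-two length is what makes the neighborhood_mask computation (len - 1) valid.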
245 | | |
246 | | void ossl_ht_read_lock(HT *htable) |
247 | 0 | { |
248 | 0 | ossl_rcu_read_lock(htable->lock); |
249 | 0 | } |
250 | | |
251 | | void ossl_ht_read_unlock(HT *htable) |
252 | 0 | { |
253 | 0 | ossl_rcu_read_unlock(htable->lock); |
254 | 0 | } |
255 | | |
256 | | void ossl_ht_write_lock(HT *htable) |
257 | 4 | { |
258 | 4 | ossl_rcu_write_lock(htable->lock); |
259 | 4 | htable->wpd.need_sync = 0; |
260 | 4 | } |
261 | | |
262 | | void ossl_ht_write_unlock(HT *htable) |
263 | 4 | { |
264 | 4 | int need_sync = htable->wpd.need_sync; |
265 | | |
266 | 4 | htable->wpd.need_sync = 0; |
267 | 4 | ossl_rcu_write_unlock(htable->lock); |
268 | 4 | if (need_sync) |
269 | 4 | ossl_synchronize_rcu(htable->lock); |
270 | 4 | } |
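The write-side lock functions above implement deferred reclamation: any writer that schedules a callback through ossl_rcu_call() also sets wpd.need_sync, and ossl_ht_write_unlock() then pays for a single ossl_synchronize_rcu() on the way out. A hedged sketch of the expected caller pattern (error handling omitted; the insert call is the function defined later in this file):

    HT_VALUE *old = NULL;

    ossl_ht_write_lock(h);
    ossl_ht_insert(h, &key, &val, &old);  /* a replacement schedules a deferred free */
    ossl_ht_write_unlock(h);              /* runs ossl_synchronize_rcu() only if needed */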
271 | | |
272 | | static void free_oldmd(void *arg) |
273 | 4 | { |
274 | 4 | struct ht_mutable_data_st *oldmd = arg; |
275 | 4 | size_t i, j; |
276 | 4 | size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1; |
277 | 4 | struct ht_internal_value_st *v; |
278 | | |
279 | 8.19k | for (i = 0; i < neighborhood_len; i++) { |
280 | 8.19k | PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]); |
281 | 40.9k | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
282 | 32.7k | if (oldmd->neighborhoods[i].entries[j].value != NULL) { |
283 | 1.10k | v = oldmd->neighborhoods[i].entries[j].value; |
284 | 1.10k | v->ht->config.ht_free_fn((HT_VALUE *)v); |
285 | 1.10k | free_value(v); |
286 | 1.10k | } |
287 | 32.7k | } |
288 | 8.19k | } |
289 | | |
290 | 4 | OPENSSL_free(oldmd->neighborhood_ptr_to_free); |
291 | 4 | OPENSSL_free(oldmd); |
292 | 4 | } |
293 | | |
294 | | static int ossl_ht_flush_internal(HT *h) |
295 | 4 | { |
296 | 4 | struct ht_mutable_data_st *newmd = NULL; |
297 | 4 | struct ht_mutable_data_st *oldmd = NULL; |
298 | | |
299 | 4 | newmd = OPENSSL_zalloc(sizeof(*newmd)); |
300 | 4 | if (newmd == NULL) |
301 | 0 | return 0; |
302 | | |
303 | 4 | newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN, |
304 | 4 | &newmd->neighborhood_ptr_to_free); |
305 | 4 | if (newmd->neighborhoods == NULL) { |
306 | 0 | OPENSSL_free(newmd); |
307 | 0 | return 0; |
308 | 0 | } |
309 | | |
310 | 4 | newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1; |
311 | | |
312 | | /* Swap the old and new mutable data sets */ |
313 | 4 | oldmd = ossl_rcu_deref(&h->md); |
314 | 4 | ossl_rcu_assign_ptr(&h->md, &newmd); |
315 | | |
316 | | /* Set the number of entries to 0 */ |
317 | 4 | h->wpd.value_count = 0; |
318 | 4 | h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN; |
319 | | |
320 | 4 | ossl_rcu_call(h->lock, free_oldmd, oldmd); |
321 | 4 | h->wpd.need_sync = 1; |
322 | 4 | return 1; |
323 | 4 | } |
324 | | |
325 | | int ossl_ht_flush(HT *h) |
326 | 0 | { |
327 | 0 | return ossl_ht_flush_internal(h); |
328 | 0 | } |
329 | | |
330 | | void ossl_ht_free(HT *h) |
331 | 4 | { |
332 | 4 | if (h == NULL) |
333 | 0 | return; |
334 | | |
335 | 4 | ossl_ht_write_lock(h); |
336 | 4 | ossl_ht_flush_internal(h); |
337 | 4 | ossl_ht_write_unlock(h); |
338 | | /* Freeing the lock does a final sync for us */ |
339 | 4 | CRYPTO_THREAD_lock_free(h->atomic_lock); |
340 | 4 | ossl_rcu_lock_free(h->lock); |
341 | 4 | OPENSSL_free(h->md->neighborhood_ptr_to_free); |
342 | 4 | OPENSSL_free(h->md); |
343 | 4 | OPENSSL_free(h); |
344 | 4 | return; |
345 | 4 | } |
346 | | |
347 | | size_t ossl_ht_count(HT *h) |
348 | 0 | { |
349 | 0 | size_t count; |
350 | |
351 | 0 | count = h->wpd.value_count; |
352 | 0 | return count; |
353 | 0 | } |
354 | | |
355 | | void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg), |
356 | | void *arg) |
357 | 0 | { |
358 | 0 | size_t i, j; |
359 | 0 | struct ht_mutable_data_st *md; |
360 | |
361 | 0 | md = ossl_rcu_deref(&h->md); |
362 | 0 | for (i = 0; i < md->neighborhood_mask + 1; i++) { |
363 | 0 | PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]); |
364 | 0 | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
365 | 0 | if (md->neighborhoods[i].entries[j].value != NULL) { |
366 | 0 | if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg)) |
367 | 0 | goto out; |
368 | 0 | } |
369 | 0 | } |
370 | 0 | } |
371 | 0 | out: |
372 | 0 | return; |
373 | 0 | } |
374 | | |
375 | | HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len, |
376 | | int (*filter)(HT_VALUE *obj, void *arg), |
377 | | void *arg) |
378 | 0 | { |
379 | 0 | struct ht_mutable_data_st *md; |
380 | 0 | HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST) |
381 | 0 | + (sizeof(HT_VALUE *) * max_len)); |
382 | 0 | size_t i, j; |
383 | 0 | struct ht_internal_value_st *v; |
384 | |
385 | 0 | if (list == NULL) |
386 | 0 | return NULL; |
387 | | |
388 | | /* |
389 | | * The list array lives just beyond the end of |
390 | | * the struct |
391 | | */ |
392 | 0 | list->list = (HT_VALUE **)(list + 1); |
393 | |
394 | 0 | md = ossl_rcu_deref(&h->md); |
395 | 0 | for (i = 0; i < md->neighborhood_mask + 1; i++) { |
396 | 0 | PREFETCH_NEIGHBORHOOD(md->neighborhoods[i+1]); |
397 | 0 | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
398 | 0 | v = md->neighborhoods[i].entries[j].value; |
399 | 0 | if (v != NULL && filter((HT_VALUE *)v, arg)) { |
400 | 0 | list->list[list->list_len++] = (HT_VALUE *)v; |
401 | 0 | if (list->list_len == max_len) |
402 | 0 | goto out; |
403 | 0 | } |
404 | 0 | } |
405 | 0 | } |
406 | 0 | out: |
407 | 0 | return list; |
408 | 0 | } |
409 | | |
410 | | void ossl_ht_value_list_free(HT_VALUE_LIST *list) |
411 | 0 | { |
412 | 0 | OPENSSL_free(list); |
413 | 0 | } |
414 | | |
415 | | static int compare_hash(uint64_t hash1, uint64_t hash2) |
416 | 37.8k | { |
417 | 37.8k | return (hash1 == hash2); |
418 | 37.8k | } |
419 | | |
420 | | static void free_old_neigh_table(void *arg) |
421 | 0 | { |
422 | 0 | struct ht_mutable_data_st *oldmd = arg; |
423 | |
424 | 0 | OPENSSL_free(oldmd->neighborhood_ptr_to_free); |
425 | 0 | OPENSSL_free(oldmd); |
426 | 0 | } |
427 | | |
428 | | /* |
429 | | * Increase hash table bucket list |
430 | | * must be called with write_lock held |
431 | | */ |
432 | | static int grow_hashtable(HT *h, size_t oldsize) |
433 | 0 | { |
434 | 0 | struct ht_mutable_data_st *newmd; |
435 | 0 | struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md); |
436 | 0 | int rc = 0; |
437 | 0 | uint64_t oldi, oldj, newi, newj; |
438 | 0 | uint64_t oldhash; |
439 | 0 | struct ht_internal_value_st *oldv; |
440 | 0 | int rehashed; |
441 | 0 | size_t newsize = oldsize * 2; |
442 | |
443 | 0 | if (h->config.lockless_reads) |
444 | 0 | goto out; |
445 | | |
446 | 0 | if ((newmd = OPENSSL_zalloc(sizeof(*newmd))) == NULL) |
447 | 0 | goto out; |
448 | | |
449 | | /* bucket list is always a power of 2 */ |
450 | 0 | newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2, |
451 | 0 | &newmd->neighborhood_ptr_to_free); |
452 | 0 | if (newmd->neighborhoods == NULL) |
453 | 0 | goto out_free; |
454 | | |
455 | | /* being a power of 2 makes for easy mask computation */ |
456 | 0 | newmd->neighborhood_mask = (newsize - 1); |
457 | | |
458 | | /* |
459 | | * Now we need to start rehashing entries |
460 | | * Note we don't need to use atomics here as the new |
461 | | * mutable data hasn't been published |
462 | | */ |
463 | 0 | for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) { |
464 | 0 | PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]); |
465 | 0 | for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) { |
466 | 0 | oldv = oldmd->neighborhoods[oldi].entries[oldj].value; |
467 | 0 | if (oldv == NULL) |
468 | 0 | continue; |
469 | 0 | oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash; |
470 | 0 | newi = oldhash & newmd->neighborhood_mask; |
471 | 0 | rehashed = 0; |
472 | 0 | for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) { |
473 | 0 | if (newmd->neighborhoods[newi].entries[newj].value == NULL) { |
474 | 0 | newmd->neighborhoods[newi].entries[newj].value = oldv; |
475 | 0 | newmd->neighborhoods[newi].entries[newj].hash = oldhash; |
476 | 0 | rehashed = 1; |
477 | 0 | break; |
478 | 0 | } |
479 | 0 | } |
480 | 0 | if (rehashed == 0) { |
481 | | /* we ran out of space in a neighborhood, grow again */ |
482 | 0 | OPENSSL_free(newmd->neighborhoods); |
483 | 0 | OPENSSL_free(newmd); |
484 | 0 | return grow_hashtable(h, newsize); |
485 | 0 | } |
486 | 0 | } |
487 | 0 | } |
488 | | /* |
489 | | * Now that our entries are all rehashed into the new bucket list, |
490 | | * update our neighborhood_len |
491 | | */ |
492 | 0 | h->wpd.neighborhood_len = newsize; |
493 | | |
494 | | /* |
495 | | * Now we replace the old mutable data with the new |
496 | | */ |
497 | 0 | ossl_rcu_assign_ptr(&h->md, &newmd); |
498 | 0 | ossl_rcu_call(h->lock, free_old_neigh_table, oldmd); |
499 | 0 | h->wpd.need_sync = 1; |
500 | | /* |
501 | | * And we're done |
502 | | */ |
503 | 0 | rc = 1; |
504 | |
505 | 0 | out: |
506 | 0 | return rc; |
507 | 0 | out_free: |
508 | 0 | OPENSSL_free(newmd->neighborhoods); |
509 | 0 | OPENSSL_free(newmd); |
510 | 0 | goto out; |
511 | 0 | } |
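When grow_hashtable() doubles the table, the mask gains one bit, so every entry either stays in its neighborhood or moves to old index + oldsize, depending on the newly exposed hash bit. A small worked example (hash value chosen for illustration):

    /* doubling from 16 to 32 neighborhoods re-maps hash 0x3A */
    uint64_t hash = 0x3A;
    uint64_t old_idx = hash & 0xF;   /* 0xA              */
    uint64_t new_idx = hash & 0x1F;  /* 0x1A == 0xA + 16 */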
512 | | |
513 | | static void free_old_ht_value(void *arg) |
514 | 0 | { |
515 | 0 | HT_VALUE *h = (HT_VALUE *)arg; |
516 | | |
517 | | /* |
518 | | * Note: this is only called on replacement; |
519 | | * the caller is responsible for freeing the |
520 | | * held data. We just need to free the wrapping |
521 | | * struct here. |
522 | | */ |
523 | 0 | OPENSSL_free(h); |
524 | 0 | } |
525 | | |
526 | | static ossl_inline int match_key(HT_KEY *a, HT_KEY *b) |
527 | 33.8k | { |
528 | | /* |
529 | | * keys match if they are both present, the same size |
530 | | * and compare equal in memory |
531 | | */ |
532 | 33.8k | PREFETCH(a->keybuf); |
533 | 33.8k | PREFETCH(b->keybuf); |
534 | 33.8k | if (a->keybuf != NULL && b->keybuf != NULL && a->keysize == b->keysize) |
535 | 33.8k | return !memcmp(a->keybuf, b->keybuf, a->keysize); |
536 | | |
537 | 0 | return 1; |
538 | 33.8k | } |
539 | | |
540 | | static int ossl_ht_insert_locked(HT *h, uint64_t hash, |
541 | | struct ht_internal_value_st *newval, |
542 | | HT_VALUE **olddata) |
543 | 1.10k | { |
544 | 1.10k | struct ht_mutable_data_st *md = h->md; |
545 | 1.10k | uint64_t neigh_idx_start = hash & md->neighborhood_mask; |
546 | 1.10k | uint64_t neigh_idx = neigh_idx_start; |
547 | 1.10k | size_t j; |
548 | 1.10k | uint64_t ihash; |
549 | 1.10k | HT_VALUE *ival; |
550 | 1.10k | size_t empty_idx = SIZE_MAX; |
551 | 1.10k | int lockless_reads = h->config.lockless_reads; |
552 | | |
553 | 1.10k | do { |
554 | 1.10k | PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]); |
555 | | |
556 | 1.24k | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
557 | 1.24k | ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value); |
558 | 1.24k | if (ival == NULL) { |
559 | 1.10k | empty_idx = j; |
560 | | /* lockless_reads implies no deletion, we can break out */ |
561 | 1.10k | if (lockless_reads) |
562 | 1.10k | goto not_found; |
563 | 0 | continue; |
564 | 1.10k | } |
565 | 139 | if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash, |
566 | 139 | &ihash, h->atomic_lock)) |
567 | 0 | return 0; |
568 | 139 | if (compare_hash(hash, ihash) && match_key(&newval->value.key, |
569 | 0 | &ival->key)) { |
570 | 0 | if (olddata == NULL) { |
571 | | /* This would insert a duplicate -> fail */ |
572 | 0 | return 0; |
573 | 0 | } |
574 | | /* Do a replacement */ |
575 | 0 | if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash, |
576 | 0 | hash, h->atomic_lock)) |
577 | 0 | return 0; |
578 | 0 | *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value; |
579 | 0 | ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value, |
580 | 0 | &newval); |
581 | 0 | ossl_rcu_call(h->lock, free_old_ht_value, *olddata); |
582 | 0 | h->wpd.need_sync = 1; |
583 | 0 | return 1; |
584 | 0 | } |
585 | 139 | } |
586 | 0 | if (!lockless_reads) |
587 | 0 | break; |
588 | | /* Continue search in subsequent neighborhoods */ |
589 | 0 | neigh_idx = (neigh_idx + 1) & md->neighborhood_mask; |
590 | 0 | } while (neigh_idx != neigh_idx_start); |
591 | | |
592 | 1.10k | not_found: |
593 | | /* If we get to here, it's just an insert */ |
594 | 1.10k | if (empty_idx == SIZE_MAX) |
595 | 0 | return -1; /* out of space */ |
596 | 1.10k | if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash, |
597 | 1.10k | hash, h->atomic_lock)) |
598 | 0 | return 0; |
599 | 1.10k | h->wpd.value_count++; |
600 | 1.10k | ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value, |
601 | 1.10k | &newval); |
602 | 1.10k | return 1; |
603 | 1.10k | } |
604 | | |
605 | | static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key, |
606 | | void *data, |
607 | | uintptr_t *type) |
608 | 1.10k | { |
609 | 1.10k | struct ht_internal_value_st *tmp; |
610 | 1.10k | size_t nvsize = sizeof(*tmp); |
611 | | |
612 | 1.10k | if (h->config.collision_check == 1) |
613 | 1.10k | nvsize += key->keysize; |
614 | | |
615 | 1.10k | tmp = OPENSSL_malloc(nvsize); |
616 | | |
617 | 1.10k | if (tmp == NULL) |
618 | 0 | return NULL; |
619 | | |
620 | 1.10k | tmp->ht = h; |
621 | 1.10k | tmp->value.value = data; |
622 | 1.10k | tmp->value.type_id = type; |
623 | 1.10k | tmp->value.key.keybuf = NULL; |
624 | 1.10k | if (h->config.collision_check) { |
625 | 1.10k | tmp->value.key.keybuf = (uint8_t *)(tmp + 1); |
626 | 1.10k | tmp->value.key.keysize = key->keysize; |
627 | 1.10k | memcpy(tmp->value.key.keybuf, key->keybuf, key->keysize); |
628 | 1.10k | } |
629 | | |
630 | | |
631 | 1.10k | return tmp; |
632 | 1.10k | } |
633 | | |
634 | | static void free_value(struct ht_internal_value_st *v) |
635 | 1.10k | { |
636 | 1.10k | OPENSSL_free(v); |
637 | 1.10k | } |
638 | | |
639 | | int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata) |
640 | 1.10k | { |
641 | 1.10k | struct ht_internal_value_st *newval = NULL; |
642 | 1.10k | uint64_t hash; |
643 | 1.10k | int rc = 0; |
644 | 1.10k | int i; |
645 | | |
646 | 1.10k | if (data->value == NULL) |
647 | 0 | goto out; |
648 | | |
649 | 1.10k | newval = alloc_new_value(h, key, data->value, data->type_id); |
650 | 1.10k | if (newval == NULL) |
651 | 0 | goto out; |
652 | | |
653 | | /* |
654 | | * we have to take our lock here to prevent other changes |
655 | | * to the bucket list |
656 | | */ |
657 | 1.10k | hash = h->config.ht_hash_fn(key->keybuf, key->keysize); |
658 | | |
659 | 1.10k | for (i = 0; |
660 | 1.10k | (rc = ossl_ht_insert_locked(h, hash, newval, olddata)) == -1 |
661 | 1.10k | && i < 4; |
662 | 1.10k | ++i) |
663 | 0 | if (!grow_hashtable(h, h->wpd.neighborhood_len)) { |
664 | 0 | rc = -1; |
665 | 0 | break; |
666 | 0 | } |
667 | | |
668 | 1.10k | if (rc <= 0) |
669 | 0 | free_value(newval); |
670 | | |
671 | 1.10k | out: |
672 | 1.10k | return rc; |
673 | 1.10k | } |
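Putting the write path together, here is a hedged end-to-end sketch built only from the functions and fields visible in this file (the HT_CONFIG/HT_KEY/HT_VALUE fields are filled in by hand; real callers would normally go through the typed helper macros in internal/hashtable.h, and error checks are omitted):

    int mydata = 42;
    HT_CONFIG conf = {
        .init_neighborhoods = 64,   /* rounded up to a power of two internally */
        .lockless_reads = 1,
        .collision_check = 1,       /* keep a key copy so match_key() can run */
    };
    HT_KEY key = { 0 };
    HT_VALUE val = { 0 }, *found;
    HT *h = ossl_ht_new(&conf);

    key.keybuf = (uint8_t *)"answer";
    key.keysize = 6;
    val.value = &mydata;            /* type_id left unset in this sketch */

    ossl_ht_write_lock(h);
    ossl_ht_insert(h, &key, &val, NULL);   /* NULL: duplicates fail rather than replace */
    ossl_ht_write_unlock(h);

    ossl_ht_read_lock(h);
    found = ossl_ht_get(h, &key);          /* returns the stored HT_VALUE or NULL */
    ossl_ht_read_unlock(h);

    ossl_ht_free(h);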
674 | | |
675 | | HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key) |
676 | 38.1k | { |
677 | 38.1k | struct ht_mutable_data_st *md; |
678 | 38.1k | uint64_t hash; |
679 | 38.1k | uint64_t neigh_idx_start; |
680 | 38.1k | uint64_t neigh_idx; |
681 | 38.1k | struct ht_internal_value_st *ival = NULL; |
682 | 38.1k | size_t j; |
683 | 38.1k | uint64_t ehash; |
684 | 38.1k | int lockless_reads = h->config.lockless_reads; |
685 | | |
686 | 38.1k | hash = h->config.ht_hash_fn(key->keybuf, key->keysize); |
687 | | |
688 | 38.1k | md = ossl_rcu_deref(&h->md); |
689 | 38.1k | neigh_idx = neigh_idx_start = hash & md->neighborhood_mask; |
690 | 38.1k | do { |
691 | 38.1k | PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]); |
692 | 41.9k | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
693 | 41.9k | ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value); |
694 | 41.9k | if (ival == NULL) { |
695 | 4.29k | if (lockless_reads) |
696 | | /* lockless_reads implies no deletion, we can break out */ |
697 | 4.29k | return NULL; |
698 | 0 | continue; |
699 | 4.29k | } |
700 | 37.6k | if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash, |
701 | 37.6k | &ehash, h->atomic_lock)) |
702 | 0 | return NULL; |
703 | 37.6k | if (compare_hash(hash, ehash) && match_key(&ival->value.key, key)) |
704 | 33.8k | return (HT_VALUE *)ival; |
705 | 37.6k | } |
706 | 0 | if (!lockless_reads) |
707 | 0 | break; |
708 | | /* Continue search in subsequent neighborhoods */ |
709 | 0 | neigh_idx = (neigh_idx + 1) & md->neighborhood_mask; |
710 | 0 | } while (neigh_idx != neigh_idx_start); |
711 | | |
712 | 0 | return NULL; |
713 | 38.1k | } |
714 | | |
715 | | static void free_old_entry(void *arg) |
716 | 0 | { |
717 | 0 | struct ht_internal_value_st *v = arg; |
718 | |
719 | 0 | v->ht->config.ht_free_fn((HT_VALUE *)v); |
720 | 0 | free_value(v); |
721 | 0 | } |
722 | | |
723 | | int ossl_ht_delete(HT *h, HT_KEY *key) |
724 | 0 | { |
725 | 0 | uint64_t hash; |
726 | 0 | uint64_t neigh_idx; |
727 | 0 | size_t j; |
728 | 0 | struct ht_internal_value_st *v = NULL; |
729 | 0 | HT_VALUE *nv = NULL; |
730 | 0 | int rc = 0; |
731 | |
732 | 0 | if (h->config.lockless_reads) |
733 | 0 | return 0; |
734 | | |
735 | 0 | hash = h->config.ht_hash_fn(key->keybuf, key->keysize); |
736 | |
737 | 0 | neigh_idx = hash & h->md->neighborhood_mask; |
738 | 0 | PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]); |
739 | 0 | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
740 | 0 | v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value; |
741 | 0 | if (v == NULL) |
742 | 0 | continue; |
743 | 0 | if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash) |
744 | 0 | && match_key(key, &v->value.key)) { |
745 | 0 | if (!CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash, |
746 | 0 | 0, h->atomic_lock)) |
747 | 0 | break; |
748 | 0 | h->wpd.value_count--; |
749 | 0 | ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value, |
750 | 0 | &nv); |
751 | 0 | rc = 1; |
752 | 0 | break; |
753 | 0 | } |
754 | 0 | } |
755 | 0 | if (rc == 1) { |
756 | 0 | ossl_rcu_call(h->lock, free_old_entry, v); |
757 | 0 | h->wpd.need_sync = 1; |
758 | 0 | } |
759 | 0 | return rc; |
760 | 0 | } |