/src/openssl34/crypto/hashtable/hashtable.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2024 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the Apache License 2.0 (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | * |
9 | | * |
10 | | * |
11 | | * Notes On hash table design and layout |
12 | | * This hashtable uses a hopscotch algorithm to do indexing. The data structure |
13 | | * looks as follows: |
14 | | * |
15 | | * hash +--------------+ |
16 | | * value+------->+ HT_VALUE | |
17 | | * + +--------------+ |
18 | | * +-------+ |
19 | | * | | |
20 | | * +---------------------------------------------------------+ |
21 | | * | | | | | | |
22 | | * | entry | entry | entry | entry | | |
23 | | * | | | | | | |
24 | | * +---------------------------------------------------------+ |
25 | | * | | | |
26 | | * | | | |
27 | | * +---------------------------------------------------------+ |
28 | | * | + + + |
29 | | * | neighborhood[0] neighborhood[1] | |
30 | | * | | |
31 | | * | | |
32 | | * +---------------------------------------------------------+ |
33 | | * | |
34 | | * + |
35 | | * neighborhoods |
36 | | * |
37 | | * On lookup/insert/delete, the items key is hashed to a 64 bit value |
38 | | * and the result is masked to provide an index into the neighborhoods |
39 | | * table. Once a neighborhood is determined, an in-order search is done |
40 | | * of the elements in the neighborhood's entries for a matching hash |
41 | | * value; if found, the corresponding HT_VALUE is used for the respective |
42 | | * operation. The number of entries in a neighborhood is determined at build |
43 | | * time based on the cacheline size of the target CPU. The intent is for a |
44 | | * neighborhood to have all entries in the neighborhood fit into a single cache |
45 | | * line to speed up lookups. If all entries in a neighborhood are in use at the |
46 | | * time of an insert, the table is expanded and rehashed. |
47 | | * |
48 | | * Lockless reads hash table is based on the same design but does not |
49 | | * allow growing and deletion. Thus subsequent neighborhoods are always |
50 | | * searched for a match until an empty entry is found. |
51 | | */ |
52 | | |
53 | | #include <string.h> |
54 | | #include <internal/rcu.h> |
55 | | #include <internal/hashtable.h> |
56 | | #include <openssl/rand.h> |
57 | | |
/*
 * gcc defines __SANITIZE_THREAD__
 * but clang uses the feature attributes api
 * map the latter to the former
 *
 * NOTE(review): gcc's macro is __SANITIZE_THREAD__ (singular), but the
 * macro defined and tested below is __SANITIZE_THREADS__ (plural), so on
 * gcc builds this #ifdef never fires and tsan_interface.h is never
 * included -- confirm whether that is intentional. Also note that
 * identifiers beginning with a double underscore are reserved for the
 * implementation.
 */
#if defined(__clang__) && defined(__has_feature)
#if __has_feature(thread_sanitizer)
#define __SANITIZE_THREADS__
#endif
#endif

#ifdef __SANITIZE_THREADS__
#include <sanitizer/tsan_interface.h>
#endif
72 | | |
73 | | #include "internal/numbers.h" |
/*
 * When we do a lookup/insert/delete, there is a high likelihood
 * that we will iterate over at least part of the neighborhood list
 * As such, because we design a neighborhood entry to fit into a single
 * cache line it is advantageous, when supported to fetch the entire
 * structure for faster lookups
 *
 * Fix: the compiler-detection test previously checked __CLANG__, which is
 * not a macro clang defines (clang defines __clang__, lowercase). It only
 * worked because clang also defines __GNUC__; test the correct macro.
 */
#if defined(__GNUC__) || defined(__clang__)
#define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries)
#define PREFETCH(x) __builtin_prefetch(x)
#define ALIGN __attribute__((aligned(8)))
#else
#define PREFETCH_NEIGHBORHOOD(x)
#define PREFETCH(x)
#define ALIGN
#endif
90 | | |
91 | | static ossl_unused uint64_t fnv1a_hash(uint8_t *key, size_t len) |
92 | 7.67M | { |
93 | 7.67M | uint64_t hash = 0xcbf29ce484222325ULL; |
94 | 7.67M | size_t i; |
95 | | |
96 | 498M | for (i = 0; i < len; i++) { |
97 | 490M | hash ^= key[i]; |
98 | 490M | hash *= 0x00000100000001B3ULL; |
99 | 490M | } |
100 | 7.67M | return hash; |
101 | 7.67M | } |
102 | | |
/*
 * Define our neighborhood list length
 * Note: It should always be a power of 2
 */
#define DEFAULT_NEIGH_LEN_LOG 4
#define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG)

/*
 * For now assume cache line size is 64 bytes
 */
#define CACHE_LINE_BYTES 64
#define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES

/* Entries per neighborhood: as many entries as fit in one cache line */
#define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st))
/*
 * Defines our chains of values
 */
struct ht_internal_value_st {
    HT_VALUE value;     /* public value; must be first so casts work both ways */
    HT *ht;             /* owning table, needed to reach the free callback */
};

/* One slot in a neighborhood: cached hash plus the value pointer */
struct ht_neighborhood_entry_st {
    uint64_t hash;
    struct ht_internal_value_st *value;
} ALIGN;

struct ht_neighborhood_st {
    struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN];
};

/*
 * Updates to data in this struct
 * require an rcu sync after modification
 * prior to free
 */
struct ht_mutable_data_st {
    struct ht_neighborhood_st *neighborhoods;
    void *neighborhood_ptr_to_free; /* raw allocation backing neighborhoods */
    uint64_t neighborhood_mask;     /* neighborhood count - 1 (power of 2) */
};

/*
 * Private data may be updated on the write
 * side only, and so do not require rcu sync
 */
struct ht_write_private_data_st {
    size_t neighborhood_len; /* current neighborhood count (power of 2) */
    size_t value_count;      /* number of live entries */
    int need_sync;           /* set when an RCU-deferred free is pending */
};

struct ht_internal_st {
    HT_CONFIG config;
    CRYPTO_RCU_LOCK *lock;      /* writer lock / RCU domain */
    CRYPTO_RWLOCK *atomic_lock; /* fallback lock for CRYPTO_atomic_* ops */
    struct ht_mutable_data_st *md;
    struct ht_write_private_data_st wpd;
};

static void free_value(struct ht_internal_value_st *v);
164 | | |
165 | | static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len, |
166 | | void **freeptr) |
167 | 63.2k | { |
168 | 63.2k | struct ht_neighborhood_st *ret; |
169 | | |
170 | 63.2k | ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len, |
171 | 63.2k | CACHE_LINE_BYTES, freeptr); |
172 | | |
173 | | /* fall back to regular malloc */ |
174 | 63.2k | if (ret == NULL) { |
175 | 0 | ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len); |
176 | 0 | if (ret == NULL) |
177 | 0 | return NULL; |
178 | 0 | } |
179 | 63.2k | memset(ret, 0, sizeof(struct ht_neighborhood_st) * len); |
180 | 63.2k | return ret; |
181 | 63.2k | } |
182 | | |
183 | | static void internal_free_nop(HT_VALUE *v) |
184 | 24.7k | { |
185 | 24.7k | return; |
186 | 24.7k | } |
187 | | |
188 | | HT *ossl_ht_new(const HT_CONFIG *conf) |
189 | 242 | { |
190 | 242 | HT *new = OPENSSL_zalloc(sizeof(*new)); |
191 | | |
192 | 242 | if (new == NULL) |
193 | 0 | return NULL; |
194 | | |
195 | 242 | new->atomic_lock = CRYPTO_THREAD_lock_new(); |
196 | 242 | if (new->atomic_lock == NULL) |
197 | 0 | goto err; |
198 | | |
199 | 242 | memcpy(&new->config, conf, sizeof(*conf)); |
200 | | |
201 | 242 | if (new->config.init_neighborhoods != 0) { |
202 | 236 | new->wpd.neighborhood_len = new->config.init_neighborhoods; |
203 | | /* round up to the next power of 2 */ |
204 | 236 | new->wpd.neighborhood_len--; |
205 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1; |
206 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2; |
207 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4; |
208 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8; |
209 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16; |
210 | 236 | new->wpd.neighborhood_len++; |
211 | 236 | } else { |
212 | 6 | new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN; |
213 | 6 | } |
214 | | |
215 | 242 | if (new->config.ht_free_fn == NULL) |
216 | 236 | new->config.ht_free_fn = internal_free_nop; |
217 | | |
218 | 242 | new->md = OPENSSL_zalloc(sizeof(*new->md)); |
219 | 242 | if (new->md == NULL) |
220 | 0 | goto err; |
221 | | |
222 | 242 | new->md->neighborhoods = alloc_new_neighborhood_list(new->wpd.neighborhood_len, |
223 | 242 | &new->md->neighborhood_ptr_to_free); |
224 | 242 | if (new->md->neighborhoods == NULL) |
225 | 0 | goto err; |
226 | 242 | new->md->neighborhood_mask = new->wpd.neighborhood_len - 1; |
227 | | |
228 | 242 | new->lock = ossl_rcu_lock_new(1, conf->ctx); |
229 | 242 | if (new->lock == NULL) |
230 | 0 | goto err; |
231 | | |
232 | 242 | if (new->config.ht_hash_fn == NULL) |
233 | 242 | new->config.ht_hash_fn = fnv1a_hash; |
234 | | |
235 | 242 | return new; |
236 | | |
237 | 0 | err: |
238 | 0 | CRYPTO_THREAD_lock_free(new->atomic_lock); |
239 | 0 | ossl_rcu_lock_free(new->lock); |
240 | 0 | if (new->md != NULL) |
241 | 0 | OPENSSL_free(new->md->neighborhood_ptr_to_free); |
242 | 0 | OPENSSL_free(new->md); |
243 | 0 | OPENSSL_free(new); |
244 | 0 | return NULL; |
245 | 242 | } |
246 | | |
/* Enter an RCU read-side critical section; pair with ossl_ht_read_unlock(). */
void ossl_ht_read_lock(HT *htable)
{
    ossl_rcu_read_lock(htable->lock);
}
251 | | |
/* Leave an RCU read-side critical section opened by ossl_ht_read_lock(). */
void ossl_ht_read_unlock(HT *htable)
{
    ossl_rcu_read_unlock(htable->lock);
}
256 | | |
/*
 * Take the writer lock. need_sync is cleared here so that
 * ossl_ht_write_unlock() can tell whether this critical section
 * scheduled any RCU-deferred frees.
 */
void ossl_ht_write_lock(HT *htable)
{
    ossl_rcu_write_lock(htable->lock);
    htable->wpd.need_sync = 0;
}
262 | | |
/*
 * Release the writer lock. If any update in this critical section
 * deferred a free via ossl_rcu_call() (need_sync set), synchronize so
 * the callbacks can run once all readers have drained.
 */
void ossl_ht_write_unlock(HT *htable)
{
    /* snapshot while still holding the lock; wpd is writer-private */
    int need_sync = htable->wpd.need_sync;

    htable->wpd.need_sync = 0;
    ossl_rcu_write_unlock(htable->lock);
    if (need_sync)
        ossl_synchronize_rcu(htable->lock);
}
272 | | |
273 | | static void free_oldmd(void *arg) |
274 | 31.0k | { |
275 | 31.0k | struct ht_mutable_data_st *oldmd = arg; |
276 | 31.0k | size_t i, j; |
277 | 31.0k | size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1; |
278 | 31.0k | struct ht_internal_value_st *v; |
279 | | |
280 | 356k | for (i = 0; i < neighborhood_len; i++) { |
281 | 324k | PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]); |
282 | 1.62M | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
283 | 1.29M | if (oldmd->neighborhoods[i].entries[j].value != NULL) { |
284 | 26.1k | v = oldmd->neighborhoods[i].entries[j].value; |
285 | 26.1k | v->ht->config.ht_free_fn((HT_VALUE *)v); |
286 | 26.1k | free_value(v); |
287 | 26.1k | } |
288 | 1.29M | } |
289 | 324k | } |
290 | | |
291 | 31.0k | OPENSSL_free(oldmd->neighborhood_ptr_to_free); |
292 | 31.0k | OPENSSL_free(oldmd); |
293 | 31.0k | } |
294 | | |
/*
 * Replace the table's mutable data with a fresh, empty, default-sized set.
 * The old set (and every value in it) is freed via RCU callback once all
 * readers have drained. Must be called with the write lock held.
 * Returns 1 on success, 0 on allocation failure (table unchanged).
 */
static int ossl_ht_flush_internal(HT *h)
{
    struct ht_mutable_data_st *newmd = NULL;
    struct ht_mutable_data_st *oldmd = NULL;

    newmd = OPENSSL_zalloc(sizeof(*newmd));
    if (newmd == NULL)
        return 0;

    newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL) {
        OPENSSL_free(newmd);
        return 0;
    }

    newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1;

    /* Swap the old and new mutable data sets */
    oldmd = ossl_rcu_deref(&h->md);
    ossl_rcu_assign_ptr(&h->md, &newmd);

    /* Set the number of entries to 0 */
    h->wpd.value_count = 0;
    h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;

    /* defer freeing the old set until readers are done with it */
    ossl_rcu_call(h->lock, free_oldmd, oldmd);
    h->wpd.need_sync = 1;
    return 1;
}
325 | | |
/* Public wrapper: empty the table (caller must hold the write lock). */
int ossl_ht_flush(HT *h)
{
    return ossl_ht_flush_internal(h);
}
330 | | |
/*
 * Destroy the table: flush all values under the write lock, then release
 * the locks and the (now empty) mutable data. NULL-safe.
 */
void ossl_ht_free(HT *h)
{
    if (h == NULL)
        return;

    ossl_ht_write_lock(h);
    ossl_ht_flush_internal(h);
    ossl_ht_write_unlock(h);
    /* Freeing the lock does a final sync for us */
    CRYPTO_THREAD_lock_free(h->atomic_lock);
    ossl_rcu_lock_free(h->lock);
    OPENSSL_free(h->md->neighborhood_ptr_to_free);
    OPENSSL_free(h->md);
    OPENSSL_free(h);
    return;
}
347 | | |
348 | | size_t ossl_ht_count(HT *h) |
349 | 1.60k | { |
350 | 1.60k | size_t count; |
351 | | |
352 | 1.60k | count = h->wpd.value_count; |
353 | 1.60k | return count; |
354 | 1.60k | } |
355 | | |
356 | | void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg), |
357 | | void *arg) |
358 | 95 | { |
359 | 95 | size_t i, j; |
360 | 95 | struct ht_mutable_data_st *md; |
361 | | |
362 | 95 | md = ossl_rcu_deref(&h->md); |
363 | 85.2k | for (i = 0; i < md->neighborhood_mask + 1; i++) { |
364 | 85.1k | PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]); |
365 | 425k | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
366 | 340k | if (md->neighborhoods[i].entries[j].value != NULL) { |
367 | 609 | if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg)) |
368 | 7 | goto out; |
369 | 609 | } |
370 | 340k | } |
371 | 85.1k | } |
372 | 95 | out: |
373 | 95 | return; |
374 | 95 | } |
375 | | |
376 | | HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len, |
377 | | int (*filter)(HT_VALUE *obj, void *arg), |
378 | | void *arg) |
379 | 72 | { |
380 | 72 | struct ht_mutable_data_st *md; |
381 | 72 | HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST) |
382 | 72 | + (sizeof(HT_VALUE *) * max_len)); |
383 | 72 | size_t i, j; |
384 | 72 | struct ht_internal_value_st *v; |
385 | | |
386 | 72 | if (list == NULL) |
387 | 0 | return NULL; |
388 | | |
389 | | /* |
390 | | * The list array lives just beyond the end of |
391 | | * the struct |
392 | | */ |
393 | 72 | list->list = (HT_VALUE **)(list + 1); |
394 | | |
395 | 72 | md = ossl_rcu_deref(&h->md); |
396 | 61.6k | for (i = 0; i < md->neighborhood_mask + 1; i++) { |
397 | 61.6k | PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]); |
398 | 308k | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
399 | 246k | v = md->neighborhoods[i].entries[j].value; |
400 | 246k | if (v != NULL && filter((HT_VALUE *)v, arg)) { |
401 | 5 | list->list[list->list_len++] = (HT_VALUE *)v; |
402 | 5 | if (list->list_len == max_len) |
403 | 5 | goto out; |
404 | 5 | } |
405 | 246k | } |
406 | 61.6k | } |
407 | 72 | out: |
408 | 72 | return list; |
409 | 72 | } |
410 | | |
/* Free a list returned by ossl_ht_filter() (values themselves are not freed). */
void ossl_ht_value_list_free(HT_VALUE_LIST *list)
{
    OPENSSL_free(list);
}
415 | | |
/* Return 1 when the two 64-bit hashes are equal, 0 otherwise. */
static int compare_hash(uint64_t hash1, uint64_t hash2)
{
    return hash1 == hash2 ? 1 : 0;
}
420 | | |
421 | | static void free_old_neigh_table(void *arg) |
422 | 105 | { |
423 | 105 | struct ht_mutable_data_st *oldmd = arg; |
424 | | |
425 | 105 | OPENSSL_free(oldmd->neighborhood_ptr_to_free); |
426 | 105 | OPENSSL_free(oldmd); |
427 | 105 | } |
428 | | |
429 | | /* |
430 | | * Increase hash table bucket list |
431 | | * must be called with write_lock held |
432 | | */ |
433 | | static int grow_hashtable(HT *h, size_t oldsize) |
434 | 24 | { |
435 | 24 | struct ht_mutable_data_st *newmd; |
436 | 24 | struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md); |
437 | 24 | int rc = 0; |
438 | 24 | uint64_t oldi, oldj, newi, newj; |
439 | 24 | uint64_t oldhash; |
440 | 24 | struct ht_internal_value_st *oldv; |
441 | 24 | int rehashed; |
442 | 24 | size_t newsize = oldsize * 2; |
443 | | |
444 | 24 | if (h->config.lockless_reads) |
445 | 0 | goto out; |
446 | | |
447 | 24 | if ((newmd = OPENSSL_zalloc(sizeof(*newmd))) == NULL) |
448 | 0 | goto out; |
449 | | |
450 | | /* bucket list is always a power of 2 */ |
451 | 24 | newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2, |
452 | 24 | &newmd->neighborhood_ptr_to_free); |
453 | 24 | if (newmd->neighborhoods == NULL) |
454 | 0 | goto out_free; |
455 | | |
456 | | /* being a power of 2 makes for easy mask computation */ |
457 | 24 | newmd->neighborhood_mask = (newsize - 1); |
458 | | |
459 | | /* |
460 | | * Now we need to start rehashing entries |
461 | | * Note we don't need to use atomics here as the new |
462 | | * mutable data hasn't been published |
463 | | */ |
464 | 2.16k | for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) { |
465 | 2.14k | PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]); |
466 | 10.7k | for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) { |
467 | 8.57k | oldv = oldmd->neighborhoods[oldi].entries[oldj].value; |
468 | 8.57k | if (oldv == NULL) |
469 | 8.43k | continue; |
470 | 143 | oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash; |
471 | 143 | newi = oldhash & newmd->neighborhood_mask; |
472 | 143 | rehashed = 0; |
473 | 247 | for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) { |
474 | 247 | if (newmd->neighborhoods[newi].entries[newj].value == NULL) { |
475 | 143 | newmd->neighborhoods[newi].entries[newj].value = oldv; |
476 | 143 | newmd->neighborhoods[newi].entries[newj].hash = oldhash; |
477 | 143 | rehashed = 1; |
478 | 143 | break; |
479 | 143 | } |
480 | 247 | } |
481 | 143 | if (rehashed == 0) { |
482 | | /* we ran out of space in a neighborhood, grow again */ |
483 | 0 | OPENSSL_free(newmd->neighborhoods); |
484 | 0 | OPENSSL_free(newmd); |
485 | 0 | return grow_hashtable(h, newsize); |
486 | 0 | } |
487 | 143 | } |
488 | 2.14k | } |
489 | | /* |
490 | | * Now that our entries are all hashed into the new bucket list |
491 | | * update our bucket_len and target_max_load |
492 | | */ |
493 | 24 | h->wpd.neighborhood_len = newsize; |
494 | | |
495 | | /* |
496 | | * Now we replace the old mutable data with the new |
497 | | */ |
498 | 24 | ossl_rcu_assign_ptr(&h->md, &newmd); |
499 | 24 | ossl_rcu_call(h->lock, free_old_neigh_table, oldmd); |
500 | 24 | h->wpd.need_sync = 1; |
501 | | /* |
502 | | * And we're done |
503 | | */ |
504 | 24 | rc = 1; |
505 | | |
506 | 24 | out: |
507 | 24 | return rc; |
508 | 0 | out_free: |
509 | 0 | OPENSSL_free(newmd->neighborhoods); |
510 | 0 | OPENSSL_free(newmd); |
511 | 0 | goto out; |
512 | 24 | } |
513 | | |
/*
 * RCU callback used on value replacement. The caller is responsible for
 * freeing the held data; only the wrapping struct is released here.
 */
static void free_old_ht_value(void *arg)
{
    OPENSSL_free(arg);
}
526 | | |
/*
 * Compare two keys; only meaningful after the caller has already seen the
 * 64-bit hashes compare equal.
 */
static ossl_inline int match_key(HT_KEY *a, HT_KEY *b)
{
    /*
     * keys match if they are both present, the same size
     * and compare equal in memory
     */
    PREFETCH(a->keybuf);
    PREFETCH(b->keybuf);
    if (a->keybuf != NULL && b->keybuf != NULL && a->keysize == b->keysize)
        return !memcmp(a->keybuf, b->keybuf, a->keysize);

    /*
     * No stored key bytes to compare (collision checking disabled, see
     * alloc_new_value): fall back to trusting the hash equality already
     * established by the caller.
     * NOTE(review): this branch also reports a match when both keybufs are
     * present but the sizes differ -- presumably intended to be unreachable
     * given equal hashes; confirm.
     */
    return 1;
}
540 | | |
/*
 * Core insert, called with the write lock held.
 * Returns 1 on successful insert/replace, 0 on error (including an
 * attempted duplicate insert when olddata == NULL), and -1 when the target
 * neighborhood is full and the table must be grown.
 * On replacement, *olddata receives the displaced value, which is freed
 * (wrapper only) via RCU callback.
 */
static int ossl_ht_insert_locked(HT *h, uint64_t hash,
                                 struct ht_internal_value_st *newval,
                                 HT_VALUE **olddata)
{
    struct ht_mutable_data_st *md = h->md;
    uint64_t neigh_idx_start = hash & md->neighborhood_mask;
    uint64_t neigh_idx = neigh_idx_start;
    size_t j;
    uint64_t ihash;
    HT_VALUE *ival;
    size_t empty_idx = SIZE_MAX;
    int lockless_reads = h->config.lockless_reads;

    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);

        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                empty_idx = j;
                /* lockless_reads implies no deletion, we can break out */
                if (lockless_reads)
                    goto not_found;
                /* with deletion possible, the key may still be further on */
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ihash, h->atomic_lock))
                return 0;
            if (compare_hash(hash, ihash) && match_key(&newval->value.key, &ival->key)) {
                if (olddata == NULL) {
                    /* This would insert a duplicate -> fail */
                    return 0;
                }
                /* Do a replacement */
                if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash,
                                         hash, h->atomic_lock))
                    return 0;
                *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value;
                ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value,
                                    &newval);
                /* old wrapper freed once readers drain; data is caller's */
                ossl_rcu_call(h->lock, free_old_ht_value, *olddata);
                h->wpd.need_sync = 1;
                return 1;
            }
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);

not_found:
    /* If we get to here, its just an insert */
    if (empty_idx == SIZE_MAX)
        return -1; /* out of space */
    /* publish hash before the value pointer readers key off of */
    if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash,
                             hash, h->atomic_lock))
        return 0;
    h->wpd.value_count++;
    ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value,
                        &newval);
    return 1;
}
604 | | |
605 | | static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key, |
606 | | void *data, |
607 | | uintptr_t *type) |
608 | 43.7k | { |
609 | 43.7k | struct ht_internal_value_st *tmp; |
610 | 43.7k | size_t nvsize = sizeof(*tmp); |
611 | | |
612 | 43.7k | if (h->config.collision_check == 1) |
613 | 42.4k | nvsize += key->keysize; |
614 | | |
615 | 43.7k | tmp = OPENSSL_malloc(nvsize); |
616 | | |
617 | 43.7k | if (tmp == NULL) |
618 | 0 | return NULL; |
619 | | |
620 | 43.7k | tmp->ht = h; |
621 | 43.7k | tmp->value.value = data; |
622 | 43.7k | tmp->value.type_id = type; |
623 | 43.7k | tmp->value.key.keybuf = NULL; |
624 | 43.7k | if (h->config.collision_check) { |
625 | 42.4k | tmp->value.key.keybuf = (uint8_t *)(tmp + 1); |
626 | 42.4k | tmp->value.key.keysize = key->keysize; |
627 | 42.4k | memcpy(tmp->value.key.keybuf, key->keybuf, key->keysize); |
628 | 42.4k | } |
629 | | |
630 | 43.7k | return tmp; |
631 | 43.7k | } |
632 | | |
/* Release a value wrapper allocated by alloc_new_value() (key copy included). */
static void free_value(struct ht_internal_value_st *v)
{
    OPENSSL_free(v);
}
637 | | |
/*
 * Insert data under key (write lock must be held by the caller).
 * If olddata is non-NULL an existing entry with the same key is replaced
 * and the old value returned through it; if olddata is NULL a duplicate
 * key fails the insert. Returns 1 on success, 0 on failure/duplicate,
 * -1 if the table could not be grown to make room.
 */
int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata)
{
    struct ht_internal_value_st *newval = NULL;
    uint64_t hash;
    int rc = 0;
    int i;

    /* NULL data values are not representable (NULL slot == empty) */
    if (data->value == NULL)
        goto out;

    newval = alloc_new_value(h, key, data->value, data->type_id);
    if (newval == NULL)
        goto out;

    /*
     * we have to take our lock here to prevent other changes
     * to the bucket list
     */
    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    /* retry after growing when the neighborhood is full, at most 5 tries */
    for (i = 0;
         (rc = ossl_ht_insert_locked(h, hash, newval, olddata)) == -1
         && i < 4;
         ++i)
        if (!grow_hashtable(h, h->wpd.neighborhood_len)) {
            rc = -1;
            break;
        }

    /* on any failure the un-inserted wrapper is ours to free */
    if (rc <= 0)
        free_value(newval);

out:
    return rc;
}
673 | | |
/*
 * Look up key and return its HT_VALUE, or NULL when absent.
 * For lockless-read tables the scan continues into subsequent
 * neighborhoods until an empty slot is seen (no deletion means an empty
 * slot terminates the probe sequence); otherwise only the home
 * neighborhood is searched. Caller must hold the read lock.
 */
HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key)
{
    struct ht_mutable_data_st *md;
    uint64_t hash;
    uint64_t neigh_idx_start;
    uint64_t neigh_idx;
    struct ht_internal_value_st *ival = NULL;
    size_t j;
    uint64_t ehash;
    int lockless_reads = h->config.lockless_reads;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    md = ossl_rcu_deref(&h->md);
    neigh_idx = neigh_idx_start = hash & md->neighborhood_mask;
    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                if (lockless_reads)
                    /* lockless_reads implies no deletion, we can break out */
                    return NULL;
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ehash, h->atomic_lock))
                return NULL;
            if (compare_hash(hash, ehash) && match_key(&ival->value.key, key))
                return (HT_VALUE *)ival;
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);

    return NULL;
}
713 | | |
714 | | static void free_old_entry(void *arg) |
715 | 1.50k | { |
716 | 1.50k | struct ht_internal_value_st *v = arg; |
717 | | |
718 | 1.50k | v->ht->config.ht_free_fn((HT_VALUE *)v); |
719 | 1.50k | free_value(v); |
720 | 1.50k | } |
721 | | |
/*
 * Remove the entry for key (write lock must be held by the caller).
 * Not supported for lockless-read tables (returns 0). The removed value
 * is passed to the configured free callback via RCU callback once all
 * readers have drained. Returns 1 when an entry was removed, 0 otherwise.
 */
int ossl_ht_delete(HT *h, HT_KEY *key)
{
    uint64_t hash;
    uint64_t neigh_idx;
    size_t j;
    struct ht_internal_value_st *v = NULL;
    HT_VALUE *nv = NULL;
    int rc = 0;

    /* lockless-read tables do not allow deletion */
    if (h->config.lockless_reads)
        return 0;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    neigh_idx = hash & h->md->neighborhood_mask;
    PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]);
    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
        v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value;
        if (v == NULL)
            continue;
        if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash)
            && match_key(key, &v->value.key)) {
            /* clear the hash first, then unpublish the value pointer */
            if (!CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash,
                                     0, h->atomic_lock))
                break;
            h->wpd.value_count--;
            ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value,
                                &nv);
            rc = 1;
            break;
        }
    }
    if (rc == 1) {
        /* defer the actual free until no reader can still see v */
        ossl_rcu_call(h->lock, free_old_entry, v);
        h->wpd.need_sync = 1;
    }
    return rc;
}
758 | 54 | return rc; |
759 | 54 | } |