/src/openssl34/crypto/hashtable/hashtable.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2024 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the Apache License 2.0 (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | * |
9 | | * |
10 | | * |
11 | | * Notes On hash table design and layout |
12 | | * This hashtable uses a hopscotch algorithm to do indexing. The data structure |
13 | | * looks as follows: |
14 | | * |
15 | | * hash +--------------+ |
16 | | * value+------->+ HT_VALUE | |
17 | | * + +--------------+ |
18 | | * +-------+ |
19 | | * | | |
20 | | * +---------------------------------------------------------+ |
21 | | * | | | | | | |
22 | | * | entry | entry | entry | entry | | |
23 | | * | | | | | | |
24 | | * +---------------------------------------------------------+ |
25 | | * | | | |
26 | | * | | | |
27 | | * +---------------------------------------------------------+ |
28 | | * | + + + |
29 | | * | neighborhood[0] neighborhood[1] | |
30 | | * | | |
31 | | * | | |
32 | | * +---------------------------------------------------------+ |
33 | | * | |
34 | | * + |
35 | | * neighborhoods |
36 | | * |
37 | | * On lookup/insert/delete, the item's key is hashed to a 64-bit value |
38 | | * and the result is masked to provide an index into the neighborhoods |
39 | | * table. Once a neighborhood is determined, an in-order search is done |
40 | | * of that neighborhood's entries for a matching hash value; if one is |
41 | | * found, the corresponding HT_VALUE is used for the respective |
42 | | * operation. The number of entries in a neighborhood is determined at build |
43 | | * time based on the cache line size of the target CPU, the intent being |
44 | | * that all entries in a neighborhood fit into a single cache |
45 | | * line to speed up lookups. If all entries in a neighborhood are in use at the |
46 | | * time of an insert, the table is expanded and rehashed. |
47 | | * |
48 | | * A hash table configured for lockless reads is based on the same design |
49 | | * but allows neither growing nor deletion. Thus subsequent neighborhoods |
50 | | * are always searched for a match until an empty entry is found. |
51 | | */ |
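The indexing scheme described above can be reduced to a short, self-contained sketch. The names and types below are simplified stand-ins invented for illustration (the real structures are defined further down in this file); the point is only the hash, mask, then in-order neighborhood scan sequence.

    /* Illustrative sketch only; simplified stand-ins for the real structures below */
    #include <stddef.h>
    #include <stdint.h>

    #define SKETCH_NEIGHBORHOOD_LEN 4            /* entries per cache-line-sized neighborhood */

    struct sketch_entry {
        uint64_t hash;
        void *value;
    };

    struct sketch_neighborhood {
        struct sketch_entry entries[SKETCH_NEIGHBORHOOD_LEN];
    };

    /* Look up a 64-bit hash in a table whose neighborhood count is a power of 2 */
    static void *sketch_lookup(struct sketch_neighborhood *neighborhoods,
                               uint64_t mask, uint64_t hash)
    {
        uint64_t idx = hash & mask;              /* mask selects the neighborhood */
        size_t j;

        for (j = 0; j < SKETCH_NEIGHBORHOOD_LEN; j++) {  /* in-order scan of its entries */
            struct sketch_entry *e = &neighborhoods[idx].entries[j];

            if (e->value != NULL && e->hash == hash)
                return e->value;
        }
        return NULL;  /* the lockless-reads flavour would continue into the next neighborhood */
    }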
52 | | |
53 | | #include <string.h> |
54 | | #include <internal/rcu.h> |
55 | | #include <internal/hashtable.h> |
56 | | #include <openssl/rand.h> |
57 | | |
58 | | /* |
59 | | * gcc defines __SANITIZE_THREAD__, |
60 | | * but clang uses the feature-checking API (__has_feature); |
61 | | * map the latter to the former |
62 | | */ |
63 | | #if defined(__clang__) && defined(__has_feature) |
64 | | #if __has_feature(thread_sanitizer) |
65 | | #define __SANITIZE_THREADS__ |
66 | | #endif |
67 | | #endif |
68 | | |
69 | | #ifdef __SANITIZE_THREADS__ |
70 | | #include <sanitizer/tsan_interface.h> |
71 | | #endif |
72 | | |
73 | | #include "internal/numbers.h" |
74 | | /* |
75 | | * When we do a lookup/insert/delete, there is a high likelihood |
76 | | * that we will iterate over at least part of the neighborhood list. |
77 | | * As such, because a neighborhood is designed to fit into a single |
78 | | * cache line, it is advantageous, when supported, to prefetch the entire |
79 | | * structure for faster lookups. |
80 | | */ |
81 | | #if defined(__GNUC__) || defined(__clang__) |
82 | 25.0M | #define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries) |
83 | 61.2M | #define PREFETCH(x) __builtin_prefetch(x) |
84 | | #else |
85 | | #define PREFETCH_NEIGHBORHOOD(x) |
86 | | #define PREFETCH(x) |
87 | | #endif |
88 | | |
89 | | static ossl_unused uint64_t fnv1a_hash(uint8_t *key, size_t len) |
90 | 7.36M | { |
91 | 7.36M | uint64_t hash = 0xcbf29ce484222325ULL; |
92 | 7.36M | size_t i; |
93 | | |
94 | 478M | for (i = 0; i < len; i++) { |
95 | 471M | hash ^= key[i]; |
96 | 471M | hash *= 0x00000100000001B3ULL; |
97 | 471M | } |
98 | 7.36M | return hash; |
99 | 7.36M | } |
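The two constants above are the standard 64-bit FNV-1a parameters: 0xcbf29ce484222325 is the offset basis and 0x100000001B3 is the FNV prime; the xor-before-multiply order is what distinguishes FNV-1a from plain FNV-1.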
100 | | |
101 | | /* |
102 | | * Define our neighborhood list length |
103 | | * Note: It should always be a power of 2 |
104 | | */ |
105 | 477 | #define DEFAULT_NEIGH_LEN_LOG 4 |
106 | 477 | #define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG) |
107 | | |
108 | | /* |
109 | | * For now assume cache line size is 64 bytes |
110 | | */ |
111 | 33.4M | #define CACHE_LINE_BYTES 64 |
112 | | #define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES |
113 | | |
114 | 33.4M | #define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st)) |
115 | | /* |
116 | | * Defines our chains of values |
117 | | */ |
118 | | struct ht_internal_value_st { |
119 | | HT_VALUE value; |
120 | | HT *ht; |
121 | | }; |
122 | | |
123 | | struct ht_neighborhood_entry_st { |
124 | | uint64_t hash; |
125 | | struct ht_internal_value_st *value; |
126 | | }; |
127 | | |
128 | | struct ht_neighborhood_st { |
129 | | struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN]; |
130 | | }; |
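On a typical LP64 target (an assumption: 8-byte uint64_t and 8-byte pointers), each ht_neighborhood_entry_st is 16 bytes, so NEIGHBORHOOD_LEN works out to 64 / 16 = 4 entries per neighborhood, i.e. exactly one neighborhood per 64-byte cache line. A C11-style compile-time check of that arithmetic, written against a stand-in struct rather than the real one, might look like:

    /* Illustrative only; assumes an LP64 target and a C11 compiler */
    #include <stdint.h>

    struct sketch_entry {
        uint64_t hash;    /* 8 bytes */
        void *value;      /* 8 bytes on LP64 */
    };

    _Static_assert(64 / sizeof(struct sketch_entry) == 4,
                   "expected 4 entries per 64-byte cache line");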
131 | | |
132 | | /* |
133 | | * Updates to data in this struct |
134 | | * require an rcu sync after modification |
135 | | * prior to free |
136 | | */ |
137 | | struct ht_mutable_data_st { |
138 | | struct ht_neighborhood_st *neighborhoods; |
139 | | void *neighborhood_ptr_to_free; |
140 | | uint64_t neighborhood_mask; |
141 | | }; |
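The write-side discipline this comment implies is the one used later in this file (ossl_ht_flush_internal() and grow_hashtable()): build a replacement ht_mutable_data_st privately, publish it, then defer reclamation of the old copy until readers have quiesced. A condensed sketch of that sequence, assuming the declarations later in this file are visible and with error handling omitted:

    /* Condensed from ossl_ht_flush_internal() below; illustration only */
    static void sketch_replace_md(HT *h, struct ht_mutable_data_st *newmd)
    {
        struct ht_mutable_data_st *oldmd;

        oldmd = ossl_rcu_deref(&h->md);            /* remember the current version */
        ossl_rcu_assign_ptr(&h->md, &newmd);       /* publish the new version */
        ossl_rcu_call(h->lock, free_oldmd, oldmd); /* reclaim once readers quiesce */
        h->wpd.need_sync = 1;                      /* ossl_ht_write_unlock() will sync */
    }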
142 | | |
143 | | /* |
144 | | * Private data may be updated on the write |
145 | | * side only, and so does not require rcu sync |
146 | | */ |
147 | | struct ht_write_private_data_st { |
148 | | size_t neighborhood_len; |
149 | | size_t value_count; |
150 | | int need_sync; |
151 | | }; |
152 | | |
153 | | struct ht_internal_st { |
154 | | HT_CONFIG config; |
155 | | CRYPTO_RCU_LOCK *lock; |
156 | | CRYPTO_RWLOCK *atomic_lock; |
157 | | struct ht_mutable_data_st *md; |
158 | | struct ht_write_private_data_st wpd; |
159 | | }; |
160 | | |
161 | | static void free_value(struct ht_internal_value_st *v); |
162 | | |
163 | | static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len, |
164 | | void **freeptr) |
165 | 62.7k | { |
166 | 62.7k | struct ht_neighborhood_st *ret; |
167 | | |
168 | 62.7k | ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len, |
169 | 62.7k | CACHE_LINE_BYTES, freeptr); |
170 | | |
171 | | /* fall back to regular malloc */ |
172 | 62.7k | if (ret == NULL) { |
173 | 0 | ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len); |
174 | 0 | if (ret == NULL) |
175 | 0 | return NULL; |
176 | 0 | } |
177 | 62.7k | memset(ret, 0, sizeof(struct ht_neighborhood_st) * len); |
178 | 62.7k | return ret; |
179 | 62.7k | } |
180 | | |
181 | | static void internal_free_nop(HT_VALUE *v) |
182 | 24.5k | { |
183 | 24.5k | return; |
184 | 24.5k | } |
185 | | |
186 | | HT *ossl_ht_new(const HT_CONFIG *conf) |
187 | 242 | { |
188 | 242 | HT *new = OPENSSL_zalloc(sizeof(*new)); |
189 | | |
190 | 242 | if (new == NULL) |
191 | 0 | return NULL; |
192 | | |
193 | 242 | new->atomic_lock = CRYPTO_THREAD_lock_new(); |
194 | 242 | if (new->atomic_lock == NULL) |
195 | 0 | goto err; |
196 | | |
197 | 242 | memcpy(&new->config, conf, sizeof(*conf)); |
198 | | |
199 | 242 | if (new->config.init_neighborhoods != 0) { |
200 | 236 | new->wpd.neighborhood_len = new->config.init_neighborhoods; |
201 | | /* round up to the next power of 2 */ |
202 | 236 | new->wpd.neighborhood_len--; |
203 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1; |
204 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2; |
205 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4; |
206 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8; |
207 | 236 | new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16; |
208 | 236 | new->wpd.neighborhood_len++; |
209 | 236 | } else { |
210 | 6 | new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN; |
211 | 6 | } |
212 | | |
213 | 242 | if (new->config.ht_free_fn == NULL) |
214 | 236 | new->config.ht_free_fn = internal_free_nop; |
215 | | |
216 | 242 | new->md = OPENSSL_zalloc(sizeof(*new->md)); |
217 | 242 | if (new->md == NULL) |
218 | 0 | goto err; |
219 | | |
220 | 242 | new->md->neighborhoods = alloc_new_neighborhood_list(new->wpd.neighborhood_len, |
221 | 242 | &new->md->neighborhood_ptr_to_free); |
222 | 242 | if (new->md->neighborhoods == NULL) |
223 | 0 | goto err; |
224 | 242 | new->md->neighborhood_mask = new->wpd.neighborhood_len - 1; |
225 | | |
226 | 242 | new->lock = ossl_rcu_lock_new(1, conf->ctx); |
227 | 242 | if (new->lock == NULL) |
228 | 0 | goto err; |
229 | | |
230 | 242 | if (new->config.ht_hash_fn == NULL) |
231 | 242 | new->config.ht_hash_fn = fnv1a_hash; |
232 | | |
233 | 242 | return new; |
234 | | |
235 | 0 | err: |
236 | 0 | CRYPTO_THREAD_lock_free(new->atomic_lock); |
237 | 0 | ossl_rcu_lock_free(new->lock); |
238 | 0 | if (new->md != NULL) |
239 | 0 | OPENSSL_free(new->md->neighborhood_ptr_to_free); |
240 | 0 | OPENSSL_free(new->md); |
241 | 0 | OPENSSL_free(new); |
242 | 0 | return NULL; |
243 | 242 | } |
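A hedged usage sketch of table creation follows. It compiles against internal/hashtable.h and uses only the HT_CONFIG fields referenced in this file; sketch_make_table and sketch_free_fn are hypothetical names. Note the rounding loop above: a request of, say, init_neighborhoods = 20 becomes 19 = 0b10011, the shifted ORs smear that to 0b11111 = 31, and the final increment yields 32.

    /* Illustrative sketch; names prefixed sketch_ are hypothetical */
    #include "internal/hashtable.h"

    static void sketch_free_fn(HT_VALUE *v)
    {
        /* release whatever v->value points to; the table frees its own wrapper */
    }

    static HT *sketch_make_table(void)
    {
        HT_CONFIG conf = {
            .ctx = NULL,               /* or a caller's OSSL_LIB_CTX */
            .ht_free_fn = sketch_free_fn,
            .ht_hash_fn = NULL,        /* NULL selects the fnv1a_hash default above */
            .init_neighborhoods = 20,  /* rounded up to 32 by ossl_ht_new() */
            .collision_check = 1,      /* store key copies so lookups memcmp full keys */
        };

        return ossl_ht_new(&conf);
    }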
244 | | |
245 | | void ossl_ht_read_lock(HT *htable) |
246 | 28 | { |
247 | 28 | ossl_rcu_read_lock(htable->lock); |
248 | 28 | } |
249 | | |
250 | | void ossl_ht_read_unlock(HT *htable) |
251 | 46 | { |
252 | 46 | ossl_rcu_read_unlock(htable->lock); |
253 | 46 | } |
254 | | |
255 | | void ossl_ht_write_lock(HT *htable) |
256 | 271 | { |
257 | 271 | ossl_rcu_write_lock(htable->lock); |
258 | 271 | htable->wpd.need_sync = 0; |
259 | 271 | } |
260 | | |
261 | | void ossl_ht_write_unlock(HT *htable) |
262 | 271 | { |
263 | 271 | int need_sync = htable->wpd.need_sync; |
264 | | |
265 | 271 | htable->wpd.need_sync = 0; |
266 | 271 | ossl_rcu_write_unlock(htable->lock); |
267 | 271 | if (need_sync) |
268 | 181 | ossl_synchronize_rcu(htable->lock); |
269 | 271 | } |
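Writers bracket mutations with the two calls above; the unlock only pays for ossl_synchronize_rcu() when a preceding operation set wpd.need_sync. A hedged sketch of the write-side calling pattern (key and value are assumed to have been set up by the caller; sketch_insert is a hypothetical helper):

    /* Illustrative write-side pattern */
    static int sketch_insert(HT *h, HT_KEY *key, HT_VALUE *val)
    {
        int rc;

        ossl_ht_write_lock(h);
        rc = ossl_ht_insert(h, key, val, NULL);  /* NULL olddata: duplicates are rejected */
        ossl_ht_write_unlock(h);                 /* synchronizes RCU only if need_sync was set */
        return rc;
    }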
270 | | |
271 | | static void free_oldmd(void *arg) |
272 | 31.2k | { |
273 | 31.2k | struct ht_mutable_data_st *oldmd = arg; |
274 | 31.2k | size_t i, j; |
275 | 31.2k | size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1; |
276 | 31.2k | struct ht_internal_value_st *v; |
277 | | |
278 | 359k | for (i = 0; i < neighborhood_len; i++) { |
279 | 328k | PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]); |
280 | 1.64M | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
281 | 1.31M | if (oldmd->neighborhoods[i].entries[j].value != NULL) { |
282 | 26.1k | v = oldmd->neighborhoods[i].entries[j].value; |
283 | 26.1k | v->ht->config.ht_free_fn((HT_VALUE *)v); |
284 | 26.1k | free_value(v); |
285 | 26.1k | } |
286 | 1.31M | } |
287 | 328k | } |
288 | | |
289 | 31.2k | OPENSSL_free(oldmd->neighborhood_ptr_to_free); |
290 | 31.2k | OPENSSL_free(oldmd); |
291 | 31.2k | } |
292 | | |
293 | | static int ossl_ht_flush_internal(HT *h) |
294 | 157 | { |
295 | 157 | struct ht_mutable_data_st *newmd = NULL; |
296 | 157 | struct ht_mutable_data_st *oldmd = NULL; |
297 | | |
298 | 157 | newmd = OPENSSL_zalloc(sizeof(*newmd)); |
299 | 157 | if (newmd == NULL) |
300 | 0 | return 0; |
301 | | |
302 | 157 | newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN, |
303 | 157 | &newmd->neighborhood_ptr_to_free); |
304 | 157 | if (newmd->neighborhoods == NULL) { |
305 | 0 | OPENSSL_free(newmd); |
306 | 0 | return 0; |
307 | 0 | } |
308 | | |
309 | 157 | newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1; |
310 | | |
311 | | /* Swap the old and new mutable data sets */ |
312 | 157 | oldmd = ossl_rcu_deref(&h->md); |
313 | 157 | ossl_rcu_assign_ptr(&h->md, &newmd); |
314 | | |
315 | | /* Set the number of entries to 0 */ |
316 | 157 | h->wpd.value_count = 0; |
317 | 157 | h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN; |
318 | | |
319 | 157 | ossl_rcu_call(h->lock, free_oldmd, oldmd); |
320 | 157 | h->wpd.need_sync = 1; |
321 | 157 | return 1; |
322 | 157 | } |
323 | | |
324 | | int ossl_ht_flush(HT *h) |
325 | 4 | { |
326 | 4 | return ossl_ht_flush_internal(h); |
327 | 4 | } |
328 | | |
329 | | void ossl_ht_free(HT *h) |
330 | 154 | { |
331 | 154 | if (h == NULL) |
332 | 0 | return; |
333 | | |
334 | 154 | ossl_ht_write_lock(h); |
335 | 154 | ossl_ht_flush_internal(h); |
336 | 154 | ossl_ht_write_unlock(h); |
337 | | /* Freeing the lock does a final sync for us */ |
338 | 154 | CRYPTO_THREAD_lock_free(h->atomic_lock); |
339 | 154 | ossl_rcu_lock_free(h->lock); |
340 | 154 | OPENSSL_free(h->md->neighborhood_ptr_to_free); |
341 | 154 | OPENSSL_free(h->md); |
342 | 154 | OPENSSL_free(h); |
343 | 154 | return; |
344 | 154 | } |
345 | | |
346 | | size_t ossl_ht_count(HT *h) |
347 | 0 | { |
348 | 0 | size_t count; |
349 | |
350 | 0 | count = h->wpd.value_count; |
351 | 0 | return count; |
352 | 0 | } |
353 | | |
354 | | void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg), |
355 | | void *arg) |
356 | 74 | { |
357 | 74 | size_t i, j; |
358 | 74 | struct ht_mutable_data_st *md; |
359 | | |
360 | 74 | md = ossl_rcu_deref(&h->md); |
361 | 11.8k | for (i = 0; i < md->neighborhood_mask + 1; i++) { |
362 | 11.7k | PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]); |
363 | 58.9k | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
364 | 47.1k | if (md->neighborhoods[i].entries[j].value != NULL) { |
365 | 459 | if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg)) |
366 | 6 | goto out; |
367 | 459 | } |
368 | 47.1k | } |
369 | 11.7k | } |
370 | 74 | out: |
371 | 74 | return; |
372 | 74 | } |
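A hedged sketch of a caller-side callback for the walk above; returning 0 from the callback stops the iteration early, as the goto in the loop shows. The names are hypothetical.

    /* Illustrative callback: count entries, stopping once a limit is reached */
    struct sketch_count_arg {
        size_t seen;
        size_t limit;
    };

    static int sketch_count_cb(HT_VALUE *obj, void *arg)
    {
        struct sketch_count_arg *c = arg;

        (void)obj;                   /* the value itself is unused in this sketch */
        c->seen++;
        return c->seen < c->limit;   /* 0 ends ossl_ht_foreach_until() early */
    }

    /* usage: struct sketch_count_arg a = { 0, 100 };
     *        ossl_ht_foreach_until(h, sketch_count_cb, &a); */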
373 | | |
374 | | HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len, |
375 | | int (*filter)(HT_VALUE *obj, void *arg), |
376 | | void *arg) |
377 | 53 | { |
378 | 53 | struct ht_mutable_data_st *md; |
379 | 53 | HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST) |
380 | 53 | + (sizeof(HT_VALUE *) * max_len)); |
381 | 53 | size_t i, j; |
382 | 53 | struct ht_internal_value_st *v; |
383 | | |
384 | 53 | if (list == NULL) |
385 | 0 | return NULL; |
386 | | |
387 | | /* |
388 | | * The list array lives just beyond the end of |
389 | | * the struct |
390 | | */ |
391 | 53 | list->list = (HT_VALUE **)(list + 1); |
392 | | |
393 | 53 | md = ossl_rcu_deref(&h->md); |
394 | 7.01k | for (i = 0; i < md->neighborhood_mask + 1; i++) { |
395 | 6.96k | PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]); |
396 | 34.8k | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
397 | 27.8k | v = md->neighborhoods[i].entries[j].value; |
398 | 27.8k | if (v != NULL && filter((HT_VALUE *)v, arg)) { |
399 | 1 | list->list[list->list_len++] = (HT_VALUE *)v; |
400 | 1 | if (list->list_len == max_len) |
401 | 1 | goto out; |
402 | 1 | } |
403 | 27.8k | } |
404 | 6.96k | } |
405 | 53 | out: |
406 | 53 | return list; |
407 | 53 | } |
408 | | |
409 | | void ossl_ht_value_list_free(HT_VALUE_LIST *list) |
410 | 53 | { |
411 | 53 | OPENSSL_free(list); |
412 | 53 | } |
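A hedged sketch of the filter path above: collect up to max_len matches, walk the returned list, and release it with ossl_ht_value_list_free(); the values in the list remain owned by the table. Names prefixed sketch_ are hypothetical.

    /* Illustrative filter: keep values whose type_id matches the caller's marker */
    static int sketch_match_type(HT_VALUE *obj, void *arg)
    {
        return obj->type_id == (uintptr_t *)arg;
    }

    static void sketch_collect(HT *h, uintptr_t *wanted_type)
    {
        HT_VALUE_LIST *list = ossl_ht_filter(h, 32, sketch_match_type, wanted_type);
        size_t i;

        if (list == NULL)
            return;
        for (i = 0; i < list->list_len; i++) {
            /* use list->list[i]; it is still owned by the table */
        }
        ossl_ht_value_list_free(list);
    }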
413 | | |
414 | | static int compare_hash(uint64_t hash1, uint64_t hash2) |
415 | 40.0M | { |
416 | 40.0M | return (hash1 == hash2); |
417 | 40.0M | } |
418 | | |
419 | | static void free_old_neigh_table(void *arg) |
420 | 20 | { |
421 | 20 | struct ht_mutable_data_st *oldmd = arg; |
422 | | |
423 | 20 | OPENSSL_free(oldmd->neighborhood_ptr_to_free); |
424 | 20 | OPENSSL_free(oldmd); |
425 | 20 | } |
426 | | |
427 | | /* |
428 | | * Increase the hash table bucket list; |
429 | | * must be called with the write lock held |
430 | | */ |
431 | | static int grow_hashtable(HT *h, size_t oldsize) |
432 | 17 | { |
433 | 17 | struct ht_mutable_data_st *newmd; |
434 | 17 | struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md); |
435 | 17 | int rc = 0; |
436 | 17 | uint64_t oldi, oldj, newi, newj; |
437 | 17 | uint64_t oldhash; |
438 | 17 | struct ht_internal_value_st *oldv; |
439 | 17 | int rehashed; |
440 | 17 | size_t newsize = oldsize * 2; |
441 | | |
442 | 17 | if (h->config.lockless_reads) |
443 | 0 | goto out; |
444 | | |
445 | 17 | if ((newmd = OPENSSL_zalloc(sizeof(*newmd))) == NULL) |
446 | 0 | goto out; |
447 | | |
448 | | /* bucket list is always a power of 2 */ |
449 | 17 | newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2, |
450 | 17 | &newmd->neighborhood_ptr_to_free); |
451 | 17 | if (newmd->neighborhoods == NULL) |
452 | 0 | goto out_free; |
453 | | |
454 | | /* being a power of 2 makes for easy mask computation */ |
455 | 17 | newmd->neighborhood_mask = (newsize - 1); |
456 | | |
457 | | /* |
458 | | * Now we need to start rehashing entries |
459 | | * Note we don't need to use atomics here as the new |
460 | | * mutable data hasn't been published |
461 | | */ |
462 | 2.51k | for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) { |
463 | 2.49k | PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]); |
464 | 12.4k | for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) { |
465 | 9.98k | oldv = oldmd->neighborhoods[oldi].entries[oldj].value; |
466 | 9.98k | if (oldv == NULL) |
467 | 9.86k | continue; |
468 | 116 | oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash; |
469 | 116 | newi = oldhash & newmd->neighborhood_mask; |
470 | 116 | rehashed = 0; |
471 | 203 | for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) { |
472 | 203 | if (newmd->neighborhoods[newi].entries[newj].value == NULL) { |
473 | 116 | newmd->neighborhoods[newi].entries[newj].value = oldv; |
474 | 116 | newmd->neighborhoods[newi].entries[newj].hash = oldhash; |
475 | 116 | rehashed = 1; |
476 | 116 | break; |
477 | 116 | } |
478 | 203 | } |
479 | 116 | if (rehashed == 0) { |
480 | | /* we ran out of space in a neighborhood, grow again */ |
481 | 0 | OPENSSL_free(newmd->neighborhoods); |
482 | 0 | OPENSSL_free(newmd); |
483 | 0 | return grow_hashtable(h, newsize); |
484 | 0 | } |
485 | 116 | } |
486 | 2.49k | } |
487 | | /* |
488 | | * Now that our entries are all rehashed into the new bucket list, |
489 | | * update our neighborhood_len |
490 | | */ |
491 | 17 | h->wpd.neighborhood_len = newsize; |
492 | | |
493 | | /* |
494 | | * Now we replace the old mutable data with the new |
495 | | */ |
496 | 17 | ossl_rcu_assign_ptr(&h->md, &newmd); |
497 | 17 | ossl_rcu_call(h->lock, free_old_neigh_table, oldmd); |
498 | 17 | h->wpd.need_sync = 1; |
499 | | /* |
500 | | * And we're done |
501 | | */ |
502 | 17 | rc = 1; |
503 | | |
504 | 17 | out: |
505 | 17 | return rc; |
506 | 0 | out_free: |
507 | 0 | OPENSSL_free(newmd->neighborhoods); |
508 | 0 | OPENSSL_free(newmd); |
509 | 0 | goto out; |
510 | 17 | } |
511 | | |
512 | | static void free_old_ht_value(void *arg) |
513 | 9 | { |
514 | 9 | HT_VALUE *h = (HT_VALUE *)arg; |
515 | | |
516 | | /* |
517 | | * Note, this is only called on replacement, |
518 | | * the caller is responsible for freeing the |
519 | | * held data, we just need to free the wrapping |
520 | | * struct here |
521 | | */ |
522 | 9 | OPENSSL_free(h); |
523 | 9 | } |
524 | | |
525 | | static ossl_inline int match_key(HT_KEY *a, HT_KEY *b) |
526 | 30.6M | { |
527 | | /* |
528 | | * keys match if they are both present, the same size and compare equal |
529 | | * in memory; unstored keys (collision checking disabled) match on hash alone |
530 | | */ |
531 | 30.6M | PREFETCH(a->keybuf); |
532 | 30.6M | PREFETCH(b->keybuf); |
533 | 30.6M | if (a->keybuf != NULL && b->keybuf != NULL && a->keysize == b->keysize) |
534 | 30.6M | return !memcmp(a->keybuf, b->keybuf, a->keysize); |
535 | | |
536 | 2.51k | return 1; |
537 | 30.6M | } |
538 | | |
539 | | static int ossl_ht_insert_locked(HT *h, uint64_t hash, |
540 | | struct ht_internal_value_st *newval, |
541 | | HT_VALUE **olddata) |
542 | 28.2k | { |
543 | 28.2k | struct ht_mutable_data_st *md = h->md; |
544 | 28.2k | uint64_t neigh_idx_start = hash & md->neighborhood_mask; |
545 | 28.2k | uint64_t neigh_idx = neigh_idx_start; |
546 | 28.2k | size_t j; |
547 | 28.2k | uint64_t ihash; |
548 | 28.2k | HT_VALUE *ival; |
549 | 28.2k | size_t empty_idx = SIZE_MAX; |
550 | 28.2k | int lockless_reads = h->config.lockless_reads; |
551 | | |
552 | 28.5k | do { |
553 | 28.5k | PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]); |
554 | | |
555 | 43.1k | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
556 | 42.7k | ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value); |
557 | 42.7k | if (ival == NULL) { |
558 | 28.3k | empty_idx = j; |
559 | | /* lockless_reads implies no deletion, we can break out */ |
560 | 28.3k | if (lockless_reads) |
561 | 28.1k | goto not_found; |
562 | 181 | continue; |
563 | 28.3k | } |
564 | 14.4k | if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash, |
565 | 14.4k | &ihash, h->atomic_lock)) |
566 | 0 | return 0; |
567 | 14.4k | if (compare_hash(hash, ihash) && match_key(&newval->value.key, &ival->key)) { |
568 | 12 | if (olddata == NULL) { |
569 | | /* This would insert a duplicate -> fail */ |
570 | 7 | return 0; |
571 | 7 | } |
572 | | /* Do a replacement */ |
573 | 5 | if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash, |
574 | 5 | hash, h->atomic_lock)) |
575 | 0 | return 0; |
576 | 5 | *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value; |
577 | 5 | ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value, |
578 | 5 | &newval); |
579 | 5 | ossl_rcu_call(h->lock, free_old_ht_value, *olddata); |
580 | 5 | h->wpd.need_sync = 1; |
581 | 5 | return 1; |
582 | 5 | } |
583 | 14.4k | } |
584 | 343 | if (!lockless_reads) |
585 | 78 | break; |
586 | | /* Continue search in subsequent neighborhoods */ |
587 | 265 | neigh_idx = (neigh_idx + 1) & md->neighborhood_mask; |
588 | 265 | } while (neigh_idx != neigh_idx_start); |
589 | | |
590 | 28.2k | not_found: |
591 | | /* If we get to here, it's just an insert */ |
592 | 28.2k | if (empty_idx == SIZE_MAX) |
593 | 17 | return -1; /* out of space */ |
594 | 28.2k | if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash, |
595 | 28.2k | hash, h->atomic_lock)) |
596 | 0 | return 0; |
597 | 28.2k | h->wpd.value_count++; |
598 | 28.2k | ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value, |
599 | 28.2k | &newval); |
600 | 28.2k | return 1; |
601 | 28.2k | } |
602 | | |
603 | | static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key, |
604 | | void *data, |
605 | | uintptr_t *type) |
606 | 40.4k | { |
607 | 40.4k | struct ht_internal_value_st *tmp; |
608 | 40.4k | size_t nvsize = sizeof(*tmp); |
609 | | |
610 | 40.4k | if (h->config.collision_check == 1) |
611 | 38.9k | nvsize += key->keysize; |
612 | | |
613 | 40.4k | tmp = OPENSSL_malloc(nvsize); |
614 | | |
615 | 40.4k | if (tmp == NULL) |
616 | 0 | return NULL; |
617 | | |
618 | 40.4k | tmp->ht = h; |
619 | 40.4k | tmp->value.value = data; |
620 | 40.4k | tmp->value.type_id = type; |
621 | 40.4k | tmp->value.key.keybuf = NULL; |
622 | 40.4k | if (h->config.collision_check) { |
623 | 38.9k | tmp->value.key.keybuf = (uint8_t *)(tmp + 1); |
624 | 38.9k | tmp->value.key.keysize = key->keysize; |
625 | 38.9k | memcpy(tmp->value.key.keybuf, key->keybuf, key->keysize); |
626 | 38.9k | } |
627 | | |
628 | 40.4k | return tmp; |
629 | 40.4k | } |
630 | | |
631 | | static void free_value(struct ht_internal_value_st *v) |
632 | 26.1k | { |
633 | 26.1k | OPENSSL_free(v); |
634 | 26.1k | } |
635 | | |
636 | | int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata) |
637 | 40.4k | { |
638 | 40.4k | struct ht_internal_value_st *newval = NULL; |
639 | 40.4k | uint64_t hash; |
640 | 40.4k | int rc = 0; |
641 | 40.4k | int i; |
642 | | |
643 | 40.4k | if (data->value == NULL) |
644 | 0 | goto out; |
645 | | |
646 | 40.4k | newval = alloc_new_value(h, key, data->value, data->type_id); |
647 | 40.4k | if (newval == NULL) |
648 | 0 | goto out; |
649 | | |
650 | | /* |
651 | | * the write lock must be held here to prevent other changes |
652 | | * to the bucket list |
653 | | */ |
654 | 40.4k | hash = h->config.ht_hash_fn(key->keybuf, key->keysize); |
655 | | |
656 | 40.4k | for (i = 0; |
657 | 40.4k | (rc = ossl_ht_insert_locked(h, hash, newval, olddata)) == -1 |
658 | 20 | && i < 4; |
659 | 40.4k | ++i) |
660 | 20 | if (!grow_hashtable(h, h->wpd.neighborhood_len)) { |
661 | 0 | rc = -1; |
662 | 0 | break; |
663 | 0 | } |
664 | | |
665 | 40.4k | if (rc <= 0) |
666 | 11 | free_value(newval); |
667 | | |
668 | 40.4k | out: |
669 | 40.4k | return rc; |
670 | 40.4k | } |
671 | | |
672 | | HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key) |
673 | 24.6M | { |
674 | 24.6M | struct ht_mutable_data_st *md; |
675 | 24.6M | uint64_t hash; |
676 | 24.6M | uint64_t neigh_idx_start; |
677 | 24.6M | uint64_t neigh_idx; |
678 | 24.6M | struct ht_internal_value_st *ival = NULL; |
679 | 24.6M | size_t j; |
680 | 24.6M | uint64_t ehash; |
681 | 24.6M | int lockless_reads = h->config.lockless_reads; |
682 | | |
683 | 24.6M | hash = h->config.ht_hash_fn(key->keybuf, key->keysize); |
684 | | |
685 | 24.6M | md = ossl_rcu_deref(&h->md); |
686 | 24.6M | neigh_idx = neigh_idx_start = hash & md->neighborhood_mask; |
687 | 24.6M | do { |
688 | 24.6M | PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]); |
689 | 31.6M | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
690 | 31.6M | ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value); |
691 | 31.6M | if (ival == NULL) { |
692 | 2.27M | if (lockless_reads) |
693 | | /* lockless_reads implies no deletion, we can break out */ |
694 | 2.27M | return NULL; |
695 | 98 | continue; |
696 | 2.27M | } |
697 | 29.3M | if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash, |
698 | 29.3M | &ehash, h->atomic_lock)) |
699 | 0 | return NULL; |
700 | 29.3M | if (compare_hash(hash, ehash) && match_key(&ival->value.key, key)) |
701 | 22.4M | return (HT_VALUE *)ival; |
702 | 29.3M | } |
703 | 4.44k | if (!lockless_reads) |
704 | 41 | break; |
705 | | /* Continue search in subsequent neighborhoods */ |
706 | 4.39k | neigh_idx = (neigh_idx + 1) & md->neighborhood_mask; |
707 | 4.39k | } while (neigh_idx != neigh_idx_start); |
708 | | |
709 | 41 | return NULL; |
710 | 24.6M | } |
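A hedged read-side sketch of the lookup above. Building an HT_KEY directly from a buffer and length bypasses the typed-key convenience macros in internal/hashtable.h and is shown only to illustrate the shape of the call; sketch_contains is a hypothetical helper.

    /* Illustrative read-side lookup under the RCU read lock */
    static int sketch_contains(HT *h, uint8_t *keybuf, size_t keysize)
    {
        HT_KEY key;
        int found;

        key.keybuf = keybuf;
        key.keysize = keysize;

        ossl_ht_read_lock(h);
        found = ossl_ht_get(h, &key) != NULL;  /* returned values are only stable under the lock */
        ossl_ht_read_unlock(h);
        return found;
    }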
711 | | |
712 | | static void free_old_entry(void *arg) |
713 | 13 | { |
714 | 13 | struct ht_internal_value_st *v = arg; |
715 | | |
716 | 13 | v->ht->config.ht_free_fn((HT_VALUE *)v); |
717 | 13 | free_value(v); |
718 | 13 | } |
719 | | |
720 | | int ossl_ht_delete(HT *h, HT_KEY *key) |
721 | 41 | { |
722 | 41 | uint64_t hash; |
723 | 41 | uint64_t neigh_idx; |
724 | 41 | size_t j; |
725 | 41 | struct ht_internal_value_st *v = NULL; |
726 | 41 | HT_VALUE *nv = NULL; |
727 | 41 | int rc = 0; |
728 | | |
729 | 41 | if (h->config.lockless_reads) |
730 | 0 | return 0; |
731 | | |
732 | 41 | hash = h->config.ht_hash_fn(key->keybuf, key->keysize); |
733 | | |
734 | 41 | neigh_idx = hash & h->md->neighborhood_mask; |
735 | 41 | PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]); |
736 | 184 | for (j = 0; j < NEIGHBORHOOD_LEN; j++) { |
737 | 152 | v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value; |
738 | 152 | if (v == NULL) |
739 | 78 | continue; |
740 | 74 | if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash) |
741 | 9 | && match_key(key, &v->value.key)) { |
742 | 9 | if (!CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash, |
743 | 9 | 0, h->atomic_lock)) |
744 | 0 | break; |
745 | 9 | h->wpd.value_count--; |
746 | 9 | ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value, |
747 | 9 | &nv); |
748 | 9 | rc = 1; |
749 | 9 | break; |
750 | 9 | } |
751 | 74 | } |
752 | 41 | if (rc == 1) { |
753 | 9 | ossl_rcu_call(h->lock, free_old_entry, v); |
754 | 9 | h->wpd.need_sync = 1; |
755 | 9 | } |
756 | 41 | return rc; |
757 | 41 | } |