/src/openssl33/crypto/threads_pthread.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the Apache License 2.0 (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | */ |
9 | | |
10 | | /* We need to use the OPENSSL_fork_*() deprecated APIs */ |
11 | | #define OPENSSL_SUPPRESS_DEPRECATED |
12 | | |
13 | | #include <openssl/crypto.h> |
14 | | #include <crypto/cryptlib.h> |
15 | | #include "internal/cryptlib.h" |
16 | | #include "internal/rcu.h" |
17 | | #include "rcu_internal.h" |
18 | | |
19 | | #if defined(__sun) |
20 | | #include <atomic.h> |
21 | | #endif |
22 | | |
23 | | #if defined(__apple_build_version__) && __apple_build_version__ < 6000000 |
24 | | /* |
25 | | * OS X 10.7 and 10.8 shipped a weird version of clang which has __ATOMIC_ACQUIRE and |
26 | | * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free(), |
27 | | * giving it the signature __atomic_is_lock_free(sizeof(_Atomic(T))) rather than two. |
28 | | * All of this makes it impossible to use __atomic_is_lock_free here. |
29 | | * |
30 | | * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760 |
31 | | */ |
32 | | #define BROKEN_CLANG_ATOMICS |
33 | | #endif |
34 | | |
35 | | #if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS) |
36 | | |
37 | | #if defined(OPENSSL_SYS_UNIX) |
38 | | #include <sys/types.h> |
39 | | #include <unistd.h> |
40 | | #endif |
41 | | |
42 | | #include <assert.h> |
43 | | |
44 | | /* |
45 | | * The Non-Stop KLT thread model currently seems broken in its rwlock |
46 | | * implementation. |
47 | | * Likewise, there is a problem with the glibc implementation on riscv. |
48 | | */ |
49 | | #if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \ |
50 | | && !defined(__riscv) |
51 | | #define USE_RWLOCK |
52 | | #endif |
53 | | |
54 | | /* |
55 | | * For all GNU/clang atomic builtins, we also need fallbacks, to cover all |
56 | | * other compilers. |
57 | | * |
58 | | * Unfortunately, we can't do that with some "generic type", because there's no |
59 | | * guarantee that the chosen generic type is large enough to cover all cases. |
60 | | * Therefore, we implement fallbacks for each applicable type, with composed |
61 | | * names that include the type they handle. |
62 | | * |
63 | | * (an anecdote: we previously tried to use |void *| as the generic type, with |
64 | | * the thought that the pointer itself is the largest type. However, this is |
65 | | * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large) |
66 | | * |
67 | | * All applicable ATOMIC_ macros take the intended type as first parameter, so |
68 | | * they can map to the correct fallback function. In the GNU/clang case, that |
69 | | * parameter is simply ignored. |
70 | | */ |
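
For illustration, the mapping this comment describes works as follows (a sketch; `&lock->reader_idx` stands in for any uint32_t object):

    /*
     * With the fallbacks in effect,
     *     ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED)
     * expands via ## token pasting to
     *     fallback_atomic_load_n_uint32_t(&lock->reader_idx)
     * while under the GNU/clang builtins the type parameter is simply dropped:
     *     __atomic_load_n(&lock->reader_idx, __ATOMIC_RELAXED)
     */
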
71 | | |
72 | | /* |
73 | | * Internal types used with the ATOMIC_ macros, to make it possible to compose |
74 | | * fallback function names. |
75 | | */ |
76 | | typedef void *pvoid; |
77 | | |
78 | | #if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \ |
79 | | && !defined(USE_ATOMIC_FALLBACKS) |
80 | 75.7M | #define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o) |
81 | 918 | #define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o) |
82 | 40.2k | #define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o) |
83 | 989 | #define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o) |
84 | 71 | #define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o) |
85 | | #else |
86 | | static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER; |
87 | | |
88 | | #define IMPL_fallback_atomic_load_n(t) \ |
89 | | static ossl_inline t fallback_atomic_load_n_##t(t *p) \ |
90 | | { \ |
91 | | t ret; \ |
92 | | \ |
93 | | pthread_mutex_lock(&atomic_sim_lock); \ |
94 | | ret = *p; \ |
95 | | pthread_mutex_unlock(&atomic_sim_lock); \ |
96 | | return ret; \ |
97 | | } |
98 | | IMPL_fallback_atomic_load_n(uint32_t) |
99 | | IMPL_fallback_atomic_load_n(uint64_t) |
100 | | IMPL_fallback_atomic_load_n(pvoid) |
101 | | |
102 | | #define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p) |
103 | | |
104 | | #define IMPL_fallback_atomic_store_n(t) \ |
105 | | static ossl_inline t fallback_atomic_store_n_##t(t *p, t v) \ |
106 | | { \ |
107 | | t ret; \ |
108 | | \ |
109 | | pthread_mutex_lock(&atomic_sim_lock); \ |
110 | | ret = *p; \ |
111 | | *p = v; \ |
112 | | pthread_mutex_unlock(&atomic_sim_lock); \ |
113 | | return ret; \ |
114 | | } |
115 | | IMPL_fallback_atomic_store_n(uint32_t) |
116 | | |
117 | | #define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v) |
118 | | |
119 | | #define IMPL_fallback_atomic_store(t) \ |
120 | | static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \ |
121 | | { \ |
122 | | pthread_mutex_lock(&atomic_sim_lock); \ |
123 | | *p = *v; \ |
124 | | pthread_mutex_unlock(&atomic_sim_lock); \ |
125 | | } |
126 | | IMPL_fallback_atomic_store(pvoid) |
127 | | |
128 | | #define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v) |
129 | | |
130 | | /* |
131 | | * The fallbacks that follow don't need any per type implementation, as |
132 | | * they are designed for uint64_t only. If there comes a time when multiple |
133 | | * types need to be covered, it's relatively easy to refactor them the same |
134 | | * way as the fallbacks above. |
135 | | */ |
136 | | |
137 | | static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v) |
138 | | { |
139 | | uint64_t ret; |
140 | | |
141 | | pthread_mutex_lock(&atomic_sim_lock); |
142 | | *p += v; |
143 | | ret = *p; |
144 | | pthread_mutex_unlock(&atomic_sim_lock); |
145 | | return ret; |
146 | | } |
147 | | |
148 | | #define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v) |
149 | | |
150 | | static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v) |
151 | | { |
152 | | uint64_t ret; |
153 | | |
154 | | pthread_mutex_lock(&atomic_sim_lock); |
155 | | *p -= v; |
156 | | ret = *p; |
157 | | pthread_mutex_unlock(&atomic_sim_lock); |
158 | | return ret; |
159 | | } |
160 | | |
161 | | #define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v) |
162 | | #endif |
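
The fallback strategy above boils down to one process-wide mutex serializing every simulated atomic. A minimal self-contained sketch of the same pattern (the demo names are hypothetical, not part of this file):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t sim_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Same shape as fallback_atomic_add_fetch(): returns the updated value */
    static uint64_t demo_add_fetch(uint64_t *p, uint64_t v)
    {
        uint64_t ret;

        pthread_mutex_lock(&sim_lock);
        *p += v;
        ret = *p;
        pthread_mutex_unlock(&sim_lock);
        return ret;
    }

    int main(void)
    {
        uint64_t counter = 41;

        printf("%llu\n", (unsigned long long)demo_add_fetch(&counter, 1)); /* 42 */
        return 0;
    }
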
163 | | |
164 | | /* |
165 | | * This is the core of an rcu lock. It tracks the readers and writers for the |
166 | | * current quiescence point for a given lock. users is a 64-bit count of |
167 | | * the readers currently holding this qp |
168 | | * |
169 | | */ |
170 | | struct rcu_qp { |
171 | | uint64_t users; |
172 | | }; |
173 | | |
174 | | struct thread_qp { |
175 | | struct rcu_qp *qp; |
176 | | unsigned int depth; |
177 | | CRYPTO_RCU_LOCK *lock; |
178 | | }; |
179 | | |
180 | 434 | #define MAX_QPS 10 |
181 | | /* |
182 | | * This is the per-thread tracking data |
183 | | * that is assigned to each thread participating |
184 | | * in an rcu qp |
185 | | * |
186 | | * qp points to the qp that the thread last acquired |
187 | | * |
188 | | */ |
189 | | struct rcu_thr_data { |
190 | | struct thread_qp thread_qps[MAX_QPS]; |
191 | | }; |
192 | | |
193 | | /* |
194 | | * This is the internal version of a CRYPTO_RCU_LOCK; |
195 | | * CRYPTO_RCU_LOCK pointers are cast to it |
196 | | */ |
197 | | struct rcu_lock_st { |
198 | | /* Callbacks to call for next ossl_synchronize_rcu */ |
199 | | struct rcu_cb_item *cb_items; |
200 | | |
201 | | /* The context we are being created against */ |
202 | | OSSL_LIB_CTX *ctx; |
203 | | |
204 | | /* Array of quiescent points for synchronization */ |
205 | | struct rcu_qp *qp_group; |
206 | | |
207 | | /* rcu generation counter for in-order retirement */ |
208 | | uint32_t id_ctr; |
209 | | |
210 | | /* Number of elements in qp_group array */ |
211 | | uint32_t group_count; |
212 | | |
213 | | /* Index of the current qp in the qp_group array */ |
214 | | uint32_t reader_idx; |
215 | | |
216 | | /* value of the next id_ctr value to be retired */ |
217 | | uint32_t next_to_retire; |
218 | | |
219 | | /* index of the next free rcu_qp in the qp_group */ |
220 | | uint32_t current_alloc_idx; |
221 | | |
222 | | /* number of qp's in qp_group array currently being retired */ |
223 | | uint32_t writers_alloced; |
224 | | |
225 | | /* lock protecting write side operations */ |
226 | | pthread_mutex_t write_lock; |
227 | | |
228 | | /* lock protecting updates to writers_alloced/current_alloc_idx */ |
229 | | pthread_mutex_t alloc_lock; |
230 | | |
231 | | /* signal to wake threads waiting on alloc_lock */ |
232 | | pthread_cond_t alloc_signal; |
233 | | |
234 | | /* lock to enforce in-order retirement */ |
235 | | pthread_mutex_t prior_lock; |
236 | | |
237 | | /* signal to wake threads waiting on prior_lock */ |
238 | | pthread_cond_t prior_signal; |
239 | | }; |
240 | | |
241 | | /* Read side acquisition of the current qp */ |
242 | | static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock) |
243 | 71 | { |
244 | 71 | uint32_t qp_idx; |
245 | | |
246 | | /* get the current qp index */ |
247 | 71 | for (;;) { |
248 | 71 | qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED); |
249 | | |
250 | | /* |
251 | | * Notes on use of __ATOMIC_ACQUIRE |
252 | | * We need to ensure the following: |
253 | | * 1) That subsequent operations aren't optimized by hoisting them above |
254 | | * this operation. Specifically, we don't want the below re-load of |
255 | | * qp_idx to get optimized away |
256 | | * 2) We want to ensure that any updating of reader_idx on the write side |
257 | | * of the lock is flushed from a local cpu cache so that we see any |
258 | | * updates prior to the load. This is a non-issue on cache coherent |
259 | | * systems like x86, but is relevant on other arches |
260 | | */ |
261 | 71 | ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1, |
262 | 71 | __ATOMIC_ACQUIRE); |
263 | | |
264 | | /* if the idx hasn't changed, we're good, else try again */ |
265 | 71 | if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_ACQUIRE)) |
266 | 71 | break; |
267 | | |
268 | 0 | ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1, |
269 | 0 | __ATOMIC_RELAXED); |
270 | 0 | } |
271 | | |
272 | 71 | return &lock->qp_group[qp_idx]; |
273 | 71 | } |
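
To see why the re-check of reader_idx is needed, here is a simplified sketch of the hazard the retry closes (interleaving is illustrative):

    /*
     *   reader: qp_idx = reader_idx            (reads 0)
     *   writer: update_qp() publishes reader_idx = 1 and drains qp 0
     *   writer: sees users[0] == 0, retires qp 0 (it may be reused later)
     *   reader: users[0]++                      (too late: qp 0 is stale)
     *   reader: re-load of reader_idx sees 1 != 0 -> users[0]--, retry
     *
     * The decrement undoes the stray increment, so landing on a stale qp
     * is harmless as long as the reader backs off and retries.
     */
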
274 | | |
275 | | static void ossl_rcu_free_local_data(void *arg) |
276 | 3 | { |
277 | 3 | OSSL_LIB_CTX *ctx = arg; |
278 | 3 | CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx); |
279 | 3 | struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey); |
280 | | |
281 | 3 | OPENSSL_free(data); |
282 | 3 | CRYPTO_THREAD_set_local(lkey, NULL); |
283 | 3 | } |
284 | | |
285 | | void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock) |
286 | 33 | { |
287 | 33 | struct rcu_thr_data *data; |
288 | 33 | int i, available_qp = -1; |
289 | 33 | CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx); |
290 | | |
291 | | /* |
292 | | * fetch this thread's rcu tracking data, which records |
293 | | * the qps this thread currently holds |
294 | | */ |
295 | 33 | data = CRYPTO_THREAD_get_local(lkey); |
296 | | |
297 | 33 | if (data == NULL) { |
298 | 2 | data = OPENSSL_zalloc(sizeof(*data)); |
299 | 2 | OPENSSL_assert(data != NULL); |
300 | 2 | CRYPTO_THREAD_set_local(lkey, data); |
301 | 2 | ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data); |
302 | 2 | } |
303 | | |
304 | 363 | for (i = 0; i < MAX_QPS; i++) { |
305 | 330 | if (data->thread_qps[i].qp == NULL && available_qp == -1) |
306 | 33 | available_qp = i; |
307 | | /* If we have a hold on this lock already, we're good */ |
308 | 330 | if (data->thread_qps[i].lock == lock) { |
309 | 0 | data->thread_qps[i].depth++; |
310 | 0 | return; |
311 | 0 | } |
312 | 330 | } |
313 | | |
314 | | /* |
315 | | * if we get here, then we don't have a hold on this lock yet |
316 | | */ |
317 | 33 | assert(available_qp != -1); |
318 | | |
319 | 33 | data->thread_qps[available_qp].qp = get_hold_current_qp(lock); |
320 | 33 | data->thread_qps[available_qp].depth = 1; |
321 | 33 | data->thread_qps[available_qp].lock = lock; |
322 | 33 | } |
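
A minimal read-side usage sketch (assuming the internal internal/rcu.h API; the shared slot `global_cfg` is hypothetical):

    #include <openssl/crypto.h>
    #include "internal/rcu.h"

    void reader(CRYPTO_RCU_LOCK *rcu, void **global_cfg)
    {
        void *cfg;

        ossl_rcu_read_lock(rcu);
        /* acquire-load of the shared pointer */
        cfg = ossl_rcu_uptr_deref(global_cfg);
        /* ... use cfg: it cannot be reclaimed before read_unlock ... */
        ossl_rcu_read_unlock(rcu);
    }

Nested acquisitions of the same lock only bump the per-thread depth counter, as the loop above shows.
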
323 | | |
324 | | void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock) |
325 | 71 | { |
326 | 71 | int i; |
327 | 71 | CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx); |
328 | 71 | struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey); |
329 | 71 | uint64_t ret; |
330 | | |
331 | 71 | assert(data != NULL); |
332 | | |
333 | 71 | for (i = 0; i < MAX_QPS; i++) { |
334 | 71 | if (data->thread_qps[i].lock == lock) { |
335 | | /* |
336 | | * we have to use __ATOMIC_RELEASE here |
337 | | * to ensure that all preceding read instructions complete |
338 | | * before the decrement is visible to ossl_synchronize_rcu |
339 | | */ |
340 | 71 | data->thread_qps[i].depth--; |
341 | 71 | if (data->thread_qps[i].depth == 0) { |
342 | 71 | ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users, |
343 | 71 | (uint64_t)1, __ATOMIC_RELEASE); |
344 | 71 | OPENSSL_assert(ret != UINT64_MAX); |
345 | 71 | data->thread_qps[i].qp = NULL; |
346 | 71 | data->thread_qps[i].lock = NULL; |
347 | 71 | } |
348 | 71 | return; |
349 | 71 | } |
350 | 71 | } |
351 | | /* |
352 | | * If we get here, we're trying to unlock a lock that we never acquired - |
353 | | * that's fatal. |
354 | | */ |
355 | 71 | assert(0); |
356 | 0 | } |
357 | | |
358 | | /* |
359 | | * Write side allocation routine to get the current qp |
360 | | * and replace it with a new one |
361 | | */ |
362 | | static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id) |
363 | 918 | { |
364 | 918 | uint32_t current_idx; |
365 | | |
366 | 918 | pthread_mutex_lock(&lock->alloc_lock); |
367 | | |
368 | | /* |
369 | | * we need at least one qp to be available with one |
370 | | * left over, so that readers can start working on |
371 | | * one that isn't yet being waited on |
372 | | */ |
373 | 918 | while (lock->group_count - lock->writers_alloced < 2) |
374 | | /* we have to wait for one to be free */ |
375 | 0 | pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock); |
376 | | |
377 | 918 | current_idx = lock->current_alloc_idx; |
378 | | |
379 | | /* Allocate the qp */ |
380 | 918 | lock->writers_alloced++; |
381 | | |
382 | | /* increment the allocation index */ |
383 | 918 | lock->current_alloc_idx = (lock->current_alloc_idx + 1) % lock->group_count; |
384 | | |
385 | 918 | *curr_id = lock->id_ctr; |
386 | 918 | lock->id_ctr++; |
387 | | |
388 | | /* |
389 | | * make the current state of everything visible by this release |
390 | | * when get_hold_current_qp acquires the next qp |
391 | | */ |
392 | 918 | ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx, |
393 | 918 | __ATOMIC_RELEASE); |
394 | | |
395 | | /* |
396 | | * this should make sure that the new value of reader_idx is visible in |
397 | | * get_hold_current_qp, directly after incrementing the users count |
398 | | */ |
399 | 918 | ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0, |
400 | 918 | __ATOMIC_RELEASE); |
401 | | |
402 | | /* wake up any waiters */ |
403 | 918 | pthread_cond_signal(&lock->alloc_signal); |
404 | 918 | pthread_mutex_unlock(&lock->alloc_lock); |
405 | 918 | return &lock->qp_group[current_idx]; |
406 | 918 | } |
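
To make the ring arithmetic concrete, a worked trace (a sketch assuming the minimum group_count of 2):

    /*
     *   start:        current_alloc_idx = 0, reader_idx = 0
     *   update_qp():  hands qp_group[0] to the writer to drain;
     *                 current_alloc_idx = 1, reader_idx = 1
     *   retire_qp():  writers_alloced drops back to 0
     *   update_qp():  hands qp_group[1] to the writer to drain;
     *                 current_alloc_idx = 0, reader_idx = 0
     *
     * The wait on (group_count - writers_alloced < 2) guarantees that the
     * qp readers are redirected to is never one still being drained.
     */
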
407 | | |
408 | | static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp) |
409 | 918 | { |
410 | 918 | pthread_mutex_lock(&lock->alloc_lock); |
411 | 918 | lock->writers_alloced--; |
412 | 918 | pthread_cond_signal(&lock->alloc_signal); |
413 | 918 | pthread_mutex_unlock(&lock->alloc_lock); |
414 | 918 | } |
415 | | |
416 | | static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock, |
417 | | uint32_t count) |
418 | 518 | { |
419 | 518 | struct rcu_qp *new = OPENSSL_zalloc(sizeof(*new) * count); |
420 | | |
421 | 518 | lock->group_count = count; |
422 | 518 | return new; |
423 | 518 | } |
424 | | |
425 | | void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock) |
426 | 702 | { |
427 | 702 | pthread_mutex_lock(&lock->write_lock); |
428 | 702 | } |
429 | | |
430 | | void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock) |
431 | 702 | { |
432 | 702 | pthread_mutex_unlock(&lock->write_lock); |
433 | 702 | } |
434 | | |
435 | | void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock) |
436 | 918 | { |
437 | 918 | struct rcu_qp *qp; |
438 | 918 | uint64_t count; |
439 | 918 | uint32_t curr_id; |
440 | 918 | struct rcu_cb_item *cb_items, *tmpcb; |
441 | | |
442 | 918 | pthread_mutex_lock(&lock->write_lock); |
443 | 918 | cb_items = lock->cb_items; |
444 | 918 | lock->cb_items = NULL; |
445 | 918 | pthread_mutex_unlock(&lock->write_lock); |
446 | | |
447 | 918 | qp = update_qp(lock, &curr_id); |
448 | | |
449 | | /* retire in order */ |
450 | 918 | pthread_mutex_lock(&lock->prior_lock); |
451 | 918 | while (lock->next_to_retire != curr_id) |
452 | 0 | pthread_cond_wait(&lock->prior_signal, &lock->prior_lock); |
453 | | |
454 | | /* |
455 | | * wait for the reader count to reach zero |
456 | | * Note the use of __ATOMIC_ACQUIRE here to ensure that any |
457 | | * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock |
458 | | * is visible prior to our read. |
459 | | * However, this is likely only necessary to silence a tsan warning, |
460 | | * because the read side should not do any write operation |
461 | | * outside the atomic itself |
462 | | */ |
463 | 918 | do { |
464 | 918 | count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE); |
465 | 918 | } while (count != (uint64_t)0); |
466 | | |
467 | 918 | lock->next_to_retire++; |
468 | 918 | pthread_cond_broadcast(&lock->prior_signal); |
469 | 918 | pthread_mutex_unlock(&lock->prior_lock); |
470 | | |
471 | 918 | retire_qp(lock, qp); |
472 | | |
473 | | /* handle any callbacks that we have */ |
474 | 1.10k | while (cb_items != NULL) { |
475 | 189 | tmpcb = cb_items; |
476 | 189 | cb_items = cb_items->next; |
477 | 189 | tmpcb->fn(tmpcb->data); |
478 | 189 | OPENSSL_free(tmpcb); |
479 | 189 | } |
480 | 918 | } |
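
The canonical write-side pattern built from these pieces looks like this (a sketch using the internal API; `global_cfg` and `new_cfg` are hypothetical):

    #include <openssl/crypto.h>
    #include "internal/rcu.h"

    void writer(CRYPTO_RCU_LOCK *rcu, void **global_cfg, void *new_cfg)
    {
        void *old_cfg;

        ossl_rcu_write_lock(rcu);
        old_cfg = *global_cfg;                       /* writers are serialized */
        ossl_rcu_assign_uptr(global_cfg, &new_cfg);  /* release-store the new value */
        ossl_rcu_write_unlock(rcu);

        ossl_synchronize_rcu(rcu);                   /* wait out pre-existing readers */
        OPENSSL_free(old_cfg);                       /* now safe to reclaim */
    }
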
481 | | |
482 | | /* |
483 | | * Note: This call assumes it is made under the protection of |
484 | | * ossl_rcu_write_lock |
485 | | */ |
486 | | int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data) |
487 | 189 | { |
488 | 189 | struct rcu_cb_item *new = OPENSSL_zalloc(sizeof(*new)); |
489 | | |
490 | 189 | if (new == NULL) |
491 | 0 | return 0; |
492 | | |
493 | 189 | new->data = data; |
494 | 189 | new->fn = cb; |
495 | | |
496 | 189 | new->next = lock->cb_items; |
497 | 189 | lock->cb_items = new; |
498 | | |
499 | 189 | return 1; |
500 | 189 | } |
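
Alternatively, reclamation can be deferred instead of blocking in the writer (a sketch; per the note above, ossl_rcu_call must run under the write lock):

    static void free_cb(void *data)
    {
        OPENSSL_free(data);
    }

    void writer_deferred(CRYPTO_RCU_LOCK *rcu, void **global_cfg, void *new_cfg)
    {
        void *old_cfg;

        ossl_rcu_write_lock(rcu);
        old_cfg = *global_cfg;
        ossl_rcu_assign_uptr(global_cfg, &new_cfg);
        /* old_cfg is freed at the next ossl_synchronize_rcu() */
        ossl_rcu_call(rcu, free_cb, old_cfg);
        ossl_rcu_write_unlock(rcu);
    }
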
501 | | |
502 | | void *ossl_rcu_uptr_deref(void **p) |
503 | 75.7M | { |
504 | 75.7M | return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE); |
505 | 75.7M | } |
506 | | |
507 | | void ossl_rcu_assign_uptr(void **p, void **v) |
508 | 40.2k | { |
509 | 40.2k | ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE); |
510 | 40.2k | } |
511 | | |
512 | | CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx) |
513 | 518 | { |
514 | 518 | struct rcu_lock_st *new; |
515 | | |
516 | | /* |
517 | | * We need a minimum of 2 qps, so one can drain while readers use the other |
518 | | */ |
519 | 518 | if (num_writers < 2) |
520 | 518 | num_writers = 2; |
521 | | |
522 | 518 | ctx = ossl_lib_ctx_get_concrete(ctx); |
523 | 518 | if (ctx == NULL) |
524 | 0 | return 0; |
525 | | |
526 | 518 | new = OPENSSL_zalloc(sizeof(*new)); |
527 | 518 | if (new == NULL) |
528 | 0 | return NULL; |
529 | | |
530 | 518 | new->ctx = ctx; |
531 | 518 | pthread_mutex_init(&new->write_lock, NULL); |
532 | 518 | pthread_mutex_init(&new->prior_lock, NULL); |
533 | 518 | pthread_mutex_init(&new->alloc_lock, NULL); |
534 | 518 | pthread_cond_init(&new->prior_signal, NULL); |
535 | 518 | pthread_cond_init(&new->alloc_signal, NULL); |
536 | | |
537 | 518 | new->qp_group = allocate_new_qp_group(new, num_writers); |
538 | 518 | if (new->qp_group == NULL) { |
539 | 0 | OPENSSL_free(new); |
540 | 0 | new = NULL; |
541 | 0 | } |
542 | | |
543 | 518 | return new; |
544 | 518 | } |
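
Lifecycle sketch (internal API; note that values below 2 are clamped as shown above):

    CRYPTO_RCU_LOCK *rcu = ossl_rcu_lock_new(1, NULL);  /* still gets 2 qps */

    if (rcu != NULL) {
        /* ... read/write/synchronize as in the earlier sketches ... */
        ossl_rcu_lock_free(rcu);  /* synchronizes one final time, then frees */
    }
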
545 | | |
546 | | void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock) |
547 | 350 | { |
548 | 350 | struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock; |
549 | | |
550 | 350 | if (lock == NULL) |
551 | 0 | return; |
552 | | |
553 | | /* make sure we're synchronized */ |
554 | 350 | ossl_synchronize_rcu(rlock); |
555 | | |
556 | 350 | OPENSSL_free(rlock->qp_group); |
557 | | /* There should only be a single qp left now */ |
558 | 350 | OPENSSL_free(rlock); |
559 | 350 | } |
560 | | |
561 | | CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void) |
562 | 10.5M | { |
563 | 10.5M | #ifdef USE_RWLOCK |
564 | 10.5M | CRYPTO_RWLOCK *lock; |
565 | | |
566 | 10.5M | if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL) |
567 | | /* Don't set error, to avoid recursion blowup. */ |
568 | 0 | return NULL; |
569 | | |
570 | 10.5M | if (pthread_rwlock_init(lock, NULL) != 0) { |
571 | 0 | OPENSSL_free(lock); |
572 | 0 | return NULL; |
573 | 0 | } |
574 | | #else |
575 | | pthread_mutexattr_t attr; |
576 | | CRYPTO_RWLOCK *lock; |
577 | | |
578 | | if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL) |
579 | | /* Don't set error, to avoid recursion blowup. */ |
580 | | return NULL; |
581 | | |
582 | | /* |
583 | | * We don't use recursive mutexes, but try to catch errors if we do. |
584 | | */ |
585 | | pthread_mutexattr_init(&attr); |
586 | | #if !defined(__TANDEM) && !defined(_SPT_MODEL_) |
587 | | #if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK) |
588 | | pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); |
589 | | #endif |
590 | | #else |
591 | | /* The SPT Thread Library does not define MUTEX attributes. */ |
592 | | #endif |
593 | | |
594 | | if (pthread_mutex_init(lock, &attr) != 0) { |
595 | | pthread_mutexattr_destroy(&attr); |
596 | | OPENSSL_free(lock); |
597 | | return NULL; |
598 | | } |
599 | | |
600 | | pthread_mutexattr_destroy(&attr); |
601 | | #endif |
602 | | |
603 | 10.5M | return lock; |
604 | 10.5M | } |
605 | | |
606 | | __owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock) |
607 | 1.08G | { |
608 | 1.08G | #ifdef USE_RWLOCK |
609 | 1.08G | if (pthread_rwlock_rdlock(lock) != 0) |
610 | 0 | return 0; |
611 | | #else |
612 | | if (pthread_mutex_lock(lock) != 0) { |
613 | | assert(errno != EDEADLK && errno != EBUSY); |
614 | | return 0; |
615 | | } |
616 | | #endif |
617 | | |
618 | 1.08G | return 1; |
619 | 1.08G | } |
620 | | |
621 | | __owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock) |
622 | 53.9M | { |
623 | 53.9M | #ifdef USE_RWLOCK |
624 | 53.9M | if (pthread_rwlock_wrlock(lock) != 0) |
625 | 0 | return 0; |
626 | | #else |
627 | | if (pthread_mutex_lock(lock) != 0) { |
628 | | assert(errno != EDEADLK && errno != EBUSY); |
629 | | return 0; |
630 | | } |
631 | | #endif |
632 | | |
633 | 53.9M | return 1; |
634 | 53.9M | } |
635 | | |
636 | | int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock) |
637 | 1.27G | { |
638 | 1.27G | #ifdef USE_RWLOCK |
639 | 1.27G | if (pthread_rwlock_unlock(lock) != 0) |
640 | 0 | return 0; |
641 | | #else |
642 | | if (pthread_mutex_unlock(lock) != 0) { |
643 | | assert(errno != EPERM); |
644 | | return 0; |
645 | | } |
646 | | #endif |
647 | | |
648 | 1.27G | return 1; |
649 | 1.27G | } |
650 | | |
651 | | void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock) |
652 | 10.5M | { |
653 | 10.5M | if (lock == NULL) |
654 | 2.41k | return; |
655 | | |
656 | 10.5M | #ifdef USE_RWLOCK |
657 | 10.5M | pthread_rwlock_destroy(lock); |
658 | | #else |
659 | | pthread_mutex_destroy(lock); |
660 | | #endif |
661 | 10.5M | OPENSSL_free(lock); |
662 | | |
663 | 10.5M | return; |
664 | 10.5M | } |
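
Usage sketch for the public lock API (real functions from <openssl/crypto.h>; `counter` is hypothetical shared state):

    #include <openssl/crypto.h>

    int bump(CRYPTO_RWLOCK *lk, int *counter)
    {
        if (!CRYPTO_THREAD_write_lock(lk))
            return 0;
        (*counter)++;
        return CRYPTO_THREAD_unlock(lk);
    }

    int read_val(CRYPTO_RWLOCK *lk, const int *counter, int *out)
    {
        if (!CRYPTO_THREAD_read_lock(lk))
            return 0;
        *out = *counter;
        return CRYPTO_THREAD_unlock(lk);
    }

The lock itself comes from CRYPTO_THREAD_lock_new() and is released with CRYPTO_THREAD_lock_free().
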
665 | | |
666 | | int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void)) |
667 | 2.39G | { |
668 | 2.39G | if (pthread_once(once, init) != 0) |
669 | 0 | return 0; |
670 | | |
671 | 2.39G | return 1; |
672 | 2.39G | } |
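
The usual one-time-initialization pattern on top of this call (a sketch; `do_init` and `init_ok` are hypothetical):

    #include <openssl/crypto.h>

    static CRYPTO_ONCE once = CRYPTO_ONCE_STATIC_INIT;
    static int init_ok = 0;

    static void do_init(void)
    {
        init_ok = 1;  /* one-time setup goes here */
    }

    int ensure_init(void)
    {
        return CRYPTO_THREAD_run_once(&once, do_init) && init_ok;
    }
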
673 | | |
674 | | int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *)) |
675 | 1.54k | { |
676 | | |
677 | 1.54k | #ifndef FIPS_MODULE |
678 | 1.54k | if (!ossl_init_thread()) |
679 | 0 | return 0; |
680 | 1.54k | #endif |
681 | | |
682 | 1.54k | if (pthread_key_create(key, cleanup) != 0) |
683 | 0 | return 0; |
684 | | |
685 | 1.54k | return 1; |
686 | 1.54k | } |
687 | | |
688 | | void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key) |
689 | 1.92G | { |
690 | 1.92G | return pthread_getspecific(*key); |
691 | 1.92G | } |
692 | | |
693 | | int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val) |
694 | 1.79k | { |
695 | 1.79k | if (pthread_setspecific(*key, val) != 0) |
696 | 0 | return 0; |
697 | | |
698 | 1.79k | return 1; |
699 | 1.79k | } |
700 | | |
701 | | int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key) |
702 | 1.38k | { |
703 | 1.38k | if (pthread_key_delete(*key) != 0) |
704 | 0 | return 0; |
705 | | |
706 | 1.38k | return 1; |
707 | 1.38k | } |
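
A thread-local usage sketch tying the four calls together (hypothetical names; on pthreads the cleanup callback runs at thread exit via pthread_key_create):

    #include <openssl/crypto.h>

    static CRYPTO_THREAD_LOCAL key;

    static void destroy_data(void *p)
    {
        OPENSSL_free(p);
    }

    int module_init(void)
    {
        return CRYPTO_THREAD_init_local(&key, destroy_data);
    }

    void *get_or_create(void)
    {
        void *p = CRYPTO_THREAD_get_local(&key);

        if (p == NULL) {
            p = OPENSSL_zalloc(64);
            if (p != NULL && !CRYPTO_THREAD_set_local(&key, p)) {
                OPENSSL_free(p);
                p = NULL;
            }
        }
        return p;
    }
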
708 | | |
709 | | CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void) |
710 | 203k | { |
711 | 203k | return pthread_self(); |
712 | 203k | } |
713 | | |
714 | | int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b) |
715 | 12.4k | { |
716 | 12.4k | return pthread_equal(a, b); |
717 | 12.4k | } |
718 | | |
719 | | int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock) |
720 | 11.8M | { |
721 | 11.8M | #if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
722 | 11.8M | if (__atomic_is_lock_free(sizeof(*val), val)) { |
723 | 11.8M | *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL); |
724 | 11.8M | return 1; |
725 | 11.8M | } |
726 | | #elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
727 | | /* This will work for all future Solaris versions. */ |
728 | | if (ret != NULL) { |
729 | | *ret = atomic_add_int_nv((volatile unsigned int *)val, amount); |
730 | | return 1; |
731 | | } |
732 | | #endif |
733 | 0 | if (lock == NULL || !CRYPTO_THREAD_write_lock(lock)) |
734 | 0 | return 0; |
735 | | |
736 | 0 | *val += amount; |
737 | 0 | *ret = *val; |
738 | |
739 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
740 | 0 | return 0; |
741 | | |
742 | 0 | return 1; |
743 | 0 | } |
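
Caller-side sketch (a hypothetical refcount; the lock argument is only used on platforms without lock-free builtin atomics, but must still be supplied):

    #include <openssl/crypto.h>

    int up_ref(int *refs, int *newval, CRYPTO_RWLOCK *lk)
    {
        /* *newval receives the post-increment value */
        return CRYPTO_atomic_add(refs, 1, newval, lk);
    }
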
744 | | |
745 | | int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret, |
746 | | CRYPTO_RWLOCK *lock) |
747 | 716 | { |
748 | 716 | #if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
749 | 716 | if (__atomic_is_lock_free(sizeof(*val), val)) { |
750 | 716 | *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL); |
751 | 716 | return 1; |
752 | 716 | } |
753 | | #elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
754 | | /* This will work for all future Solaris versions. */ |
755 | | if (ret != NULL) { |
756 | | *ret = atomic_or_64_nv(val, op); |
757 | | return 1; |
758 | | } |
759 | | #endif |
760 | 0 | if (lock == NULL || !CRYPTO_THREAD_write_lock(lock)) |
761 | 0 | return 0; |
762 | 0 | *val |= op; |
763 | 0 | *ret = *val; |
764 | |
765 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
766 | 0 | return 0; |
767 | | |
768 | 0 | return 1; |
769 | 0 | } |
770 | | |
771 | | int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock) |
772 | 2.16G | { |
773 | 2.16G | #if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
774 | 2.16G | if (__atomic_is_lock_free(sizeof(*val), val)) { |
775 | 2.16G | __atomic_load(val, ret, __ATOMIC_ACQUIRE); |
776 | 2.16G | return 1; |
777 | 2.16G | } |
778 | | #elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
779 | | /* This will work for all future Solaris versions. */ |
780 | | if (ret != NULL) { |
781 | | *ret = atomic_or_64_nv(val, 0); |
782 | | return 1; |
783 | | } |
784 | | #endif |
785 | 0 | if (lock == NULL || !CRYPTO_THREAD_read_lock(lock)) |
786 | 0 | return 0; |
787 | 0 | *ret = *val; |
788 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
789 | 0 | return 0; |
790 | | |
791 | 0 | return 1; |
792 | 0 | } |
793 | | |
794 | | int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock) |
795 | 0 | { |
796 | 0 | #if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
797 | 0 | if (__atomic_is_lock_free(sizeof(*val), val)) { |
798 | 0 | __atomic_load(val, ret, __ATOMIC_ACQUIRE); |
799 | 0 | return 1; |
800 | 0 | } |
801 | | #elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
802 | | /* This will work for all future Solaris versions. */ |
803 | | if (ret != NULL) { |
804 | | *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0); |
805 | | return 1; |
806 | | } |
807 | | #endif |
808 | 0 | if (lock == NULL || !CRYPTO_THREAD_read_lock(lock)) |
809 | 0 | return 0; |
810 | 0 | *ret = *val; |
811 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
812 | 0 | return 0; |
813 | | |
814 | 0 | return 1; |
815 | 0 | } |
816 | | |
817 | | #ifndef FIPS_MODULE |
818 | | int openssl_init_fork_handlers(void) |
819 | 0 | { |
820 | 0 | return 1; |
821 | 0 | } |
822 | | #endif /* FIPS_MODULE */ |
823 | | |
824 | | int openssl_get_fork_id(void) |
825 | 134k | { |
826 | 134k | return getpid(); |
827 | 134k | } |
828 | | #endif |