/src/openssl/crypto/threads_pthread.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved. |
3 | | * |
4 | | * Licensed under the Apache License 2.0 (the "License"). You may not use |
5 | | * this file except in compliance with the License. You can obtain a copy |
6 | | * in the file LICENSE in the source distribution or at |
7 | | * https://www.openssl.org/source/license.html |
8 | | */ |
9 | | |
10 | | /* We need to use the OPENSSL_fork_*() deprecated APIs */ |
11 | | #define OPENSSL_SUPPRESS_DEPRECATED |
12 | | |
13 | | #if !defined(__GNUC__) || !defined(__ATOMIC_ACQ_REL) || \ |
14 | | defined(BROKEN_CLANG_ATOMICS) || defined(OPENSSL_NO_STDIO) |
15 | | /* |
16 | | * We only enable REPORT_RWLOCK_CONTENTION on clang/gcc when we have |
17 | | * atomics available, because we need to use an atomic to track |
18 | | * when we can close the log file. We could use the CRYPTO_atomic_ API, |
19 | | * but that requires lock creation, which gets us into a bad recursive loop |
20 | | * when we try to initialize the file pointer. |
21 | | */ |
22 | | # ifdef REPORT_RWLOCK_CONTENTION |
23 | | # warning "RWLOCK CONTENTION REPORTING NOT SUPPORTED, Disabling" |
24 | | # undef REPORT_RWLOCK_CONTENTION |
25 | | # endif |
26 | | #endif |
27 | | |
28 | | #ifdef REPORT_RWLOCK_CONTENTION |
29 | | # define _GNU_SOURCE |
30 | | # include <execinfo.h> |
31 | | # include <unistd.h> |
32 | | #endif |
33 | | |
34 | | #include <openssl/crypto.h> |
35 | | #include <crypto/cryptlib.h> |
36 | | #include <crypto/sparse_array.h> |
37 | | #include "internal/cryptlib.h" |
38 | | #include "internal/threads_common.h" |
39 | | #include "internal/rcu.h" |
40 | | #ifdef REPORT_RWLOCK_CONTENTION |
41 | | # include "internal/time.h" |
42 | | #endif |
43 | | #include "rcu_internal.h" |
44 | | |
45 | | #if defined(__clang__) && defined(__has_feature) |
46 | | # if __has_feature(thread_sanitizer) |
47 | | # define __SANITIZE_THREAD__ |
48 | | # endif |
49 | | #endif |
50 | | |
51 | | #if defined(__SANITIZE_THREAD__) |
52 | | # include <sanitizer/tsan_interface.h> |
53 | | # define TSAN_FAKE_UNLOCK(x) __tsan_mutex_pre_unlock((x), 0); \ |
54 | | __tsan_mutex_post_unlock((x), 0) |
55 | | |
56 | | # define TSAN_FAKE_LOCK(x) __tsan_mutex_pre_lock((x), 0); \ |
57 | | __tsan_mutex_post_lock((x), 0, 0) |
58 | | #else |
59 | | # define TSAN_FAKE_UNLOCK(x) |
60 | | # define TSAN_FAKE_LOCK(x) |
61 | | #endif |
62 | | |
63 | | #if defined(__sun) |
64 | | # include <atomic.h> |
65 | | #endif |
66 | | |
67 | | #if defined(__apple_build_version__) && __apple_build_version__ < 6000000 |
68 | | /* |
69 | | * OS X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE |
70 | | * and __ATOMIC_ACQ_REL, but which expects only one parameter for |
71 | | * __atomic_is_lock_free(), i.e. __atomic_is_lock_free(sizeof(_Atomic(T))), |
72 | | * rather than two. All of this makes it impossible to use __atomic_is_lock_free here. |
73 | | * |
74 | | * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760 |
75 | | */ |
76 | | # define BROKEN_CLANG_ATOMICS |
77 | | #endif |
78 | | |
79 | | #if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS) |
80 | | |
81 | | # if defined(OPENSSL_SYS_UNIX) |
82 | | # include <sys/types.h> |
83 | | # include <unistd.h> |
84 | | # endif |
85 | | |
86 | | # include <assert.h> |
87 | | |
88 | | /* |
89 | | * The Non-Stop KLT thread model currently seems broken in its rwlock |
90 | | * implementation |
91 | | */ |
92 | | # if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) |
93 | | # define USE_RWLOCK |
94 | | # endif |
95 | | |
96 | | /* |
97 | | * For all GNU/clang atomic builtins, we also need fallbacks, to cover all |
98 | | * other compilers. |
99 | | * |
100 | | * Unfortunately, we can't do that with some "generic type", because there's no |
101 | | * guarantee that the chosen generic type is large enough to cover all cases. |
102 | | * Therefore, we implement fallbacks for each applicable type, with composed |
103 | | * names that include the type they handle. |
104 | | * |
105 | | * (an anecdote: we previously tried to use |void *| as the generic type, with |
106 | | * the thought that the pointer itself is the largest type. However, this is |
107 | | * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large) |
108 | | * |
109 | | * All applicable ATOMIC_ macros take the intended type as first parameter, so |
110 | | * they can map to the correct fallback function. In the GNU/clang case, that |
111 | | * parameter is simply ignored. |
112 | | */ |
113 | | |
114 | | /* |
115 | | * Internal types used with the ATOMIC_ macros, to make it possible to compose |
116 | | * fallback function names. |
117 | | */ |
118 | | typedef void *pvoid; |
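/*
 * A minimal sketch (illustrative only, not part of the build) of the name
 * composition the fallback macros below rely on: the ## operator can only
 * paste single identifiers, so "void *" cannot appear in a composed
 * function name, while the |pvoid| typedef can. DEMO_IMPL/demo_load_* are
 * hypothetical names used purely for illustration.
 */
#if 0
# define DEMO_IMPL(t) static t demo_load_##t(t *p) { return *p; }
DEMO_IMPL(uint64_t)     /* expands to demo_load_uint64_t(uint64_t *p) */
DEMO_IMPL(pvoid)        /* expands to demo_load_pvoid(pvoid *p)       */
#endif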
119 | | |
120 | | # if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \ |
121 | | && !defined(USE_ATOMIC_FALLBACKS) |
122 | 3.37k | # define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o) |
123 | 5 | # define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o) |
124 | 357 | # define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o) |
125 | 5 | # define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o) |
126 | 0 | # define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o) |
127 | | # else |
128 | | static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER; |
129 | | |
130 | | # define IMPL_fallback_atomic_load_n(t) \ |
131 | | static ossl_inline t fallback_atomic_load_n_##t(t *p) \ |
132 | | { \ |
133 | | t ret; \ |
134 | | \ |
135 | | pthread_mutex_lock(&atomic_sim_lock); \ |
136 | | ret = *p; \ |
137 | | pthread_mutex_unlock(&atomic_sim_lock); \ |
138 | | return ret; \ |
139 | | } |
140 | | IMPL_fallback_atomic_load_n(uint32_t) |
141 | | IMPL_fallback_atomic_load_n(uint64_t) |
142 | | IMPL_fallback_atomic_load_n(pvoid) |
143 | | |
144 | | # define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p) |
145 | | |
146 | | # define IMPL_fallback_atomic_store_n(t) \ |
147 | | static ossl_inline t fallback_atomic_store_n_##t(t *p, t v) \ |
148 | | { \ |
149 | | t ret; \ |
150 | | \ |
151 | | pthread_mutex_lock(&atomic_sim_lock); \ |
152 | | ret = *p; \ |
153 | | *p = v; \ |
154 | | pthread_mutex_unlock(&atomic_sim_lock); \ |
155 | | return ret; \ |
156 | | } |
157 | | IMPL_fallback_atomic_store_n(uint32_t) |
158 | | |
159 | | # define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v) |
160 | | |
161 | | # define IMPL_fallback_atomic_store(t) \ |
162 | | static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \ |
163 | | { \ |
164 | | pthread_mutex_lock(&atomic_sim_lock); \ |
165 | | *p = *v; \ |
166 | | pthread_mutex_unlock(&atomic_sim_lock); \ |
167 | | } |
168 | | IMPL_fallback_atomic_store(pvoid) |
169 | | |
170 | | # define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v) |
171 | | |
172 | | /* |
173 | | * The fallbacks that follow don't need any per type implementation, as |
174 | | * they are designed for uint64_t only. If there comes a time when multiple |
175 | | * types need to be covered, it's relatively easy to refactor them the same |
176 | | * way as the fallbacks above. |
177 | | */ |
178 | | |
179 | | static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v) |
180 | | { |
181 | | uint64_t ret; |
182 | | |
183 | | pthread_mutex_lock(&atomic_sim_lock); |
184 | | *p += v; |
185 | | ret = *p; |
186 | | pthread_mutex_unlock(&atomic_sim_lock); |
187 | | return ret; |
188 | | } |
189 | | |
190 | | # define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v) |
191 | | |
192 | | static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v) |
193 | | { |
194 | | uint64_t ret; |
195 | | |
196 | | pthread_mutex_lock(&atomic_sim_lock); |
197 | | *p -= v; |
198 | | ret = *p; |
199 | | pthread_mutex_unlock(&atomic_sim_lock); |
200 | | return ret; |
201 | | } |
202 | | |
203 | | # define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v) |
204 | | # endif |
205 | | |
206 | | /* |
207 | | * This is the core of an rcu lock. It tracks the readers and writers for the |
208 | | * current quiescence point for a given lock. The users field is the 64-bit |
209 | | * count of readers currently holding this qp |
210 | | * |
211 | | */ |
212 | | struct rcu_qp { |
213 | | uint64_t users; |
214 | | }; |
215 | | |
216 | | struct thread_qp { |
217 | | struct rcu_qp *qp; |
218 | | unsigned int depth; |
219 | | CRYPTO_RCU_LOCK *lock; |
220 | | }; |
221 | | |
222 | 0 | # define MAX_QPS 10 |
223 | | /* |
224 | | * This is the per thread tracking data |
225 | | * that is assigned to each thread participating |
226 | | * in an rcu qp |
227 | | * |
228 | | * qp points to the qp that it last acquired |
229 | | * |
230 | | */ |
231 | | struct rcu_thr_data { |
232 | | struct thread_qp thread_qps[MAX_QPS]; |
233 | | }; |
234 | | |
235 | | /* |
236 | | * This is the internal version of a CRYPTO_RCU_LOCK; |
237 | | * it is cast from CRYPTO_RCU_LOCK |
238 | | */ |
239 | | struct rcu_lock_st { |
240 | | /* Callbacks to call for next ossl_synchronize_rcu */ |
241 | | struct rcu_cb_item *cb_items; |
242 | | |
243 | | /* The context we are being created against */ |
244 | | OSSL_LIB_CTX *ctx; |
245 | | |
246 | | /* Array of quiescent points for synchronization */ |
247 | | struct rcu_qp *qp_group; |
248 | | |
249 | | /* rcu generation counter for in-order retirement */ |
250 | | uint32_t id_ctr; |
251 | | |
252 | | /* Number of elements in qp_group array */ |
253 | | uint32_t group_count; |
254 | | |
255 | | /* Index of the current qp in the qp_group array */ |
256 | | uint32_t reader_idx; |
257 | | |
258 | | /* value of the next id_ctr value to be retired */ |
259 | | uint32_t next_to_retire; |
260 | | |
261 | | /* index of the next free rcu_qp in the qp_group */ |
262 | | uint32_t current_alloc_idx; |
263 | | |
264 | | /* number of qp's in qp_group array currently being retired */ |
265 | | uint32_t writers_alloced; |
266 | | |
267 | | /* lock protecting write side operations */ |
268 | | pthread_mutex_t write_lock; |
269 | | |
270 | | /* lock protecting updates to writers_alloced/current_alloc_idx */ |
271 | | pthread_mutex_t alloc_lock; |
272 | | |
273 | | /* signal to wake threads waiting on alloc_lock */ |
274 | | pthread_cond_t alloc_signal; |
275 | | |
276 | | /* lock to enforce in-order retirement */ |
277 | | pthread_mutex_t prior_lock; |
278 | | |
279 | | /* signal to wake threads waiting on prior_lock */ |
280 | | pthread_cond_t prior_signal; |
281 | | }; |
282 | | |
283 | | /* Read side acquisition of the current qp */ |
284 | | static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock) |
285 | 0 | { |
286 | 0 | uint32_t qp_idx; |
287 | | |
288 | | /* get the current qp index */ |
289 | 0 | for (;;) { |
290 | 0 | qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED); |
291 | | |
292 | | /* |
293 | | * Notes on use of __ATOMIC_ACQUIRE |
294 | | * We need to ensure the following: |
295 | | * 1) That subsequent operations aren't optimized by hoisting them above |
296 | | * this operation. Specifically, we don't want the below re-load of |
297 | | * qp_idx to get optimized away |
298 | | * 2) We want to ensure that any updating of reader_idx on the write side |
299 | | * of the lock is flushed from a local cpu cache so that we see any |
300 | | * updates prior to the load. This is a non-issue on cache coherent |
301 | | * systems like x86, but is relevant on other arches |
302 | | */ |
303 | 0 | ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1, |
304 | 0 | __ATOMIC_ACQUIRE); |
305 | | |
306 | | /* if the idx hasn't changed, we're good, else try again */ |
307 | 0 | if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, |
308 | 0 | __ATOMIC_RELAXED)) |
309 | 0 | break; |
310 | | |
311 | 0 | ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1, |
312 | 0 | __ATOMIC_RELAXED); |
313 | 0 | } |
314 | |
315 | 0 | return &lock->qp_group[qp_idx]; |
316 | 0 | } |
317 | | |
318 | | static void ossl_rcu_free_local_data(void *arg) |
319 | 0 | { |
320 | 0 | OSSL_LIB_CTX *ctx = arg; |
321 | 0 | struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx); |
322 | |
323 | 0 | CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx, NULL); |
324 | 0 | OPENSSL_free(data); |
325 | 0 | } |
326 | | |
327 | | void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock) |
328 | 0 | { |
329 | 0 | struct rcu_thr_data *data; |
330 | 0 | int i, available_qp = -1; |
331 | | |
332 | | /* |
333 | | * look up this thread's rcu tracking data for this lock's |
334 | | * context; it is created below on first use |
335 | | */ |
336 | 0 | data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx); |
337 | |
338 | 0 | if (data == NULL) { |
339 | 0 | data = OPENSSL_zalloc(sizeof(*data)); |
340 | 0 | OPENSSL_assert(data != NULL); |
341 | 0 | CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx, data); |
342 | 0 | ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data); |
343 | 0 | } |
344 | |
345 | 0 | for (i = 0; i < MAX_QPS; i++) { |
346 | 0 | if (data->thread_qps[i].qp == NULL && available_qp == -1) |
347 | 0 | available_qp = i; |
348 | | /* If we have a hold on this lock already, we're good */ |
349 | 0 | if (data->thread_qps[i].lock == lock) { |
350 | 0 | data->thread_qps[i].depth++; |
351 | 0 | return; |
352 | 0 | } |
353 | 0 | } |
354 | | |
355 | | /* |
356 | | * if we get here, then we don't have a hold on this lock yet |
357 | | */ |
358 | 0 | assert(available_qp != -1); |
359 | |
360 | 0 | data->thread_qps[available_qp].qp = get_hold_current_qp(lock); |
361 | 0 | data->thread_qps[available_qp].depth = 1; |
362 | 0 | data->thread_qps[available_qp].lock = lock; |
363 | 0 | } |
364 | | |
365 | | void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock) |
366 | 0 | { |
367 | 0 | int i; |
368 | 0 | struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx); |
369 | 0 | uint64_t ret; |
370 | |
371 | 0 | assert(data != NULL); |
372 | |
373 | 0 | for (i = 0; i < MAX_QPS; i++) { |
374 | 0 | if (data->thread_qps[i].lock == lock) { |
375 | | /* |
376 | | * we have to use __ATOMIC_RELEASE here |
377 | | * to ensure that all preceding read instructions complete |
378 | | * before the decrement is visible to ossl_synchronize_rcu |
379 | | */ |
380 | 0 | data->thread_qps[i].depth--; |
381 | 0 | if (data->thread_qps[i].depth == 0) { |
382 | 0 | ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users, |
383 | 0 | (uint64_t)1, __ATOMIC_RELEASE); |
384 | 0 | OPENSSL_assert(ret != UINT64_MAX); |
385 | 0 | data->thread_qps[i].qp = NULL; |
386 | 0 | data->thread_qps[i].lock = NULL; |
387 | 0 | } |
388 | 0 | return; |
389 | 0 | } |
390 | 0 | } |
391 | | /* |
392 | | * If we get here, we're trying to unlock a lock that we never acquired - |
393 | | * that's fatal. |
394 | | */ |
395 | 0 | assert(0); |
396 | 0 | } |
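/*
 * A hedged usage sketch of the read side (illustrative only; the function
 * and the |shared_cfg| pointer are hypothetical, not part of this file).
 * A reader brackets its dereferences with read_lock/read_unlock and must
 * not use the pointer after the unlock, since a writer may free it then.
 */
#if 0
static void reader_example(CRYPTO_RCU_LOCK *rcu, void **shared_cfg)
{
    void *cfg;

    ossl_rcu_read_lock(rcu);
    cfg = ossl_rcu_uptr_deref(shared_cfg);
    /* ... read through cfg here; it is stable until the unlock ... */
    ossl_rcu_read_unlock(rcu);
}
#endif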
397 | | |
398 | | /* |
399 | | * Write side allocation routine to get the current qp |
400 | | * and replace it with a new one |
401 | | */ |
402 | | static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id) |
403 | 5 | { |
404 | 5 | uint32_t current_idx; |
405 | | |
406 | 5 | pthread_mutex_lock(&lock->alloc_lock); |
407 | | |
408 | | /* |
409 | | * we need one qp for this writer plus at least one |
410 | | * left over, so that readers can start working on |
411 | | * a qp that isn't yet being waited on |
412 | | */ |
413 | 5 | while (lock->group_count - lock->writers_alloced < 2) |
414 | | /* we have to wait for one to be free */ |
415 | 0 | pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock); |
416 | | |
417 | 5 | current_idx = lock->current_alloc_idx; |
418 | | |
419 | | /* Allocate the qp */ |
420 | 5 | lock->writers_alloced++; |
421 | | |
422 | | /* increment the allocation index */ |
423 | 5 | lock->current_alloc_idx = |
424 | 5 | (lock->current_alloc_idx + 1) % lock->group_count; |
425 | | |
426 | 5 | *curr_id = lock->id_ctr; |
427 | 5 | lock->id_ctr++; |
428 | | |
429 | 5 | ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx, |
430 | 5 | __ATOMIC_RELAXED); |
431 | | |
432 | | /* |
433 | | * this should make sure that the new value of reader_idx is visible in |
434 | | * get_hold_current_qp, directly after incrementing the users count |
435 | | */ |
436 | 5 | ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0, |
437 | 5 | __ATOMIC_RELEASE); |
438 | | |
439 | | /* wake up any waiters */ |
440 | 5 | pthread_cond_signal(&lock->alloc_signal); |
441 | 5 | pthread_mutex_unlock(&lock->alloc_lock); |
442 | 5 | return &lock->qp_group[current_idx]; |
443 | 5 | } |
444 | | |
445 | | static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp) |
446 | 5 | { |
447 | 5 | pthread_mutex_lock(&lock->alloc_lock); |
448 | 5 | lock->writers_alloced--; |
449 | 5 | pthread_cond_signal(&lock->alloc_signal); |
450 | 5 | pthread_mutex_unlock(&lock->alloc_lock); |
451 | 5 | } |
452 | | |
453 | | static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock, |
454 | | uint32_t count) |
455 | 2 | { |
456 | 2 | struct rcu_qp *new = |
457 | 2 | OPENSSL_zalloc(sizeof(*new) * count); |
458 | | |
459 | 2 | lock->group_count = count; |
460 | 2 | return new; |
461 | 2 | } |
462 | | |
463 | | void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock) |
464 | 3 | { |
465 | 3 | pthread_mutex_lock(&lock->write_lock); |
466 | 3 | TSAN_FAKE_UNLOCK(&lock->write_lock); |
467 | 3 | } |
468 | | |
469 | | void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock) |
470 | 3 | { |
471 | 3 | TSAN_FAKE_LOCK(&lock->write_lock); |
472 | 3 | pthread_mutex_unlock(&lock->write_lock); |
473 | 3 | } |
474 | | |
475 | | void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock) |
476 | 5 | { |
477 | 5 | struct rcu_qp *qp; |
478 | 5 | uint64_t count; |
479 | 5 | uint32_t curr_id; |
480 | 5 | struct rcu_cb_item *cb_items, *tmpcb; |
481 | | |
482 | 5 | pthread_mutex_lock(&lock->write_lock); |
483 | 5 | cb_items = lock->cb_items; |
484 | 5 | lock->cb_items = NULL; |
485 | 5 | pthread_mutex_unlock(&lock->write_lock); |
486 | | |
487 | 5 | qp = update_qp(lock, &curr_id); |
488 | | |
489 | | /* retire in order */ |
490 | 5 | pthread_mutex_lock(&lock->prior_lock); |
491 | 5 | while (lock->next_to_retire != curr_id) |
492 | 0 | pthread_cond_wait(&lock->prior_signal, &lock->prior_lock); |
493 | | |
494 | | /* |
495 | | * Wait for the reader count to reach zero. |
496 | | * Note the use of __ATOMIC_ACQUIRE here to ensure that any |
497 | | * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock |
498 | | * is visible prior to our read. |
499 | | * However, this is likely just necessary to silence a TSAN warning, |
500 | | * because the read side should not do any write operation |
501 | | * outside the atomic itself. |
502 | | */ |
503 | 5 | do { |
504 | 5 | count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE); |
505 | 5 | } while (count != (uint64_t)0); |
506 | | |
507 | 5 | lock->next_to_retire++; |
508 | 5 | pthread_cond_broadcast(&lock->prior_signal); |
509 | 5 | pthread_mutex_unlock(&lock->prior_lock); |
510 | | |
511 | 5 | retire_qp(lock, qp); |
512 | | |
513 | | /* handle any callbacks that we have */ |
514 | 6 | while (cb_items != NULL) { |
515 | 1 | tmpcb = cb_items; |
516 | 1 | cb_items = cb_items->next; |
517 | 1 | tmpcb->fn(tmpcb->data); |
518 | 1 | OPENSSL_free(tmpcb); |
519 | 1 | } |
520 | 5 | } |
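/*
 * The matching write-side sketch (illustrative only; the names are
 * hypothetical): publish the new pointer under the write lock, then
 * synchronize before reclaiming the old one, so no reader still holding
 * a qp can observe freed memory.
 */
#if 0
static void writer_example(CRYPTO_RCU_LOCK *rcu, void **shared_cfg,
                           void *newcfg)
{
    void *oldcfg;

    ossl_rcu_write_lock(rcu);
    oldcfg = ossl_rcu_uptr_deref(shared_cfg);
    ossl_rcu_assign_uptr(shared_cfg, &newcfg);
    ossl_rcu_write_unlock(rcu);

    ossl_synchronize_rcu(rcu);          /* wait out readers of oldcfg */
    OPENSSL_free(oldcfg);
}
#endif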
521 | | |
522 | | /* |
523 | | * Note: This call assumes it's made under the protection of |
524 | | * ossl_rcu_write_lock |
525 | | */ |
526 | | int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data) |
527 | 1 | { |
528 | 1 | struct rcu_cb_item *new = |
529 | 1 | OPENSSL_zalloc(sizeof(*new)); |
530 | | |
531 | 1 | if (new == NULL) |
532 | 0 | return 0; |
533 | | |
534 | 1 | new->data = data; |
535 | 1 | new->fn = cb; |
536 | | |
537 | 1 | new->next = lock->cb_items; |
538 | 1 | lock->cb_items = new; |
539 | | |
540 | 1 | return 1; |
541 | 1 | } |
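/*
 * A sketch of deferred reclamation via ossl_rcu_call (illustrative only;
 * free_cb/deferred_replace are hypothetical). Instead of blocking in
 * ossl_synchronize_rcu, the writer queues the old pointer and the next
 * synchronization invokes the callback once all readers have drained.
 */
#if 0
static void free_cb(void *p)
{
    OPENSSL_free(p);
}

/* assumed to be called under ossl_rcu_write_lock, per the note above */
static int deferred_replace(CRYPTO_RCU_LOCK *rcu, void **shared_cfg,
                            void *newcfg)
{
    void *oldcfg = ossl_rcu_uptr_deref(shared_cfg);

    ossl_rcu_assign_uptr(shared_cfg, &newcfg);
    return ossl_rcu_call(rcu, free_cb, oldcfg);
}
#endif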
542 | | |
543 | | void *ossl_rcu_uptr_deref(void **p) |
544 | 3.37k | { |
545 | 3.37k | return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE); |
546 | 3.37k | } |
547 | | |
548 | | void ossl_rcu_assign_uptr(void **p, void **v) |
549 | 357 | { |
550 | 357 | ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE); |
551 | 357 | } |
552 | | |
553 | | CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx) |
554 | 2 | { |
555 | 2 | struct rcu_lock_st *new; |
556 | | |
557 | | /* |
558 | | * We need a minimum of 2 qps |
559 | | */ |
560 | 2 | if (num_writers < 2) |
561 | 2 | num_writers = 2; |
562 | | |
563 | 2 | ctx = ossl_lib_ctx_get_concrete(ctx); |
564 | 2 | if (ctx == NULL) |
565 | 0 | return 0; |
566 | | |
567 | 2 | new = OPENSSL_zalloc(sizeof(*new)); |
568 | 2 | if (new == NULL) |
569 | 0 | return NULL; |
570 | | |
571 | 2 | new->ctx = ctx; |
572 | 2 | pthread_mutex_init(&new->write_lock, NULL); |
573 | 2 | pthread_mutex_init(&new->prior_lock, NULL); |
574 | 2 | pthread_mutex_init(&new->alloc_lock, NULL); |
575 | 2 | pthread_cond_init(&new->prior_signal, NULL); |
576 | 2 | pthread_cond_init(&new->alloc_signal, NULL); |
577 | | |
578 | 2 | new->qp_group = allocate_new_qp_group(new, num_writers); |
579 | 2 | if (new->qp_group == NULL) { |
580 | 0 | OPENSSL_free(new); |
581 | 0 | new = NULL; |
582 | 0 | } |
583 | | |
584 | 2 | return new; |
585 | 2 | } |
586 | | |
587 | | void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock) |
588 | 2 | { |
589 | 2 | struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock; |
590 | | |
591 | 2 | if (lock == NULL) |
592 | 0 | return; |
593 | | |
594 | | /* make sure we're synchronized */ |
595 | 2 | ossl_synchronize_rcu(rlock); |
596 | | |
597 | 2 | OPENSSL_free(rlock->qp_group); |
598 | | /* There should only be a single qp left now */ |
599 | 2 | OPENSSL_free(rlock); |
600 | 2 | } |
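/*
 * The whole lock lifecycle in one place (a sketch, with hypothetical
 * names): num_writers sizes the qp_group, and is clamped to a minimum of
 * 2 above so one qp can serve readers while another is being retired.
 */
#if 0
static int lifecycle_example(OSSL_LIB_CTX *ctx)
{
    CRYPTO_RCU_LOCK *rcu = ossl_rcu_lock_new(4, ctx);

    if (rcu == NULL)
        return 0;
    /* ... readers and writers operate on shared pointers ... */
    ossl_rcu_lock_free(rcu);    /* synchronizes once more, then frees */
    return 1;
}
#endif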
601 | | |
602 | | # ifdef REPORT_RWLOCK_CONTENTION |
603 | | /* |
604 | | * Normally we would use a BIO here to do this, but we create locks during |
604 | | * library initialization, and creating a BIO too early creates a recursive set |
605 | | * of stack calls that leads us to call CRYPTO_THREAD_run_once while currently |
607 | | * executing the init routine for various run_once functions, which leads to |
608 | | * deadlock. Avoid that by just using a FILE pointer. Also note that we |
609 | | * directly use a pthread_mutex_t to protect access from multiple threads |
610 | | * to the contention log file. We do this because we want to avoid use |
611 | | * of the CRYPTO_THREAD api so as to prevent recursive blocking reports. |
612 | | */ |
613 | | static FILE *contention_fp = NULL; |
614 | | static CRYPTO_ONCE init_contention_fp = CRYPTO_ONCE_STATIC_INIT; |
615 | | static int rwlock_count = 0; |
616 | | pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER; |
617 | | CRYPTO_THREAD_LOCAL thread_contention_data; |
618 | | |
619 | | static void destroy_contention_data(void *data) |
620 | | { |
621 | | OPENSSL_free(data); |
622 | | } |
623 | | |
624 | | struct stack_info { |
625 | | unsigned int nptrs; |
626 | | int write; |
627 | | OSSL_TIME start; |
628 | | OSSL_TIME duration; |
629 | | char **strings; |
630 | | }; |
631 | | |
632 | | # define STACKS_COUNT 32 |
633 | | struct stack_traces { |
634 | | int lock_depth; |
635 | | size_t idx; |
636 | | struct stack_info stacks[STACKS_COUNT]; |
637 | | }; |
638 | | |
639 | | static void init_contention_fp_once(void) |
640 | | { |
641 | | # ifdef FIPS_MODULE |
642 | | contention_fp = fopen("lock-contention-log-fips.txt", "w"); |
643 | | # else |
644 | | contention_fp = fopen("lock-contention-log.txt", "w"); |
645 | | # endif |
646 | | if (contention_fp == NULL) |
647 | | fprintf(stderr, "Contention log file could not be opened, log will not be recorded\n"); |
648 | | |
649 | | /* |
650 | | * Create a thread local key here to store our list of stack traces |
651 | | * to be printed when we unlock the lock we are holding |
652 | | */ |
653 | | CRYPTO_THREAD_init_local(&thread_contention_data, destroy_contention_data); |
654 | | return; |
655 | | } |
656 | | # endif |
657 | | |
658 | | CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void) |
659 | 27 | { |
660 | 27 | # ifdef USE_RWLOCK |
661 | 27 | CRYPTO_RWLOCK *lock; |
662 | | |
663 | | # ifdef REPORT_RWLOCK_CONTENTION |
664 | | CRYPTO_THREAD_run_once(&init_contention_fp, init_contention_fp_once); |
665 | | __atomic_add_fetch(&rwlock_count, 1, __ATOMIC_ACQ_REL); |
666 | | { |
667 | | struct stack_traces *thread_stack_info; |
668 | | |
669 | | thread_stack_info = CRYPTO_THREAD_get_local(&thread_contention_data); |
670 | | if (thread_stack_info == NULL) { |
671 | | thread_stack_info = OPENSSL_zalloc(sizeof(struct stack_traces)); |
672 | | CRYPTO_THREAD_set_local(&thread_contention_data, thread_stack_info); |
673 | | } |
674 | | } |
675 | | # endif |
676 | | |
677 | 27 | if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL) |
678 | | /* Don't set error, to avoid recursion blowup. */ |
679 | 0 | return NULL; |
680 | | |
681 | 27 | if (pthread_rwlock_init(lock, NULL) != 0) { |
682 | 0 | OPENSSL_free(lock); |
683 | 0 | return NULL; |
684 | 0 | } |
685 | | # else |
686 | | pthread_mutexattr_t attr; |
687 | | CRYPTO_RWLOCK *lock; |
688 | | |
689 | | if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL) |
690 | | /* Don't set error, to avoid recursion blowup. */ |
691 | | return NULL; |
692 | | |
693 | | /* |
694 | | * We don't use recursive mutexes, but try to catch errors if we do. |
695 | | */ |
696 | | pthread_mutexattr_init(&attr); |
697 | | # if !defined (__TANDEM) && !defined (_SPT_MODEL_) |
698 | | # if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK) |
699 | | pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); |
700 | | # endif |
701 | | # else |
702 | | /* The SPT Thread Library does not define MUTEX attributes. */ |
703 | | # endif |
704 | | |
705 | | if (pthread_mutex_init(lock, &attr) != 0) { |
706 | | pthread_mutexattr_destroy(&attr); |
707 | | OPENSSL_free(lock); |
708 | | return NULL; |
709 | | } |
710 | | |
711 | | pthread_mutexattr_destroy(&attr); |
712 | | # endif |
713 | | |
714 | 27 | return lock; |
715 | 27 | } |
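/*
 * A caller-side sketch for the portable lock API (illustrative only;
 * locked_update is hypothetical). The same calls work whether the build
 * selected the rwlock or the error-checking mutex variant above, so
 * callers never see which primitive backs a CRYPTO_RWLOCK.
 */
#if 0
static int locked_update(CRYPTO_RWLOCK *lock, int *counter)
{
    if (!CRYPTO_THREAD_write_lock(lock))
        return 0;
    (*counter)++;
    return CRYPTO_THREAD_unlock(lock);
}
#endif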
716 | | |
717 | | # ifdef REPORT_RWLOCK_CONTENTION |
718 | | static void print_stack_traces(struct stack_traces *traces, FILE *fptr) |
719 | | { |
720 | | unsigned int j; |
721 | | |
722 | | pthread_mutex_lock(&log_lock); |
723 | | while (traces != NULL && traces->idx >= 1) { |
724 | | traces->idx--; |
725 | | fprintf(fptr, "lock blocked on %s for %zu usec at time %zu tid %d\n", |
726 | | traces->stacks[traces->idx].write == 1 ? "WRITE" : "READ", |
727 | | ossl_time2us(traces->stacks[traces->idx].duration), |
728 | | ossl_time2us(traces->stacks[traces->idx].start), |
729 | | gettid()); |
730 | | if (traces->stacks[traces->idx].strings != NULL) { |
731 | | for (j = 0; j < traces->stacks[traces->idx].nptrs; j++) |
732 | | fprintf(fptr, "%s\n", traces->stacks[traces->idx].strings[j]); |
733 | | free(traces->stacks[traces->idx].strings); |
734 | | } else { |
735 | | fprintf(fptr, "No stack trace available\n"); |
736 | | } |
737 | | fprintf(fptr, "\n"); |
738 | | } |
739 | | pthread_mutex_unlock(&log_lock); |
740 | | } |
741 | | # endif |
742 | | |
743 | | # define BT_BUF_SIZE 1024 |
744 | | |
745 | | __owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock) |
746 | 828 | { |
747 | 828 | # ifdef USE_RWLOCK |
748 | | # ifdef REPORT_RWLOCK_CONTENTION |
749 | | struct stack_traces *traces = CRYPTO_THREAD_get_local(&thread_contention_data); |
750 | | |
751 | | if (ossl_unlikely(traces == NULL)) { |
752 | | traces = OPENSSL_zalloc(sizeof(struct stack_traces)); |
753 | | CRYPTO_THREAD_set_local(&thread_contention_data, traces); |
754 | | if (ossl_unlikely(traces == NULL)) |
755 | | return 0; |
756 | | } |
757 | | |
758 | | traces->lock_depth++; |
759 | | if (pthread_rwlock_tryrdlock(lock)) { |
760 | | void *buffer[BT_BUF_SIZE]; |
761 | | OSSL_TIME start, end; |
762 | | |
763 | | start = ossl_time_now(); |
764 | | if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0)) { |
765 | | traces->lock_depth--; |
766 | | return 0; |
767 | | } |
768 | | end = ossl_time_now(); |
770 | | traces->stacks[traces->idx].nptrs = backtrace(buffer, BT_BUF_SIZE); |
771 | | traces->stacks[traces->idx].strings = backtrace_symbols(buffer, |
772 | | traces->stacks[traces->idx].nptrs); |
773 | | traces->stacks[traces->idx].duration = ossl_time_subtract(end, start); |
774 | | traces->stacks[traces->idx].start = start; |
775 | | traces->stacks[traces->idx].write = 0; |
776 | | traces->idx++; |
777 | | if (traces->idx >= STACKS_COUNT) { |
778 | | fprintf(stderr, "STACK RECORD OVERFLOW!\n"); |
779 | | print_stack_traces(traces, contention_fp); |
780 | | } |
781 | | } |
782 | | # else |
783 | 828 | if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0)) |
784 | 0 | return 0; |
785 | 828 | # endif |
786 | | # else |
787 | | if (pthread_mutex_lock(lock) != 0) { |
788 | | assert(errno != EDEADLK && errno != EBUSY); |
789 | | return 0; |
790 | | } |
791 | | # endif |
792 | | |
793 | 828 | return 1; |
794 | 828 | } |
795 | | |
796 | | __owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock) |
797 | 1.68k | { |
798 | 1.68k | # ifdef USE_RWLOCK |
799 | | # ifdef REPORT_RWLOCK_CONTENTION |
800 | | struct stack_traces *traces = CRYPTO_THREAD_get_local(&thread_contention_data); |
801 | | |
802 | | if (ossl_unlikely(traces == NULL)) { |
803 | | traces = OPENSSL_zalloc(sizeof(struct stack_traces)); |
804 | | CRYPTO_THREAD_set_local(&thread_contention_data, traces); |
805 | | if (ossl_unlikely(traces == NULL)) |
806 | | return 0; |
807 | | } |
808 | | |
809 | | traces->lock_depth++; |
810 | | if (pthread_rwlock_trywrlock(lock)) { |
811 | | void *buffer[BT_BUF_SIZE]; |
812 | | OSSL_TIME start, end; |
813 | | |
814 | | start = ossl_time_now(); |
815 | | if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0)) { |
816 | | traces->lock_depth--; |
817 | | return 0; |
818 | | } |
819 | | end = ossl_time_now(); |
820 | | traces->stacks[traces->idx].nptrs = backtrace(buffer, BT_BUF_SIZE); |
821 | | traces->stacks[traces->idx].strings = backtrace_symbols(buffer, |
822 | | traces->stacks[traces->idx].nptrs); |
823 | | traces->stacks[traces->idx].duration = ossl_time_subtract(end, start); |
824 | | traces->stacks[traces->idx].start = start; |
825 | | traces->stacks[traces->idx].write = 1; |
826 | | traces->idx++; |
827 | | if (traces->idx >= STACKS_COUNT) { |
828 | | fprintf(stderr, "STACK RECORD OVERFLOW!\n"); |
829 | | print_stack_traces(traces, contention_fp); |
830 | | } |
831 | | } |
832 | | # else |
833 | 1.68k | if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0)) |
834 | 0 | return 0; |
835 | 1.68k | # endif |
836 | | # else |
837 | | if (pthread_mutex_lock(lock) != 0) { |
838 | | assert(errno != EDEADLK && errno != EBUSY); |
839 | | return 0; |
840 | | } |
841 | | # endif |
842 | | |
843 | 1.68k | return 1; |
844 | 1.68k | } |
845 | | |
846 | | int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock) |
847 | 2.50k | { |
848 | 2.50k | # ifdef USE_RWLOCK |
849 | 2.50k | if (pthread_rwlock_unlock(lock) != 0) |
850 | 0 | return 0; |
851 | | # ifdef REPORT_RWLOCK_CONTENTION |
852 | | { |
853 | | struct stack_traces *traces = CRYPTO_THREAD_get_local(&thread_contention_data); |
854 | | |
855 | | if (contention_fp != NULL && traces != NULL) { |
856 | | traces->lock_depth--; |
857 | | assert(traces->lock_depth >= 0); |
858 | | if (traces->lock_depth == 0) |
859 | | print_stack_traces(traces, contention_fp); |
860 | | } |
861 | | } |
862 | | # endif |
863 | | # else |
864 | | if (pthread_mutex_unlock(lock) != 0) { |
865 | | assert(errno != EPERM); |
866 | | return 0; |
867 | | } |
868 | | # endif |
869 | | |
870 | 2.50k | return 1; |
871 | 2.50k | } |
872 | | |
873 | | void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock) |
874 | 34 | { |
875 | 34 | if (lock == NULL) |
876 | 7 | return; |
877 | | # ifdef REPORT_RWLOCK_CONTENTION |
878 | | |
879 | | /* |
880 | | * Note: It's possible here that OpenSSL may allocate a lock and immediately |
881 | | * free it, in which case we would erroneously close the contention log |
882 | | * prior to the library going on to do more real work. In practice |
883 | | * that never happens though, and since this is a debug facility |
884 | | * we don't worry about that here. |
885 | | */ |
886 | | if (__atomic_add_fetch(&rwlock_count, -1, __ATOMIC_ACQ_REL) == 0) { |
887 | | fclose(contention_fp); |
888 | | contention_fp = NULL; |
889 | | } |
890 | | # endif |
891 | | |
892 | 27 | # ifdef USE_RWLOCK |
893 | 27 | pthread_rwlock_destroy(lock); |
894 | | # else |
895 | | pthread_mutex_destroy(lock); |
896 | | # endif |
897 | 27 | OPENSSL_free(lock); |
898 | | |
899 | 27 | return; |
900 | 34 | } |
901 | | |
902 | | int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void)) |
903 | 1.52k | { |
904 | 1.52k | if (ossl_unlikely(pthread_once(once, init) != 0)) |
905 | 0 | return 0; |
906 | | |
907 | 1.52k | return 1; |
908 | 1.52k | } |
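/*
 * A once-initialization sketch (illustrative only; the demo_* names are
 * hypothetical). pthread_once guarantees demo_init runs exactly once,
 * no matter how many threads race into demo_get.
 */
#if 0
static CRYPTO_ONCE demo_once = CRYPTO_ONCE_STATIC_INIT;
static int demo_ready = 0;

static void demo_init(void)
{
    demo_ready = 1;
}

static int demo_get(void)
{
    if (!CRYPTO_THREAD_run_once(&demo_once, demo_init))
        return 0;
    return demo_ready;
}
#endif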
909 | | |
910 | | int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *)) |
911 | 4 | { |
912 | 4 | if (pthread_key_create(key, cleanup) != 0) |
913 | 0 | return 0; |
914 | | |
915 | 4 | return 1; |
916 | 4 | } |
917 | | |
918 | | void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key) |
919 | 296 | { |
920 | 296 | return pthread_getspecific(*key); |
921 | 296 | } |
922 | | |
923 | | int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val) |
924 | 4 | { |
925 | 4 | if (pthread_setspecific(*key, val) != 0) |
926 | 0 | return 0; |
927 | | |
928 | 4 | return 1; |
929 | 4 | } |
930 | | |
931 | | int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key) |
932 | 3 | { |
933 | 3 | if (pthread_key_delete(*key) != 0) |
934 | 0 | return 0; |
935 | | |
936 | 3 | return 1; |
937 | 3 | } |
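/*
 * A thread-local usage sketch (illustrative only; demo_key and
 * demo_get_state are hypothetical, and the key is assumed to have been
 * created once with CRYPTO_THREAD_init_local). Each thread lazily
 * allocates its own state, mirroring how this file manages rcu_thr_data
 * and the contention-trace buffers.
 */
#if 0
static CRYPTO_THREAD_LOCAL demo_key;

static void *demo_get_state(void)
{
    void *state = CRYPTO_THREAD_get_local(&demo_key);

    if (state == NULL) {
        state = OPENSSL_zalloc(64);
        if (state != NULL && !CRYPTO_THREAD_set_local(&demo_key, state)) {
            OPENSSL_free(state);
            state = NULL;
        }
    }
    return state;
}
#endif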
938 | | |
939 | | CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void) |
940 | 0 | { |
941 | 0 | return pthread_self(); |
942 | 0 | } |
943 | | |
944 | | int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b) |
945 | 0 | { |
946 | 0 | return pthread_equal(a, b); |
947 | 0 | } |
948 | | |
949 | | int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock) |
950 | 4 | { |
951 | 4 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
952 | 4 | if (__atomic_is_lock_free(sizeof(*val), val)) { |
953 | 4 | *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL); |
954 | 4 | return 1; |
955 | 4 | } |
956 | | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
957 | | /* This will work for all future Solaris versions. */ |
958 | | if (ret != NULL) { |
959 | | *ret = atomic_add_int_nv((volatile unsigned int *)val, amount); |
960 | | return 1; |
961 | | } |
962 | | # endif |
963 | 0 | if (lock == NULL || !CRYPTO_THREAD_write_lock(lock)) |
964 | 0 | return 0; |
965 | | |
966 | 0 | *val += amount; |
967 | 0 | *ret = *val; |
968 | |
969 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
970 | 0 | return 0; |
971 | | |
972 | 0 | return 1; |
973 | 0 | } |
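/*
 * A caller-side sketch (illustrative only; bump_refcount is
 * hypothetical). The |lock| argument is purely a fallback: on builds
 * with usable __atomic builtins (or the Solaris atomics) the counter is
 * updated lock-free and the lock is never taken.
 */
#if 0
static int bump_refcount(int *refcount, CRYPTO_RWLOCK *lock)
{
    int newcount;

    if (!CRYPTO_atomic_add(refcount, 1, &newcount, lock))
        return -1;
    return newcount;
}
#endif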
974 | | |
975 | | int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret, |
976 | | CRYPTO_RWLOCK *lock) |
977 | 0 | { |
978 | 0 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
979 | 0 | if (__atomic_is_lock_free(sizeof(*val), val)) { |
980 | 0 | *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL); |
981 | 0 | return 1; |
982 | 0 | } |
983 | | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
984 | | /* This will work for all future Solaris versions. */ |
985 | | if (ret != NULL) { |
986 | | *ret = atomic_add_64_nv(val, op); |
987 | | return 1; |
988 | | } |
989 | | # endif |
990 | 0 | if (lock == NULL || !CRYPTO_THREAD_write_lock(lock)) |
991 | 0 | return 0; |
992 | 0 | *val += op; |
993 | 0 | *ret = *val; |
994 | |
995 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
996 | 0 | return 0; |
997 | | |
998 | 0 | return 1; |
999 | 0 | } |
1000 | | |
1001 | | int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret, |
1002 | | CRYPTO_RWLOCK *lock) |
1003 | 0 | { |
1004 | 0 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
1005 | 0 | if (__atomic_is_lock_free(sizeof(*val), val)) { |
1006 | 0 | *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL); |
1007 | 0 | return 1; |
1008 | 0 | } |
1009 | | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
1010 | | /* This will work for all future Solaris versions. */ |
1011 | | if (ret != NULL) { |
1012 | | *ret = atomic_and_64_nv(val, op); |
1013 | | return 1; |
1014 | | } |
1015 | | # endif |
1016 | 0 | if (lock == NULL || !CRYPTO_THREAD_write_lock(lock)) |
1017 | 0 | return 0; |
1018 | 0 | *val &= op; |
1019 | 0 | *ret = *val; |
1020 | |
1021 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
1022 | 0 | return 0; |
1023 | | |
1024 | 0 | return 1; |
1025 | 0 | } |
1026 | | |
1027 | | int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret, |
1028 | | CRYPTO_RWLOCK *lock) |
1029 | 3 | { |
1030 | 3 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
1031 | 3 | if (__atomic_is_lock_free(sizeof(*val), val)) { |
1032 | 3 | *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL); |
1033 | 3 | return 1; |
1034 | 3 | } |
1035 | | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
1036 | | /* This will work for all future Solaris versions. */ |
1037 | | if (ret != NULL) { |
1038 | | *ret = atomic_or_64_nv(val, op); |
1039 | | return 1; |
1040 | | } |
1041 | | # endif |
1042 | 0 | if (lock == NULL || !CRYPTO_THREAD_write_lock(lock)) |
1043 | 0 | return 0; |
1044 | 0 | *val |= op; |
1045 | 0 | *ret = *val; |
1046 | |
1047 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
1048 | 0 | return 0; |
1049 | | |
1050 | 0 | return 1; |
1051 | 0 | } |
1052 | | |
1053 | | int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock) |
1054 | 1.32k | { |
1055 | 1.32k | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
1056 | 1.32k | if (__atomic_is_lock_free(sizeof(*val), val)) { |
1057 | 1.32k | __atomic_load(val, ret, __ATOMIC_ACQUIRE); |
1058 | 1.32k | return 1; |
1059 | 1.32k | } |
1060 | | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
1061 | | /* This will work for all future Solaris versions. */ |
1062 | | if (ret != NULL) { |
1063 | | *ret = atomic_or_64_nv(val, 0); |
1064 | | return 1; |
1065 | | } |
1066 | | # endif |
1067 | 0 | if (lock == NULL || !CRYPTO_THREAD_read_lock(lock)) |
1068 | 0 | return 0; |
1069 | 0 | *ret = *val; |
1070 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
1071 | 0 | return 0; |
1072 | | |
1073 | 0 | return 1; |
1074 | 0 | } |
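/*
 * The read-side counterpart (a sketch; sample_stat is hypothetical):
 * a 64-bit statistic is sampled with acquire semantics, again falling
 * back to a read lock only when no lock-free atomic is available.
 */
#if 0
static uint64_t sample_stat(uint64_t *stat, CRYPTO_RWLOCK *lock)
{
    uint64_t v = 0;

    if (!CRYPTO_atomic_load(stat, &v, lock))
        return 0;
    return v;
}
#endif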
1075 | | |
1076 | | int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock) |
1077 | 354 | { |
1078 | 354 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
1079 | 354 | if (__atomic_is_lock_free(sizeof(*dst), dst)) { |
1080 | 354 | __atomic_store(dst, &val, __ATOMIC_RELEASE); |
1081 | 354 | return 1; |
1082 | 354 | } |
1083 | | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
1084 | | /* This will work for all future Solaris versions. */ |
1085 | | if (dst != NULL) { |
1086 | | atomic_swap_64(dst, val); |
1087 | | return 1; |
1088 | | } |
1089 | | # endif |
1090 | 0 | if (lock == NULL || !CRYPTO_THREAD_write_lock(lock)) |
1091 | 0 | return 0; |
1092 | 0 | *dst = val; |
1093 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
1094 | 0 | return 0; |
1095 | | |
1096 | 0 | return 1; |
1097 | 0 | } |
1098 | | |
1099 | | int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock) |
1100 | 0 | { |
1101 | 0 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS) |
1102 | 0 | if (__atomic_is_lock_free(sizeof(*val), val)) { |
1103 | 0 | __atomic_load(val, ret, __ATOMIC_ACQUIRE); |
1104 | 0 | return 1; |
1105 | 0 | } |
1106 | | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11)) |
1107 | | /* This will work for all future Solaris versions. */ |
1108 | | if (ret != NULL) { |
1109 | | *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0); |
1110 | | return 1; |
1111 | | } |
1112 | | # endif |
1113 | 0 | if (lock == NULL || !CRYPTO_THREAD_read_lock(lock)) |
1114 | 0 | return 0; |
1115 | 0 | *ret = *val; |
1116 | 0 | if (!CRYPTO_THREAD_unlock(lock)) |
1117 | 0 | return 0; |
1118 | | |
1119 | 0 | return 1; |
1120 | 0 | } |
1121 | | |
1122 | | # ifndef FIPS_MODULE |
1123 | | int openssl_init_fork_handlers(void) |
1124 | 0 | { |
1125 | 0 | return 1; |
1126 | 0 | } |
1127 | | # endif /* FIPS_MODULE */ |
1128 | | |
1129 | | int openssl_get_fork_id(void) |
1130 | 0 | { |
1131 | 0 | return getpid(); |
1132 | 0 | } |
1133 | | #endif |