/src/openssl/crypto/threads_pthread.c
Line  | Count  | Source  | 
1  |  | /*  | 
2  |  |  * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.  | 
3  |  |  *  | 
4  |  |  * Licensed under the Apache License 2.0 (the "License").  You may not use  | 
5  |  |  * this file except in compliance with the License.  You can obtain a copy  | 
6  |  |  * in the file LICENSE in the source distribution or at  | 
7  |  |  * https://www.openssl.org/source/license.html  | 
8  |  |  */  | 
9  |  |  | 
10  |  | /* We need to use the OPENSSL_fork_*() deprecated APIs */  | 
11  |  | #define OPENSSL_SUPPRESS_DEPRECATED  | 
12  |  |  | 
13  |  | #include <openssl/crypto.h>  | 
14  |  | #include <crypto/cryptlib.h>  | 
15  |  | #include "internal/cryptlib.h"  | 
16  |  | #include "internal/rcu.h"  | 
17  |  | #include "rcu_internal.h"  | 
18  |  |  | 
19  |  | #if defined(__clang__) && defined(__has_feature)  | 
20  |  | # if __has_feature(thread_sanitizer)  | 
21  |  | #  define __SANITIZE_THREAD__  | 
22  |  | # endif  | 
23  |  | #endif  | 
24  |  |  | 
25  |  | #if defined(__SANITIZE_THREAD__)  | 
26  |  | # include <sanitizer/tsan_interface.h>  | 
27  |  | # define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \  | 
28  |  | __tsan_mutex_post_unlock((x), 0)  | 
29  |  |  | 
30  |  | # define TSAN_FAKE_LOCK(x)  __tsan_mutex_pre_lock((x), 0); \  | 
31  |  | __tsan_mutex_post_lock((x), 0, 0)  | 
32  |  | #else  | 
33  |  | # define TSAN_FAKE_UNLOCK(x)  | 
34  |  | # define TSAN_FAKE_LOCK(x)  | 
35  |  | #endif  | 
36  |  |  | 
37  |  | #if defined(__sun)  | 
38  |  | # include <atomic.h>  | 
39  |  | #endif  | 
40  |  |  | 
41  |  | #if defined(__apple_build_version__) && __apple_build_version__ < 6000000  | 
42  |  | /*  | 
43  |  |  * OS X 10.7 and 10.8 shipped a weird version of clang which has __ATOMIC_ACQUIRE and  | 
44  |  |  * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free(),  | 
45  |  |  * with the signature __atomic_is_lock_free(sizeof(_Atomic(T))), rather than two.  | 
46  |  |  * All of this makes it impossible to use __atomic_is_lock_free here.  | 
47  |  |  *  | 
48  |  |  * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760  | 
49  |  |  */  | 
50  |  | # define BROKEN_CLANG_ATOMICS  | 
51  |  | #endif  | 
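
For reference, the two-parameter form of the builtin that the affected clang rejects is the one used by the CRYPTO_atomic_* functions later in this file (a minimal sketch, not part of the source; the function name is ours):

    #include <stdint.h>

    /* Sketch: the two-parameter GNU builtin.  The broken Apple clang described
     * above accepts only a one-parameter form, hence the BROKEN_CLANG_ATOMICS
     * guard on every use of it in this file. */
    static int example_is_lock_free(uint64_t *val)
    {
        return __atomic_is_lock_free(sizeof(*val), val);
    }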
52  |  |  | 
53  |  | #if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)  | 
54  |  |  | 
55  |  | # if defined(OPENSSL_SYS_UNIX)  | 
56  |  | #  include <sys/types.h>  | 
57  |  | #  include <unistd.h>  | 
58  |  | # endif  | 
59  |  |  | 
60  |  | # include <assert.h>  | 
61  |  |  | 
62  |  | /*  | 
63  |  |  * The Non-Stop KLT thread model currently seems broken in its rwlock  | 
64  |  |  * implementation  | 
65  |  |  */  | 
66  |  | # if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_)  | 
67  |  | #  define USE_RWLOCK  | 
68  |  | # endif  | 
69  |  |  | 
70  |  | /*  | 
71  |  |  * For all GNU/clang atomic builtins, we also need fallbacks, to cover all  | 
72  |  |  * other compilers.  | 
73  |  |  *  | 
74  |  |  * Unfortunately, we can't do that with some "generic type", because there's no  | 
75  |  |  * guarantee that the chosen generic type is large enough to cover all cases.  | 
76  |  |  * Therefore, we implement fallbacks for each applicable type, with composed  | 
77  |  |  * names that include the type they handle.  | 
78  |  |  *  | 
79  |  |  * (an anecdote: we previously tried to use |void *| as the generic type, with  | 
80  |  |  * the thought that the pointer itself is the largest type.  However, this is  | 
81  |  |  * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)  | 
82  |  |  *  | 
83  |  |  * All applicable ATOMIC_ macros take the intended type as first parameter, so  | 
84  |  |  * they can map to the correct fallback function.  In the GNU/clang case, that  | 
85  |  |  * parameter is simply ignored.  | 
86  |  |  */  | 
87  |  |  | 
88  |  | /*  | 
89  |  |  * Internal types used with the ATOMIC_ macros, to make it possible to compose  | 
90  |  |  * fallback function names.  | 
91  |  |  */  | 
92  |  | typedef void *pvoid;  | 
93  |  |  | 
94  |  | # if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \  | 
95  |  |     && !defined(USE_ATOMIC_FALLBACKS)  | 
96  | 2.29M  | #  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)  | 
97  | 15  | #  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)  | 
98  | 1.30k  | #  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)  | 
99  | 15  | #  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)  | 
100  | 0  | #  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)  | 
101  |  | # else  | 
102  |  | static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;  | 
103  |  |  | 
104  |  | #  define IMPL_fallback_atomic_load_n(t)                        \  | 
105  |  |     static ossl_inline t fallback_atomic_load_n_##t(t *p)            \  | 
106  |  |     {                                                           \ | 
107  |  |         t ret;                                                  \  | 
108  |  |                                                                 \  | 
109  |  |         pthread_mutex_lock(&atomic_sim_lock);                   \  | 
110  |  |         ret = *p;                                               \  | 
111  |  |         pthread_mutex_unlock(&atomic_sim_lock);                 \  | 
112  |  |         return ret;                                             \  | 
113  |  |     }  | 
114  |  | IMPL_fallback_atomic_load_n(uint32_t)  | 
115  |  | IMPL_fallback_atomic_load_n(uint64_t)  | 
116  |  | IMPL_fallback_atomic_load_n(pvoid)  | 
117  |  |  | 
118  |  | #  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)  | 
119  |  |  | 
120  |  | #  define IMPL_fallback_atomic_store_n(t)                       \  | 
121  |  |     static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \  | 
122  |  |     {                                                           \ | 
123  |  |         t ret;                                                  \  | 
124  |  |                                                                 \  | 
125  |  |         pthread_mutex_lock(&atomic_sim_lock);                   \  | 
126  |  |         ret = *p;                                               \  | 
127  |  |         *p = v;                                                 \  | 
128  |  |         pthread_mutex_unlock(&atomic_sim_lock);                 \  | 
129  |  |         return ret;                                             \  | 
130  |  |     }  | 
131  |  | IMPL_fallback_atomic_store_n(uint32_t)  | 
132  |  |  | 
133  |  | #  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)  | 
134  |  |  | 
135  |  | #  define IMPL_fallback_atomic_store(t)                         \  | 
136  |  |     static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \  | 
137  |  |     {                                                           \ | 
138  |  |         pthread_mutex_lock(&atomic_sim_lock);                   \  | 
139  |  |         *p = *v;                                                \  | 
140  |  |         pthread_mutex_unlock(&atomic_sim_lock);                 \  | 
141  |  |     }  | 
142  |  | IMPL_fallback_atomic_store(pvoid)  | 
143  |  |  | 
144  |  | #  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)  | 
145  |  |  | 
146  |  | /*  | 
147  |  |  * The fallbacks that follow don't need any per-type implementation, as  | 
148  |  |  * they are designed for uint64_t only.  If there comes a time when multiple  | 
149  |  |  * types need to be covered, it's relatively easy to refactor them the same  | 
150  |  |  * way as the fallbacks above.  | 
151  |  |  */  | 
152  |  |  | 
153  |  | static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)  | 
154  |  | { | 
155  |  |     uint64_t ret;  | 
156  |  |  | 
157  |  |     pthread_mutex_lock(&atomic_sim_lock);  | 
158  |  |     *p += v;  | 
159  |  |     ret = *p;  | 
160  |  |     pthread_mutex_unlock(&atomic_sim_lock);  | 
161  |  |     return ret;  | 
162  |  | }  | 
163  |  |  | 
164  |  | #  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)  | 
165  |  |  | 
166  |  | static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)  | 
167  |  | { | 
168  |  |     uint64_t ret;  | 
169  |  |  | 
170  |  |     pthread_mutex_lock(&atomic_sim_lock);  | 
171  |  |     *p -= v;  | 
172  |  |     ret = *p;  | 
173  |  |     pthread_mutex_unlock(&atomic_sim_lock);  | 
174  |  |     return ret;  | 
175  |  | }  | 
176  |  |  | 
177  |  | #  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)  | 
178  |  | # endif  | 
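
To make the type-composed dispatch concrete, here is what a single call site expands to under each branch of the #if above (illustrative only; each function corresponds to one branch, so only one of them exists in any given build):

    /* Sketch: what ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE)
     * becomes under each configuration. */
    static uint64_t load_users_builtin(uint64_t *p)
    {
        return __atomic_load_n(p, __ATOMIC_ACQUIRE);   /* GNU/clang branch */
    }

    static uint64_t load_users_fallback(uint64_t *p)
    {
        return fallback_atomic_load_n_uint64_t(p);     /* mutex-based branch */
    }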
179  |  |  | 
180  |  | /*  | 
181  |  |  * This is the core of an rcu lock. It tracks the readers and writers for the  | 
182  |  |  * current quiescence point for a given lock. users is the 64-bit count of  | 
183  |  |  * readers currently holding this qp  | 
184  |  |  *  | 
185  |  |  */  | 
186  |  | struct rcu_qp { | 
187  |  |     uint64_t users;  | 
188  |  | };  | 
189  |  |  | 
190  |  | struct thread_qp { | 
191  |  |     struct rcu_qp *qp;  | 
192  |  |     unsigned int depth;  | 
193  |  |     CRYPTO_RCU_LOCK *lock;  | 
194  |  | };  | 
195  |  |  | 
196  | 0  | # define MAX_QPS 10  | 
197  |  | /*  | 
198  |  |  * This is the per-thread tracking data  | 
199  |  |  * that is assigned to each thread participating  | 
200  |  |  * in an rcu qp  | 
201  |  |  *  | 
202  |  |  * qp points to the qp that the thread last acquired  | 
203  |  |  *  | 
204  |  |  */  | 
205  |  | struct rcu_thr_data { | 
206  |  |     struct thread_qp thread_qps[MAX_QPS];  | 
207  |  | };  | 
208  |  |  | 
209  |  | /*  | 
210  |  |  * This is the internal version of a CRYPTO_RCU_LOCK  | 
211  |  |  * it is cast from CRYPTO_RCU_LOCK  | 
212  |  |  */  | 
213  |  | struct rcu_lock_st { | 
214  |  |     /* Callbacks to call for next ossl_synchronize_rcu */  | 
215  |  |     struct rcu_cb_item *cb_items;  | 
216  |  |  | 
217  |  |     /* The context we are being created against */  | 
218  |  |     OSSL_LIB_CTX *ctx;  | 
219  |  |  | 
220  |  |     /* Array of quiescent points for synchronization */  | 
221  |  |     struct rcu_qp *qp_group;  | 
222  |  |  | 
223  |  |     /* rcu generation counter for in-order retirement */  | 
224  |  |     uint32_t id_ctr;  | 
225  |  |  | 
226  |  |     /* Number of elements in qp_group array */  | 
227  |  |     uint32_t group_count;  | 
228  |  |  | 
229  |  |     /* Index of the current qp in the qp_group array */  | 
230  |  |     uint32_t reader_idx;  | 
231  |  |  | 
232  |  |     /* value of the next id_ctr value to be retired */  | 
233  |  |     /* value of the next id_ctr to be retired */  | 
234  |  |  | 
235  |  |     /* index of the next free rcu_qp in the qp_group */  | 
236  |  |     uint32_t current_alloc_idx;  | 
237  |  |  | 
238  |  |     /* number of qps in the qp_group array currently being retired */  | 
239  |  |     uint32_t writers_alloced;  | 
240  |  |  | 
241  |  |     /* lock protecting write side operations */  | 
242  |  |     pthread_mutex_t write_lock;  | 
243  |  |  | 
244  |  |     /* lock protecting updates to writers_alloced/current_alloc_idx */  | 
245  |  |     pthread_mutex_t alloc_lock;  | 
246  |  |  | 
247  |  |     /* signal to wake threads waiting on alloc_lock */  | 
248  |  |     pthread_cond_t alloc_signal;  | 
249  |  |  | 
250  |  |     /* lock to enforce in-order retirement */  | 
251  |  |     pthread_mutex_t prior_lock;  | 
252  |  |  | 
253  |  |     /* signal to wake threads waiting on prior_lock */  | 
254  |  |     pthread_cond_t prior_signal;  | 
255  |  | };  | 
256  |  |  | 
257  |  | /* Read side acquisition of the current qp */  | 
258  |  | static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)  | 
259  | 0  | { | 
260  | 0  |     uint32_t qp_idx;  | 
261  |  |  | 
262  |  |     /* get the current qp index */  | 
263  | 0  |     for (;;) { | 
264  | 0  |         qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);  | 
265  |  |  | 
266  |  |         /*  | 
267  |  |          * Notes on use of __ATOMIC_ACQUIRE  | 
268  |  |          * We need to ensure the following:  | 
269  |  |          * 1) That subsequent operations aren't optimized by hoisting them above  | 
270  |  |          * this operation.  Specifically, we don't want the below re-load of  | 
271  |  |          * qp_idx to get optimized away  | 
272  |  |          * 2) We want to ensure that any updating of reader_idx on the write side  | 
273  |  |          * of the lock is flushed from a local cpu cache so that we see any  | 
274  |  |          * updates prior to the load.  This is a non-issue on cache coherent  | 
275  |  |          * systems like x86, but is relevant on other arches  | 
276  |  |          */  | 
277  | 0  |         ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,  | 
278  | 0  |                          __ATOMIC_ACQUIRE);  | 
279  |  |  | 
280  |  |         /* if the idx hasn't changed, we're good, else try again */  | 
281  | 0  |         if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,  | 
282  | 0  |                                     __ATOMIC_RELAXED))  | 
283  | 0  |             break;  | 
284  |  |  | 
285  | 0  |         ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,  | 
286  | 0  |                          __ATOMIC_RELAXED);  | 
287  | 0  |     }  | 
288  |  |  | 
289  | 0  |     return &lock->qp_group[qp_idx];  | 
290  | 0  | }  | 
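
To see why the second load and the retry are needed, consider this interleaving (an illustrative timeline, not code from the source):

    /*
     * Sketch of the race the retry loop guards against:
     *
     *   reader: qp_idx = reader_idx              (loads old index N)
     *   writer: reader_idx = N + 1               (update_qp publishes a new qp)
     *   writer: sees users[N] == 0, retires qp N (ossl_synchronize_rcu returns)
     *   reader: users[N]++                       (hold taken too late)
     *   reader: re-load reader_idx != N          -> users[N]--, retry on N + 1
     *
     * Without the re-check, the reader could enter its critical section holding
     * a qp whose grace period has already ended, and dereference data the
     * writer has freed.
     */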
291  |  |  | 
292  |  | static void ossl_rcu_free_local_data(void *arg)  | 
293  | 0  | { | 
294  | 0  |     OSSL_LIB_CTX *ctx = arg;  | 
295  | 0  |     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);  | 
296  | 0  |     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);  | 
297  |  |  | 
298  | 0  |     OPENSSL_free(data);  | 
299  | 0  |     CRYPTO_THREAD_set_local(lkey, NULL);  | 
300  | 0  | }  | 
301  |  |  | 
302  |  | void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)  | 
303  | 0  | { | 
304  | 0  |     struct rcu_thr_data *data;  | 
305  | 0  |     int i, available_qp = -1;  | 
306  | 0  |     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);  | 
307  |  |  | 
308  |  |     /*  | 
309  |  |      * we're going to access current_qp here so ask the  | 
310  |  |      * processor to fetch it  | 
311  |  |      */  | 
312  | 0  |     data = CRYPTO_THREAD_get_local(lkey);  | 
313  |  |  | 
314  | 0  |     if (data == NULL) { | 
315  | 0  |         data = OPENSSL_zalloc(sizeof(*data));  | 
316  | 0  |         OPENSSL_assert(data != NULL);  | 
317  | 0  |         CRYPTO_THREAD_set_local(lkey, data);  | 
318  | 0  |         ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);  | 
319  | 0  |     }  | 
320  |  |  | 
321  | 0  |     for (i = 0; i < MAX_QPS; i++) { | 
322  | 0  |         if (data->thread_qps[i].qp == NULL && available_qp == -1)  | 
323  | 0  |             available_qp = i;  | 
324  |  |         /* If we have a hold on this lock already, we're good */  | 
325  | 0  |         if (data->thread_qps[i].lock == lock) { | 
326  | 0  |             data->thread_qps[i].depth++;  | 
327  | 0  |             return;  | 
328  | 0  |         }  | 
329  | 0  |     }  | 
330  |  |  | 
331  |  |     /*  | 
332  |  |      * if we get here, then we don't have a hold on this lock yet  | 
333  |  |      */  | 
334  | 0  |     assert(available_qp != -1);  | 
335  |  |  | 
336  | 0  |     data->thread_qps[available_qp].qp = get_hold_current_qp(lock);  | 
337  | 0  |     data->thread_qps[available_qp].depth = 1;  | 
338  | 0  |     data->thread_qps[available_qp].lock = lock;  | 
339  | 0  | }  | 
340  |  |  | 
341  |  | void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)  | 
342  | 0  | { | 
343  | 0  |     int i;  | 
344  | 0  |     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);  | 
345  | 0  |     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);  | 
346  | 0  |     uint64_t ret;  | 
347  |  |  | 
348  | 0  |     assert(data != NULL);  | 
349  |  |  | 
350  | 0  |     for (i = 0; i < MAX_QPS; i++) { | 
351  | 0  |         if (data->thread_qps[i].lock == lock) { | 
352  |  |             /*  | 
353  |  |              * we have to use __ATOMIC_RELEASE here  | 
354  |  |              * to ensure that all preceding read instructions complete  | 
355  |  |              * before the decrement is visible to ossl_synchronize_rcu  | 
356  |  |              */  | 
357  | 0  |             data->thread_qps[i].depth--;  | 
358  | 0  |             if (data->thread_qps[i].depth == 0) { | 
359  | 0  |                 ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,  | 
360  | 0  |                                        (uint64_t)1, __ATOMIC_RELEASE);  | 
361  | 0  |                 OPENSSL_assert(ret != UINT64_MAX);  | 
362  | 0  |                 data->thread_qps[i].qp = NULL;  | 
363  | 0  |                 data->thread_qps[i].lock = NULL;  | 
364  | 0  |             }  | 
365  | 0  |             return;  | 
366  | 0  |         }  | 
367  | 0  |     }  | 
368  |  |     /*  | 
369  |  |      * If we get here, we're trying to unlock a lock that we never acquired -  | 
370  |  |      * that's fatal.  | 
371  |  |      */  | 
372  | 0  |     assert(0);  | 
373  | 0  | }  | 
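
Taken together, a reader uses the pair above in the following pattern (a hedged sketch of a caller; my_lock and shared_ptr are hypothetical names):

    /* Sketch: typical read-side critical section. */
    static void reader(CRYPTO_RCU_LOCK *my_lock, void **shared_ptr)
    {
        void *data;

        ossl_rcu_read_lock(my_lock);
        data = ossl_rcu_uptr_deref(shared_ptr); /* acquire-load, defined below */
        /* ... read through data; it stays valid until the unlock ... */
        ossl_rcu_read_unlock(my_lock);
    }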
374  |  |  | 
375  |  | /*  | 
376  |  |  * Write side allocation routine to get the current qp  | 
377  |  |  * and replace it with a new one  | 
378  |  |  */  | 
379  |  | static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)  | 
380  | 15  | { | 
381  | 15  |     uint32_t current_idx;  | 
382  |  |  | 
383  | 15  |     pthread_mutex_lock(&lock->alloc_lock);  | 
384  |  |  | 
385  |  |     /*  | 
386  |  |      * we need at least one qp to be available with one  | 
387  |  |      * left over, so that readers can start working on  | 
388  |  |      * one that isn't yet being waited on  | 
389  |  |      */  | 
390  | 15  |     while (lock->group_count - lock->writers_alloced < 2)  | 
391  |  |         /* we have to wait for one to be free */  | 
392  | 0  |         pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);  | 
393  |  |  | 
394  | 15  |     current_idx = lock->current_alloc_idx;  | 
395  |  |  | 
396  |  |     /* Allocate the qp */  | 
397  | 15  |     lock->writers_alloced++;  | 
398  |  |  | 
399  |  |     /* increment the allocation index */  | 
400  | 15  |     lock->current_alloc_idx =  | 
401  | 15  |         (lock->current_alloc_idx + 1) % lock->group_count;  | 
402  |  |  | 
403  | 15  |     *curr_id = lock->id_ctr;  | 
404  | 15  |     lock->id_ctr++;  | 
405  |  |  | 
406  | 15  |     ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,  | 
407  | 15  |                    __ATOMIC_RELAXED);  | 
408  |  |  | 
409  |  |     /*  | 
410  |  |      * this should make sure that the new value of reader_idx is visible in  | 
411  |  |      * get_hold_current_qp, directly after incrementing the users count  | 
412  |  |      */  | 
413  | 15  |     ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,  | 
414  | 15  |                      __ATOMIC_RELEASE);  | 
415  |  |  | 
416  |  |     /* wake up any waiters */  | 
417  | 15  |     pthread_cond_signal(&lock->alloc_signal);  | 
418  | 15  |     pthread_mutex_unlock(&lock->alloc_lock);  | 
419  | 15  |     return &lock->qp_group[current_idx];  | 
420  | 15  | }  | 
421  |  |  | 
422  |  | static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)  | 
423  | 15  | { | 
424  | 15  |     pthread_mutex_lock(&lock->alloc_lock);  | 
425  | 15  |     lock->writers_alloced--;  | 
426  | 15  |     pthread_cond_signal(&lock->alloc_signal);  | 
427  | 15  |     pthread_mutex_unlock(&lock->alloc_lock);  | 
428  | 15  | }  | 
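
Concretely: ossl_rcu_lock_new below clamps num_writers to at least 2, so with the minimum group_count of 2 the wait condition group_count - writers_alloced < 2 in update_qp admits only one qp pending retirement at a time; a second concurrent ossl_synchronize_rcu blocks on alloc_signal until retire_qp signals that the first qp has drained.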
429  |  |  | 
430  |  | static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,  | 
431  |  |                                             uint32_t count)  | 
432  | 12  | { | 
433  | 12  |     struct rcu_qp *new =  | 
434  | 12  |         OPENSSL_zalloc(sizeof(*new) * count);  | 
435  |  |  | 
436  | 12  |     lock->group_count = count;  | 
437  | 12  |     return new;  | 
438  | 12  | }  | 
439  |  |  | 
440  |  | void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)  | 
441  | 9  | { | 
442  | 9  |     pthread_mutex_lock(&lock->write_lock);  | 
443  | 9  |     TSAN_FAKE_UNLOCK(&lock->write_lock);  | 
444  | 9  | }  | 
445  |  |  | 
446  |  | void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)  | 
447  | 9  | { | 
448  | 9  |     TSAN_FAKE_LOCK(&lock->write_lock);  | 
449  | 9  |     pthread_mutex_unlock(&lock->write_lock);  | 
450  | 9  | }  | 
451  |  |  | 
452  |  | void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)  | 
453  | 15  | { | 
454  | 15  |     struct rcu_qp *qp;  | 
455  | 15  |     uint64_t count;  | 
456  | 15  |     uint32_t curr_id;  | 
457  | 15  |     struct rcu_cb_item *cb_items, *tmpcb;  | 
458  |  |  | 
459  | 15  |     pthread_mutex_lock(&lock->write_lock);  | 
460  | 15  |     cb_items = lock->cb_items;  | 
461  | 15  |     lock->cb_items = NULL;  | 
462  | 15  |     pthread_mutex_unlock(&lock->write_lock);  | 
463  |  |  | 
464  | 15  |     qp = update_qp(lock, &curr_id);  | 
465  |  |  | 
466  |  |     /* retire in order */  | 
467  | 15  |     pthread_mutex_lock(&lock->prior_lock);  | 
468  | 15  |     while (lock->next_to_retire != curr_id)  | 
469  | 0  |         pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);  | 
470  |  |  | 
471  |  |     /*  | 
472  |  |      * Wait for the reader count to reach zero.  | 
473  |  |      * Note the use of __ATOMIC_ACQUIRE here to ensure that any  | 
474  |  |      * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock  | 
475  |  |      * is visible prior to our read.  | 
476  |  |      * However, this is likely just necessary to silence a tsan warning,  | 
477  |  |      * because the read side should not do any write operation  | 
478  |  |      * outside the atomic itself  | 
479  |  |      */  | 
480  | 15  |     do { | 
481  | 15  |         count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);  | 
482  | 15  |     } while (count != (uint64_t)0);  | 
483  |  |  | 
484  | 15  |     lock->next_to_retire++;  | 
485  | 15  |     pthread_cond_broadcast(&lock->prior_signal);  | 
486  | 15  |     pthread_mutex_unlock(&lock->prior_lock);  | 
487  |  |  | 
488  | 15  |     retire_qp(lock, qp);  | 
489  |  |  | 
490  |  |     /* handle any callbacks that we have */  | 
491  | 18  |     while (cb_items != NULL) { | 
492  | 3  |         tmpcb = cb_items;  | 
493  | 3  |         cb_items = cb_items->next;  | 
494  | 3  |         tmpcb->fn(tmpcb->data);  | 
495  | 3  |         OPENSSL_free(tmpcb);  | 
496  | 3  |     }  | 
497  | 15  | }  | 
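
The corresponding write side, sketched with the same hypothetical names (the old value may only be freed once ossl_synchronize_rcu has drained every reader that could still see it):

    /* Sketch: publish a new value, then reclaim the old one synchronously. */
    static void writer(CRYPTO_RCU_LOCK *my_lock, void **shared_ptr,
                       void *new_data)
    {
        void *old;

        ossl_rcu_write_lock(my_lock);
        old = ossl_rcu_uptr_deref(shared_ptr);
        ossl_rcu_assign_uptr(shared_ptr, &new_data); /* release-store, below */
        ossl_rcu_write_unlock(my_lock);

        ossl_synchronize_rcu(my_lock); /* wait out readers of the old value */
        OPENSSL_free(old);
    }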
498  |  |  | 
499  |  | /*  | 
500  |  |  * Note: This call assumes its made under the protection of  | 
501  |  |  * ossl_rcu_write_lock  | 
502  |  |  */  | 
503  |  | int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)  | 
504  | 3  | { | 
505  | 3  |     struct rcu_cb_item *new =  | 
506  | 3  |         OPENSSL_zalloc(sizeof(*new));  | 
507  |  |  | 
508  | 3  |     if (new == NULL)  | 
509  | 0  |         return 0;  | 
510  |  |  | 
511  | 3  |     new->data = data;  | 
512  | 3  |     new->fn = cb;  | 
513  |  |  | 
514  | 3  |     new->next = lock->cb_items;  | 
515  | 3  |     lock->cb_items = new;  | 
516  |  |  | 
517  | 3  |     return 1;  | 
518  | 3  | }  | 
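
As an alternative to blocking in ossl_synchronize_rcu directly, a writer already holding the write lock can queue the reclamation (sketch; free_cb and writer_deferred are hypothetical names):

    /* Sketch: deferred reclamation via the callback list. */
    static void free_cb(void *data)
    {
        OPENSSL_free(data);
    }

    static int writer_deferred(CRYPTO_RCU_LOCK *my_lock, void **shared_ptr,
                               void *new_data)
    {
        void *old;
        int ok;

        ossl_rcu_write_lock(my_lock);
        old = ossl_rcu_uptr_deref(shared_ptr);
        ossl_rcu_assign_uptr(shared_ptr, &new_data);
        ok = ossl_rcu_call(my_lock, free_cb, old); /* runs at next synchronize */
        ossl_rcu_write_unlock(my_lock);
        return ok;
    }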
519  |  |  | 
520  |  | void *ossl_rcu_uptr_deref(void **p)  | 
521  | 2.29M  | { | 
522  | 2.29M  |     return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);  | 
523  | 2.29M  | }  | 
524  |  |  | 
525  |  | void ossl_rcu_assign_uptr(void **p, void **v)  | 
526  | 1.30k  | { | 
527  | 1.30k  |     ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);  | 
528  | 1.30k  | }  | 
529  |  |  | 
530  |  | CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)  | 
531  | 12  | { | 
532  | 12  |     struct rcu_lock_st *new;  | 
533  |  |  | 
534  |  |     /*  | 
535  |  |  * We need a minimum of 2 qps  | 
536  |  |      */  | 
537  | 12  |     if (num_writers < 2)  | 
538  | 12  |         num_writers = 2;  | 
539  |  |  | 
540  | 12  |     ctx = ossl_lib_ctx_get_concrete(ctx);  | 
541  | 12  |     if (ctx == NULL)  | 
542  | 0  |         return 0;  | 
543  |  |  | 
544  | 12  |     new = OPENSSL_zalloc(sizeof(*new));  | 
545  | 12  |     if (new == NULL)  | 
546  | 0  |         return NULL;  | 
547  |  |  | 
548  | 12  |     new->ctx = ctx;  | 
549  | 12  |     pthread_mutex_init(&new->write_lock, NULL);  | 
550  | 12  |     pthread_mutex_init(&new->prior_lock, NULL);  | 
551  | 12  |     pthread_mutex_init(&new->alloc_lock, NULL);  | 
552  | 12  |     pthread_cond_init(&new->prior_signal, NULL);  | 
553  | 12  |     pthread_cond_init(&new->alloc_signal, NULL);  | 
554  |  |  | 
555  | 12  |     new->qp_group = allocate_new_qp_group(new, num_writers);  | 
556  | 12  |     if (new->qp_group == NULL) { | 
557  | 0  |         OPENSSL_free(new);  | 
558  | 0  |         new = NULL;  | 
559  | 0  |     }  | 
560  |  |  | 
561  | 12  |     return new;  | 
562  | 12  | }  | 
563  |  |  | 
564  |  | void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)  | 
565  | 6  | { | 
566  | 6  |     struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;  | 
567  |  |  | 
568  | 6  |     if (lock == NULL)  | 
569  | 0  |         return;  | 
570  |  |  | 
571  |  |     /* make sure we're synchronized */  | 
572  | 6  |     ossl_synchronize_rcu(rlock);  | 
573  |  |  | 
574  | 6  |     OPENSSL_free(rlock->qp_group);  | 
575  |  |     /* There should only be a single qp left now */  | 
576  | 6  |     OPENSSL_free(rlock);  | 
577  | 6  | }  | 
578  |  |  | 
579  |  | CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)  | 
580  | 228  | { | 
581  | 228  | # ifdef USE_RWLOCK  | 
582  | 228  |     CRYPTO_RWLOCK *lock;  | 
583  |  |  | 
584  | 228  |     if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)  | 
585  |  |         /* Don't set error, to avoid recursion blowup. */  | 
586  | 0  |         return NULL;  | 
587  |  |  | 
588  | 228  |     if (pthread_rwlock_init(lock, NULL) != 0) { | 
589  | 0  |         OPENSSL_free(lock);  | 
590  | 0  |         return NULL;  | 
591  | 0  |     }  | 
592  |  | # else  | 
593  |  |     pthread_mutexattr_t attr;  | 
594  |  |     CRYPTO_RWLOCK *lock;  | 
595  |  |  | 
596  |  |     if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)  | 
597  |  |         /* Don't set error, to avoid recursion blowup. */  | 
598  |  |         return NULL;  | 
599  |  |  | 
600  |  |     /*  | 
601  |  |      * We don't use recursive mutexes, but try to catch errors if we do.  | 
602  |  |      */  | 
603  |  |     pthread_mutexattr_init(&attr);  | 
604  |  | #  if !defined (__TANDEM) && !defined (_SPT_MODEL_)  | 
605  |  | #   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)  | 
606  |  |     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);  | 
607  |  | #   endif  | 
608  |  | #  else  | 
609  |  |     /* The SPT Thread Library does not define MUTEX attributes. */  | 
610  |  | #  endif  | 
611  |  |  | 
612  |  |     if (pthread_mutex_init(lock, &attr) != 0) { | 
613  |  |         pthread_mutexattr_destroy(&attr);  | 
614  |  |         OPENSSL_free(lock);  | 
615  |  |         return NULL;  | 
616  |  |     }  | 
617  |  |  | 
618  |  |     pthread_mutexattr_destroy(&attr);  | 
619  |  | # endif  | 
620  |  |  | 
621  | 228  |     return lock;  | 
622  | 228  | }  | 
623  |  |  | 
624  |  | __owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)  | 
625  | 1.14M  | { | 
626  | 1.14M  | # ifdef USE_RWLOCK  | 
627  | 1.14M  |     if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))  | 
628  | 0  |         return 0;  | 
629  |  | # else  | 
630  |  |     if (pthread_mutex_lock(lock) != 0) { | 
631  |  |         assert(errno != EDEADLK && errno != EBUSY);  | 
632  |  |         return 0;  | 
633  |  |     }  | 
634  |  | # endif  | 
635  |  |  | 
636  | 1.14M  |     return 1;  | 
637  | 1.14M  | }  | 
638  |  |  | 
639  |  | __owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)  | 
640  | 7.56k  | { | 
641  | 7.56k  | # ifdef USE_RWLOCK  | 
642  | 7.56k  |     if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))  | 
643  | 0  |         return 0;  | 
644  |  | # else  | 
645  |  |     if (pthread_mutex_lock(lock) != 0) { | 
646  |  |         assert(errno != EDEADLK && errno != EBUSY);  | 
647  |  |         return 0;  | 
648  |  |     }  | 
649  |  | # endif  | 
650  |  |  | 
651  | 7.56k  |     return 1;  | 
652  | 7.56k  | }  | 
653  |  |  | 
654  |  | int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)  | 
655  | 1.15M  | { | 
656  | 1.15M  | # ifdef USE_RWLOCK  | 
657  | 1.15M  |     if (pthread_rwlock_unlock(lock) != 0)  | 
658  | 0  |         return 0;  | 
659  |  | # else  | 
660  |  |     if (pthread_mutex_unlock(lock) != 0) { | 
661  |  |         assert(errno != EPERM);  | 
662  |  |         return 0;  | 
663  |  |     }  | 
664  |  | # endif  | 
665  |  |  | 
666  | 1.15M  |     return 1;  | 
667  | 1.15M  | }  | 
668  |  |  | 
669  |  | void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)  | 
670  | 96  | { | 
671  | 96  |     if (lock == NULL)  | 
672  | 21  |         return;  | 
673  |  |  | 
674  | 75  | # ifdef USE_RWLOCK  | 
675  | 75  |     pthread_rwlock_destroy(lock);  | 
676  |  | # else  | 
677  |  |     pthread_mutex_destroy(lock);  | 
678  |  | # endif  | 
679  | 75  |     OPENSSL_free(lock);  | 
680  |  |  | 
681  | 75  |     return;  | 
682  | 96  | }  | 
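
A minimal sketch of the rwlock API above (read_shared, shared, and out are hypothetical names):

    /* Sketch: guarded read of a shared value. */
    static int read_shared(CRYPTO_RWLOCK *lock, int *shared, int *out)
    {
        if (!CRYPTO_THREAD_read_lock(lock))
            return 0;
        *out = *shared;
        return CRYPTO_THREAD_unlock(lock);
    }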
683  |  |  | 
684  |  | int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))  | 
685  | 13.2k  | { | 
686  | 13.2k  |     if (pthread_once(once, init) != 0)  | 
687  | 0  |         return 0;  | 
688  |  |  | 
689  | 13.2k  |     return 1;  | 
690  | 13.2k  | }  | 
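
Typical use, sketched (do_init and ensure_init are hypothetical; CRYPTO_ONCE_STATIC_INIT is the standard static initializer from <openssl/crypto.h>):

    /* Sketch: run one-time setup exactly once across threads. */
    static CRYPTO_ONCE once = CRYPTO_ONCE_STATIC_INIT;

    static void do_init(void)
    {
        /* one-time setup goes here */
    }

    static int ensure_init(void)
    {
        return CRYPTO_THREAD_run_once(&once, do_init);
    }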
691  |  |  | 
692  |  | int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))  | 
693  | 39  | { | 
694  | 39  |     if (pthread_key_create(key, cleanup) != 0)  | 
695  | 0  |         return 0;  | 
696  |  |  | 
697  | 39  |     return 1;  | 
698  | 39  | }  | 
699  |  |  | 
700  |  | void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)  | 
701  | 5.92k  | { | 
702  | 5.92k  |     return pthread_getspecific(*key);  | 
703  | 5.92k  | }  | 
704  |  |  | 
705  |  | int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)  | 
706  | 24  | { | 
707  | 24  |     if (pthread_setspecific(*key, val) != 0)  | 
708  | 0  |         return 0;  | 
709  |  |  | 
710  | 24  |     return 1;  | 
711  | 24  | }  | 
712  |  |  | 
713  |  | int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)  | 
714  | 21  | { | 
715  | 21  |     if (pthread_key_delete(*key) != 0)  | 
716  | 0  |         return 0;  | 
717  |  |  | 
718  | 21  |     return 1;  | 
719  | 21  | }  | 
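
Sketched together, the thread-local helpers support a lazily allocated per-thread buffer (all names hypothetical; the cleanup wrapper is needed because OPENSSL_free is a macro, not a function pointer):

    /* Sketch: lazily allocated per-thread scratch buffer. */
    static CRYPTO_THREAD_LOCAL scratch_key;

    static void scratch_cleanup(void *p)
    {
        OPENSSL_free(p);
    }

    static int scratch_init(void)
    {
        return CRYPTO_THREAD_init_local(&scratch_key, scratch_cleanup);
    }

    static void *scratch_get(void)
    {
        void *buf = CRYPTO_THREAD_get_local(&scratch_key);

        if (buf == NULL) {
            buf = OPENSSL_zalloc(64);
            if (buf != NULL && !CRYPTO_THREAD_set_local(&scratch_key, buf)) {
                OPENSSL_free(buf);
                buf = NULL;
            }
        }
        return buf;
    }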
720  |  |  | 
721  |  | CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)  | 
722  | 0  | { | 
723  | 0  |     return pthread_self();  | 
724  | 0  | }  | 
725  |  |  | 
726  |  | int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)  | 
727  | 0  | { | 
728  | 0  |     return pthread_equal(a, b);  | 
729  | 0  | }  | 
730  |  |  | 
731  |  | int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)  | 
732  | 3.92k  | { | 
733  | 3.92k  | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)  | 
734  | 3.92k  |     if (__atomic_is_lock_free(sizeof(*val), val)) { | 
735  | 3.92k  |         *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);  | 
736  | 3.92k  |         return 1;  | 
737  | 3.92k  |     }  | 
738  |  | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))  | 
739  |  |     /* This will work for all future Solaris versions. */  | 
740  |  |     if (ret != NULL) { | 
741  |  |         *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);  | 
742  |  |         return 1;  | 
743  |  |     }  | 
744  |  | # endif  | 
745  | 0  |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))  | 
746  | 0  |         return 0;  | 
747  |  |  | 
748  | 0  |     *val += amount;  | 
749  | 0  |     *ret  = *val;  | 
750  |  |  | 
751  | 0  |     if (!CRYPTO_THREAD_unlock(lock))  | 
752  | 0  |         return 0;  | 
753  |  |  | 
754  | 0  |     return 1;  | 
755  | 0  | }  | 
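
A common caller pattern is reference counting; the lock argument is only consulted when the platform lacks lock-free atomics (sketch with hypothetical names):

    /* Sketch: reference counting on top of CRYPTO_atomic_add. */
    static int ref_down(int *refcount, CRYPTO_RWLOCK *lock, int *is_last)
    {
        int ref;

        if (!CRYPTO_atomic_add(refcount, -1, &ref, lock))
            return 0;
        *is_last = (ref == 0);
        return 1;
    }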
756  |  |  | 
757  |  | int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,  | 
758  |  |                         CRYPTO_RWLOCK *lock)  | 
759  | 0  | { | 
760  | 0  | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)  | 
761  | 0  |     if (__atomic_is_lock_free(sizeof(*val), val)) { | 
762  | 0  |         *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);  | 
763  | 0  |         return 1;  | 
764  | 0  |     }  | 
765  |  | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))  | 
766  |  |     /* This will work for all future Solaris versions. */  | 
767  |  |     if (ret != NULL) { | 
768  |  |         *ret = atomic_add_64_nv(val, op);  | 
769  |  |         return 1;  | 
770  |  |     }  | 
771  |  | # endif  | 
772  | 0  |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))  | 
773  | 0  |         return 0;  | 
774  | 0  |     *val += op;  | 
775  | 0  |     *ret  = *val;  | 
776  |  |  | 
777  | 0  |     if (!CRYPTO_THREAD_unlock(lock))  | 
778  | 0  |         return 0;  | 
779  |  |  | 
780  | 0  |     return 1;  | 
781  | 0  | }  | 
782  |  |  | 
783  |  | int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,  | 
784  |  |                       CRYPTO_RWLOCK *lock)  | 
785  | 0  | { | 
786  | 0  | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)  | 
787  | 0  |     if (__atomic_is_lock_free(sizeof(*val), val)) { | 
788  | 0  |         *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);  | 
789  | 0  |         return 1;  | 
790  | 0  |     }  | 
791  |  | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))  | 
792  |  |     /* This will work for all future Solaris versions. */  | 
793  |  |     if (ret != NULL) { | 
794  |  |         *ret = atomic_and_64_nv(val, op);  | 
795  |  |         return 1;  | 
796  |  |     }  | 
797  |  | # endif  | 
798  | 0  |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))  | 
799  | 0  |         return 0;  | 
800  | 0  |     *val &= op;  | 
801  | 0  |     *ret  = *val;  | 
802  |  |  | 
803  | 0  |     if (!CRYPTO_THREAD_unlock(lock))  | 
804  | 0  |         return 0;  | 
805  |  |  | 
806  | 0  |     return 1;  | 
807  | 0  | }  | 
808  |  |  | 
809  |  | int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,  | 
810  |  |                      CRYPTO_RWLOCK *lock)  | 
811  | 9  | { | 
812  | 9  | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)  | 
813  | 9  |     if (__atomic_is_lock_free(sizeof(*val), val)) { | 
814  | 9  |         *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);  | 
815  | 9  |         return 1;  | 
816  | 9  |     }  | 
817  |  | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))  | 
818  |  |     /* This will work for all future Solaris versions. */  | 
819  |  |     if (ret != NULL) { | 
820  |  |         *ret = atomic_or_64_nv(val, op);  | 
821  |  |         return 1;  | 
822  |  |     }  | 
823  |  | # endif  | 
824  | 0  |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))  | 
825  | 0  |         return 0;  | 
826  | 0  |     *val |= op;  | 
827  | 0  |     *ret  = *val;  | 
828  |  |  | 
829  | 0  |     if (!CRYPTO_THREAD_unlock(lock))  | 
830  | 0  |         return 0;  | 
831  |  |  | 
832  | 0  |     return 1;  | 
833  | 0  | }  | 
834  |  |  | 
835  |  | int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)  | 
836  | 1.18M  | { | 
837  | 1.18M  | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)  | 
838  | 1.18M  |     if (__atomic_is_lock_free(sizeof(*val), val)) { | 
839  | 1.18M  |         __atomic_load(val, ret, __ATOMIC_ACQUIRE);  | 
840  | 1.18M  |         return 1;  | 
841  | 1.18M  |     }  | 
842  |  | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))  | 
843  |  |     /* This will work for all future Solaris versions. */  | 
844  |  |     if (ret != NULL) { | 
845  |  |         *ret = atomic_or_64_nv(val, 0);  | 
846  |  |         return 1;  | 
847  |  |     }  | 
848  |  | # endif  | 
849  | 0  |     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))  | 
850  | 0  |         return 0;  | 
851  | 0  |     *ret  = *val;  | 
852  | 0  |     if (!CRYPTO_THREAD_unlock(lock))  | 
853  | 0  |         return 0;  | 
854  |  |  | 
855  | 0  |     return 1;  | 
856  | 0  | }  | 
857  |  |  | 
858  |  | int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)  | 
859  | 1.29k  | { | 
860  | 1.29k  | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)  | 
861  | 1.29k  |     if (__atomic_is_lock_free(sizeof(*dst), dst)) { | 
862  | 1.29k  |         __atomic_store(dst, &val, __ATOMIC_RELEASE);  | 
863  | 1.29k  |         return 1;  | 
864  | 1.29k  |     }  | 
865  |  | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))  | 
866  |  |     /* This will work for all future Solaris versions. */  | 
867  |  |     if (dst != NULL) { | 
868  |  |         atomic_swap_64(dst, val);  | 
869  |  |         return 1;  | 
870  |  |     }  | 
871  |  | # endif  | 
872  | 0  |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))  | 
873  | 0  |         return 0;  | 
874  | 0  |     *dst  = val;  | 
875  | 0  |     if (!CRYPTO_THREAD_unlock(lock))  | 
876  | 0  |         return 0;  | 
877  |  |  | 
878  | 0  |     return 1;  | 
879  | 0  | }  | 
880  |  |  | 
881  |  | int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)  | 
882  | 0  | { | 
883  | 0  | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)  | 
884  | 0  |     if (__atomic_is_lock_free(sizeof(*val), val)) { | 
885  | 0  |         __atomic_load(val, ret, __ATOMIC_ACQUIRE);  | 
886  | 0  |         return 1;  | 
887  | 0  |     }  | 
888  |  | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))  | 
889  |  |     /* This will work for all future Solaris versions. */  | 
890  |  |     if (ret != NULL) { | 
891  |  |         *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);  | 
892  |  |         return 1;  | 
893  |  |     }  | 
894  |  | # endif  | 
895  | 0  |     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))  | 
896  | 0  |         return 0;  | 
897  | 0  |     *ret  = *val;  | 
898  | 0  |     if (!CRYPTO_THREAD_unlock(lock))  | 
899  | 0  |         return 0;  | 
900  |  |  | 
901  | 0  |     return 1;  | 
902  | 0  | }  | 
903  |  |  | 
904  |  | # ifndef FIPS_MODULE  | 
905  |  | int openssl_init_fork_handlers(void)  | 
906  | 0  | { | 
907  | 0  |     return 1;  | 
908  | 0  | }  | 
909  |  | # endif /* FIPS_MODULE */  | 
910  |  |  | 
911  |  | int openssl_get_fork_id(void)  | 
912  | 0  | { | 
913  | 0  |     return getpid();  | 
914  | 0  | }  | 
915  |  | #endif  |