Coverage Report

Created: 2026-04-01 06:39

/src/openssl33/crypto/threads_pthread.c
Line
Count
Source
1
/*
2
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License 2.0 (the "License").  You may not use
5
 * this file except in compliance with the License.  You can obtain a copy
6
 * in the file LICENSE in the source distribution or at
7
 * https://www.openssl.org/source/license.html
8
 */
9
10
/* We need to use the OPENSSL_fork_*() deprecated APIs */
11
#define OPENSSL_SUPPRESS_DEPRECATED
12
13
#include <openssl/crypto.h>
14
#include <crypto/cryptlib.h>
15
#include "internal/cryptlib.h"
16
#include "internal/rcu.h"
17
#include "rcu_internal.h"
18
19
#if defined(__sun)
20
#include <atomic.h>
21
#endif
22
23
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
24
/*
25
 * OS X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
26
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free(),
27
 * rather than two; its signature is __atomic_is_lock_free(sizeof(_Atomic(T))).
28
 * All of this makes it impossible to use __atomic_is_lock_free here.
29
 *
30
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
31
 */
32
#define BROKEN_CLANG_ATOMICS
33
#endif
34
35
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
36
37
#if defined(OPENSSL_SYS_UNIX)
38
#include <sys/types.h>
39
#include <unistd.h>
40
#endif
41
42
#include <assert.h>
43
44
/*
45
 * The Non-Stop KLT thread model currently seems broken in its rwlock
46
 * implementation.
47
 * Likewise, there is a problem with the glibc implementation on riscv.
48
 */
49
#if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \
50
    && !defined(__riscv)
51
#define USE_RWLOCK
52
#endif
53
54
/*
55
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
56
 * other compilers.
57
 *
58
 * Unfortunately, we can't do that with some "generic type", because there's no
59
 * guarantee that the chosen generic type is large enough to cover all cases.
60
 * Therefore, we implement fallbacks for each applicable type, with composed
61
 * names that include the type they handle.
62
 *
63
 * (an anecdote: we previously tried to use |void *| as the generic type, with
64
 * the thought that the pointer itself is the largest type.  However, this is
65
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
66
 *
67
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
68
 * they can map to the correct fallback function.  In the GNU/clang case, that
69
 * parameter is simply ignored.
70
 */
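
To make the name composition concrete: a call such as ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_ACQUIRE) resolves differently on the two sides of the #if below. This is an editorial illustration, not part of the source:

    /* GNU/clang branch: the type parameter is ignored */
    __atomic_load_n(&lock->reader_idx, __ATOMIC_ACQUIRE);

    /* fallback branch: the type composes the function name */
    fallback_atomic_load_n_uint32_t(&lock->reader_idx);
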
71
72
/*
73
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
74
 * fallback function names.
75
 */
76
typedef void *pvoid;
77
78
#if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
79
    && !defined(USE_ATOMIC_FALLBACKS)
80
79.6M
#define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
81
933
#define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
82
39.9k
#define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
83
1.01k
#define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
84
80
#define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
85
#else
86
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
87
88
#define IMPL_fallback_atomic_load_n(t)                    \
89
    static ossl_inline t fallback_atomic_load_n_##t(t *p) \
90
    {                                                     \
91
        t ret;                                            \
92
                                                          \
93
        pthread_mutex_lock(&atomic_sim_lock);             \
94
        ret = *p;                                         \
95
        pthread_mutex_unlock(&atomic_sim_lock);           \
96
        return ret;                                       \
97
    }
98
IMPL_fallback_atomic_load_n(uint32_t)
99
IMPL_fallback_atomic_load_n(uint64_t)
100
IMPL_fallback_atomic_load_n(pvoid)
101
102
#define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
103
104
#define IMPL_fallback_atomic_store_n(t)                         \
105
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v) \
106
    {                                                           \
107
        t ret;                                                  \
108
                                                                \
109
        pthread_mutex_lock(&atomic_sim_lock);                   \
110
        ret = *p;                                               \
111
        *p = v;                                                 \
112
        pthread_mutex_unlock(&atomic_sim_lock);                 \
113
        return ret;                                             \
114
    }
115
IMPL_fallback_atomic_store_n(uint32_t)
116
117
#define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
118
119
#define IMPL_fallback_atomic_store(t)                             \
120
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \
121
    {                                                             \
122
        pthread_mutex_lock(&atomic_sim_lock);                     \
123
        *p = *v;                                                  \
124
        pthread_mutex_unlock(&atomic_sim_lock);                   \
125
    }
126
IMPL_fallback_atomic_store(pvoid)
127
128
#define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
129
130
/*
131
 * The fallbacks that follow don't need any per-type implementation, as
132
 * they are designed for uint64_t only.  If there comes a time when multiple
133
 * types need to be covered, it's relatively easy to refactor them the same
134
 * way as the fallbacks above.
135
 */
136
137
static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
138
{
139
    uint64_t ret;
140
141
    pthread_mutex_lock(&atomic_sim_lock);
142
    *p += v;
143
    ret = *p;
144
    pthread_mutex_unlock(&atomic_sim_lock);
145
    return ret;
146
}
147
148
#define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
149
150
static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
151
{
152
    uint64_t ret;
153
154
    pthread_mutex_lock(&atomic_sim_lock);
155
    *p -= v;
156
    ret = *p;
157
    pthread_mutex_unlock(&atomic_sim_lock);
158
    return ret;
159
}
160
161
#define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
162
#endif
163
164
/*
165
 * This is the core of an rcu lock. It tracks the readers and writers for the
166
 * current quiescence point for a given lock.  users is the 64-bit value that
167
 * stores the READERS/ID as defined above.
168
 *
169
 */
170
struct rcu_qp {
171
    uint64_t users;
172
};
173
174
struct thread_qp {
175
    struct rcu_qp *qp;
176
    unsigned int depth;
177
    CRYPTO_RCU_LOCK *lock;
178
};
179
180
509
#define MAX_QPS 10
181
/*
182
 * This is the per-thread tracking data
183
 * that is assigned to each thread participating
184
 * in an rcu qp
185
 *
186
 * qp points to the qp that it last acquired
187
 *
188
 */
189
struct rcu_thr_data {
190
    struct thread_qp thread_qps[MAX_QPS];
191
};
192
193
/*
194
 * This is the internal version of a CRYPTO_RCU_LOCK;
195
 * it is cast from CRYPTO_RCU_LOCK.
196
 */
197
struct rcu_lock_st {
198
    /* Callbacks to call for next ossl_synchronize_rcu */
199
    struct rcu_cb_item *cb_items;
200
201
    /* The context we are being created against */
202
    OSSL_LIB_CTX *ctx;
203
204
    /* Array of quiescent points for synchronization */
205
    struct rcu_qp *qp_group;
206
207
    /* rcu generation counter for in-order retirement */
208
    uint32_t id_ctr;
209
210
    /* Number of elements in qp_group array */
211
    uint32_t group_count;
212
213
    /* Index of the current qp in the qp_group array */
214
    uint32_t reader_idx;
215
216
    /* value of the next id_ctr value to be retired */
217
    uint32_t next_to_retire;
218
219
    /* index of the next free rcu_qp in the qp_group */
220
    uint32_t current_alloc_idx;
221
222
    /* number of qp's in qp_group array currently being retired */
223
    uint32_t writers_alloced;
224
225
    /* lock protecting write side operations */
226
    pthread_mutex_t write_lock;
227
228
    /* lock protecting updates to writers_alloced/current_alloc_idx */
229
    pthread_mutex_t alloc_lock;
230
231
    /* signal to wake threads waiting on alloc_lock */
232
    pthread_cond_t alloc_signal;
233
234
    /* lock to enforce in-order retirement */
235
    pthread_mutex_t prior_lock;
236
237
    /* signal to wake threads waiting on prior_lock */
238
    pthread_cond_t prior_signal;
239
};
240
241
/* Read side acquisition of the current qp */
242
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
243
80
{
244
80
    uint32_t qp_idx;
245
246
    /* get the current qp index */
247
80
    for (;;) {
248
80
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
249
250
        /*
251
         * Notes on use of __ATOMIC_ACQUIRE
252
         * We need to ensure the following:
253
         * 1) That subsequent operations aren't optimized by hoisting them above
254
         * this operation.  Specifically, we don't want the below re-load of
255
         * qp_idx to get optimized away
256
         * 2) We want to ensure that any updating of reader_idx on the write side
257
         * of the lock is flushed from a local cpu cache so that we see any
258
         * updates prior to the load.  This is a non-issue on cache-coherent
259
         * systems like x86, but is relevant on other arches.
260
         */
261
80
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
262
80
            __ATOMIC_ACQUIRE);
263
264
        /* if the idx hasn't changed, we're good, else try again */
265
80
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_ACQUIRE))
266
80
            break;
267
268
0
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
269
0
            __ATOMIC_RELAXED);
270
0
    }
271
272
80
    return &lock->qp_group[qp_idx];
273
80
}
274
275
static void ossl_rcu_free_local_data(void *arg)
276
3
{
277
3
    OSSL_LIB_CTX *ctx = arg;
278
3
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
279
3
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
280
281
3
    OPENSSL_free(data);
282
3
    CRYPTO_THREAD_set_local(lkey, NULL);
283
3
}
284
285
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
286
39
{
287
39
    struct rcu_thr_data *data;
288
39
    int i, available_qp = -1;
289
39
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
290
291
    /*
292
     * we're going to access current_qp here, so ask the
293
     * processor to fetch it
294
     */
295
39
    data = CRYPTO_THREAD_get_local(lkey);
296
297
39
    if (data == NULL) {
298
2
        data = OPENSSL_zalloc(sizeof(*data));
299
2
        OPENSSL_assert(data != NULL);
300
2
        CRYPTO_THREAD_set_local(lkey, data);
301
2
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
302
2
    }
303
304
429
    for (i = 0; i < MAX_QPS; i++) {
305
390
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
306
39
            available_qp = i;
307
        /* If we have a hold on this lock already, we're good */
308
390
        if (data->thread_qps[i].lock == lock) {
309
0
            data->thread_qps[i].depth++;
310
0
            return;
311
0
        }
312
390
    }
313
314
    /*
315
     * if we get here, then we don't have a hold on this lock yet
316
     */
317
39
    assert(available_qp != -1);
318
319
39
    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
320
39
    data->thread_qps[available_qp].depth = 1;
321
39
    data->thread_qps[available_qp].lock = lock;
322
39
}
323
324
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
325
80
{
326
80
    int i;
327
80
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
328
80
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
329
80
    uint64_t ret;
330
331
80
    assert(data != NULL);
332
333
80
    for (i = 0; i < MAX_QPS; i++) {
334
80
        if (data->thread_qps[i].lock == lock) {
335
            /*
336
             * we have to use __ATOMIC_RELEASE here
337
             * to ensure that all preceding read instructions complete
338
             * before the decrement is visible to ossl_synchronize_rcu
339
             */
340
80
            data->thread_qps[i].depth--;
341
80
            if (data->thread_qps[i].depth == 0) {
342
80
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
343
80
                    (uint64_t)1, __ATOMIC_RELEASE);
344
80
                OPENSSL_assert(ret != UINT64_MAX);
345
80
                data->thread_qps[i].qp = NULL;
346
80
                data->thread_qps[i].lock = NULL;
347
80
            }
348
80
            return;
349
80
        }
350
80
    }
351
    /*
352
     * If we get here, we're trying to unlock a lock that we never acquired -
353
     * that's fatal.
354
     */
355
80
    assert(0);
356
0
}
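
Taken together, the read side is intended to be used in the following pattern. This is a minimal editorial sketch: reader(), my_lock and shared_ptr are hypothetical, and ossl_rcu_uptr_deref is defined later in this file:

    /* Hypothetical reader using the RCU read-side API above */
    static void reader(CRYPTO_RCU_LOCK *my_lock, void **shared_ptr)
    {
        void *obj;

        ossl_rcu_read_lock(my_lock);
        obj = ossl_rcu_uptr_deref(shared_ptr); /* acquire load */
        /* ... read-only use of obj; it stays valid until unlock ... */
        ossl_rcu_read_unlock(my_lock);
    }
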
357
358
/*
359
 * Write side allocation routine to get the current qp
360
 * and replace it with a new one
361
 */
362
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
363
933
{
364
933
    uint32_t current_idx;
365
366
933
    pthread_mutex_lock(&lock->alloc_lock);
367
368
    /*
369
     * we need at least one qp to be available with one
370
     * left over, so that readers can start working on
371
     * one that isn't yet being waited on
372
     */
373
933
    while (lock->group_count - lock->writers_alloced < 2)
374
        /* we have to wait for one to be free */
375
0
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
376
377
933
    current_idx = lock->current_alloc_idx;
378
379
    /* Allocate the qp */
380
933
    lock->writers_alloced++;
381
382
    /* increment the allocation index */
383
933
    lock->current_alloc_idx = (lock->current_alloc_idx + 1) % lock->group_count;
384
385
933
    *curr_id = lock->id_ctr;
386
933
    lock->id_ctr++;
387
388
    /*
389
     * make the current state of everything visible by this release
390
     * when get_hold_current_qp acquires the next qp
391
     */
392
933
    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
393
933
        __ATOMIC_RELEASE);
394
395
    /*
396
     * this should make sure that the new value of reader_idx is visible in
397
     * get_hold_current_qp, directly after incrementing the users count
398
     */
399
933
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
400
933
        __ATOMIC_RELEASE);
401
402
    /* wake up any waiters */
403
933
    pthread_cond_signal(&lock->alloc_signal);
404
933
    pthread_mutex_unlock(&lock->alloc_lock);
405
933
    return &lock->qp_group[current_idx];
406
933
}
407
408
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
409
933
{
410
933
    pthread_mutex_lock(&lock->alloc_lock);
411
933
    lock->writers_alloced--;
412
933
    pthread_cond_signal(&lock->alloc_signal);
413
933
    pthread_mutex_unlock(&lock->alloc_lock);
414
933
}
415
416
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
417
    uint32_t count)
418
518
{
419
518
    struct rcu_qp *new = OPENSSL_zalloc(sizeof(*new) * count);
420
421
518
    lock->group_count = count;
422
518
    return new;
423
518
}
424
425
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
426
726
{
427
726
    pthread_mutex_lock(&lock->write_lock);
428
726
}
429
430
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
431
726
{
432
726
    pthread_mutex_unlock(&lock->write_lock);
433
726
}
434
435
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
436
933
{
437
933
    struct rcu_qp *qp;
438
933
    uint64_t count;
439
933
    uint32_t curr_id;
440
933
    struct rcu_cb_item *cb_items, *tmpcb;
441
442
933
    pthread_mutex_lock(&lock->write_lock);
443
933
    cb_items = lock->cb_items;
444
933
    lock->cb_items = NULL;
445
933
    pthread_mutex_unlock(&lock->write_lock);
446
447
933
    qp = update_qp(lock, &curr_id);
448
449
    /* retire in order */
450
933
    pthread_mutex_lock(&lock->prior_lock);
451
933
    while (lock->next_to_retire != curr_id)
452
0
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
453
454
    /*
455
     * Wait for the reader count to reach zero.
456
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
457
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
458
     * is visible prior to our read.  However, this is likely just
459
     * necessary to silence a tsan warning, because the read side
460
     * should not do any write operation
461
     * outside the atomic itself.
462
     */
463
933
    do {
464
933
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
465
933
    } while (count != (uint64_t)0);
466
467
933
    lock->next_to_retire++;
468
933
    pthread_cond_broadcast(&lock->prior_signal);
469
933
    pthread_mutex_unlock(&lock->prior_lock);
470
471
933
    retire_qp(lock, qp);
472
473
    /* handle any callbacks that we have */
474
1.14k
    while (cb_items != NULL) {
475
207
        tmpcb = cb_items;
476
207
        cb_items = cb_items->next;
477
207
        tmpcb->fn(tmpcb->data);
478
207
        OPENSSL_free(tmpcb);
479
207
    }
480
933
}
481
482
/*
483
 * Note: this call assumes it's made under the protection of
484
 * ossl_rcu_write_lock.
485
 */
486
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
487
207
{
488
207
    struct rcu_cb_item *new = OPENSSL_zalloc(sizeof(*new));
489
490
207
    if (new == NULL)
491
0
        return 0;
492
493
207
    new->data = data;
494
207
    new->fn = cb;
495
496
207
    new->next = lock->cb_items;
497
207
    lock->cb_items = new;
498
499
207
    return 1;
500
207
}
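
On the write side, the primitives above combine roughly as follows. This is an editorial sketch: writer(), make_new_object() and free_cb() are hypothetical, and ossl_rcu_assign_uptr is defined just below:

    /* Hypothetical writer: publish a new object and retire the old one */
    static void writer(CRYPTO_RCU_LOCK *my_lock, void **shared_ptr)
    {
        void *new_obj = make_new_object();
        void *old_obj;

        ossl_rcu_write_lock(my_lock);
        old_obj = *shared_ptr;                      /* writers are serialized */
        ossl_rcu_assign_uptr(shared_ptr, &new_obj); /* release store */
        ossl_rcu_call(my_lock, free_cb, old_obj);   /* defer the free */
        ossl_rcu_write_unlock(my_lock);

        /* wait out readers of old_obj, then run the deferred callbacks */
        ossl_synchronize_rcu(my_lock);
    }
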
501
502
void *ossl_rcu_uptr_deref(void **p)
503
79.6M
{
504
79.6M
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
505
79.6M
}
506
507
void ossl_rcu_assign_uptr(void **p, void **v)
508
39.9k
{
509
39.9k
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
510
39.9k
}
511
512
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
513
518
{
514
518
    struct rcu_lock_st *new;
515
518
    pthread_mutex_t *mutexes[3] = { NULL };
516
518
    pthread_cond_t *conds[2] = { NULL };
517
518
    int i;
518
519
    /*
520
 * We need a minimum of 2 qps
521
     */
522
518
    if (num_writers < 2)
523
518
        num_writers = 2;
524
525
518
    ctx = ossl_lib_ctx_get_concrete(ctx);
526
518
    if (ctx == NULL)
527
0
        return NULL;
528
529
518
    new = OPENSSL_zalloc(sizeof(*new));
530
518
    if (new == NULL)
531
0
        return NULL;
532
533
518
    new->ctx = ctx;
534
518
    i = 0;
535
518
    mutexes[i] = pthread_mutex_init(&new->write_lock, NULL) == 0 ? &new->write_lock : NULL;
536
518
    if (mutexes[i++] == NULL)
537
0
        goto err;
538
518
    mutexes[i] = pthread_mutex_init(&new->prior_lock, NULL) == 0 ? &new->prior_lock : NULL;
539
518
    if (mutexes[i++] == NULL)
540
0
        goto err;
541
518
    mutexes[i] = pthread_mutex_init(&new->alloc_lock, NULL) == 0 ? &new->alloc_lock : NULL;
542
518
    if (mutexes[i++] == NULL)
543
0
        goto err;
544
518
    conds[i - 3] = pthread_cond_init(&new->prior_signal, NULL) == 0 ? &new->prior_signal : NULL;
545
518
    if (conds[i - 3] == NULL)
546
0
        goto err;
547
518
    i++;
548
518
    conds[i - 3] = pthread_cond_init(&new->alloc_signal, NULL) == 0 ? &new->alloc_signal : NULL;
549
518
    if (conds[i - 3] == NULL)
550
0
        goto err;
551
518
    i++;
552
518
    new->qp_group = allocate_new_qp_group(new, num_writers);
553
518
    if (new->qp_group == NULL)
554
0
        goto err;
555
556
518
    return new;
557
558
0
err:
559
0
    for (i = 0; i < 3; i++)
560
0
        if (mutexes[i] != NULL)
561
0
            pthread_mutex_destroy(mutexes[i]);
562
0
    for (i = 0; i < 2; i++)
563
0
        if (conds[i] != NULL)
564
0
            pthread_cond_destroy(conds[i]);
565
0
    OPENSSL_free(new->qp_group);
566
0
    OPENSSL_free(new);
567
0
    return NULL;
568
518
}
569
570
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
571
350
{
572
350
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
573
574
350
    if (lock == NULL)
575
0
        return;
576
577
    /* make sure we're synchronized */
578
350
    ossl_synchronize_rcu(rlock);
579
580
350
    OPENSSL_free(rlock->qp_group);
581
    /*
582
     * Some targets (BSD) allocate heap when initializing
583
     * a mutex or condition variable; to prevent leaks, those need
584
     * to be destroyed here.
585
     */
586
350
    pthread_mutex_destroy(&rlock->write_lock);
587
350
    pthread_mutex_destroy(&rlock->prior_lock);
588
350
    pthread_mutex_destroy(&rlock->alloc_lock);
589
350
    pthread_cond_destroy(&rlock->prior_signal);
590
350
    pthread_cond_destroy(&rlock->alloc_signal);
591
592
    /* There should only be a single qp left now */
593
350
    OPENSSL_free(rlock);
594
350
}
595
596
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
597
10.4M
{
598
10.4M
#ifdef USE_RWLOCK
599
10.4M
    CRYPTO_RWLOCK *lock;
600
601
10.4M
    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
602
        /* Don't set error, to avoid recursion blowup. */
603
0
        return NULL;
604
605
10.4M
    if (pthread_rwlock_init(lock, NULL) != 0) {
606
0
        OPENSSL_free(lock);
607
0
        return NULL;
608
0
    }
609
#else
610
    pthread_mutexattr_t attr;
611
    CRYPTO_RWLOCK *lock;
612
613
    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
614
        /* Don't set error, to avoid recursion blowup. */
615
        return NULL;
616
617
    /*
618
     * We don't use recursive mutexes, but try to catch errors if we do.
619
     */
620
    pthread_mutexattr_init(&attr);
621
#if !defined(__TANDEM) && !defined(_SPT_MODEL_)
622
#if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
623
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
624
#endif
625
#else
626
    /* The SPT Thread Library does not define MUTEX attributes. */
627
#endif
628
629
    if (pthread_mutex_init(lock, &attr) != 0) {
630
        pthread_mutexattr_destroy(&attr);
631
        OPENSSL_free(lock);
632
        return NULL;
633
    }
634
635
    pthread_mutexattr_destroy(&attr);
636
#endif
637
638
10.4M
    return lock;
639
10.4M
}
640
641
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
642
1.04G
{
643
1.04G
#ifdef USE_RWLOCK
644
1.04G
    if (pthread_rwlock_rdlock(lock) != 0)
645
0
        return 0;
646
#else
647
    if (pthread_mutex_lock(lock) != 0) {
648
        assert(errno != EDEADLK && errno != EBUSY);
649
        return 0;
650
    }
651
#endif
652
653
1.04G
    return 1;
654
1.04G
}
655
656
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
657
54.9M
{
658
54.9M
#ifdef USE_RWLOCK
659
54.9M
    if (pthread_rwlock_wrlock(lock) != 0)
660
0
        return 0;
661
#else
662
    if (pthread_mutex_lock(lock) != 0) {
663
        assert(errno != EDEADLK && errno != EBUSY);
664
        return 0;
665
    }
666
#endif
667
668
54.9M
    return 1;
669
54.9M
}
670
671
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
672
1.25G
{
673
1.25G
#ifdef USE_RWLOCK
674
1.25G
    if (pthread_rwlock_unlock(lock) != 0)
675
0
        return 0;
676
#else
677
    if (pthread_mutex_unlock(lock) != 0) {
678
        assert(errno != EPERM);
679
        return 0;
680
    }
681
#endif
682
683
1.25G
    return 1;
684
1.25G
}
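
A typical caller of these wrappers checks the __owur results. A minimal editorial sketch; read_counter() and its arguments are hypothetical:

    /* Hypothetical read of a value guarded by a CRYPTO_RWLOCK */
    static int read_counter(CRYPTO_RWLOCK *lock, const int *counter, int *out)
    {
        if (!CRYPTO_THREAD_read_lock(lock))
            return 0;
        *out = *counter;
        return CRYPTO_THREAD_unlock(lock);
    }
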
685
686
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
687
10.4M
{
688
10.4M
    if (lock == NULL)
689
2.46k
        return;
690
691
10.4M
#ifdef USE_RWLOCK
692
10.4M
    pthread_rwlock_destroy(lock);
693
#else
694
    pthread_mutex_destroy(lock);
695
#endif
696
10.4M
    OPENSSL_free(lock);
697
698
10.4M
    return;
699
10.4M
}
700
701
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
702
2.49G
{
703
2.49G
    if (pthread_once(once, init) != 0)
704
0
        return 0;
705
706
2.49G
    return 1;
707
2.49G
}
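
This is normally paired with a statically initialized CRYPTO_ONCE. A sketch; do_init(), init_ok and ensure_init() are hypothetical, while CRYPTO_ONCE_STATIC_INIT comes from <openssl/crypto.h>:

    static CRYPTO_ONCE once = CRYPTO_ONCE_STATIC_INIT;
    static int init_ok = 0;

    static void do_init(void)
    {
        init_ok = 1; /* one-time setup goes here */
    }

    static int ensure_init(void)
    {
        return CRYPTO_THREAD_run_once(&once, do_init) && init_ok;
    }
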
708
709
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
710
1.54k
{
711
712
1.54k
#ifndef FIPS_MODULE
713
1.54k
    if (!ossl_init_thread())
714
0
        return 0;
715
1.54k
#endif
716
717
1.54k
    if (pthread_key_create(key, cleanup) != 0)
718
0
        return 0;
719
720
1.54k
    return 1;
721
1.54k
}
722
723
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
724
2.13G
{
725
2.13G
    return pthread_getspecific(*key);
726
2.13G
}
727
728
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
729
1.79k
{
730
1.79k
    if (pthread_setspecific(*key, val) != 0)
731
0
        return 0;
732
733
1.79k
    return 1;
734
1.79k
}
735
736
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
737
1.38k
{
738
1.38k
    if (pthread_key_delete(*key) != 0)
739
0
        return 0;
740
741
1.38k
    return 1;
742
1.38k
}
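
The thread-local helpers are used together in the pattern below, mirroring what ossl_rcu_read_lock does above. An editorial sketch; all names are hypothetical:

    static CRYPTO_THREAD_LOCAL key;

    static void thread_cleanup(void *ptr)
    {
        OPENSSL_free(ptr);
    }

    static int setup(void)
    {
        return CRYPTO_THREAD_init_local(&key, thread_cleanup);
    }

    static void *get_or_make(void)
    {
        void *data = CRYPTO_THREAD_get_local(&key);

        if (data == NULL) {
            data = OPENSSL_zalloc(64);
            if (data != NULL && !CRYPTO_THREAD_set_local(&key, data)) {
                OPENSSL_free(data);
                data = NULL;
            }
        }
        return data;
    }
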
743
744
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
745
208k
{
746
208k
    return pthread_self();
747
208k
}
748
749
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
750
12.6k
{
751
12.6k
    return pthread_equal(a, b);
752
12.6k
}
753
754
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
755
12.0M
{
756
12.0M
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
757
12.0M
    if (__atomic_is_lock_free(sizeof(*val), val)) {
758
12.0M
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
759
12.0M
        return 1;
760
12.0M
    }
761
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
762
    /* This will work for all future Solaris versions. */
763
    if (ret != NULL) {
764
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
765
        return 1;
766
    }
767
#endif
768
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
769
0
        return 0;
770
771
0
    *val += amount;
772
0
    *ret = *val;
773
774
0
    if (!CRYPTO_THREAD_unlock(lock))
775
0
        return 0;
776
777
0
    return 1;
778
0
}
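
Callers still supply a lock so the mutex-based fallback path works when no lock-free builtin is available. A sketch; bump() and its arguments are hypothetical:

    /* Hypothetical refcount increment usable whichever branch is compiled */
    static int bump(int *refcount, CRYPTO_RWLOCK *lock)
    {
        int newval;

        if (!CRYPTO_atomic_add(refcount, 1, &newval, lock))
            return 0; /* fallback path failed to take the lock */
        return newval;
    }
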
779
780
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
781
    CRYPTO_RWLOCK *lock)
782
716
{
783
716
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
784
716
    if (__atomic_is_lock_free(sizeof(*val), val)) {
785
716
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
786
716
        return 1;
787
716
    }
788
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
789
    /* This will work for all future Solaris versions. */
790
    if (ret != NULL) {
791
        *ret = atomic_or_64_nv(val, op);
792
        return 1;
793
    }
794
#endif
795
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
796
0
        return 0;
797
0
    *val |= op;
798
0
    *ret = *val;
799
800
0
    if (!CRYPTO_THREAD_unlock(lock))
801
0
        return 0;
802
803
0
    return 1;
804
0
}
805
806
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
807
2.89G
{
808
2.89G
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
809
2.89G
    if (__atomic_is_lock_free(sizeof(*val), val)) {
810
2.89G
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
811
2.89G
        return 1;
812
2.89G
    }
813
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
814
    /* This will work for all future Solaris versions. */
815
    if (ret != NULL) {
816
        *ret = atomic_or_64_nv(val, 0);
817
        return 1;
818
    }
819
#endif
820
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
821
0
        return 0;
822
0
    *ret = *val;
823
0
    if (!CRYPTO_THREAD_unlock(lock))
824
0
        return 0;
825
826
0
    return 1;
827
0
}
828
829
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
830
0
{
831
0
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
832
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
833
0
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
834
0
        return 1;
835
0
    }
836
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
837
    /* This will work for all future Solaris versions. */
838
    if (ret != NULL) {
839
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
840
        return 1;
841
    }
842
#endif
843
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
844
0
        return 0;
845
0
    *ret = *val;
846
0
    if (!CRYPTO_THREAD_unlock(lock))
847
0
        return 0;
848
849
0
    return 1;
850
0
}
851
852
#ifndef FIPS_MODULE
853
int openssl_init_fork_handlers(void)
854
0
{
855
0
    return 1;
856
0
}
857
#endif /* FIPS_MODULE */
858
859
int openssl_get_fork_id(void)
860
164k
{
861
164k
    return getpid();
862
164k
}
863
#endif