Coverage Report

Created: 2025-12-04 06:33

/src/openssl35/crypto/threads_pthread.c
Line | Count | Source
1
/*
2
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License 2.0 (the "License").  You may not use
5
 * this file except in compliance with the License.  You can obtain a copy
6
 * in the file LICENSE in the source distribution or at
7
 * https://www.openssl.org/source/license.html
8
 */
9
10
/* We need to use the OPENSSL_fork_*() deprecated APIs */
11
#define OPENSSL_SUPPRESS_DEPRECATED
12
13
#include <openssl/crypto.h>
14
#include <crypto/cryptlib.h>
15
#include "internal/cryptlib.h"
16
#include "internal/rcu.h"
17
#include "rcu_internal.h"
18
19
#if defined(__clang__) && defined(__has_feature)
20
# if __has_feature(thread_sanitizer)
21
#  define __SANITIZE_THREAD__
22
# endif
23
#endif
24
25
#if defined(__SANITIZE_THREAD__)
26
# include <sanitizer/tsan_interface.h>
27
# define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \
28
__tsan_mutex_post_unlock((x), 0)
29
30
# define TSAN_FAKE_LOCK(x)  __tsan_mutex_pre_lock((x), 0); \
31
__tsan_mutex_post_lock((x), 0, 0)
32
#else
33
# define TSAN_FAKE_UNLOCK(x)
34
# define TSAN_FAKE_LOCK(x)
35
#endif
36
37
#if defined(__sun)
38
# include <atomic.h>
39
#endif
40
41
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
42
/*
43
 * OS X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
44
 * __ATOMIC_ACQ_REL, but whose __atomic_is_lock_free() expects only one parameter
45
 * rather than two, i.e. it has the signature __atomic_is_lock_free(sizeof(_Atomic(T))).
46
 * All of this makes it impossible to use __atomic_is_lock_free here.
47
 *
48
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
49
 */
50
# define BROKEN_CLANG_ATOMICS
51
#endif
52
53
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
54
55
# if defined(OPENSSL_SYS_UNIX)
56
#  include <sys/types.h>
57
#  include <unistd.h>
58
# endif
59
60
# include <assert.h>
61
62
/*
63
 * The Non-Stop KLT thread model currently seems broken in its rwlock
64
 * implementation.
65
 * Likewise, there is a problem with the glibc implementation on riscv.
66
 */
67
# if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \
68
                                         && !defined(__riscv)
69
#  define USE_RWLOCK
70
# endif
71
72
/*
73
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
74
 * other compilers.
75
 *
76
 * Unfortunately, we can't do that with some "generic type", because there's no
77
 * guarantee that the chosen generic type is large enough to cover all cases.
78
 * Therefore, we implement fallbacks for each applicable type, with composed
79
 * names that include the type they handle.
80
 *
81
 * (an anecdote: we previously tried to use |void *| as the generic type, with
82
 * the thought that the pointer itself is the largest type.  However, this is
83
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
84
 *
85
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
86
 * they can map to the correct fallback function.  In the GNU/clang case, that
87
 * parameter is simply ignored.
88
 */
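
To make the composed-name scheme above concrete, here is a brief sketch (not part of this file) of how one ATOMIC_ invocation resolves on each path:

    /* sketch only */
    uint64_t x = 0;
    uint64_t v = ATOMIC_LOAD_N(uint64_t, &x, __ATOMIC_ACQUIRE);
    /*
     * GNU/clang builtins: the type parameter is ignored, so this expands to
     *     __atomic_load_n(&x, __ATOMIC_ACQUIRE)
     * fallback path: the type is token-pasted into the function name, giving
     *     fallback_atomic_load_n_uint64_t(&x)
     */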
89
90
/*
91
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
92
 * fallback function names.
93
 */
94
typedef void *pvoid;
95
96
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
97
    && !defined(USE_ATOMIC_FALLBACKS)
98
65.0M
#  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
99
1.19k
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
100
39.4k
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
101
1.25k
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
102
59
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
103
# else
104
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
105
106
#  define IMPL_fallback_atomic_load_n(t)                        \
107
    static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
108
    {                                                           \
109
        t ret;                                                  \
110
                                                                \
111
        pthread_mutex_lock(&atomic_sim_lock);                   \
112
        ret = *p;                                               \
113
        pthread_mutex_unlock(&atomic_sim_lock);                 \
114
        return ret;                                             \
115
    }
116
IMPL_fallback_atomic_load_n(uint32_t)
117
IMPL_fallback_atomic_load_n(uint64_t)
118
IMPL_fallback_atomic_load_n(pvoid)
119
120
#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
121
122
#  define IMPL_fallback_atomic_store_n(t)                       \
123
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
124
    {                                                           \
125
        t ret;                                                  \
126
                                                                \
127
        pthread_mutex_lock(&atomic_sim_lock);                   \
128
        ret = *p;                                               \
129
        *p = v;                                                 \
130
        pthread_mutex_unlock(&atomic_sim_lock);                 \
131
        return ret;                                             \
132
    }
133
IMPL_fallback_atomic_store_n(uint32_t)
134
135
#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
136
137
#  define IMPL_fallback_atomic_store(t)                         \
138
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
139
    {                                                           \
140
        pthread_mutex_lock(&atomic_sim_lock);                   \
141
        *p = *v;                                                \
142
        pthread_mutex_unlock(&atomic_sim_lock);                 \
143
    }
144
IMPL_fallback_atomic_store(pvoid)
145
146
#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
147
148
/*
149
 * The fallbacks that follow don't need any per type implementation, as
150
 * they are designed for uint64_t only.  If there comes a time when multiple
151
 * types need to be covered, it's relatively easy to refactor them the same
152
 * way as the fallbacks above.
153
 */
154
155
static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
156
{
157
    uint64_t ret;
158
159
    pthread_mutex_lock(&atomic_sim_lock);
160
    *p += v;
161
    ret = *p;
162
    pthread_mutex_unlock(&atomic_sim_lock);
163
    return ret;
164
}
165
166
#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
167
168
static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
169
{
170
    uint64_t ret;
171
172
    pthread_mutex_lock(&atomic_sim_lock);
173
    *p -= v;
174
    ret = *p;
175
    pthread_mutex_unlock(&atomic_sim_lock);
176
    return ret;
177
}
178
179
#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
180
# endif
181
182
/*
183
 * This is the core of an rcu lock. It tracks the readers and writers for the
184
 * current quiescence point for a given lock. users is the 64-bit value that
185
 * tracks the number of readers currently holding this qp.
186
 *
187
 */
188
struct rcu_qp {
189
    uint64_t users;
190
};
191
192
struct thread_qp {
193
    struct rcu_qp *qp;
194
    unsigned int depth;
195
    CRYPTO_RCU_LOCK *lock;
196
};
197
198
345
# define MAX_QPS 10
199
/*
200
 * This is the per-thread tracking data
201
 * that is assigned to each thread participating
202
 * in an rcu qp
203
 *
204
 * qp points to the qp that the thread last acquired
205
 *
206
 */
207
struct rcu_thr_data {
208
    struct thread_qp thread_qps[MAX_QPS];
209
};
210
211
/*
212
 * This is the internal version of a CRYPTO_RCU_LOCK;
213
 * it is cast from CRYPTO_RCU_LOCK.
214
 */
215
struct rcu_lock_st {
216
    /* Callbacks to call for next ossl_synchronize_rcu */
217
    struct rcu_cb_item *cb_items;
218
219
    /* The context we are being created against */
220
    OSSL_LIB_CTX *ctx;
221
222
    /* Array of quiescent points for synchronization */
223
    struct rcu_qp *qp_group;
224
225
    /* rcu generation counter for in-order retirement */
226
    uint32_t id_ctr;
227
228
    /* Number of elements in qp_group array */
229
    uint32_t group_count;
230
231
    /* Index of the current qp in the qp_group array */
232
    uint32_t reader_idx;
233
234
    /* the next id_ctr value to be retired */
235
    uint32_t next_to_retire;
236
237
    /* index of the next free rcu_qp in the qp_group */
238
    uint32_t current_alloc_idx;
239
240
    /* number of qps in the qp_group array currently being retired */
241
    uint32_t writers_alloced;
242
243
    /* lock protecting write side operations */
244
    pthread_mutex_t write_lock;
245
246
    /* lock protecting updates to writers_alloced/current_alloc_idx */
247
    pthread_mutex_t alloc_lock;
248
249
    /* signal to wake threads waiting on alloc_lock */
250
    pthread_cond_t alloc_signal;
251
252
    /* lock to enforce in-order retirement */
253
    pthread_mutex_t prior_lock;
254
255
    /* signal to wake threads waiting on prior_lock */
256
    pthread_cond_t prior_signal;
257
};
258
259
/* Read side acquisition of the current qp */
260
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
261
59
{
262
59
    uint32_t qp_idx;
263
264
    /* get the current qp index */
265
59
    for (;;) {
266
59
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
267
268
        /*
269
         * Notes on use of __ATOMIC_ACQUIRE
270
         * We need to ensure the following:
271
         * 1) That subsequent operations aren't optimized by hoisting them above
272
         * this operation.  Specifically, we don't want the below re-load of
273
         * qp_idx to get optimized away
274
         * 2) We want to ensure that any updating of reader_idx on the write side
275
         * of the lock is flushed from a local cpu cache so that we see any
276
         * updates prior to the load.  This is a non-issue on cache coherent
277
         * systems like x86, but is relevant on other arches
278
         */
279
59
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
280
59
                         __ATOMIC_ACQUIRE);
281
282
        /* if the idx hasn't changed, we're good, else try again */
283
59
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
284
59
                                    __ATOMIC_ACQUIRE))
285
59
            break;
286
287
0
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
288
0
                         __ATOMIC_RELAXED);
289
0
    }
290
291
59
    return &lock->qp_group[qp_idx];
292
59
}
293
294
static void ossl_rcu_free_local_data(void *arg)
295
4
{
296
4
    OSSL_LIB_CTX *ctx = arg;
297
4
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
298
4
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
299
300
4
    OPENSSL_free(data);
301
4
    CRYPTO_THREAD_set_local(lkey, NULL);
302
4
}
303
304
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
305
26
{
306
26
    struct rcu_thr_data *data;
307
26
    int i, available_qp = -1;
308
26
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
309
310
    /*
311
     * fetch this thread's rcu tracking data, creating it on
312
     * first use below
313
     */
314
26
    data = CRYPTO_THREAD_get_local(lkey);
315
316
26
    if (data == NULL) {
317
2
        data = OPENSSL_zalloc(sizeof(*data));
318
2
        OPENSSL_assert(data != NULL);
319
2
        CRYPTO_THREAD_set_local(lkey, data);
320
2
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
321
2
    }
322
323
286
    for (i = 0; i < MAX_QPS; i++) {
324
260
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
325
26
            available_qp = i;
326
        /* If we have a hold on this lock already, we're good */
327
260
        if (data->thread_qps[i].lock == lock) {
328
0
            data->thread_qps[i].depth++;
329
0
            return;
330
0
        }
331
260
    }
332
333
    /*
334
     * if we get here, then we don't have a hold on this lock yet
335
     */
336
26
    assert(available_qp != -1);
337
338
26
    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
339
26
    data->thread_qps[available_qp].depth = 1;
340
26
    data->thread_qps[available_qp].lock = lock;
341
26
}
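
Because each thread records a per-lock depth, read-side critical sections may nest on the same lock. A minimal sketch, with hypothetical my_lock:

    static void nested_reader(CRYPTO_RCU_LOCK *my_lock)
    {
        ossl_rcu_read_lock(my_lock);   /* acquires a qp hold, depth = 1 */
        ossl_rcu_read_lock(my_lock);   /* same lock found: depth = 2, no new hold */
        /* ... */
        ossl_rcu_read_unlock(my_lock); /* depth = 1, qp hold retained */
        ossl_rcu_read_unlock(my_lock); /* depth = 0, reader count decremented */
    }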
342
343
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
344
59
{
345
59
    int i;
346
59
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
347
59
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
348
59
    uint64_t ret;
349
350
59
    assert(data != NULL);
351
352
59
    for (i = 0; i < MAX_QPS; i++) {
353
59
        if (data->thread_qps[i].lock == lock) {
354
            /*
355
             * we have to use __ATOMIC_RELEASE here
356
             * to ensure that all preceding read instructions complete
357
             * before the decrement is visible to ossl_synchronize_rcu
358
             */
359
59
            data->thread_qps[i].depth--;
360
59
            if (data->thread_qps[i].depth == 0) {
361
59
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
362
59
                                       (uint64_t)1, __ATOMIC_RELEASE);
363
59
                OPENSSL_assert(ret != UINT64_MAX);
364
59
                data->thread_qps[i].qp = NULL;
365
59
                data->thread_qps[i].lock = NULL;
366
59
            }
367
59
            return;
368
59
        }
369
59
    }
370
    /*
371
     * If we get here, we're trying to unlock a lock that we never acquired -
372
     * that's fatal.
373
     */
374
59
    assert(0);
375
0
}
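
Putting the read side together, a minimal reader sketch; reader, my_lock and my_data are hypothetical names, and ossl_rcu_uptr_deref is the acquire-load helper defined further below:

    static void reader(CRYPTO_RCU_LOCK *my_lock, void **my_data)
    {
        void *p;

        ossl_rcu_read_lock(my_lock);
        p = ossl_rcu_uptr_deref(my_data);
        /* ... read through p; it stays valid until the matching unlock ... */
        ossl_rcu_read_unlock(my_lock);
    }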
376
377
/*
378
 * Write side allocation routine to get the current qp
379
 * and replace it with a new one
380
 */
381
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
382
1.19k
{
383
1.19k
    uint32_t current_idx;
384
385
1.19k
    pthread_mutex_lock(&lock->alloc_lock);
386
387
    /*
388
     * we need at least two qps to be available: one for this writer to
389
     * claim, and one left over so that readers can start working on
390
     * one that isn't yet being waited on
391
     */
392
1.19k
    while (lock->group_count - lock->writers_alloced < 2)
393
        /* we have to wait for one to be free */
394
0
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
395
396
1.19k
    current_idx = lock->current_alloc_idx;
397
398
    /* Allocate the qp */
399
1.19k
    lock->writers_alloced++;
400
401
    /* increment the allocation index */
402
1.19k
    lock->current_alloc_idx =
403
1.19k
        (lock->current_alloc_idx + 1) % lock->group_count;
404
405
1.19k
    *curr_id = lock->id_ctr;
406
1.19k
    lock->id_ctr++;
407
408
    /*
409
     * this release makes the current state of everything visible to
410
     * get_hold_current_qp when it acquires the next qp
411
     */
412
1.19k
    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
413
1.19k
                   __ATOMIC_RELEASE);
414
415
    /*
416
     * this should make sure that the new value of reader_idx is visible in
417
     * get_hold_current_qp, directly after incrementing the users count
418
     */
419
1.19k
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
420
1.19k
                     __ATOMIC_RELEASE);
421
422
    /* wake up any waiters */
423
1.19k
    pthread_cond_signal(&lock->alloc_signal);
424
1.19k
    pthread_mutex_unlock(&lock->alloc_lock);
425
1.19k
    return &lock->qp_group[current_idx];
426
1.19k
}
427
428
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
429
1.19k
{
430
1.19k
    pthread_mutex_lock(&lock->alloc_lock);
431
1.19k
    lock->writers_alloced--;
432
1.19k
    pthread_cond_signal(&lock->alloc_signal);
433
1.19k
    pthread_mutex_unlock(&lock->alloc_lock);
434
1.19k
}
435
436
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
437
                                            uint32_t count)
438
574
{
439
574
    struct rcu_qp *new =
440
574
        OPENSSL_zalloc(sizeof(*new) * count);
441
442
574
    lock->group_count = count;
443
574
    return new;
444
574
}
445
446
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
447
880
{
448
880
    pthread_mutex_lock(&lock->write_lock);
449
880
    TSAN_FAKE_UNLOCK(&lock->write_lock);
450
880
}
451
452
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
453
880
{
454
880
    TSAN_FAKE_LOCK(&lock->write_lock);
455
880
    pthread_mutex_unlock(&lock->write_lock);
456
880
}
457
458
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
459
1.19k
{
460
1.19k
    struct rcu_qp *qp;
461
1.19k
    uint64_t count;
462
1.19k
    uint32_t curr_id;
463
1.19k
    struct rcu_cb_item *cb_items, *tmpcb;
464
465
1.19k
    pthread_mutex_lock(&lock->write_lock);
466
1.19k
    cb_items = lock->cb_items;
467
1.19k
    lock->cb_items = NULL;
468
1.19k
    pthread_mutex_unlock(&lock->write_lock);
469
470
1.19k
    qp = update_qp(lock, &curr_id);
471
472
    /* retire in order */
473
1.19k
    pthread_mutex_lock(&lock->prior_lock);
474
1.19k
    while (lock->next_to_retire != curr_id)
475
0
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
476
477
    /*
478
     * Wait for the reader count to reach zero.
479
     * Note the use of __ATOMIC_ACQUIRE here: it ensures that any
480
     * prior __ATOMIC_RELEASE write in ossl_rcu_read_unlock is
481
     * visible before our read.
482
     * However, this is likely only necessary to silence a tsan warning,
483
     * because the read side should not perform any write operation
484
     * outside the atomic itself
485
     */
486
1.19k
    do {
487
1.19k
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
488
1.19k
    } while (count != (uint64_t)0);
489
490
1.19k
    lock->next_to_retire++;
491
1.19k
    pthread_cond_broadcast(&lock->prior_signal);
492
1.19k
    pthread_mutex_unlock(&lock->prior_lock);
493
494
1.19k
    retire_qp(lock, qp);
495
496
    /* handle any callbacks that we have */
497
1.44k
    while (cb_items != NULL) {
498
248
        tmpcb = cb_items;
499
248
        cb_items = cb_items->next;
500
248
        tmpcb->fn(tmpcb->data);
501
248
        OPENSSL_free(tmpcb);
502
248
    }
503
1.19k
}
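
The corresponding write side, as a minimal sketch under the same hypothetical names: publish the replacement pointer, then wait for readers of the old one to drain before freeing it:

    static void writer(CRYPTO_RCU_LOCK *my_lock, void **my_data, void *newval)
    {
        void *old;

        ossl_rcu_write_lock(my_lock);
        old = ossl_rcu_uptr_deref(my_data);
        ossl_rcu_assign_uptr(my_data, &newval); /* release-store of newval */
        ossl_rcu_write_unlock(my_lock);

        ossl_synchronize_rcu(my_lock); /* readers holding old have drained */
        OPENSSL_free(old);
    }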
504
505
/*
506
 * Note: This call assumes it's made under the protection of
507
 * ossl_rcu_write_lock
508
 */
509
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
510
248
{
511
248
    struct rcu_cb_item *new =
512
248
        OPENSSL_zalloc(sizeof(*new));
513
514
248
    if (new == NULL)
515
0
        return 0;
516
517
248
    new->data = data;
518
248
    new->fn = cb;
519
520
248
    new->next = lock->cb_items;
521
248
    lock->cb_items = new;
522
523
248
    return 1;
524
248
}
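
As a non-blocking alternative to waiting in ossl_synchronize_rcu, a writer still holding ossl_rcu_write_lock can queue the retired value for deferred reclamation. A sketch with hypothetical free_cb and defer_free:

    static void free_cb(void *data)
    {
        OPENSSL_free(data);
    }

    /* called while still under ossl_rcu_write_lock, after the protected
     * pointer has been swapped away from old */
    static void defer_free(CRYPTO_RCU_LOCK *my_lock, void *old)
    {
        if (!ossl_rcu_call(my_lock, free_cb, old)) {
            /* allocation failed; the caller must reclaim old synchronously,
             * e.g. via ossl_synchronize_rcu after dropping the write lock */
        }
        /* on success, free_cb(old) runs at the next ossl_synchronize_rcu */
    }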
525
526
void *ossl_rcu_uptr_deref(void **p)
527
65.0M
{
528
65.0M
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
529
65.0M
}
530
531
void ossl_rcu_assign_uptr(void **p, void **v)
532
39.4k
{
533
39.4k
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
534
39.4k
}
535
536
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
537
574
{
538
574
    struct rcu_lock_st *new;
539
540
    /*
541
     * We need a minimum of 2 qps
542
     */
543
574
    if (num_writers < 2)
544
574
        num_writers = 2;
545
546
574
    ctx = ossl_lib_ctx_get_concrete(ctx);
547
574
    if (ctx == NULL)
548
0
        return NULL;
549
550
574
    new = OPENSSL_zalloc(sizeof(*new));
551
574
    if (new == NULL)
552
0
        return NULL;
553
554
574
    new->ctx = ctx;
555
574
    pthread_mutex_init(&new->write_lock, NULL);
556
574
    pthread_mutex_init(&new->prior_lock, NULL);
557
574
    pthread_mutex_init(&new->alloc_lock, NULL);
558
574
    pthread_cond_init(&new->prior_signal, NULL);
559
574
    pthread_cond_init(&new->alloc_signal, NULL);
560
561
574
    new->qp_group = allocate_new_qp_group(new, num_writers);
562
574
    if (new->qp_group == NULL) {
563
0
        OPENSSL_free(new);
564
0
        new = NULL;
565
0
    }
566
567
574
    return new;
568
574
}
569
570
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
571
456
{
572
456
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
573
574
456
    if (lock == NULL)
575
0
        return;
576
577
    /* make sure we're synchronized */
578
456
    ossl_synchronize_rcu(rlock);
579
580
456
    OPENSSL_free(rlock->qp_group);
581
    /* There should only be a single qp left now */
582
456
    OPENSSL_free(rlock);
583
456
}
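
A minimal lifecycle sketch for the two functions above (lifecycle_demo is hypothetical):

    static int lifecycle_demo(void)
    {
        /* NULL selects the default library context; a num_writers below the
         * minimum is silently raised to 2 */
        CRYPTO_RCU_LOCK *lock = ossl_rcu_lock_new(1, NULL);

        if (lock == NULL)
            return 0;
        /* ... readers and writers as sketched above ... */
        ossl_rcu_lock_free(lock); /* runs a final ossl_synchronize_rcu */
        return 1;
    }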
584
585
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
586
10.5M
{
587
10.5M
# ifdef USE_RWLOCK
588
10.5M
    CRYPTO_RWLOCK *lock;
589
590
10.5M
    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
591
        /* Don't set error, to avoid recursion blowup. */
592
0
        return NULL;
593
594
10.5M
    if (pthread_rwlock_init(lock, NULL) != 0) {
595
0
        OPENSSL_free(lock);
596
0
        return NULL;
597
0
    }
598
# else
599
    pthread_mutexattr_t attr;
600
    CRYPTO_RWLOCK *lock;
601
602
    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
603
        /* Don't set error, to avoid recursion blowup. */
604
        return NULL;
605
606
    /*
607
     * We don't use recursive mutexes, but try to catch errors if we do.
608
     */
609
    pthread_mutexattr_init(&attr);
610
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
611
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
612
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
613
#   endif
614
#  else
615
    /* The SPT Thread Library does not define MUTEX attributes. */
616
#  endif
617
618
    if (pthread_mutex_init(lock, &attr) != 0) {
619
        pthread_mutexattr_destroy(&attr);
620
        OPENSSL_free(lock);
621
        return NULL;
622
    }
623
624
    pthread_mutexattr_destroy(&attr);
625
# endif
626
627
10.5M
    return lock;
628
10.5M
}
629
630
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
631
73.8M
{
632
73.8M
# ifdef USE_RWLOCK
633
73.8M
    if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))
634
0
        return 0;
635
# else
636
    if (pthread_mutex_lock(lock) != 0) {
637
        assert(errno != EDEADLK && errno != EBUSY);
638
        return 0;
639
    }
640
# endif
641
642
73.8M
    return 1;
643
73.8M
}
644
645
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
646
35.4M
{
647
35.4M
# ifdef USE_RWLOCK
648
35.4M
    if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))
649
0
        return 0;
650
# else
651
    if (pthread_mutex_lock(lock) != 0) {
652
        assert(errno != EDEADLK && errno != EBUSY);
653
        return 0;
654
    }
655
# endif
656
657
35.4M
    return 1;
658
35.4M
}
659
660
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
661
1.29G
{
662
1.29G
# ifdef USE_RWLOCK
663
1.29G
    if (pthread_rwlock_unlock(lock) != 0)
664
0
        return 0;
665
# else
666
    if (pthread_mutex_unlock(lock) != 0) {
667
        assert(errno != EPERM);
668
        return 0;
669
    }
670
# endif
671
672
1.29G
    return 1;
673
1.29G
}
674
675
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
676
10.5M
{
677
10.5M
    if (lock == NULL)
678
2.74k
        return;
679
680
10.5M
# ifdef USE_RWLOCK
681
10.5M
    pthread_rwlock_destroy(lock);
682
# else
683
    pthread_mutex_destroy(lock);
684
# endif
685
10.5M
    OPENSSL_free(lock);
686
687
10.5M
    return;
688
10.5M
}
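
Typical use of the rwlock wrappers above, as a sketch (rwlock_demo is hypothetical); the lock calls are marked __owur, so their results must be checked:

    static int rwlock_demo(void)
    {
        CRYPTO_RWLOCK *lk = CRYPTO_THREAD_lock_new();

        if (lk == NULL)
            return 0;

        if (CRYPTO_THREAD_read_lock(lk)) {
            /* ... read shared state ... */
            CRYPTO_THREAD_unlock(lk);
        }

        if (CRYPTO_THREAD_write_lock(lk)) {
            /* ... modify shared state ... */
            CRYPTO_THREAD_unlock(lk);
        }

        CRYPTO_THREAD_lock_free(lk);
        return 1;
    }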
689
690
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
691
3.13G
{
692
3.13G
    if (pthread_once(once, init) != 0)
693
0
        return 0;
694
695
3.13G
    return 1;
696
3.13G
}
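
A once-guard sketch using this wrapper; do_init and ensure_init are hypothetical, and CRYPTO_ONCE_STATIC_INIT is the static initializer from <openssl/crypto.h>:

    static CRYPTO_ONCE once = CRYPTO_ONCE_STATIC_INIT;

    static void do_init(void)
    {
        /* one-time initialization, run at most once across all threads */
    }

    static int ensure_init(void)
    {
        return CRYPTO_THREAD_run_once(&once, do_init);
    }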
697
698
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
699
1.54k
{
700
701
1.54k
# ifndef FIPS_MODULE
702
1.54k
    if (!ossl_init_thread())
703
0
        return 0;
704
1.54k
# endif
705
706
1.54k
    if (pthread_key_create(key, cleanup) != 0)
707
0
        return 0;
708
709
1.54k
    return 1;
710
1.54k
}
711
712
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
713
2.15G
{
714
2.15G
    return pthread_getspecific(*key);
715
2.15G
}
716
717
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
718
2.16k
{
719
2.16k
    if (pthread_setspecific(*key, val) != 0)
720
0
        return 0;
721
722
2.16k
    return 1;
723
2.16k
}
724
725
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
726
1.73k
{
727
1.73k
    if (pthread_key_delete(*key) != 0)
728
0
        return 0;
729
730
1.73k
    return 1;
731
1.73k
}
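
A sketch tying the four thread-local calls together; my_key, my_cleanup, my_key_init and my_key_get are hypothetical, and the wrapper exists because OPENSSL_free is a macro rather than an address-takeable function:

    static CRYPTO_THREAD_LOCAL my_key;

    static void my_cleanup(void *p)
    {
        OPENSSL_free(p); /* invoked at thread exit for non-NULL values */
    }

    static int my_key_init(void) /* once, at startup */
    {
        return CRYPTO_THREAD_init_local(&my_key, my_cleanup);
    }

    static void *my_key_get(void) /* in each thread */
    {
        void *v = CRYPTO_THREAD_get_local(&my_key);

        if (v == NULL) {
            v = OPENSSL_zalloc(64);
            if (v != NULL && !CRYPTO_THREAD_set_local(&my_key, v)) {
                OPENSSL_free(v);
                v = NULL;
            }
        }
        return v;
    }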
732
733
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
734
201k
{
735
201k
    return pthread_self();
736
201k
}
737
738
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
739
13.3k
{
740
13.3k
    return pthread_equal(a, b);
741
13.3k
}
742
743
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
744
12.0M
{
745
12.0M
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
746
12.0M
    if (__atomic_is_lock_free(sizeof(*val), val)) {
747
12.0M
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
748
12.0M
        return 1;
749
12.0M
    }
750
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
751
    /* This will work for all future Solaris versions. */
752
    if (ret != NULL) {
753
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
754
        return 1;
755
    }
756
# endif
757
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
758
0
        return 0;
759
760
0
    *val += amount;
761
0
    *ret  = *val;
762
763
0
    if (!CRYPTO_THREAD_unlock(lock))
764
0
        return 0;
765
766
0
    return 1;
767
0
}
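
A refcount-style usage sketch; bump_ref and fallback_lk are hypothetical. The lock is only taken on the mutex fallback path, but portable callers should still supply a valid one, since passing NULL fails whenever lock-free atomics are unavailable:

    static int bump_ref(int *refs, CRYPTO_RWLOCK *fallback_lk)
    {
        int newcount;

        if (!CRYPTO_atomic_add(refs, 1, &newcount, fallback_lk))
            return 0;
        return 1; /* newcount holds the post-increment value */
    }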
768
769
int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
770
                        CRYPTO_RWLOCK *lock)
771
0
{
772
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
773
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
774
0
        *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
775
0
        return 1;
776
0
    }
777
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
778
    /* This will work for all future Solaris versions. */
779
    if (ret != NULL) {
780
        *ret = atomic_add_64_nv(val, op);
781
        return 1;
782
    }
783
# endif
784
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
785
0
        return 0;
786
0
    *val += op;
787
0
    *ret  = *val;
788
789
0
    if (!CRYPTO_THREAD_unlock(lock))
790
0
        return 0;
791
792
0
    return 1;
793
0
}
794
795
int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
796
                      CRYPTO_RWLOCK *lock)
797
0
{
798
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
799
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
800
0
        *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
801
0
        return 1;
802
0
    }
803
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
804
    /* This will work for all future Solaris versions. */
805
    if (ret != NULL) {
806
        *ret = atomic_and_64_nv(val, op);
807
        return 1;
808
    }
809
# endif
810
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
811
0
        return 0;
812
0
    *val &= op;
813
0
    *ret  = *val;
814
815
0
    if (!CRYPTO_THREAD_unlock(lock))
816
0
        return 0;
817
818
0
    return 1;
819
0
}
820
821
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
822
                     CRYPTO_RWLOCK *lock)
823
785
{
824
785
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
825
785
    if (__atomic_is_lock_free(sizeof(*val), val)) {
826
785
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
827
785
        return 1;
828
785
    }
829
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
830
    /* This will work for all future Solaris versions. */
831
    if (ret != NULL) {
832
        *ret = atomic_or_64_nv(val, op);
833
        return 1;
834
    }
835
# endif
836
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
837
0
        return 0;
838
0
    *val |= op;
839
0
    *ret  = *val;
840
841
0
    if (!CRYPTO_THREAD_unlock(lock))
842
0
        return 0;
843
844
0
    return 1;
845
0
}
846
847
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
848
3.25G
{
849
3.25G
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
850
3.25G
    if (__atomic_is_lock_free(sizeof(*val), val)) {
851
3.25G
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
852
3.25G
        return 1;
853
3.25G
    }
854
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
855
    /* This will work for all future Solaris versions. */
856
    if (ret != NULL) {
857
        *ret = atomic_or_64_nv(val, 0);
858
        return 1;
859
    }
860
# endif
861
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
862
0
        return 0;
863
0
    *ret  = *val;
864
0
    if (!CRYPTO_THREAD_unlock(lock))
865
0
        return 0;
866
867
0
    return 1;
868
0
}
869
870
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
871
38.7k
{
872
38.7k
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
873
38.7k
    if (__atomic_is_lock_free(sizeof(*dst), dst)) {
874
38.7k
        __atomic_store(dst, &val, __ATOMIC_RELEASE);
875
38.7k
        return 1;
876
38.7k
    }
877
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
878
    /* This will work for all future Solaris versions. */
879
    if (dst != NULL) {
880
        atomic_swap_64(dst, val);
881
        return 1;
882
    }
883
# endif
884
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
885
0
        return 0;
886
0
    *dst  = val;
887
0
    if (!CRYPTO_THREAD_unlock(lock))
888
0
        return 0;
889
890
0
    return 1;
891
0
}
892
893
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
894
0
{
895
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
896
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
897
0
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
898
0
        return 1;
899
0
    }
900
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
901
    /* This will work for all future Solaris versions. */
902
    if (ret != NULL) {
903
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
904
        return 1;
905
    }
906
# endif
907
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
908
0
        return 0;
909
0
    *ret  = *val;
910
0
    if (!CRYPTO_THREAD_unlock(lock))
911
0
        return 0;
912
913
0
    return 1;
914
0
}
915
916
# ifndef FIPS_MODULE
917
int openssl_init_fork_handlers(void)
918
0
{
919
0
    return 1;
920
0
}
921
# endif /* FIPS_MODULE */
922
923
int openssl_get_fork_id(void)
924
76.6k
{
925
76.6k
    return getpid();
926
76.6k
}
927
#endif