Coverage Report

Created: 2025-12-31 07:12

/src/openssl-3.5.4/crypto/threads_pthread.c
Line | Count | Source
1
/*
2
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License 2.0 (the "License").  You may not use
5
 * this file except in compliance with the License.  You can obtain a copy
6
 * in the file LICENSE in the source distribution or at
7
 * https://www.openssl.org/source/license.html
8
 */
9
10
/* We need to use the OPENSSL_fork_*() deprecated APIs */
11
#define OPENSSL_SUPPRESS_DEPRECATED
12
13
#include <openssl/crypto.h>
14
#include <crypto/cryptlib.h>
15
#include "internal/cryptlib.h"
16
#include "internal/rcu.h"
17
#include "rcu_internal.h"
18
19
#if defined(__clang__) && defined(__has_feature)
20
# if __has_feature(thread_sanitizer)
21
#  define __SANITIZE_THREAD__
22
# endif
23
#endif
24
25
#if defined(__SANITIZE_THREAD__)
26
# include <sanitizer/tsan_interface.h>
27
# define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \
28
__tsan_mutex_post_unlock((x), 0)
29
30
# define TSAN_FAKE_LOCK(x)  __tsan_mutex_pre_lock((x), 0); \
31
__tsan_mutex_post_lock((x), 0, 0)
32
#else
33
# define TSAN_FAKE_UNLOCK(x)
34
# define TSAN_FAKE_LOCK(x)
35
#endif
36
37
#if defined(__sun)
38
# include <atomic.h>
39
#endif
40
41
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
42
/*
43
 * OS X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
44
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free(),
45
 * i.e. the signature __atomic_is_lock_free(sizeof(_Atomic(T))), rather than two.
46
 * All of this makes it impossible to use __atomic_is_lock_free here.
47
 *
48
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
49
 */
50
# define BROKEN_CLANG_ATOMICS
51
#endif
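For contrast, the working two-parameter form of the builtin, as used by the CRYPTO_atomic_* functions later in this file, looks like the sketch below; with the affected Apple clang only the single-size form exists, so this guard cannot be written and the code falls back to locks instead. Illustrative only; my_atomic_add_int is a hypothetical helper, not part of the covered source.

/* Hypothetical helper: add via GNU builtins only when they are lock-free. */
static int my_atomic_add_int(int *val, int amount, int *ret)
{
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    /* Two arguments: the object size and its address. */
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
#endif
    return 0;   /* caller must take a lock instead */
}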
52
53
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
54
55
# if defined(OPENSSL_SYS_UNIX)
56
#  include <sys/types.h>
57
#  include <unistd.h>
58
# endif
59
60
# include <assert.h>
61
62
/*
63
 * The Non-Stop KLT thread model currently seems broken in its rwlock
64
 * implementation.
65
 * Likewise, there is a problem with the glibc implementation on riscv.
66
 */
67
# if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \
68
                                         && !defined(__riscv)
69
#  define USE_RWLOCK
70
# endif
71
72
/*
73
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
74
 * other compilers.
75
76
 * Unfortunately, we can't do that with some "generic type", because there's no
77
 * guarantee that the chosen generic type is large enough to cover all cases.
78
 * Therefore, we implement fallbacks for each applicable type, with composed
79
 * names that include the type they handle.
80
 *
81
 * (an anecdote: we previously tried to use |void *| as the generic type, with
82
 * the thought that the pointer itself is the largest type.  However, this is
83
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
84
 *
85
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
86
 * they can map to the correct fallback function.  In the GNU/clang case, that
87
 * parameter is simply ignored.
88
 */
89
90
/*
91
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
92
 * fallback function names.
93
 */
94
typedef void *pvoid;
95
96
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
97
    && !defined(USE_ATOMIC_FALLBACKS)
98
6.03k
#  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
99
5
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
100
353
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
101
5
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
102
0
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
103
# else
104
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
105
106
#  define IMPL_fallback_atomic_load_n(t)                        \
107
    static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
108
    {                                                           \
109
        t ret;                                                  \
110
                                                                \
111
        pthread_mutex_lock(&atomic_sim_lock);                   \
112
        ret = *p;                                               \
113
        pthread_mutex_unlock(&atomic_sim_lock);                 \
114
        return ret;                                             \
115
    }
116
IMPL_fallback_atomic_load_n(uint32_t)
117
IMPL_fallback_atomic_load_n(uint64_t)
118
IMPL_fallback_atomic_load_n(pvoid)
119
120
#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
121
122
#  define IMPL_fallback_atomic_store_n(t)                       \
123
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
124
    {                                                           \
125
        t ret;                                                  \
126
                                                                \
127
        pthread_mutex_lock(&atomic_sim_lock);                   \
128
        ret = *p;                                               \
129
        *p = v;                                                 \
130
        pthread_mutex_unlock(&atomic_sim_lock);                 \
131
        return ret;                                             \
132
    }
133
IMPL_fallback_atomic_store_n(uint32_t)
134
135
#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
136
137
#  define IMPL_fallback_atomic_store(t)                         \
138
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
139
    {                                                           \
140
        pthread_mutex_lock(&atomic_sim_lock);                   \
141
        *p = *v;                                                \
142
        pthread_mutex_unlock(&atomic_sim_lock);                 \
143
    }
144
IMPL_fallback_atomic_store(pvoid)
145
146
#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
147
148
/*
149
 * The fallbacks that follow don't need any per type implementation, as
150
 * they are designed for uint64_t only.  If there comes a time when multiple
151
 * types need to be covered, it's relatively easy to refactor them the same
152
 * way as the fallbacks above.
153
 */
154
155
static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
156
{
157
    uint64_t ret;
158
159
    pthread_mutex_lock(&atomic_sim_lock);
160
    *p += v;
161
    ret = *p;
162
    pthread_mutex_unlock(&atomic_sim_lock);
163
    return ret;
164
}
165
166
#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
167
168
static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
169
{
170
    uint64_t ret;
171
172
    pthread_mutex_lock(&atomic_sim_lock);
173
    *p -= v;
174
    ret = *p;
175
    pthread_mutex_unlock(&atomic_sim_lock);
176
    return ret;
177
}
178
179
#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
180
# endif
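To make the name composition described above concrete, this is how one call through the ATOMIC_ macros expands in each branch (illustrative comment only; qp->users is the field used further below).

/*
 * Example expansion (illustrative):
 *
 *     ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE)
 *
 * GNU/clang branch (the type parameter is ignored):
 *     __atomic_load_n(&qp->users, __ATOMIC_ACQUIRE)
 *
 * fallback branch (the memory-order parameter is ignored; the type picks
 * the mutex-protected function):
 *     fallback_atomic_load_n_uint64_t(&qp->users)
 */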
181
182
/*
183
 * This is the core of an rcu lock. It tracks the readers and writers for the
184
 * current quiescence point for a given lock. Users is the 64 bit value that
185
 * stores the READERS/ID as defined above
186
 *
187
 */
188
struct rcu_qp {
189
    uint64_t users;
190
};
191
192
struct thread_qp {
193
    struct rcu_qp *qp;
194
    unsigned int depth;
195
    CRYPTO_RCU_LOCK *lock;
196
};
197
198
0
# define MAX_QPS 10
199
/*
200
 * This is the per-thread tracking data
201
 * that is assigned to each thread participating
202
 * in an rcu qp.
203
 *
204
 * qp points to the qp that it last acquired.
205
 *
206
 */
207
struct rcu_thr_data {
208
    struct thread_qp thread_qps[MAX_QPS];
209
};
210
211
/*
212
 * This is the internal version of a CRYPTO_RCU_LOCK;
213
 * it is cast from CRYPTO_RCU_LOCK.
214
 */
215
struct rcu_lock_st {
216
    /* Callbacks to call for next ossl_synchronize_rcu */
217
    struct rcu_cb_item *cb_items;
218
219
    /* The context we are being created against */
220
    OSSL_LIB_CTX *ctx;
221
222
    /* Array of quiescent points for synchronization */
223
    struct rcu_qp *qp_group;
224
225
    /* rcu generation counter for in-order retirement */
226
    uint32_t id_ctr;
227
228
    /* Number of elements in qp_group array */
229
    uint32_t group_count;
230
231
    /* Index of the current qp in the qp_group array */
232
    uint32_t reader_idx;
233
234
    /* the next id_ctr value to be retired */
235
    uint32_t next_to_retire;
236
237
    /* index of the next free rcu_qp in the qp_group */
238
    uint32_t current_alloc_idx;
239
240
    /* number of qp's in qp_group array currently being retired */
241
    uint32_t writers_alloced;
242
243
    /* lock protecting write side operations */
244
    pthread_mutex_t write_lock;
245
246
    /* lock protecting updates to writers_alloced/current_alloc_idx */
247
    pthread_mutex_t alloc_lock;
248
249
    /* signal to wake threads waiting on alloc_lock */
250
    pthread_cond_t alloc_signal;
251
252
    /* lock to enforce in-order retirement */
253
    pthread_mutex_t prior_lock;
254
255
    /* signal to wake threads waiting on prior_lock */
256
    pthread_cond_t prior_signal;
257
};
258
259
/* Read side acquisition of the current qp */
260
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
261
0
{
262
0
    uint32_t qp_idx;
263
264
    /* get the current qp index */
265
0
    for (;;) {
266
0
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
267
268
        /*
269
         * Notes on use of __ATOMIC_ACQUIRE
270
         * We need to ensure the following:
271
         * 1) That subsequent operations aren't optimized by hoisting them above
272
         * this operation.  Specifically, we don't want the below re-load of
273
         * qp_idx to get optimized away
274
         * 2) We want to ensure that any updating of reader_idx on the write side
275
         * of the lock is flushed from a local cpu cache so that we see any
276
         * updates prior to the load.  This is a non-issue on cache coherent
277
         * systems like x86, but is relevant on other arches
278
         */
279
0
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
280
0
                         __ATOMIC_ACQUIRE);
281
282
        /* if the idx hasn't changed, we're good, else try again */
283
0
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
284
0
                                    __ATOMIC_ACQUIRE))
285
0
            break;
286
287
0
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
288
0
                         __ATOMIC_RELAXED);
289
0
    }
290
291
0
    return &lock->qp_group[qp_idx];
292
0
}
293
294
static void ossl_rcu_free_local_data(void *arg)
295
0
{
296
0
    OSSL_LIB_CTX *ctx = arg;
297
0
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
298
0
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
299
300
0
    OPENSSL_free(data);
301
0
    CRYPTO_THREAD_set_local(lkey, NULL);
302
0
}
303
304
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
305
0
{
306
0
    struct rcu_thr_data *data;
307
0
    int i, available_qp = -1;
308
0
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
309
310
    /*
311
     * we're going to access current_qp here so ask the
312
     * processor to fetch it
313
     */
314
0
    data = CRYPTO_THREAD_get_local(lkey);
315
316
0
    if (data == NULL) {
317
0
        data = OPENSSL_zalloc(sizeof(*data));
318
0
        OPENSSL_assert(data != NULL);
319
0
        CRYPTO_THREAD_set_local(lkey, data);
320
0
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
321
0
    }
322
323
0
    for (i = 0; i < MAX_QPS; i++) {
324
0
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
325
0
            available_qp = i;
326
        /* If we have a hold on this lock already, we're good */
327
0
        if (data->thread_qps[i].lock == lock) {
328
0
            data->thread_qps[i].depth++;
329
0
            return;
330
0
        }
331
0
    }
332
333
    /*
334
     * if we get here, then we don't have a hold on this lock yet
335
     */
336
0
    assert(available_qp != -1);
337
338
0
    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
339
0
    data->thread_qps[available_qp].depth = 1;
340
0
    data->thread_qps[available_qp].lock = lock;
341
0
}
342
343
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
344
0
{
345
0
    int i;
346
0
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
347
0
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
348
0
    uint64_t ret;
349
350
0
    assert(data != NULL);
351
352
0
    for (i = 0; i < MAX_QPS; i++) {
353
0
        if (data->thread_qps[i].lock == lock) {
354
            /*
355
             * we have to use __ATOMIC_RELEASE here
356
             * to ensure that all preceding read instructions complete
357
             * before the decrement is visible to ossl_synchronize_rcu
358
             */
359
0
            data->thread_qps[i].depth--;
360
0
            if (data->thread_qps[i].depth == 0) {
361
0
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
362
0
                                       (uint64_t)1, __ATOMIC_RELEASE);
363
0
                OPENSSL_assert(ret != UINT64_MAX);
364
0
                data->thread_qps[i].qp = NULL;
365
0
                data->thread_qps[i].lock = NULL;
366
0
            }
367
0
            return;
368
0
        }
369
0
    }
370
    /*
371
     * If we get here, we're trying to unlock a lock that we never acquired -
372
     * that's fatal.
373
     */
374
0
    assert(0);
375
0
}
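As a usage sketch of the read side above (not part of the covered source): struct my_cfg, my_cfg_ptr and my_read_value are hypothetical, and the CRYPTO_RCU_LOCK is assumed to come from ossl_rcu_lock_new.

struct my_cfg {
    int value;
};

/* shared pointer, published by the writer with ossl_rcu_assign_uptr */
static void *my_cfg_ptr;

static int my_read_value(CRYPTO_RCU_LOCK *rcu)
{
    struct my_cfg *c;
    int v = 0;

    ossl_rcu_read_lock(rcu);
    /* acquire-load of the shared pointer; stable until the matching unlock */
    c = ossl_rcu_uptr_deref(&my_cfg_ptr);
    if (c != NULL)
        v = c->value;
    ossl_rcu_read_unlock(rcu);
    return v;
}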
376
377
/*
378
 * Write side allocation routine to get the current qp
379
 * and replace it with a new one
380
 */
381
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
382
5
{
383
5
    uint32_t current_idx;
384
385
5
    pthread_mutex_lock(&lock->alloc_lock);
386
387
    /*
388
     * we need at least one qp to be available with one
389
     * left over, so that readers can start working on
390
     * one that isn't yet being waited on
391
     */
392
5
    while (lock->group_count - lock->writers_alloced < 2)
393
        /* we have to wait for one to be free */
394
0
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
395
396
5
    current_idx = lock->current_alloc_idx;
397
398
    /* Allocate the qp */
399
5
    lock->writers_alloced++;
400
401
    /* increment the allocation index */
402
5
    lock->current_alloc_idx =
403
5
        (lock->current_alloc_idx + 1) % lock->group_count;
404
405
5
    *curr_id = lock->id_ctr;
406
5
    lock->id_ctr++;
407
408
    /*
409
     * make the current state of everything visible by this release
410
     * when get_hold_current_qp acquires the next qp
411
     */
412
5
    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
413
5
                   __ATOMIC_RELEASE);
414
415
    /*
416
     * this should make sure that the new value of reader_idx is visible in
417
     * get_hold_current_qp, directly after incrementing the users count
418
     */
419
5
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
420
5
                     __ATOMIC_RELEASE);
421
422
    /* wake up any waiters */
423
5
    pthread_cond_signal(&lock->alloc_signal);
424
5
    pthread_mutex_unlock(&lock->alloc_lock);
425
5
    return &lock->qp_group[current_idx];
426
5
}
427
428
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
429
5
{
430
5
    pthread_mutex_lock(&lock->alloc_lock);
431
5
    lock->writers_alloced--;
432
5
    pthread_cond_signal(&lock->alloc_signal);
433
5
    pthread_mutex_unlock(&lock->alloc_lock);
434
5
}
435
436
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
437
                                            uint32_t count)
438
2
{
439
2
    struct rcu_qp *new =
440
2
        OPENSSL_zalloc(sizeof(*new) * count);
441
442
2
    lock->group_count = count;
443
2
    return new;
444
2
}
445
446
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
447
3
{
448
3
    pthread_mutex_lock(&lock->write_lock);
449
3
    TSAN_FAKE_UNLOCK(&lock->write_lock);
450
3
}
451
452
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
453
3
{
454
3
    TSAN_FAKE_LOCK(&lock->write_lock);
455
3
    pthread_mutex_unlock(&lock->write_lock);
456
3
}
457
458
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
459
5
{
460
5
    struct rcu_qp *qp;
461
5
    uint64_t count;
462
5
    uint32_t curr_id;
463
5
    struct rcu_cb_item *cb_items, *tmpcb;
464
465
5
    pthread_mutex_lock(&lock->write_lock);
466
5
    cb_items = lock->cb_items;
467
5
    lock->cb_items = NULL;
468
5
    pthread_mutex_unlock(&lock->write_lock);
469
470
5
    qp = update_qp(lock, &curr_id);
471
472
    /* retire in order */
473
5
    pthread_mutex_lock(&lock->prior_lock);
474
5
    while (lock->next_to_retire != curr_id)
475
0
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
476
477
    /*
478
     * wait for the reader count to reach zero.
479
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
480
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
481
     * is visible prior to our read.
482
     * However, this is likely only necessary to silence a tsan warning,
483
     * because the read side should not do any write operation
484
     * outside the atomic itself.
485
     */
486
5
    do {
487
5
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
488
5
    } while (count != (uint64_t)0);
489
490
5
    lock->next_to_retire++;
491
5
    pthread_cond_broadcast(&lock->prior_signal);
492
5
    pthread_mutex_unlock(&lock->prior_lock);
493
494
5
    retire_qp(lock, qp);
495
496
    /* handle any callbacks that we have */
497
6
    while (cb_items != NULL) {
498
1
        tmpcb = cb_items;
499
1
        cb_items = cb_items->next;
500
1
        tmpcb->fn(tmpcb->data);
501
1
        OPENSSL_free(tmpcb);
502
1
    }
503
5
}
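A matching write-side sketch, continuing the hypothetical my_cfg_ptr example from the read-side sketch above: publish the new object, then use the grace period implemented above before reclaiming the old one.

static void my_replace_cfg(CRYPTO_RCU_LOCK *rcu, int newval)
{
    struct my_cfg *newc, *oldc;

    newc = OPENSSL_zalloc(sizeof(*newc));
    if (newc == NULL)
        return;
    newc->value = newval;

    ossl_rcu_write_lock(rcu);
    oldc = ossl_rcu_uptr_deref(&my_cfg_ptr);
    /* release-store: readers see either the old or the fully built new object */
    ossl_rcu_assign_uptr(&my_cfg_ptr, (void **)&newc);
    ossl_rcu_write_unlock(rcu);

    /* block until every reader that could still hold oldc has unlocked */
    ossl_synchronize_rcu(rcu);
    OPENSSL_free(oldc);
}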
504
505
/*
506
 * Note: This call assumes it is made under the protection of
507
 * ossl_rcu_write_lock
508
 */
509
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
510
1
{
511
1
    struct rcu_cb_item *new =
512
1
        OPENSSL_zalloc(sizeof(*new));
513
514
1
    if (new == NULL)
515
0
        return 0;
516
517
1
    new->data = data;
518
1
    new->fn = cb;
519
520
1
    new->next = lock->cb_items;
521
1
    lock->cb_items = new;
522
523
1
    return 1;
524
1
}
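Where blocking in ossl_synchronize_rcu at the update site is undesirable, ossl_rcu_call defers the reclamation to the next grace period instead. A sketch, again reusing the hypothetical my_cfg_ptr, and assuming the rcu_cb_fn callback takes a single void * argument as the call above suggests; per the note above, it must be invoked under ossl_rcu_write_lock.

static void my_free_cfg_cb(void *data)
{
    OPENSSL_free(data);          /* runs from a later ossl_synchronize_rcu */
}

static int my_replace_cfg_deferred(CRYPTO_RCU_LOCK *rcu, struct my_cfg *newc)
{
    struct my_cfg *oldc;
    int ok;

    ossl_rcu_write_lock(rcu);
    oldc = ossl_rcu_uptr_deref(&my_cfg_ptr);
    ossl_rcu_assign_uptr(&my_cfg_ptr, (void **)&newc);
    /* queue oldc; it is freed after the next grace period completes */
    ok = ossl_rcu_call(rcu, my_free_cfg_cb, oldc);
    ossl_rcu_write_unlock(rcu);
    return ok;
}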
525
526
void *ossl_rcu_uptr_deref(void **p)
527
6.03k
{
528
6.03k
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
529
6.03k
}
530
531
void ossl_rcu_assign_uptr(void **p, void **v)
532
353
{
533
353
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
534
353
}
535
536
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
537
2
{
538
2
    struct rcu_lock_st *new;
539
540
    /*
541
     * We need a minimum of 2 qp's
542
     */
543
2
    if (num_writers < 2)
544
2
        num_writers = 2;
545
546
2
    ctx = ossl_lib_ctx_get_concrete(ctx);
547
2
    if (ctx == NULL)
548
0
        return 0;
549
550
2
    new = OPENSSL_zalloc(sizeof(*new));
551
2
    if (new == NULL)
552
0
        return NULL;
553
554
2
    new->ctx = ctx;
555
2
    pthread_mutex_init(&new->write_lock, NULL);
556
2
    pthread_mutex_init(&new->prior_lock, NULL);
557
2
    pthread_mutex_init(&new->alloc_lock, NULL);
558
2
    pthread_cond_init(&new->prior_signal, NULL);
559
2
    pthread_cond_init(&new->alloc_signal, NULL);
560
561
2
    new->qp_group = allocate_new_qp_group(new, num_writers);
562
2
    if (new->qp_group == NULL) {
563
0
        OPENSSL_free(new);
564
0
        new = NULL;
565
0
    }
566
567
2
    return new;
568
2
}
569
570
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
571
2
{
572
2
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
573
574
2
    if (lock == NULL)
575
0
        return;
576
577
    /* make sure we're synchronized */
578
2
    ossl_synchronize_rcu(rlock);
579
580
2
    OPENSSL_free(rlock->qp_group);
581
    /* There should only be a single qp left now */
582
2
    OPENSSL_free(rlock);
583
2
}
584
585
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
586
28
{
587
28
# ifdef USE_RWLOCK
588
28
    CRYPTO_RWLOCK *lock;
589
590
28
    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
591
        /* Don't set error, to avoid recursion blowup. */
592
0
        return NULL;
593
594
28
    if (pthread_rwlock_init(lock, NULL) != 0) {
595
0
        OPENSSL_free(lock);
596
0
        return NULL;
597
0
    }
598
# else
599
    pthread_mutexattr_t attr;
600
    CRYPTO_RWLOCK *lock;
601
602
    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
603
        /* Don't set error, to avoid recursion blowup. */
604
        return NULL;
605
606
    /*
607
     * We don't use recursive mutexes, but try to catch errors if we do.
608
     */
609
    pthread_mutexattr_init(&attr);
610
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
611
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
612
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
613
#   endif
614
#  else
615
    /* The SPT Thread Library does not define MUTEX attributes. */
616
#  endif
617
618
    if (pthread_mutex_init(lock, &attr) != 0) {
619
        pthread_mutexattr_destroy(&attr);
620
        OPENSSL_free(lock);
621
        return NULL;
622
    }
623
624
    pthread_mutexattr_destroy(&attr);
625
# endif
626
627
28
    return lock;
628
28
}
629
630
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
631
2.17k
{
632
2.17k
# ifdef USE_RWLOCK
633
2.17k
    if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))
634
0
        return 0;
635
# else
636
    if (pthread_mutex_lock(lock) != 0) {
637
        assert(errno != EDEADLK && errno != EBUSY);
638
        return 0;
639
    }
640
# endif
641
642
2.17k
    return 1;
643
2.17k
}
644
645
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
646
1.66k
{
647
1.66k
# ifdef USE_RWLOCK
648
1.66k
    if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))
649
0
        return 0;
650
# else
651
    if (pthread_mutex_lock(lock) != 0) {
652
        assert(errno != EDEADLK && errno != EBUSY);
653
        return 0;
654
    }
655
# endif
656
657
1.66k
    return 1;
658
1.66k
}
659
660
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
661
3.83k
{
662
3.83k
# ifdef USE_RWLOCK
663
3.83k
    if (pthread_rwlock_unlock(lock) != 0)
664
0
        return 0;
665
# else
666
    if (pthread_mutex_unlock(lock) != 0) {
667
        assert(errno != EPERM);
668
        return 0;
669
    }
670
# endif
671
672
3.83k
    return 1;
673
3.83k
}
674
675
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
676
34
{
677
34
    if (lock == NULL)
678
6
        return;
679
680
28
# ifdef USE_RWLOCK
681
28
    pthread_rwlock_destroy(lock);
682
# else
683
    pthread_mutex_destroy(lock);
684
# endif
685
28
    OPENSSL_free(lock);
686
687
28
    return;
688
34
}
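A usage sketch for the CRYPTO_THREAD_* lock functions above (my_counter_lock, my_counter and the helpers are hypothetical; the lock would be created once with CRYPTO_THREAD_lock_new and released with CRYPTO_THREAD_lock_free).

static CRYPTO_RWLOCK *my_counter_lock;   /* = CRYPTO_THREAD_lock_new() at init */
static int my_counter;

static int my_counter_bump(void)
{
    if (!CRYPTO_THREAD_write_lock(my_counter_lock))
        return 0;
    my_counter++;
    return CRYPTO_THREAD_unlock(my_counter_lock);
}

static int my_counter_peek(int *out)
{
    if (!CRYPTO_THREAD_read_lock(my_counter_lock))
        return 0;
    *out = my_counter;
    return CRYPTO_THREAD_unlock(my_counter_lock);
}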
689
690
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
691
4.22k
{
692
4.22k
    if (pthread_once(once, init) != 0)
693
0
        return 0;
694
695
4.22k
    return 1;
696
4.22k
}
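A sketch of the usual lazy-initialisation pattern built on CRYPTO_THREAD_run_once above (names are hypothetical; CRYPTO_ONCE_STATIC_INIT is the public initializer from <openssl/crypto.h>).

static CRYPTO_ONCE my_init_once = CRYPTO_ONCE_STATIC_INIT;
static CRYPTO_RWLOCK *my_shared_lock;

static void my_do_init(void)
{
    my_shared_lock = CRYPTO_THREAD_lock_new();
}

static CRYPTO_RWLOCK *my_get_shared_lock(void)
{
    /* my_do_init runs exactly once, even with concurrent callers */
    if (!CRYPTO_THREAD_run_once(&my_init_once, my_do_init))
        return NULL;
    return my_shared_lock;
}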
697
698
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
699
7
{
700
7
    if (pthread_key_create(key, cleanup) != 0)
701
0
        return 0;
702
703
7
    return 1;
704
7
}
705
706
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
707
3.00k
{
708
3.00k
    return pthread_getspecific(*key);
709
3.00k
}
710
711
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
712
8
{
713
8
    if (pthread_setspecific(*key, val) != 0)
714
0
        return 0;
715
716
8
    return 1;
717
8
}
718
719
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
720
7
{
721
7
    if (pthread_key_delete(*key) != 0)
722
0
        return 0;
723
724
7
    return 1;
725
7
}
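A sketch of the thread-local key functions above: each thread lazily allocates its own buffer and the cleanup callback frees it when the thread exits. Names are hypothetical; the key itself would be created once with CRYPTO_THREAD_init_local(&my_buf_key, my_buf_free) and destroyed with CRYPTO_THREAD_cleanup_local.

static CRYPTO_THREAD_LOCAL my_buf_key;

static void my_buf_free(void *buf)
{
    OPENSSL_free(buf);                   /* invoked at thread exit for non-NULL values */
}

static void *my_get_thread_buf(size_t len)
{
    void *buf = CRYPTO_THREAD_get_local(&my_buf_key);

    if (buf == NULL) {
        buf = OPENSSL_zalloc(len);
        if (buf != NULL && !CRYPTO_THREAD_set_local(&my_buf_key, buf)) {
            OPENSSL_free(buf);
            buf = NULL;
        }
    }
    return buf;
}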
726
727
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
728
0
{
729
0
    return pthread_self();
730
0
}
731
732
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
733
0
{
734
0
    return pthread_equal(a, b);
735
0
}
736
737
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
738
4
{
739
4
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
740
4
    if (__atomic_is_lock_free(sizeof(*val), val)) {
741
4
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
742
4
        return 1;
743
4
    }
744
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
745
    /* This will work for all future Solaris versions. */
746
    if (ret != NULL) {
747
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
748
        return 1;
749
    }
750
# endif
751
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
752
0
        return 0;
753
754
0
    *val += amount;
755
0
    *ret  = *val;
756
757
0
    if (!CRYPTO_THREAD_unlock(lock))
758
0
        return 0;
759
760
0
    return 1;
761
0
}
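A sketch of the typical reference-count use of CRYPTO_atomic_add above; the lock argument is only consulted when the builtin (or Solaris) path is unavailable. struct my_obj and my_obj_up_ref are hypothetical.

struct my_obj {
    int refcnt;
    CRYPTO_RWLOCK *refcnt_lock;          /* fallback for non-lock-free targets */
};

static int my_obj_up_ref(struct my_obj *obj)
{
    int newcnt;

    if (!CRYPTO_atomic_add(&obj->refcnt, 1, &newcnt, obj->refcnt_lock))
        return 0;
    return newcnt > 1;                   /* the object must already be live */
}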
762
763
int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
764
                        CRYPTO_RWLOCK *lock)
765
0
{
766
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
767
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
768
0
        *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
769
0
        return 1;
770
0
    }
771
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
772
    /* This will work for all future Solaris versions. */
773
    if (ret != NULL) {
774
        *ret = atomic_add_64_nv(val, op);
775
        return 1;
776
    }
777
# endif
778
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
779
0
        return 0;
780
0
    *val += op;
781
0
    *ret  = *val;
782
783
0
    if (!CRYPTO_THREAD_unlock(lock))
784
0
        return 0;
785
786
0
    return 1;
787
0
}
788
789
int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
790
                      CRYPTO_RWLOCK *lock)
791
0
{
792
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
793
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
794
0
        *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
795
0
        return 1;
796
0
    }
797
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
798
    /* This will work for all future Solaris versions. */
799
    if (ret != NULL) {
800
        *ret = atomic_and_64_nv(val, op);
801
        return 1;
802
    }
803
# endif
804
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
805
0
        return 0;
806
0
    *val &= op;
807
0
    *ret  = *val;
808
809
0
    if (!CRYPTO_THREAD_unlock(lock))
810
0
        return 0;
811
812
0
    return 1;
813
0
}
814
815
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
816
                     CRYPTO_RWLOCK *lock)
817
4
{
818
4
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
819
4
    if (__atomic_is_lock_free(sizeof(*val), val)) {
820
4
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
821
4
        return 1;
822
4
    }
823
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
824
    /* This will work for all future Solaris versions. */
825
    if (ret != NULL) {
826
        *ret = atomic_or_64_nv(val, op);
827
        return 1;
828
    }
829
# endif
830
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
831
0
        return 0;
832
0
    *val |= op;
833
0
    *ret  = *val;
834
835
0
    if (!CRYPTO_THREAD_unlock(lock))
836
0
        return 0;
837
838
0
    return 1;
839
0
}
840
841
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
842
4.97k
{
843
4.97k
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
844
4.97k
    if (__atomic_is_lock_free(sizeof(*val), val)) {
845
4.97k
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
846
4.97k
        return 1;
847
4.97k
    }
848
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
849
    /* This will work for all future Solaris versions. */
850
    if (ret != NULL) {
851
        *ret = atomic_or_64_nv(val, 0);
852
        return 1;
853
    }
854
# endif
855
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
856
0
        return 0;
857
0
    *ret  = *val;
858
0
    if (!CRYPTO_THREAD_unlock(lock))
859
0
        return 0;
860
861
0
    return 1;
862
0
}
863
864
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
865
350
{
866
350
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
867
350
    if (__atomic_is_lock_free(sizeof(*dst), dst)) {
868
350
        __atomic_store(dst, &val, __ATOMIC_RELEASE);
869
350
        return 1;
870
350
    }
871
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
872
    /* This will work for all future Solaris versions. */
873
    if (dst != NULL) {
874
        atomic_swap_64(dst, val);
875
        return 1;
876
    }
877
# endif
878
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
879
0
        return 0;
880
0
    *dst  = val;
881
0
    if (!CRYPTO_THREAD_unlock(lock))
882
0
        return 0;
883
884
0
    return 1;
885
0
}
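A sketch pairing CRYPTO_atomic_store above with the earlier CRYPTO_atomic_load, e.g. for a 64-bit counter that is reset rarely and read often (hypothetical names; the lock only matters on the fallback path).

static uint64_t my_stat;
static CRYPTO_RWLOCK *my_stat_lock;

static int my_stat_reset(void)
{
    return CRYPTO_atomic_store(&my_stat, 0, my_stat_lock);
}

static int my_stat_read(uint64_t *out)
{
    return CRYPTO_atomic_load(&my_stat, out, my_stat_lock);
}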
886
887
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
888
0
{
889
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
890
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
891
0
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
892
0
        return 1;
893
0
    }
894
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
895
    /* This will work for all future Solaris versions. */
896
    if (ret != NULL) {
897
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
898
        return 1;
899
    }
900
# endif
901
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
902
0
        return 0;
903
0
    *ret  = *val;
904
0
    if (!CRYPTO_THREAD_unlock(lock))
905
0
        return 0;
906
907
0
    return 1;
908
0
}
909
910
# ifndef FIPS_MODULE
911
int openssl_init_fork_handlers(void)
912
0
{
913
0
    return 1;
914
0
}
915
# endif /* FIPS_MODULE */
916
917
int openssl_get_fork_id(void)
918
0
{
919
0
    return getpid();
920
0
}
921
#endif