Coverage Report

Created: 2025-12-31 06:58

/src/openssl34/crypto/threads_pthread.c
Line
Count
Source
1
/*
2
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License 2.0 (the "License").  You may not use
5
 * this file except in compliance with the License.  You can obtain a copy
6
 * in the file LICENSE in the source distribution or at
7
 * https://www.openssl.org/source/license.html
8
 */
9
10
/* We need to use the OPENSSL_fork_*() deprecated APIs */
11
#define OPENSSL_SUPPRESS_DEPRECATED
12
13
#include <openssl/crypto.h>
14
#include <crypto/cryptlib.h>
15
#include "internal/cryptlib.h"
16
#include "internal/rcu.h"
17
#include "rcu_internal.h"
18
19
#if defined(__clang__) && defined(__has_feature)
20
#if __has_feature(thread_sanitizer)
21
#define __SANITIZE_THREAD__
22
#endif
23
#endif
24
25
#if defined(__SANITIZE_THREAD__)
26
#include <sanitizer/tsan_interface.h>
27
#define TSAN_FAKE_UNLOCK(x)          \
28
    __tsan_mutex_pre_unlock((x), 0); \
29
    __tsan_mutex_post_unlock((x), 0)
30
31
#define TSAN_FAKE_LOCK(x)          \
32
    __tsan_mutex_pre_lock((x), 0); \
33
    __tsan_mutex_post_lock((x), 0, 0)
34
#else
35
#define TSAN_FAKE_UNLOCK(x)
36
#define TSAN_FAKE_LOCK(x)
37
#endif
38
39
#if defined(__sun)
40
#include <atomic.h>
41
#endif
42
43
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
44
/*
45
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
46
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
47
 * rather than two; its signature is __atomic_is_lock_free(sizeof(_Atomic(T))).
48
 * All of this makes it impossible to use __atomic_is_lock_free() here.
49
 *
50
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
51
 */
52
#define BROKEN_CLANG_ATOMICS
53
#endif
54
55
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
56
57
#if defined(OPENSSL_SYS_UNIX)
58
#include <sys/types.h>
59
#include <unistd.h>
60
#endif
61
62
#include <assert.h>
63
64
/*
65
 * The Non-Stop KLT thread model currently seems broken in its rwlock
66
 * implementation.
67
 * Likewise, there is a problem with the glibc implementation on riscv.
68
 */
69
#if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \
70
    && !defined(__riscv)
71
#define USE_RWLOCK
72
#endif
73
74
/*
75
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
76
 * other compilers.
77
 *
78
 * Unfortunately, we can't do that with some "generic type", because there's no
79
 * guarantee that the chosen generic type is large enough to cover all cases.
80
 * Therefore, we implement fallbacks for each applicable type, with composed
81
 * names that include the type they handle.
82
 *
83
 * (an anecdote: we previously tried to use |void *| as the generic type, with
84
 * the thought that the pointer itself is the largest type.  However, this is
85
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
86
 *
87
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
88
 * they can map to the correct fallback function.  In the GNU/clang case, that
89
 * parameter is simply ignored.
90
 */
91
92
/*
93
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
94
 * fallback function names.
95
 */
96
typedef void *pvoid;
97
98
#if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
99
    && !defined(USE_ATOMIC_FALLBACKS)
100
76.6M
#define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
101
934
#define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
102
39.4k
#define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
103
998
#define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
104
64
#define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
105
#else
106
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
107
108
#define IMPL_fallback_atomic_load_n(t)                    \
109
    static ossl_inline t fallback_atomic_load_n_##t(t *p) \
110
    {                                                     \
111
        t ret;                                            \
112
                                                          \
113
        pthread_mutex_lock(&atomic_sim_lock);             \
114
        ret = *p;                                         \
115
        pthread_mutex_unlock(&atomic_sim_lock);           \
116
        return ret;                                       \
117
    }
118
IMPL_fallback_atomic_load_n(uint32_t)
119
IMPL_fallback_atomic_load_n(uint64_t)
120
IMPL_fallback_atomic_load_n(pvoid)
121
122
#define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
123
124
#define IMPL_fallback_atomic_store_n(t)                         \
125
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v) \
126
    {                                                           \
127
        t ret;                                                  \
128
                                                                \
129
        pthread_mutex_lock(&atomic_sim_lock);                   \
130
        ret = *p;                                               \
131
        *p = v;                                                 \
132
        pthread_mutex_unlock(&atomic_sim_lock);                 \
133
        return ret;                                             \
134
    }
135
IMPL_fallback_atomic_store_n(uint32_t)
136
137
#define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
138
139
#define IMPL_fallback_atomic_store(t)                             \
140
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \
141
    {                                                             \
142
        pthread_mutex_lock(&atomic_sim_lock);                     \
143
        *p = *v;                                                  \
144
        pthread_mutex_unlock(&atomic_sim_lock);                   \
145
    }
146
IMPL_fallback_atomic_store(pvoid)
147
148
#define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
149
150
/*
151
 * The fallbacks that follow don't need any per type implementation, as
152
 * they are designed for uint64_t only.  If there comes a time when multiple
153
 * types need to be covered, it's relatively easy to refactor them the same
154
 * way as the fallbacks above.
155
 */
156
157
static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
158
{
159
    uint64_t ret;
160
161
    pthread_mutex_lock(&atomic_sim_lock);
162
    *p += v;
163
    ret = *p;
164
    pthread_mutex_unlock(&atomic_sim_lock);
165
    return ret;
166
}
167
168
#define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
169
170
static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
171
{
172
    uint64_t ret;
173
174
    pthread_mutex_lock(&atomic_sim_lock);
175
    *p -= v;
176
    ret = *p;
177
    pthread_mutex_unlock(&atomic_sim_lock);
178
    return ret;
179
}
180
181
#define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
182
#endif
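The branch above selects the compiler builtins when they are available and otherwise falls back to mutex-protected helpers whose names are composed from the operand type by token pasting, which is why every ATOMIC_ macro takes the type as its first parameter. Below is a minimal, self-contained sketch of that same pattern; all demo_* names are hypothetical and are not part of this file or of OpenSSL.

/*
 * Sketch only: a token-pasted, mutex-protected atomic-load fallback in the
 * style used above.  All demo_* names are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t demo_sim_lock = PTHREAD_MUTEX_INITIALIZER;

#define IMPL_demo_atomic_load_n(t)                 \
    static t demo_atomic_load_n_##t(t *p)          \
    {                                              \
        t ret;                                     \
                                                   \
        pthread_mutex_lock(&demo_sim_lock);        \
        ret = *p;                                  \
        pthread_mutex_unlock(&demo_sim_lock);      \
        return ret;                                \
    }

IMPL_demo_atomic_load_n(uint32_t)
IMPL_demo_atomic_load_n(uint64_t)

/* The type parameter routes each call to the matching per-type fallback */
#define DEMO_ATOMIC_LOAD_N(t, p) demo_atomic_load_n_##t(p)

int main(void)
{
    uint32_t a = 7;
    uint64_t b = 42;

    printf("%u %llu\n", (unsigned)DEMO_ATOMIC_LOAD_N(uint32_t, &a),
           (unsigned long long)DEMO_ATOMIC_LOAD_N(uint64_t, &b));
    return 0;
}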
183
184
/*
185
 * This is the core of an rcu lock. It tracks the readers and writers for the
186
 * current quiescence point for a given lock. users is the 64-bit value that
187
 * stores the READERS/ID as defined above
188
 *
189
 */
190
struct rcu_qp {
191
    uint64_t users;
192
};
193
194
struct thread_qp {
195
    struct rcu_qp *qp;
196
    unsigned int depth;
197
    CRYPTO_RCU_LOCK *lock;
198
};
199
200
372
#define MAX_QPS 10
201
/*
202
 * This is the per thread tracking data
203
 * that is assigned to each thread participating
204
 * in an rcu qp
205
 *
206
 * qp points to the qp that it last acquired
207
 *
208
 */
209
struct rcu_thr_data {
210
    struct thread_qp thread_qps[MAX_QPS];
211
};
212
213
/*
214
 * This is the internal version of a CRYPTO_RCU_LOCK;
215
 * it is cast from CRYPTO_RCU_LOCK.
216
 */
217
struct rcu_lock_st {
218
    /* Callbacks to call for next ossl_synchronize_rcu */
219
    struct rcu_cb_item *cb_items;
220
221
    /* The context we are being created against */
222
    OSSL_LIB_CTX *ctx;
223
224
    /* Array of quiescent points for synchronization */
225
    struct rcu_qp *qp_group;
226
227
    /* rcu generation counter for in-order retirement */
228
    uint32_t id_ctr;
229
230
    /* Number of elements in qp_group array */
231
    uint32_t group_count;
232
233
    /* Index of the current qp in the qp_group array */
234
    uint32_t reader_idx;
235
236
    /* value of the next id_ctr value to be retired */
237
    uint32_t next_to_retire;
238
239
    /* index of the next free rcu_qp in the qp_group */
240
    uint32_t current_alloc_idx;
241
242
    /* number of qps in the qp_group array currently being retired */
243
    uint32_t writers_alloced;
244
245
    /* lock protecting write side operations */
246
    pthread_mutex_t write_lock;
247
248
    /* lock protecting updates to writers_alloced/current_alloc_idx */
249
    pthread_mutex_t alloc_lock;
250
251
    /* signal to wake threads waiting on alloc_lock */
252
    pthread_cond_t alloc_signal;
253
254
    /* lock to enforce in-order retirement */
255
    pthread_mutex_t prior_lock;
256
257
    /* signal to wake threads waiting on prior_lock */
258
    pthread_cond_t prior_signal;
259
};
260
261
/* Read side acquisition of the current qp */
262
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
263
64
{
264
64
    uint32_t qp_idx;
265
266
    /* get the current qp index */
267
64
    for (;;) {
268
64
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
269
270
        /*
271
         * Notes on use of __ATOMIC_ACQUIRE
272
         * We need to ensure the following:
273
         * 1) That subsequent operations aren't optimized by hoisting them above
274
         * this operation.  Specifically, we don't want the below re-load of
275
         * qp_idx to get optimized away
276
         * 2) We want to ensure that any updating of reader_idx on the write side
277
         * of the lock is flushed from a local cpu cache so that we see any
278
         * updates prior to the load.  This is a non-issue on cache coherent
279
         * systems like x86, but is relevant on other arches
280
         */
281
64
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
282
64
            __ATOMIC_ACQUIRE);
283
284
        /* if the idx hasn't changed, we're good, else try again */
285
64
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_ACQUIRE))
286
64
            break;
287
288
0
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
289
0
            __ATOMIC_RELAXED);
290
0
    }
291
292
64
    return &lock->qp_group[qp_idx];
293
64
}
294
295
static void ossl_rcu_free_local_data(void *arg)
296
3
{
297
3
    OSSL_LIB_CTX *ctx = arg;
298
3
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
299
3
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
300
301
3
    OPENSSL_free(data);
302
3
    CRYPTO_THREAD_set_local(lkey, NULL);
303
3
}
304
305
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
306
28
{
307
28
    struct rcu_thr_data *data;
308
28
    int i, available_qp = -1;
309
28
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
310
311
    /*
312
     * we're going to access current_qp here so ask the
313
     * processor to fetch it
314
     */
315
28
    data = CRYPTO_THREAD_get_local(lkey);
316
317
28
    if (data == NULL) {
318
2
        data = OPENSSL_zalloc(sizeof(*data));
319
2
        OPENSSL_assert(data != NULL);
320
2
        CRYPTO_THREAD_set_local(lkey, data);
321
2
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
322
2
    }
323
324
308
    for (i = 0; i < MAX_QPS; i++) {
325
280
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
326
28
            available_qp = i;
327
        /* If we have a hold on this lock already, we're good */
328
280
        if (data->thread_qps[i].lock == lock) {
329
0
            data->thread_qps[i].depth++;
330
0
            return;
331
0
        }
332
280
    }
333
334
    /*
335
     * if we get here, then we don't have a hold on this lock yet
336
     */
337
28
    assert(available_qp != -1);
338
339
28
    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
340
28
    data->thread_qps[available_qp].depth = 1;
341
28
    data->thread_qps[available_qp].lock = lock;
342
28
}
343
344
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
345
64
{
346
64
    int i;
347
64
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
348
64
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
349
64
    uint64_t ret;
350
351
64
    assert(data != NULL);
352
353
64
    for (i = 0; i < MAX_QPS; i++) {
354
64
        if (data->thread_qps[i].lock == lock) {
355
            /*
356
             * we have to use __ATOMIC_RELEASE here
357
             * to ensure that all preceding read instructions complete
358
             * before the decrement is visible to ossl_synchronize_rcu
359
             */
360
64
            data->thread_qps[i].depth--;
361
64
            if (data->thread_qps[i].depth == 0) {
362
64
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
363
64
                    (uint64_t)1, __ATOMIC_RELEASE);
364
64
                OPENSSL_assert(ret != UINT64_MAX);
365
64
                data->thread_qps[i].qp = NULL;
366
64
                data->thread_qps[i].lock = NULL;
367
64
            }
368
64
            return;
369
64
        }
370
64
    }
371
    /*
372
     * If we get here, we're trying to unlock a lock that we never acquired -
373
     * that's fatal.
374
     */
375
64
    assert(0);
376
0
}
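For context, this is roughly how the read side above is meant to be driven from other code in the OpenSSL tree. A hedged sketch follows; shared_ptr and struct demo_state are hypothetical, and the ossl_rcu_* calls are the internal API shown in this file, not public functions.

/*
 * Sketch only: reader-side pattern for the internal RCU API above.
 * Assumes it is built inside the OpenSSL source tree; shared_ptr and
 * struct demo_state are hypothetical.
 */
struct demo_state {
    int value;
};

static void *shared_ptr;        /* pointer published by the writer */

static int demo_reader(CRYPTO_RCU_LOCK *rcu)
{
    struct demo_state *st;
    int v;

    ossl_rcu_read_lock(rcu);
    /* acquire-load of the currently published object */
    st = ossl_rcu_uptr_deref(&shared_ptr);
    v = (st != NULL) ? st->value : 0;
    ossl_rcu_read_unlock(rcu);

    /* st must not be dereferenced after the unlock */
    return v;
}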
377
378
/*
379
 * Write side allocation routine to get the current qp
380
 * and replace it with a new one
381
 */
382
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
383
934
{
384
934
    uint32_t current_idx;
385
386
934
    pthread_mutex_lock(&lock->alloc_lock);
387
388
    /*
389
     * we need at least one qp to be available with one
390
     * left over, so that readers can start working on
391
     * one that isn't yet being waited on
392
     */
393
934
    while (lock->group_count - lock->writers_alloced < 2)
394
        /* we have to wait for one to be free */
395
0
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
396
397
934
    current_idx = lock->current_alloc_idx;
398
399
    /* Allocate the qp */
400
934
    lock->writers_alloced++;
401
402
    /* increment the allocation index */
403
934
    lock->current_alloc_idx = (lock->current_alloc_idx + 1) % lock->group_count;
404
405
934
    *curr_id = lock->id_ctr;
406
934
    lock->id_ctr++;
407
408
    /*
409
     * make the current state of everything visible by this release
410
     * when get_hold_current_qp acquires the next qp
411
     */
412
934
    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
413
934
        __ATOMIC_RELEASE);
414
415
    /*
416
     * this should make sure that the new value of reader_idx is visible in
417
     * get_hold_current_qp, directly after incrementing the users count
418
     */
419
934
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
420
934
        __ATOMIC_RELEASE);
421
422
    /* wake up any waiters */
423
934
    pthread_cond_signal(&lock->alloc_signal);
424
934
    pthread_mutex_unlock(&lock->alloc_lock);
425
934
    return &lock->qp_group[current_idx];
426
934
}
427
428
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
429
934
{
430
934
    pthread_mutex_lock(&lock->alloc_lock);
431
934
    lock->writers_alloced--;
432
934
    pthread_cond_signal(&lock->alloc_signal);
433
934
    pthread_mutex_unlock(&lock->alloc_lock);
434
934
}
435
436
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
437
    uint32_t count)
438
516
{
439
516
    struct rcu_qp *new = OPENSSL_zalloc(sizeof(*new) * count);
440
441
516
    lock->group_count = count;
442
516
    return new;
443
516
}
444
445
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
446
710
{
447
710
    pthread_mutex_lock(&lock->write_lock);
448
710
    TSAN_FAKE_UNLOCK(&lock->write_lock);
449
710
}
450
451
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
452
710
{
453
710
    TSAN_FAKE_LOCK(&lock->write_lock);
454
710
    pthread_mutex_unlock(&lock->write_lock);
455
710
}
456
457
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
458
934
{
459
934
    struct rcu_qp *qp;
460
934
    uint64_t count;
461
934
    uint32_t curr_id;
462
934
    struct rcu_cb_item *cb_items, *tmpcb;
463
464
934
    pthread_mutex_lock(&lock->write_lock);
465
934
    cb_items = lock->cb_items;
466
934
    lock->cb_items = NULL;
467
934
    pthread_mutex_unlock(&lock->write_lock);
468
469
934
    qp = update_qp(lock, &curr_id);
470
471
    /* retire in order */
472
934
    pthread_mutex_lock(&lock->prior_lock);
473
934
    while (lock->next_to_retire != curr_id)
474
0
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
475
476
    /*
477
     * wait for the reader count to reach zero
478
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
479
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
480
     * is visible prior to our read.
482
     * However, this is likely only necessary to silence a tsan warning,
483
     * because the read side should not do any write operation
484
     * outside the atomic itself.
484
     */
485
934
    do {
486
934
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
487
934
    } while (count != (uint64_t)0);
488
489
934
    lock->next_to_retire++;
490
934
    pthread_cond_broadcast(&lock->prior_signal);
491
934
    pthread_mutex_unlock(&lock->prior_lock);
492
493
934
    retire_qp(lock, qp);
494
495
    /* handle any callbacks that we have */
496
1.13k
    while (cb_items != NULL) {
497
200
        tmpcb = cb_items;
498
200
        cb_items = cb_items->next;
499
200
        tmpcb->fn(tmpcb->data);
500
200
        OPENSSL_free(tmpcb);
501
200
    }
502
934
}
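A hedged sketch of the matching write-side sequence: publish a new object, wait for readers of the old one to drain, then free it. Same assumptions and hypothetical names (shared_ptr, struct demo_state) as in the reader sketch above.

/*
 * Sketch only: writer-side publish/synchronize/free pattern for the
 * internal RCU API above.  Hypothetical names as in the reader sketch.
 */
static int demo_writer(CRYPTO_RCU_LOCK *rcu, int new_value)
{
    struct demo_state *newst, *oldst;

    newst = OPENSSL_zalloc(sizeof(*newst));
    if (newst == NULL)
        return 0;
    newst->value = new_value;

    ossl_rcu_write_lock(rcu);
    oldst = shared_ptr;
    /* release-store: readers now see newst via ossl_rcu_uptr_deref */
    ossl_rcu_assign_uptr(&shared_ptr, (void **)&newst);
    ossl_rcu_write_unlock(rcu);

    /* block until no reader can still hold a reference to oldst */
    ossl_synchronize_rcu(rcu);
    OPENSSL_free(oldst);
    return 1;
}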
503
504
/*
505
 * Note: This call assumes it's made under the protection of
506
 * ossl_rcu_write_lock
507
 */
508
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
509
200
{
510
200
    struct rcu_cb_item *new = OPENSSL_zalloc(sizeof(*new));
511
512
200
    if (new == NULL)
513
0
        return 0;
514
515
200
    new->data = data;
516
200
    new->fn = cb;
517
518
200
    new->next = lock->cb_items;
519
200
    lock->cb_items = new;
520
521
200
    return 1;
522
200
}
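When blocking in ossl_synchronize_rcu is undesirable, the callback list above lets the writer defer the free to the next synchronize call instead. A hedged sketch with the same hypothetical names; note that, per the comment above, ossl_rcu_call must be made while the write lock is held.

/*
 * Sketch only: defer freeing the old object to the next
 * ossl_synchronize_rcu() call instead of freeing it inline.
 */
static void demo_free_cb(void *data)
{
    OPENSSL_free(data);
}

static int demo_writer_deferred(CRYPTO_RCU_LOCK *rcu, struct demo_state *newst)
{
    struct demo_state *oldst;
    int ok;

    ossl_rcu_write_lock(rcu);
    oldst = shared_ptr;
    ossl_rcu_assign_uptr(&shared_ptr, (void **)&newst);
    /* registered under the write lock, as required */
    ok = ossl_rcu_call(rcu, demo_free_cb, oldst);
    ossl_rcu_write_unlock(rcu);

    return ok;
}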
523
524
void *ossl_rcu_uptr_deref(void **p)
525
76.6M
{
526
76.6M
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
527
76.6M
}
528
529
void ossl_rcu_assign_uptr(void **p, void **v)
530
39.4k
{
531
39.4k
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
532
39.4k
}
533
534
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
535
516
{
536
516
    struct rcu_lock_st *new;
537
538
    /*
539
     * We need a minimum of 2 qps
540
     */
541
516
    if (num_writers < 2)
542
516
        num_writers = 2;
543
544
516
    ctx = ossl_lib_ctx_get_concrete(ctx);
545
516
    if (ctx == NULL)
546
0
        return 0;
547
548
516
    new = OPENSSL_zalloc(sizeof(*new));
549
516
    if (new == NULL)
550
0
        return NULL;
551
552
516
    new->ctx = ctx;
553
516
    pthread_mutex_init(&new->write_lock, NULL);
554
516
    pthread_mutex_init(&new->prior_lock, NULL);
555
516
    pthread_mutex_init(&new->alloc_lock, NULL);
556
516
    pthread_cond_init(&new->prior_signal, NULL);
557
516
    pthread_cond_init(&new->alloc_signal, NULL);
558
559
516
    new->qp_group = allocate_new_qp_group(new, num_writers);
560
516
    if (new->qp_group == NULL) {
561
0
        OPENSSL_free(new);
562
0
        new = NULL;
563
0
    }
564
565
516
    return new;
566
516
}
567
568
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
569
350
{
570
350
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
571
572
350
    if (lock == NULL)
573
0
        return;
574
575
    /* make sure we're synchronized */
576
350
    ossl_synchronize_rcu(rlock);
577
578
350
    OPENSSL_free(rlock->qp_group);
579
    /* There should only be a single qp left now */
580
350
    OPENSSL_free(rlock);
581
350
}
582
583
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
584
10.3M
{
585
10.3M
#ifdef USE_RWLOCK
586
10.3M
    CRYPTO_RWLOCK *lock;
587
588
10.3M
    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
589
        /* Don't set error, to avoid recursion blowup. */
590
0
        return NULL;
591
592
10.3M
    if (pthread_rwlock_init(lock, NULL) != 0) {
593
0
        OPENSSL_free(lock);
594
0
        return NULL;
595
0
    }
596
#else
597
    pthread_mutexattr_t attr;
598
    CRYPTO_RWLOCK *lock;
599
600
    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
601
        /* Don't set error, to avoid recursion blowup. */
602
        return NULL;
603
604
    /*
605
     * We don't use recursive mutexes, but try to catch errors if we do.
606
     */
607
    pthread_mutexattr_init(&attr);
608
#if !defined(__TANDEM) && !defined(_SPT_MODEL_)
609
#if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
610
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
611
#endif
612
#else
613
    /* The SPT Thread Library does not define MUTEX attributes. */
614
#endif
615
616
    if (pthread_mutex_init(lock, &attr) != 0) {
617
        pthread_mutexattr_destroy(&attr);
618
        OPENSSL_free(lock);
619
        return NULL;
620
    }
621
622
    pthread_mutexattr_destroy(&attr);
623
#endif
624
625
10.3M
    return lock;
626
10.3M
}
627
628
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
629
1.11G
{
630
1.11G
#ifdef USE_RWLOCK
631
1.11G
    if (pthread_rwlock_rdlock(lock) != 0)
632
0
        return 0;
633
#else
634
    if (pthread_mutex_lock(lock) != 0) {
635
        assert(errno != EDEADLK && errno != EBUSY);
636
        return 0;
637
    }
638
#endif
639
640
1.11G
    return 1;
641
1.11G
}
642
643
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
644
57.1M
{
645
57.1M
#ifdef USE_RWLOCK
646
57.1M
    if (pthread_rwlock_wrlock(lock) != 0)
647
0
        return 0;
648
#else
649
    if (pthread_mutex_lock(lock) != 0) {
650
        assert(errno != EDEADLK && errno != EBUSY);
651
        return 0;
652
    }
653
#endif
654
655
57.1M
    return 1;
656
57.1M
}
657
658
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
659
1.31G
{
660
1.31G
#ifdef USE_RWLOCK
661
1.31G
    if (pthread_rwlock_unlock(lock) != 0)
662
0
        return 0;
663
#else
664
    if (pthread_mutex_unlock(lock) != 0) {
665
        assert(errno != EPERM);
666
        return 0;
667
    }
668
#endif
669
670
1.31G
    return 1;
671
1.31G
}
672
673
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
674
10.3M
{
675
10.3M
    if (lock == NULL)
676
2.35k
        return;
677
678
10.3M
#ifdef USE_RWLOCK
679
10.3M
    pthread_rwlock_destroy(lock);
680
#else
681
    pthread_mutex_destroy(lock);
682
#endif
683
10.3M
    OPENSSL_free(lock);
684
685
10.3M
    return;
686
10.3M
}
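The CRYPTO_THREAD_* lock functions above are public API. A hedged usage sketch follows; the guarded counter and all demo_* names are hypothetical.

/*
 * Sketch only: guarding a shared counter with the public rwlock API.
 */
#include <openssl/crypto.h>

static CRYPTO_RWLOCK *demo_lock;
static long demo_counter;

static int demo_init(void)
{
    demo_lock = CRYPTO_THREAD_lock_new();
    return demo_lock != NULL;
}

static int demo_bump(void)
{
    if (!CRYPTO_THREAD_write_lock(demo_lock))
        return 0;
    demo_counter++;
    return CRYPTO_THREAD_unlock(demo_lock);
}

static int demo_read(long *out)
{
    if (!CRYPTO_THREAD_read_lock(demo_lock))
        return 0;
    *out = demo_counter;
    return CRYPTO_THREAD_unlock(demo_lock);
}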
687
688
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
689
2.67G
{
690
2.67G
    if (pthread_once(once, init) != 0)
691
0
        return 0;
692
693
2.67G
    return 1;
694
2.67G
}
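CRYPTO_THREAD_run_once is a thin wrapper over pthread_once; the usual pattern pairs it with a CRYPTO_ONCE_STATIC_INIT initializer. A hedged sketch, with hypothetical demo_* names:

/*
 * Sketch only: one-time initialisation with CRYPTO_THREAD_run_once.
 */
#include <openssl/crypto.h>

static CRYPTO_ONCE demo_once = CRYPTO_ONCE_STATIC_INIT;
static CRYPTO_RWLOCK *demo_once_lock;

static void demo_once_init(void)
{
    demo_once_lock = CRYPTO_THREAD_lock_new();
}

static int demo_get_lock(CRYPTO_RWLOCK **out)
{
    /* all callers race to here; demo_once_init runs exactly once */
    if (!CRYPTO_THREAD_run_once(&demo_once, demo_once_init))
        return 0;
    *out = demo_once_lock;
    return *out != NULL;
}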
695
696
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
697
1.54k
{
698
699
1.54k
#ifndef FIPS_MODULE
700
1.54k
    if (!ossl_init_thread())
701
0
        return 0;
702
1.54k
#endif
703
704
1.54k
    if (pthread_key_create(key, cleanup) != 0)
705
0
        return 0;
706
707
1.54k
    return 1;
708
1.54k
}
709
710
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
711
2.30G
{
712
2.30G
    return pthread_getspecific(*key);
713
2.30G
}
714
715
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
716
1.78k
{
717
1.78k
    if (pthread_setspecific(*key, val) != 0)
718
0
        return 0;
719
720
1.78k
    return 1;
721
1.78k
}
722
723
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
724
1.38k
{
725
1.38k
    if (pthread_key_delete(*key) != 0)
726
0
        return 0;
727
728
1.38k
    return 1;
729
1.38k
}
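The CRYPTO_THREAD_*_local functions wrap pthread_key_create/getspecific/setspecific, with the cleanup callback invoked at thread exit. A hedged sketch of lazily allocated per-thread state (hypothetical demo_* names):

/*
 * Sketch only: lazily allocated per-thread buffer using the
 * CRYPTO_THREAD_*_local wrappers.
 */
#include <openssl/crypto.h>

static CRYPTO_THREAD_LOCAL demo_key;   /* create once, e.g. via run_once */

static void demo_key_cleanup(void *p)
{
    OPENSSL_free(p);                   /* runs at thread exit */
}

static int demo_key_create(void)
{
    return CRYPTO_THREAD_init_local(&demo_key, demo_key_cleanup);
}

static void *demo_get_buffer(void)
{
    void *buf = CRYPTO_THREAD_get_local(&demo_key);

    if (buf == NULL) {
        buf = OPENSSL_zalloc(64);
        if (buf == NULL || !CRYPTO_THREAD_set_local(&demo_key, buf)) {
            OPENSSL_free(buf);
            return NULL;
        }
    }
    return buf;
}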
730
731
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
732
208k
{
733
208k
    return pthread_self();
734
208k
}
735
736
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
737
12.5k
{
738
12.5k
    return pthread_equal(a, b);
739
12.5k
}
740
741
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
742
11.7M
{
743
11.7M
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
744
11.7M
    if (__atomic_is_lock_free(sizeof(*val), val)) {
745
11.7M
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
746
11.7M
        return 1;
747
11.7M
    }
748
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
749
    /* This will work for all future Solaris versions. */
750
    if (ret != NULL) {
751
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
752
        return 1;
753
    }
754
#endif
755
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
756
0
        return 0;
757
758
0
    *val += amount;
759
0
    *ret = *val;
760
761
0
    if (!CRYPTO_THREAD_unlock(lock))
762
0
        return 0;
763
764
0
    return 1;
765
0
}
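CRYPTO_atomic_add uses the lock-free builtin where possible and otherwise takes the supplied rwlock, so callers should always pass a lock that is valid for the fallback path. A hedged reference-counting sketch (hypothetical struct and names):

/*
 * Sketch only: reference counting with CRYPTO_atomic_add.  The lock is
 * only taken on platforms without lock-free atomics.
 */
#include <openssl/crypto.h>

struct demo_refcounted {
    int refcount;
    CRYPTO_RWLOCK *lock;
};

static int demo_up_ref(struct demo_refcounted *obj)
{
    int newcount;

    return CRYPTO_atomic_add(&obj->refcount, 1, &newcount, obj->lock);
}

static int demo_down_ref(struct demo_refcounted *obj, int *was_last)
{
    int newcount;

    if (!CRYPTO_atomic_add(&obj->refcount, -1, &newcount, obj->lock))
        return 0;
    *was_last = (newcount == 0);  /* caller frees the object when set */
    return 1;
}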
766
767
int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
768
    CRYPTO_RWLOCK *lock)
769
0
{
770
0
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
771
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
772
0
        *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
773
0
        return 1;
774
0
    }
775
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
776
    /* This will work for all future Solaris versions. */
777
    if (ret != NULL) {
778
        *ret = atomic_add_64_nv(val, op);
779
        return 1;
780
    }
781
#endif
782
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
783
0
        return 0;
784
0
    *val += op;
785
0
    *ret = *val;
786
787
0
    if (!CRYPTO_THREAD_unlock(lock))
788
0
        return 0;
789
790
0
    return 1;
791
0
}
792
793
int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
794
    CRYPTO_RWLOCK *lock)
795
0
{
796
0
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
797
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
798
0
        *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
799
0
        return 1;
800
0
    }
801
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
802
    /* This will work for all future Solaris versions. */
803
    if (ret != NULL) {
804
        *ret = atomic_and_64_nv(val, op);
805
        return 1;
806
    }
807
#endif
808
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
809
0
        return 0;
810
0
    *val &= op;
811
0
    *ret = *val;
812
813
0
    if (!CRYPTO_THREAD_unlock(lock))
814
0
        return 0;
815
816
0
    return 1;
817
0
}
818
819
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
820
    CRYPTO_RWLOCK *lock)
821
712
{
822
712
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
823
712
    if (__atomic_is_lock_free(sizeof(*val), val)) {
824
712
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
825
712
        return 1;
826
712
    }
827
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
828
    /* This will work for all future Solaris versions. */
829
    if (ret != NULL) {
830
        *ret = atomic_or_64_nv(val, op);
831
        return 1;
832
    }
833
#endif
834
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
835
0
        return 0;
836
0
    *val |= op;
837
0
    *ret = *val;
838
839
0
    if (!CRYPTO_THREAD_unlock(lock))
840
0
        return 0;
841
842
0
    return 1;
843
0
}
844
845
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
846
3.12G
{
847
3.12G
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
848
3.12G
    if (__atomic_is_lock_free(sizeof(*val), val)) {
849
3.12G
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
850
3.12G
        return 1;
851
3.12G
    }
852
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
853
    /* This will work for all future Solaris versions. */
854
    if (ret != NULL) {
855
        *ret = atomic_or_64_nv(val, 0);
856
        return 1;
857
    }
858
#endif
859
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
860
0
        return 0;
861
0
    *ret = *val;
862
0
    if (!CRYPTO_THREAD_unlock(lock))
863
0
        return 0;
864
865
0
    return 1;
866
0
}
867
868
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
869
38.9k
{
870
38.9k
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
871
38.9k
    if (__atomic_is_lock_free(sizeof(*dst), dst)) {
872
38.9k
        __atomic_store(dst, &val, __ATOMIC_RELEASE);
873
38.9k
        return 1;
874
38.9k
    }
875
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
876
    /* This will work for all future Solaris versions. */
877
    if (dst != NULL) {
878
        atomic_swap_64(dst, val);
879
        return 1;
880
    }
881
#endif
882
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
883
0
        return 0;
884
0
    *dst = val;
885
0
    if (!CRYPTO_THREAD_unlock(lock))
886
0
        return 0;
887
888
0
    return 1;
889
0
}
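CRYPTO_atomic_load and CRYPTO_atomic_store follow the same shape for 64-bit values, taking a read or write lock respectively on the fallback path. A hedged sketch of a statistics counter (hypothetical names):

/*
 * Sketch only: a 64-bit statistics counter read and reset with the
 * CRYPTO_atomic_* helpers above.
 */
#include <openssl/crypto.h>
#include <stdint.h>

static uint64_t demo_stat;
static CRYPTO_RWLOCK *demo_stat_lock;   /* only used on the fallback path */

static int demo_stat_snapshot_and_reset(uint64_t *out)
{
    if (!CRYPTO_atomic_load(&demo_stat, out, demo_stat_lock))
        return 0;
    /* note: the load and the store are two separate atomic steps */
    return CRYPTO_atomic_store(&demo_stat, 0, demo_stat_lock);
}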
890
891
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
892
0
{
893
0
#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
894
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
895
0
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
896
0
        return 1;
897
0
    }
898
#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
899
    /* This will work for all future Solaris versions. */
900
    if (ret != NULL) {
901
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
902
        return 1;
903
    }
904
#endif
905
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
906
0
        return 0;
907
0
    *ret = *val;
908
0
    if (!CRYPTO_THREAD_unlock(lock))
909
0
        return 0;
910
911
0
    return 1;
912
0
}
913
914
#ifndef FIPS_MODULE
915
int openssl_init_fork_handlers(void)
916
0
{
917
0
    return 1;
918
0
}
919
#endif /* FIPS_MODULE */
920
921
int openssl_get_fork_id(void)
922
116k
{
923
116k
    return getpid();
924
116k
}
925
#endif