Coverage Report

Created: 2026-04-01 06:39

/src/openssl34/crypto/threads_pthread.c
 Line|  Count|Source
    1|       |/*
    2|       | * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
    3|       | *
    4|       | * Licensed under the Apache License 2.0 (the "License").  You may not use
    5|       | * this file except in compliance with the License.  You can obtain a copy
    6|       | * in the file LICENSE in the source distribution or at
    7|       | * https://www.openssl.org/source/license.html
    8|       | */
    9|       |
   10|       |/* We need to use the OPENSSL_fork_*() deprecated APIs */
   11|       |#define OPENSSL_SUPPRESS_DEPRECATED
   12|       |
   13|       |#include <openssl/crypto.h>
   14|       |#include <crypto/cryptlib.h>
   15|       |#include "internal/cryptlib.h"
   16|       |#include "internal/rcu.h"
   17|       |#include "rcu_internal.h"
   18|       |
   19|       |#if defined(__clang__) && defined(__has_feature)
   20|       |#if __has_feature(thread_sanitizer)
   21|       |#define __SANITIZE_THREAD__
   22|       |#endif
   23|       |#endif
   24|       |
   25|       |#if defined(__SANITIZE_THREAD__)
   26|       |#include <sanitizer/tsan_interface.h>
   27|       |#define TSAN_FAKE_UNLOCK(x)          \
   28|       |    __tsan_mutex_pre_unlock((x), 0); \
   29|       |    __tsan_mutex_post_unlock((x), 0)
   30|       |
   31|       |#define TSAN_FAKE_LOCK(x)          \
   32|       |    __tsan_mutex_pre_lock((x), 0); \
   33|       |    __tsan_mutex_post_lock((x), 0, 0)
   34|       |#else
   35|       |#define TSAN_FAKE_UNLOCK(x)
   36|       |#define TSAN_FAKE_LOCK(x)
   37|       |#endif
   38|       |
   39|       |#if defined(__sun)
   40|       |#include <atomic.h>
   41|       |#endif
   42|       |
   43|       |#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
   44|       |/*
   45|       | * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
   46|       | * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
   47|       | * rather than two, i.e. it has the signature __atomic_is_lock_free(sizeof(_Atomic(T))).
   48|       | * All of this makes it impossible to use __atomic_is_lock_free here.
   49|       | *
   50|       | * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
   51|       | */
   52|       |#define BROKEN_CLANG_ATOMICS
   53|       |#endif
   54|       |
   55|       |#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
   56|       |
   57|       |#if defined(OPENSSL_SYS_UNIX)
   58|       |#include <sys/types.h>
   59|       |#include <unistd.h>
   60|       |#endif
   61|       |
   62|       |#include <assert.h>
   63|       |
   64|       |/*
   65|       | * The Non-Stop KLT thread model currently seems broken in its rwlock
   66|       | * implementation.
   67|       | * Likewise, there is a problem with the glibc implementation on riscv.
   68|       | */
   69|       |#if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \
   70|       |    && !defined(__riscv)
   71|       |#define USE_RWLOCK
   72|       |#endif
   73|       |
   74|       |/*
   75|       | * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
   76|       | * other compilers.
   77|       | *
   78|       | * Unfortunately, we can't do that with some "generic type", because there's no
   79|       | * guarantee that the chosen generic type is large enough to cover all cases.
   80|       | * Therefore, we implement fallbacks for each applicable type, with composed
   81|       | * names that include the type they handle.
   82|       | *
   83|       | * (an anecdote: we previously tried to use |void *| as the generic type, with
   84|       | * the thought that the pointer itself is the largest type.  However, this is
   85|       | * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
   86|       | *
   87|       | * All applicable ATOMIC_ macros take the intended type as first parameter, so
   88|       | * they can map to the correct fallback function.  In the GNU/clang case, that
   89|       | * parameter is simply ignored.
   90|       | */
   91|       |
   92|       |/*
   93|       | * Internal types used with the ATOMIC_ macros, to make it possible to compose
   94|       | * fallback function names.
   95|       | */
   96|       |typedef void *pvoid;
   97|       |
   98|       |#if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
   99|       |    && !defined(USE_ATOMIC_FALLBACKS)
  100|  79.6M|#define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
  101|    933|#define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
  102|  39.9k|#define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
  103|  1.01k|#define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
  104|     80|#define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
  105|       |#else
  106|       |static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
  107|       |
  108|       |#define IMPL_fallback_atomic_load_n(t)                    \
  109|       |    static ossl_inline t fallback_atomic_load_n_##t(t *p) \
  110|       |    {                                                     \
  111|       |        t ret;                                            \
  112|       |                                                          \
  113|       |        pthread_mutex_lock(&atomic_sim_lock);             \
  114|       |        ret = *p;                                         \
  115|       |        pthread_mutex_unlock(&atomic_sim_lock);           \
  116|       |        return ret;                                       \
  117|       |    }
  118|       |IMPL_fallback_atomic_load_n(uint32_t)
  119|       |IMPL_fallback_atomic_load_n(uint64_t)
  120|       |IMPL_fallback_atomic_load_n(pvoid)
  121|       |
  122|       |#define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
  123|       |
  124|       |#define IMPL_fallback_atomic_store_n(t)                         \
  125|       |    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v) \
  126|       |    {                                                           \
  127|       |        t ret;                                                  \
  128|       |                                                                \
  129|       |        pthread_mutex_lock(&atomic_sim_lock);                   \
  130|       |        ret = *p;                                               \
  131|       |        *p = v;                                                 \
  132|       |        pthread_mutex_unlock(&atomic_sim_lock);                 \
  133|       |        return ret;                                             \
  134|       |    }
  135|       |IMPL_fallback_atomic_store_n(uint32_t)
  136|       |
  137|       |#define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
  138|       |
  139|       |#define IMPL_fallback_atomic_store(t)                             \
  140|       |    static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \
  141|       |    {                                                             \
  142|       |        pthread_mutex_lock(&atomic_sim_lock);                     \
  143|       |        *p = *v;                                                  \
  144|       |        pthread_mutex_unlock(&atomic_sim_lock);                   \
  145|       |    }
  146|       |IMPL_fallback_atomic_store(pvoid)
  147|       |
  148|       |#define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
  149|       |
  150|       |/*
  151|       | * The fallbacks that follow don't need any per type implementation, as
  152|       | * they are designed for uint64_t only.  If there comes a time when multiple
  153|       | * types need to be covered, it's relatively easy to refactor them the same
  154|       | * way as the fallbacks above.
  155|       | */
  156|       |
  157|       |static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
  158|       |{
  159|       |    uint64_t ret;
  160|       |
  161|       |    pthread_mutex_lock(&atomic_sim_lock);
  162|       |    *p += v;
  163|       |    ret = *p;
  164|       |    pthread_mutex_unlock(&atomic_sim_lock);
  165|       |    return ret;
  166|       |}
  167|       |
  168|       |#define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
  169|       |
  170|       |static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
  171|       |{
  172|       |    uint64_t ret;
  173|       |
  174|       |    pthread_mutex_lock(&atomic_sim_lock);
  175|       |    *p -= v;
  176|       |    ret = *p;
  177|       |    pthread_mutex_unlock(&atomic_sim_lock);
  178|       |    return ret;
  179|       |}
  180|       |
  181|       |#define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
  182|       |#endif
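
Note: when the GNU/clang builtins are unavailable, the #else branch above
emulates every atomic with a single global pthread mutex, trading
scalability for portability. A standalone sketch of that fallback pattern
(hypothetical names, not part of the file above):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t sim_lock = PTHREAD_MUTEX_INITIALIZER;

    /* emulate __atomic_add_fetch with a mutex-protected read-modify-write */
    static uint64_t sim_add_fetch(uint64_t *p, uint64_t v)
    {
        uint64_t ret;

        pthread_mutex_lock(&sim_lock);
        *p += v;
        ret = *p;
        pthread_mutex_unlock(&sim_lock);
        return ret;
    }

    int main(void)
    {
        uint64_t counter = 0;

        printf("%llu\n", (unsigned long long)sim_add_fetch(&counter, 5));
        return 0;
    }
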
  183|       |
  184|       |/*
  185|       | * This is the core of an rcu lock. It tracks the readers and writers for the
  186|       | * current quiescence point for a given lock. users is the 64-bit value that
  187|       | * stores the READERS/ID as defined above.
  188|       | *
  189|       | */
  190|       |struct rcu_qp {
  191|       |    uint64_t users;
  192|       |};
  193|       |
  194|       |struct thread_qp {
  195|       |    struct rcu_qp *qp;
  196|       |    unsigned int depth;
  197|       |    CRYPTO_RCU_LOCK *lock;
  198|       |};
  199|       |
  200|    509|#define MAX_QPS 10
  201|       |/*
  202|       | * This is the per thread tracking data
  203|       | * that is assigned to each thread participating
  204|       | * in an rcu qp
  205|       | *
  206|       | * qp points to the qp that it last acquired
  207|       | *
  208|       | */
  209|       |struct rcu_thr_data {
  210|       |    struct thread_qp thread_qps[MAX_QPS];
  211|       |};
  212|       |
  213|       |/*
  214|       | * This is the internal version of a CRYPTO_RCU_LOCK
  215|       | * it is cast from CRYPTO_RCU_LOCK
  216|       | */
  217|       |struct rcu_lock_st {
  218|       |    /* Callbacks to call for next ossl_synchronize_rcu */
  219|       |    struct rcu_cb_item *cb_items;
  220|       |
  221|       |    /* The context we are being created against */
  222|       |    OSSL_LIB_CTX *ctx;
  223|       |
  224|       |    /* Array of quiescent points for synchronization */
  225|       |    struct rcu_qp *qp_group;
  226|       |
  227|       |    /* rcu generation counter for in-order retirement */
  228|       |    uint32_t id_ctr;
  229|       |
  230|       |    /* Number of elements in qp_group array */
  231|       |    uint32_t group_count;
  232|       |
  233|       |    /* Index of the current qp in the qp_group array */
  234|       |    uint32_t reader_idx;
  235|       |
  236|       |    /* value of the next id_ctr value to be retired */
  237|       |    uint32_t next_to_retire;
  238|       |
  239|       |    /* index of the next free rcu_qp in the qp_group */
  240|       |    uint32_t current_alloc_idx;
  241|       |
  242|       |    /* number of qp's in qp_group array currently being retired */
  243|       |    uint32_t writers_alloced;
  244|       |
  245|       |    /* lock protecting write side operations */
  246|       |    pthread_mutex_t write_lock;
  247|       |
  248|       |    /* lock protecting updates to writers_alloced/current_alloc_idx */
  249|       |    pthread_mutex_t alloc_lock;
  250|       |
  251|       |    /* signal to wake threads waiting on alloc_lock */
  252|       |    pthread_cond_t alloc_signal;
  253|       |
  254|       |    /* lock to enforce in-order retirement */
  255|       |    pthread_mutex_t prior_lock;
  256|       |
  257|       |    /* signal to wake threads waiting on prior_lock */
  258|       |    pthread_cond_t prior_signal;
  259|       |};
  260|       |
  261|       |/* Read side acquisition of the current qp */
  262|       |static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
  263|     80|{
  264|     80|    uint32_t qp_idx;
  265|       |
  266|       |    /* get the current qp index */
  267|     80|    for (;;) {
  268|     80|        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
  269|       |
  270|       |        /*
  271|       |         * Notes on use of __ATOMIC_ACQUIRE
  272|       |         * We need to ensure the following:
  273|       |         * 1) That subsequent operations aren't optimized by hoisting them above
  274|       |         * this operation.  Specifically, we don't want the below re-load of
  275|       |         * qp_idx to get optimized away
  276|       |         * 2) We want to ensure that any updating of reader_idx on the write side
  277|       |         * of the lock is flushed from a local cpu cache so that we see any
  278|       |         * updates prior to the load.  This is a non-issue on cache coherent
  279|       |         * systems like x86, but is relevant on other arches
  280|       |         */
  281|     80|        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
  282|     80|                         __ATOMIC_ACQUIRE);
  283|       |
  284|       |        /* if the idx hasn't changed, we're good, else try again */
  285|     80|        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_ACQUIRE))
  286|     80|            break;
  287|       |
  288|      0|        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
  289|      0|                         __ATOMIC_RELAXED);
  290|      0|    }
  291|       |
  292|     80|    return &lock->qp_group[qp_idx];
  293|     80|}
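
Note: the loop above is a load/mark/validate pattern: a reader loads
reader_idx, marks the corresponding qp busy by incrementing users, then
re-reads reader_idx; if a writer swapped the index in between, the reader
undoes its increment and retries. A compact sketch of the same idea using
C11 atomics (hypothetical, for illustration only):

    #include <stdatomic.h>
    #include <stdint.h>

    struct qp { _Atomic uint64_t users; };

    /* pick the current qp and pin it so a writer must wait for us */
    struct qp *pin_current(struct qp *group, _Atomic uint32_t *idx)
    {
        uint32_t i;

        for (;;) {
            i = atomic_load_explicit(idx, memory_order_relaxed);
            atomic_fetch_add_explicit(&group[i].users, 1,
                                      memory_order_acquire);
            /* validate: if the writer moved the index, undo and retry */
            if (i == atomic_load_explicit(idx, memory_order_acquire))
                return &group[i];
            atomic_fetch_sub_explicit(&group[i].users, 1,
                                      memory_order_relaxed);
        }
    }
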
  294|       |
  295|       |static void ossl_rcu_free_local_data(void *arg)
  296|      3|{
  297|      3|    OSSL_LIB_CTX *ctx = arg;
  298|      3|    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
  299|      3|    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
  300|       |
  301|      3|    OPENSSL_free(data);
  302|      3|    CRYPTO_THREAD_set_local(lkey, NULL);
  303|      3|}
  304|       |
  305|       |void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
  306|     39|{
  307|     39|    struct rcu_thr_data *data;
  308|     39|    int i, available_qp = -1;
  309|     39|    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
  310|       |
  311|       |    /*
  312|       |     * we're going to access current_qp here so ask the
  313|       |     * processor to fetch it
  314|       |     */
  315|     39|    data = CRYPTO_THREAD_get_local(lkey);
  316|       |
  317|     39|    if (data == NULL) {
  318|      2|        data = OPENSSL_zalloc(sizeof(*data));
  319|      2|        OPENSSL_assert(data != NULL);
  320|      2|        CRYPTO_THREAD_set_local(lkey, data);
  321|      2|        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
  322|      2|    }
  323|       |
  324|    429|    for (i = 0; i < MAX_QPS; i++) {
  325|    390|        if (data->thread_qps[i].qp == NULL && available_qp == -1)
  326|     39|            available_qp = i;
  327|       |        /* If we have a hold on this lock already, we're good */
  328|    390|        if (data->thread_qps[i].lock == lock) {
  329|      0|            data->thread_qps[i].depth++;
  330|      0|            return;
  331|      0|        }
  332|    390|    }
  333|       |
  334|       |    /*
  335|       |     * if we get here, then we don't have a hold on this lock yet
  336|       |     */
  337|     39|    assert(available_qp != -1);
  338|       |
  339|     39|    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
  340|     39|    data->thread_qps[available_qp].depth = 1;
  341|     39|    data->thread_qps[available_qp].lock = lock;
  342|     39|}
  343|       |
  344|       |void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
  345|     80|{
  346|     80|    int i;
  347|     80|    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
  348|     80|    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
  349|     80|    uint64_t ret;
  350|       |
  351|     80|    assert(data != NULL);
  352|       |
  353|     80|    for (i = 0; i < MAX_QPS; i++) {
  354|     80|        if (data->thread_qps[i].lock == lock) {
  355|       |            /*
  356|       |             * we have to use __ATOMIC_RELEASE here
  357|       |             * to ensure that all preceding read instructions complete
  358|       |             * before the decrement is visible to ossl_synchronize_rcu
  359|       |             */
  360|     80|            data->thread_qps[i].depth--;
  361|     80|            if (data->thread_qps[i].depth == 0) {
  362|     80|                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
  363|     80|                                       (uint64_t)1, __ATOMIC_RELEASE);
  364|     80|                OPENSSL_assert(ret != UINT64_MAX);
  365|     80|                data->thread_qps[i].qp = NULL;
  366|     80|                data->thread_qps[i].lock = NULL;
  367|     80|            }
  368|     80|            return;
  369|     80|        }
  370|     80|    }
  371|       |    /*
  372|       |     * If we get here, we're trying to unlock a lock that we never acquired -
  373|       |     * that's fatal.
  374|       |     */
  375|     80|    assert(0);
  376|      0|}
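
Note: a typical read-side critical section brackets an ossl_rcu_deref()
of the shared pointer with the two calls above. A sketch of that usage,
assuming an invented `struct conf` payload (illustrative only; it depends
on the internal "internal/rcu.h" API, so it does not compile outside the
OpenSSL tree):

    #include "internal/rcu.h"

    struct conf;    /* hypothetical shared object */

    void reader(CRYPTO_RCU_LOCK *lock, struct conf **shared_conf)
    {
        struct conf *c;

        ossl_rcu_read_lock(lock);
        c = ossl_rcu_deref(shared_conf);  /* acquire-load of the pointer */
        /* ... read-only use of c; it cannot be freed until we unlock ... */
        ossl_rcu_read_unlock(lock);
    }
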
  377|       |
  378|       |/*
  379|       | * Write side allocation routine to get the current qp
  380|       | * and replace it with a new one
  381|       | */
  382|       |static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
  383|    933|{
  384|    933|    uint32_t current_idx;
  385|       |
  386|    933|    pthread_mutex_lock(&lock->alloc_lock);
  387|       |
  388|       |    /*
  389|       |     * we need at least one qp to be available with one
  390|       |     * left over, so that readers can start working on
  391|       |     * one that isn't yet being waited on
  392|       |     */
  393|    933|    while (lock->group_count - lock->writers_alloced < 2)
  394|       |        /* we have to wait for one to be free */
  395|      0|        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
  396|       |
  397|    933|    current_idx = lock->current_alloc_idx;
  398|       |
  399|       |    /* Allocate the qp */
  400|    933|    lock->writers_alloced++;
  401|       |
  402|       |    /* increment the allocation index */
  403|    933|    lock->current_alloc_idx = (lock->current_alloc_idx + 1) % lock->group_count;
  404|       |
  405|    933|    *curr_id = lock->id_ctr;
  406|    933|    lock->id_ctr++;
  407|       |
  408|       |    /*
  409|       |     * make the current state of everything visible by this release
  410|       |     * when get_hold_current_qp acquires the next qp
  411|       |     */
  412|    933|    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
  413|    933|                   __ATOMIC_RELEASE);
  414|       |
  415|       |    /*
  416|       |     * this should make sure that the new value of reader_idx is visible in
  417|       |     * get_hold_current_qp, directly after incrementing the users count
  418|       |     */
  419|    933|    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
  420|    933|                     __ATOMIC_RELEASE);
  421|       |
  422|       |    /* wake up any waiters */
  423|    933|    pthread_cond_signal(&lock->alloc_signal);
  424|    933|    pthread_mutex_unlock(&lock->alloc_lock);
  425|    933|    return &lock->qp_group[current_idx];
  426|    933|}
  427|       |
  428|       |static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
  429|    933|{
  430|    933|    pthread_mutex_lock(&lock->alloc_lock);
  431|    933|    lock->writers_alloced--;
  432|    933|    pthread_cond_signal(&lock->alloc_signal);
  433|    933|    pthread_mutex_unlock(&lock->alloc_lock);
  434|    933|}
  435|       |
  436|       |static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
  437|       |                                            uint32_t count)
  438|    518|{
  439|    518|    struct rcu_qp *new = OPENSSL_zalloc(sizeof(*new) * count);
  440|       |
  441|    518|    lock->group_count = count;
  442|    518|    return new;
  443|    518|}
  444|       |
  445|       |void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
  446|    726|{
  447|    726|    pthread_mutex_lock(&lock->write_lock);
  448|    726|    TSAN_FAKE_UNLOCK(&lock->write_lock);
  449|    726|}
  450|       |
  451|       |void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
  452|    726|{
  453|    726|    TSAN_FAKE_LOCK(&lock->write_lock);
  454|    726|    pthread_mutex_unlock(&lock->write_lock);
  455|    726|}
  456|       |
  457|       |void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
  458|    933|{
  459|    933|    struct rcu_qp *qp;
  460|    933|    uint64_t count;
  461|    933|    uint32_t curr_id;
  462|    933|    struct rcu_cb_item *cb_items, *tmpcb;
  463|       |
  464|    933|    pthread_mutex_lock(&lock->write_lock);
  465|    933|    cb_items = lock->cb_items;
  466|    933|    lock->cb_items = NULL;
  467|    933|    pthread_mutex_unlock(&lock->write_lock);
  468|       |
  469|    933|    qp = update_qp(lock, &curr_id);
  470|       |
  471|       |    /* retire in order */
  472|    933|    pthread_mutex_lock(&lock->prior_lock);
  473|    933|    while (lock->next_to_retire != curr_id)
  474|      0|        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
  475|       |
  476|       |    /*
  477|       |     * wait for the reader count to reach zero
  478|       |     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
  479|       |     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
  480|       |     * is visible prior to our read
  481|       |     * however this is likely just necessary to silence a tsan warning
  482|       |     * because the read side should not do any write operation
  483|       |     * outside the atomic itself
  484|       |     */
  485|    933|    do {
  486|    933|        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
  487|    933|    } while (count != (uint64_t)0);
  488|       |
  489|    933|    lock->next_to_retire++;
  490|    933|    pthread_cond_broadcast(&lock->prior_signal);
  491|    933|    pthread_mutex_unlock(&lock->prior_lock);
  492|       |
  493|    933|    retire_qp(lock, qp);
  494|       |
  495|       |    /* handle any callbacks that we have */
  496|  1.14k|    while (cb_items != NULL) {
  497|    207|        tmpcb = cb_items;
  498|    207|        cb_items = cb_items->next;
  499|    207|        tmpcb->fn(tmpcb->data);
  500|    207|        OPENSSL_free(tmpcb);
  501|    207|    }
  502|    933|}
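
Note: on the write side, the expected pattern is to publish a replacement
object with ossl_rcu_assign_ptr() under the write lock and reclaim the
old object only after ossl_synchronize_rcu() returns, at which point no
reader can still hold a reference to it. A sketch of that update pattern
(same invented `struct conf` as above, OpenSSL-tree-internal API):

    void writer(CRYPTO_RCU_LOCK *lock, struct conf **shared_conf,
                struct conf *fresh)
    {
        struct conf *old;

        ossl_rcu_write_lock(lock);
        old = *shared_conf;                       /* safe under write_lock */
        ossl_rcu_assign_ptr(shared_conf, &fresh); /* release-store the new one */
        ossl_rcu_write_unlock(lock);

        ossl_synchronize_rcu(lock); /* wait out all readers of `old` */
        OPENSSL_free(old);
    }
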
  503|       |
  504|       |/*
  505|       | * Note: This call assumes it's made under the protection of
  506|       | * ossl_rcu_write_lock
  507|       | */
  508|       |int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
  509|    207|{
  510|    207|    struct rcu_cb_item *new = OPENSSL_zalloc(sizeof(*new));
  511|       |
  512|    207|    if (new == NULL)
  513|      0|        return 0;
  514|       |
  515|    207|    new->data = data;
  516|    207|    new->fn = cb;
  517|       |
  518|    207|    new->next = lock->cb_items;
  519|    207|    lock->cb_items = new;
  520|       |
  521|    207|    return 1;
  522|    207|}
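
Note: ossl_rcu_call() is the non-blocking alternative to the pattern
above: instead of waiting in ossl_synchronize_rcu(), a writer holding the
write lock queues the old object, and the callback runs at the end of the
next ossl_synchronize_rcu() on the same lock. A sketch (invented names
again):

    static void conf_free_cb(void *p)
    {
        OPENSSL_free(p);
    }

    /* caller must hold the write side, i.e. be under ossl_rcu_write_lock */
    void replace_deferred(CRYPTO_RCU_LOCK *lock, struct conf **shared_conf,
                          struct conf *fresh)
    {
        struct conf *old = *shared_conf;

        ossl_rcu_assign_ptr(shared_conf, &fresh);
        ossl_rcu_call(lock, conf_free_cb, old); /* freed after the next sync */
    }
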
  523|       |
  524|       |void *ossl_rcu_uptr_deref(void **p)
  525|  79.6M|{
  526|  79.6M|    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
  527|  79.6M|}
  528|       |
  529|       |void ossl_rcu_assign_uptr(void **p, void **v)
  530|  39.9k|{
  531|  39.9k|    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
  532|  39.9k|}
  533|       |
  534|       |CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
  535|    518|{
  536|    518|    struct rcu_lock_st *new;
  537|    518|    pthread_mutex_t *mutexes[3] = { NULL };
  538|    518|    pthread_cond_t *conds[2] = { NULL };
  539|    518|    int i;
  540|       |
  541|       |    /*
  542|       |     * We need a minimum of 2 qp's
  543|       |     */
  544|    518|    if (num_writers < 2)
  545|    518|        num_writers = 2;
  546|       |
  547|    518|    ctx = ossl_lib_ctx_get_concrete(ctx);
  548|    518|    if (ctx == NULL)
  549|      0|        return 0;
  550|       |
  551|    518|    new = OPENSSL_zalloc(sizeof(*new));
  552|    518|    if (new == NULL)
  553|      0|        return NULL;
  554|       |
  555|    518|    new->ctx = ctx;
  556|    518|    i = 0;
  557|    518|    mutexes[i] = pthread_mutex_init(&new->write_lock, NULL) == 0 ? &new->write_lock : NULL;
  558|    518|    if (mutexes[i++] == NULL)
  559|      0|        goto err;
  560|    518|    mutexes[i] = pthread_mutex_init(&new->prior_lock, NULL) == 0 ? &new->prior_lock : NULL;
  561|    518|    if (mutexes[i++] == NULL)
  562|      0|        goto err;
  563|    518|    mutexes[i] = pthread_mutex_init(&new->alloc_lock, NULL) == 0 ? &new->alloc_lock : NULL;
  564|    518|    if (mutexes[i++] == NULL)
  565|      0|        goto err;
  566|    518|    conds[i - 3] = pthread_cond_init(&new->prior_signal, NULL) == 0 ? &new->prior_signal : NULL;
  567|    518|    if (conds[i - 3] == NULL)
  568|      0|        goto err;
  569|    518|    i++;
  570|    518|    conds[i - 3] = pthread_cond_init(&new->alloc_signal, NULL) == 0 ? &new->alloc_signal : NULL;
  571|    518|    if (conds[i - 3] == NULL)
  572|      0|        goto err;
  573|    518|    i++;
  574|    518|    new->qp_group = allocate_new_qp_group(new, num_writers);
  575|    518|    if (new->qp_group == NULL)
  576|      0|        goto err;
  577|       |
  578|    518|    return new;
  579|       |
  580|      0|err:
  581|      0|    for (i = 0; i < 3; i++)
  582|      0|        if (mutexes[i] != NULL)
  583|      0|            pthread_mutex_destroy(mutexes[i]);
  584|      0|    for (i = 0; i < 2; i++)
  585|      0|        if (conds[i] != NULL)
  586|      0|            pthread_cond_destroy(conds[i]);
  587|      0|    OPENSSL_free(new->qp_group);
  588|      0|    OPENSSL_free(new);
  589|      0|    return NULL;
  590|    518|}
  591|       |
  592|       |void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
  593|    350|{
  594|    350|    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
  595|       |
  596|    350|    if (lock == NULL)
  597|      0|        return;
  598|       |
  599|       |    /* make sure we're synchronized */
  600|    350|    ossl_synchronize_rcu(rlock);
  601|       |
  602|    350|    OPENSSL_free(rlock->qp_group);
  603|       |    /*
  604|       |     * Some targets (BSD) allocate heap when initializing
  605|       |     * a mutex or condition; to prevent leaks, those need
  606|       |     * to be destroyed here
  607|       |     */
  608|    350|    pthread_mutex_destroy(&rlock->write_lock);
  609|    350|    pthread_mutex_destroy(&rlock->prior_lock);
  610|    350|    pthread_mutex_destroy(&rlock->alloc_lock);
  611|    350|    pthread_cond_destroy(&rlock->prior_signal);
  612|    350|    pthread_cond_destroy(&rlock->alloc_signal);
  613|       |
  614|       |    /* There should only be a single qp left now */
  615|    350|    OPENSSL_free(rlock);
  616|    350|}
  617|       |
  618|       |CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
  619|  10.4M|{
  620|  10.4M|#ifdef USE_RWLOCK
  621|  10.4M|    CRYPTO_RWLOCK *lock;
  622|       |
  623|  10.4M|    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
  624|       |        /* Don't set error, to avoid recursion blowup. */
  625|      0|        return NULL;
  626|       |
  627|  10.4M|    if (pthread_rwlock_init(lock, NULL) != 0) {
  628|      0|        OPENSSL_free(lock);
  629|      0|        return NULL;
  630|      0|    }
  631|       |#else
  632|       |    pthread_mutexattr_t attr;
  633|       |    CRYPTO_RWLOCK *lock;
  634|       |
  635|       |    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
  636|       |        /* Don't set error, to avoid recursion blowup. */
  637|       |        return NULL;
  638|       |
  639|       |    /*
  640|       |     * We don't use recursive mutexes, but try to catch errors if we do.
  641|       |     */
  642|       |    pthread_mutexattr_init(&attr);
  643|       |#if !defined(__TANDEM) && !defined(_SPT_MODEL_)
  644|       |#if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
  645|       |    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
  646|       |#endif
  647|       |#else
  648|       |    /* The SPT Thread Library does not define MUTEX attributes. */
  649|       |#endif
  650|       |
  651|       |    if (pthread_mutex_init(lock, &attr) != 0) {
  652|       |        pthread_mutexattr_destroy(&attr);
  653|       |        OPENSSL_free(lock);
  654|       |        return NULL;
  655|       |    }
  656|       |
  657|       |    pthread_mutexattr_destroy(&attr);
  658|       |#endif
  659|       |
  660|  10.4M|    return lock;
  661|  10.4M|}
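
Note: everything below this point is the plain CRYPTO_THREAD_* locking
API, which is public. A minimal usage sketch against the public header
(function and variable names invented):

    #include <openssl/crypto.h>

    int read_counter(CRYPTO_RWLOCK *lock, const int *counter, int *out)
    {
        if (!CRYPTO_THREAD_read_lock(lock))
            return 0;
        *out = *counter;   /* shared state is read under the read lock */
        return CRYPTO_THREAD_unlock(lock);
    }
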
  662|       |
  663|       |__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
  664|  1.04G|{
  665|  1.04G|#ifdef USE_RWLOCK
  666|  1.04G|    if (pthread_rwlock_rdlock(lock) != 0)
  667|      0|        return 0;
  668|       |#else
  669|       |    if (pthread_mutex_lock(lock) != 0) {
  670|       |        assert(errno != EDEADLK && errno != EBUSY);
  671|       |        return 0;
  672|       |    }
  673|       |#endif
  674|       |
  675|  1.04G|    return 1;
  676|  1.04G|}
  677|       |
  678|       |__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
  679|  54.9M|{
  680|  54.9M|#ifdef USE_RWLOCK
  681|  54.9M|    if (pthread_rwlock_wrlock(lock) != 0)
  682|      0|        return 0;
  683|       |#else
  684|       |    if (pthread_mutex_lock(lock) != 0) {
  685|       |        assert(errno != EDEADLK && errno != EBUSY);
  686|       |        return 0;
  687|       |    }
  688|       |#endif
  689|       |
  690|  54.9M|    return 1;
  691|  54.9M|}
  692|       |
  693|       |int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
  694|  1.25G|{
  695|  1.25G|#ifdef USE_RWLOCK
  696|  1.25G|    if (pthread_rwlock_unlock(lock) != 0)
  697|      0|        return 0;
  698|       |#else
  699|       |    if (pthread_mutex_unlock(lock) != 0) {
  700|       |        assert(errno != EPERM);
  701|       |        return 0;
  702|       |    }
  703|       |#endif
  704|       |
  705|  1.25G|    return 1;
  706|  1.25G|}
  707|       |
  708|       |void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
  709|  10.4M|{
  710|  10.4M|    if (lock == NULL)
  711|  2.46k|        return;
  712|       |
  713|  10.4M|#ifdef USE_RWLOCK
  714|  10.4M|    pthread_rwlock_destroy(lock);
  715|       |#else
  716|       |    pthread_mutex_destroy(lock);
  717|       |#endif
  718|  10.4M|    OPENSSL_free(lock);
  719|       |
  720|  10.4M|    return;
  721|  10.4M|}
  722|       |
  723|       |int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
  724|  2.49G|{
  725|  2.49G|    if (pthread_once(once, init) != 0)
  726|      0|        return 0;
  727|       |
  728|  2.49G|    return 1;
  729|  2.49G|}
  730|       |
  731|       |int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
  732|  1.54k|{
  733|       |
  734|  1.54k|#ifndef FIPS_MODULE
  735|  1.54k|    if (!ossl_init_thread())
  736|      0|        return 0;
  737|  1.54k|#endif
  738|       |
  739|  1.54k|    if (pthread_key_create(key, cleanup) != 0)
  740|      0|        return 0;
  741|       |
  742|  1.54k|    return 1;
  743|  1.54k|}
  744|       |
  745|       |void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
  746|  2.13G|{
  747|  2.13G|    return pthread_getspecific(*key);
  748|  2.13G|}
  749|       |
  750|       |int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
  751|  1.79k|{
  752|  1.79k|    if (pthread_setspecific(*key, val) != 0)
  753|      0|        return 0;
  754|       |
  755|  1.79k|    return 1;
  756|  1.79k|}
  757|       |
  758|       |int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
  759|  1.38k|{
  760|  1.38k|    if (pthread_key_delete(*key) != 0)
  761|      0|        return 0;
  762|       |
  763|  1.38k|    return 1;
  764|  1.38k|}
  765|       |
  766|       |CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
  767|   208k|{
  768|   208k|    return pthread_self();
  769|   208k|}
  770|       |
  771|       |int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
  772|  12.6k|{
  773|  12.6k|    return pthread_equal(a, b);
  774|  12.6k|}
  775|       |
  776|       |int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
  777|  12.0M|{
  778|  12.0M|#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
  779|  12.0M|    if (__atomic_is_lock_free(sizeof(*val), val)) {
  780|  12.0M|        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
  781|  12.0M|        return 1;
  782|  12.0M|    }
  783|       |#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
  784|       |    /* This will work for all future Solaris versions. */
  785|       |    if (ret != NULL) {
  786|       |        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
  787|       |        return 1;
  788|       |    }
  789|       |#endif
  790|      0|    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
  791|      0|        return 0;
  792|       |
  793|      0|    *val += amount;
  794|      0|    *ret = *val;
  795|       |
  796|      0|    if (!CRYPTO_THREAD_unlock(lock))
  797|      0|        return 0;
  798|       |
  799|      0|    return 1;
  800|      0|}
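
Note: the zero counts on the lock-taking tail above reflect that on this
platform __atomic_is_lock_free() always succeeded, so the CRYPTO_RWLOCK
fallback was never exercised. Callers still pass a lock for targets
without lock-free atomics; a sketch of the calling convention (public
API, invented helper name):

    #include <openssl/crypto.h>

    /* bump a reference count; `lock` is only used on non-atomic targets */
    int up_ref(int *refcount, CRYPTO_RWLOCK *lock)
    {
        int newval;

        if (!CRYPTO_atomic_add(refcount, 1, &newval, lock))
            return 0;
        return newval > 1;
    }
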
  801|       |
  802|       |int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
  803|       |                        CRYPTO_RWLOCK *lock)
  804|      0|{
  805|      0|#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
  806|      0|    if (__atomic_is_lock_free(sizeof(*val), val)) {
  807|      0|        *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
  808|      0|        return 1;
  809|      0|    }
  810|       |#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
  811|       |    /* This will work for all future Solaris versions. */
  812|       |    if (ret != NULL) {
  813|       |        *ret = atomic_add_64_nv(val, op);
  814|       |        return 1;
  815|       |    }
  816|       |#endif
  817|      0|    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
  818|      0|        return 0;
  819|      0|    *val += op;
  820|      0|    *ret = *val;
  821|       |
  822|      0|    if (!CRYPTO_THREAD_unlock(lock))
  823|      0|        return 0;
  824|       |
  825|      0|    return 1;
  826|      0|}
  827|       |
  828|       |int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
  829|       |                      CRYPTO_RWLOCK *lock)
  830|      0|{
  831|      0|#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
  832|      0|    if (__atomic_is_lock_free(sizeof(*val), val)) {
  833|      0|        *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
  834|      0|        return 1;
  835|      0|    }
  836|       |#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
  837|       |    /* This will work for all future Solaris versions. */
  838|       |    if (ret != NULL) {
  839|       |        *ret = atomic_and_64_nv(val, op);
  840|       |        return 1;
  841|       |    }
  842|       |#endif
  843|      0|    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
  844|      0|        return 0;
  845|      0|    *val &= op;
  846|      0|    *ret = *val;
  847|       |
  848|      0|    if (!CRYPTO_THREAD_unlock(lock))
  849|      0|        return 0;
  850|       |
  851|      0|    return 1;
  852|      0|}
  853|       |
  854|       |int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
  855|       |                     CRYPTO_RWLOCK *lock)
  856|    716|{
  857|    716|#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
  858|    716|    if (__atomic_is_lock_free(sizeof(*val), val)) {
  859|    716|        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
  860|    716|        return 1;
  861|    716|    }
  862|       |#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
  863|       |    /* This will work for all future Solaris versions. */
  864|       |    if (ret != NULL) {
  865|       |        *ret = atomic_or_64_nv(val, op);
  866|       |        return 1;
  867|       |    }
  868|       |#endif
  869|      0|    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
  870|      0|        return 0;
  871|      0|    *val |= op;
  872|      0|    *ret = *val;
  873|       |
  874|      0|    if (!CRYPTO_THREAD_unlock(lock))
  875|      0|        return 0;
  876|       |
  877|      0|    return 1;
  878|      0|}
  879|       |
  880|       |int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
  881|  2.89G|{
  882|  2.89G|#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
  883|  2.89G|    if (__atomic_is_lock_free(sizeof(*val), val)) {
  884|  2.89G|        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
  885|  2.89G|        return 1;
  886|  2.89G|    }
  887|       |#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
  888|       |    /* This will work for all future Solaris versions. */
  889|       |    if (ret != NULL) {
  890|       |        *ret = atomic_or_64_nv(val, 0);
  891|       |        return 1;
  892|       |    }
  893|       |#endif
  894|      0|    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
  895|      0|        return 0;
  896|      0|    *ret = *val;
  897|      0|    if (!CRYPTO_THREAD_unlock(lock))
  898|      0|        return 0;
  899|       |
  900|      0|    return 1;
  901|      0|}
  902|       |
  903|       |int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
  904|  39.3k|{
  905|  39.3k|#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
  906|  39.3k|    if (__atomic_is_lock_free(sizeof(*dst), dst)) {
  907|  39.3k|        __atomic_store(dst, &val, __ATOMIC_RELEASE);
  908|  39.3k|        return 1;
  909|  39.3k|    }
  910|       |#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
  911|       |    /* This will work for all future Solaris versions. */
  912|       |    if (dst != NULL) {
  913|       |        atomic_swap_64(dst, val);
  914|       |        return 1;
  915|       |    }
  916|       |#endif
  917|      0|    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
  918|      0|        return 0;
  919|      0|    *dst = val;
  920|      0|    if (!CRYPTO_THREAD_unlock(lock))
  921|      0|        return 0;
  922|       |
  923|      0|    return 1;
  924|      0|}
  925|       |
  926|       |int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
  927|      0|{
  928|      0|#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
  929|      0|    if (__atomic_is_lock_free(sizeof(*val), val)) {
  930|      0|        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
  931|      0|        return 1;
  932|      0|    }
  933|       |#elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
  934|       |    /* This will work for all future Solaris versions. */
  935|       |    if (ret != NULL) {
  936|       |        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
  937|       |        return 1;
  938|       |    }
  939|       |#endif
  940|      0|    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
  941|      0|        return 0;
  942|      0|    *ret = *val;
  943|      0|    if (!CRYPTO_THREAD_unlock(lock))
  944|      0|        return 0;
  945|       |
  946|      0|    return 1;
  947|      0|}
  948|       |
  949|       |#ifndef FIPS_MODULE
  950|       |int openssl_init_fork_handlers(void)
  951|      0|{
  952|      0|    return 1;
  953|      0|}
  954|       |#endif /* FIPS_MODULE */
  955|       |
  956|       |int openssl_get_fork_id(void)
  957|   164k|{
  958|   164k|    return getpid();
  959|   164k|}
  960|       |#endif