Coverage Report

Created: 2025-11-16 06:40

/src/openssl33/crypto/threads_pthread.c
Line | Count | Source  (each source line below is listed as its line number, then an execution count where instrumented, then the source text)
1
/*
2
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License 2.0 (the "License").  You may not use
5
 * this file except in compliance with the License.  You can obtain a copy
6
 * in the file LICENSE in the source distribution or at
7
 * https://www.openssl.org/source/license.html
8
 */
9
10
/* We need to use the OPENSSL_fork_*() deprecated APIs */
11
#define OPENSSL_SUPPRESS_DEPRECATED
12
13
#include <openssl/crypto.h>
14
#include <crypto/cryptlib.h>
15
#include "internal/cryptlib.h"
16
#include "internal/rcu.h"
17
#include "rcu_internal.h"
18
19
#if defined(__sun)
20
# include <atomic.h>
21
#endif
22
23
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
24
/*
25
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
26
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
27
 * rather than two; there its signature is __atomic_is_lock_free(sizeof(_Atomic(T))).
28
 * All of this makes it impossible to use __atomic_is_lock_free here.
29
 *
30
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
31
 */
32
# define BROKEN_CLANG_ATOMICS
33
#endif
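
For comparison, on toolchains where the builtin is not broken, __atomic_is_lock_free() takes the two-argument form used later in this file. A minimal, self-contained check (editor's illustration, not part of the OpenSSL sources) might look like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t v = 0;

    /* size of the object plus its address, so alignment can be considered */
    if (__atomic_is_lock_free(sizeof(v), &v))
        printf("64-bit atomics are lock-free here\n");
    return 0;
}
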
34
35
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
36
37
# if defined(OPENSSL_SYS_UNIX)
38
#  include <sys/types.h>
39
#  include <unistd.h>
40
# endif
41
42
# include <assert.h>
43
44
/*
45
 * The Non-Stop KLT thread model currently seems broken in its rwlock
46
 * implementation.
47
 * Likewise, there is a problem with the glibc implementation on riscv.
48
 */
49
# if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \
50
                                         && !defined(__riscv)
51
#  define USE_RWLOCK
52
# endif
53
54
/*
55
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
56
 * other compilers.
57
 *
58
 * Unfortunately, we can't do that with some "generic type", because there's no
59
 * guarantee that the chosen generic type is large enough to cover all cases.
60
 * Therefore, we implement fallbacks for each applicable type, with composed
61
 * names that include the type they handle.
62
 *
63
 * (an anecdote: we previously tried to use |void *| as the generic type, with
64
 * the thought that the pointer itself is the largest type.  However, this is
65
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
66
 *
67
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
68
 * they can map to the correct fallback function.  In the GNU/clang case, that
69
 * parameter is simply ignored.
70
 */
71
72
/*
73
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
74
 * fallback function names.
75
 */
76
typedef void *pvoid;
77
78
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
79
    && !defined(USE_ATOMIC_FALLBACKS)
80
54.9M
#  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
81
916
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
82
28.7k
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
83
961
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
84
45
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
85
# else
86
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
87
88
#  define IMPL_fallback_atomic_load_n(t)                        \
89
    static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
90
    {                                                           \
91
        t ret;                                                  \
92
                                                                \
93
        pthread_mutex_lock(&atomic_sim_lock);                   \
94
        ret = *p;                                               \
95
        pthread_mutex_unlock(&atomic_sim_lock);                 \
96
        return ret;                                             \
97
    }
98
IMPL_fallback_atomic_load_n(uint32_t)
99
IMPL_fallback_atomic_load_n(uint64_t)
100
IMPL_fallback_atomic_load_n(pvoid)
101
102
#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
103
104
#  define IMPL_fallback_atomic_store_n(t)                       \
105
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
106
    {                                                           \
107
        t ret;                                                  \
108
                                                                \
109
        pthread_mutex_lock(&atomic_sim_lock);                   \
110
        ret = *p;                                               \
111
        *p = v;                                                 \
112
        pthread_mutex_unlock(&atomic_sim_lock);                 \
113
        return ret;                                             \
114
    }
115
IMPL_fallback_atomic_store_n(uint32_t)
116
117
#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
118
119
#  define IMPL_fallback_atomic_store(t)                         \
120
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
121
    {                                                           \
122
        pthread_mutex_lock(&atomic_sim_lock);                   \
123
        *p = *v;                                                \
124
        pthread_mutex_unlock(&atomic_sim_lock);                 \
125
    }
126
IMPL_fallback_atomic_store(pvoid)
127
128
#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
129
130
/*
131
 * The fallbacks that follow don't need any per type implementation, as
132
 * they are designed for uint64_t only.  If there comes a time when multiple
133
 * types need to be covered, it's relatively easy to refactor them the same
134
 * way as the fallbacks above.
135
 */
136
137
static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
138
{
139
    uint64_t ret;
140
141
    pthread_mutex_lock(&atomic_sim_lock);
142
    *p += v;
143
    ret = *p;
144
    pthread_mutex_unlock(&atomic_sim_lock);
145
    return ret;
146
}
147
148
#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
149
150
static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
151
{
152
    uint64_t ret;
153
154
    pthread_mutex_lock(&atomic_sim_lock);
155
    *p -= v;
156
    ret = *p;
157
    pthread_mutex_unlock(&atomic_sim_lock);
158
    return ret;
159
}
160
161
#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
162
# endif
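
The "composed name" dispatch described in the comment above can be restated as a standalone sketch (editor's illustration; all demo_* names are hypothetical). The first macro argument selects a per-type fallback via token pasting, while a builtin-based build would simply ignore it:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t sim_lock = PTHREAD_MUTEX_INITIALIZER;

#define IMPL_DEMO_LOAD_N(t)                        \
    static t demo_load_n_##t(t *p)                 \
    {                                              \
        t ret;                                     \
                                                   \
        pthread_mutex_lock(&sim_lock);             \
        ret = *p;                                  \
        pthread_mutex_unlock(&sim_lock);           \
        return ret;                                \
    }

IMPL_DEMO_LOAD_N(uint32_t)
IMPL_DEMO_LOAD_N(uint64_t)

/* the type goes first so the call site stays identical on every compiler */
#define DEMO_LOAD_N(t, p) demo_load_n_##t(p)

int main(void)
{
    uint64_t x = 42;

    printf("%llu\n", (unsigned long long)DEMO_LOAD_N(uint64_t, &x));
    return 0;
}
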
163
164
/*
165
 * This is the core of an rcu lock. It tracks the readers and writers for the
166
 * current quiescence point for a given lock. Users is the 64 bit value that
167
 * stores the READERS/ID as defined above
168
 *
169
 */
170
struct rcu_qp {
171
    uint64_t users;
172
};
173
174
struct thread_qp {
175
    struct rcu_qp *qp;
176
    unsigned int depth;
177
    CRYPTO_RCU_LOCK *lock;
178
};
179
180
364
# define MAX_QPS 10
181
/*
182
 * This is the per thread tracking data
183
 * that is assigned to each thread participating
184
 * in an rcu qp
185
 *
186
 * qp points to the qp that it last acquired
187
 *
188
 */
189
struct rcu_thr_data {
190
    struct thread_qp thread_qps[MAX_QPS];
191
};
192
193
/*
194
 * This is the internal version of a CRYPTO_RCU_LOCK
195
 * it is cast from CRYPTO_RCU_LOCK
196
 */
197
struct rcu_lock_st {
198
    /* Callbacks to call for next ossl_synchronize_rcu */
199
    struct rcu_cb_item *cb_items;
200
201
    /* The context we are being created against */
202
    OSSL_LIB_CTX *ctx;
203
204
    /* Array of quiescent points for synchronization */
205
    struct rcu_qp *qp_group;
206
207
    /* rcu generation counter for in-order retirement */
208
    uint32_t id_ctr;
209
210
    /* Number of elements in qp_group array */
211
    uint32_t group_count;
212
213
    /* Index of the current qp in the qp_group array */
214
    uint32_t reader_idx;
215
216
    /* value of the next id_ctr value to be retired */
217
    uint32_t next_to_retire;
218
219
    /* index of the next free rcu_qp in the qp_group */
220
    uint32_t current_alloc_idx;
221
222
    /* number of qp's in qp_group array currently being retired */
223
    uint32_t writers_alloced;
224
225
    /* lock protecting write side operations */
226
    pthread_mutex_t write_lock;
227
228
    /* lock protecting updates to writers_alloced/current_alloc_idx */
229
    pthread_mutex_t alloc_lock;
230
231
    /* signal to wake threads waiting on alloc_lock */
232
    pthread_cond_t alloc_signal;
233
234
    /* lock to enforce in-order retirement */
235
    pthread_mutex_t prior_lock;
236
237
    /* signal to wake threads waiting on prior_lock */
238
    pthread_cond_t prior_signal;
239
};
240
241
/* Read side acquisition of the current qp */
242
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
243
45
{
244
45
    uint32_t qp_idx;
245
246
    /* get the current qp index */
247
45
    for (;;) {
248
45
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
249
250
        /*
251
         * Notes on use of __ATOMIC_ACQUIRE
252
         * We need to ensure the following:
253
         * 1) That subsequent operations aren't optimized by hoisting them above
254
         * this operation.  Specifically, we don't want the below re-load of
255
         * qp_idx to get optimized away
256
         * 2) We want to ensure that any updating of reader_idx on the write side
257
         * of the lock is flushed from a local cpu cache so that we see any
258
         * updates prior to the load.  This is a non-issue on cache coherent
259
         * systems like x86, but is relevant on other arches
260
         */
261
45
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
262
45
                         __ATOMIC_ACQUIRE);
263
264
        /* if the idx hasn't changed, we're good, else try again */
265
45
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
266
45
                                    __ATOMIC_ACQUIRE))
267
45
            break;
268
269
0
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
270
0
                         __ATOMIC_RELAXED);
271
0
    }
272
273
45
    return &lock->qp_group[qp_idx];
274
45
}
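
The read-side acquisition loop above can be restated in plain C11 atomics as a self-contained sketch (editor's illustration; demo_* names are hypothetical): a reader publishes itself on the qp it saw, re-checks the index, and backs out and retries if a writer swapped the index in between.

#include <stdatomic.h>
#include <stdint.h>

struct demo_qp { _Atomic uint64_t users; };

static struct demo_qp demo_group[2];
static _Atomic uint32_t demo_reader_idx;

static struct demo_qp *demo_hold_current_qp(void)
{
    uint32_t idx;

    for (;;) {
        idx = atomic_load_explicit(&demo_reader_idx, memory_order_relaxed);
        atomic_fetch_add_explicit(&demo_group[idx].users, 1,
                                  memory_order_acquire);
        /* index unchanged: the writer cannot have retired this qp under us */
        if (idx == atomic_load_explicit(&demo_reader_idx,
                                        memory_order_acquire))
            return &demo_group[idx];
        /* a writer moved on; undo the count and try the new index */
        atomic_fetch_sub_explicit(&demo_group[idx].users, 1,
                                  memory_order_relaxed);
    }
}

int main(void)
{
    struct demo_qp *qp = demo_hold_current_qp();

    atomic_fetch_sub_explicit(&qp->users, 1, memory_order_release);
    return 0;
}
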
275
276
static void ossl_rcu_free_local_data(void *arg)
277
3
{
278
3
    OSSL_LIB_CTX *ctx = arg;
279
3
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
280
3
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
281
282
3
    OPENSSL_free(data);
283
3
    CRYPTO_THREAD_set_local(lkey, NULL);
284
3
}
285
286
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
287
29
{
288
29
    struct rcu_thr_data *data;
289
29
    int i, available_qp = -1;
290
29
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
291
292
    /*
293
     * we're going to access current_qp here so ask the
294
     * processor to fetch it
295
     */
296
29
    data = CRYPTO_THREAD_get_local(lkey);
297
298
29
    if (data == NULL) {
299
2
        data = OPENSSL_zalloc(sizeof(*data));
300
2
        OPENSSL_assert(data != NULL);
301
2
        CRYPTO_THREAD_set_local(lkey, data);
302
2
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
303
2
    }
304
305
319
    for (i = 0; i < MAX_QPS; i++) {
306
290
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
307
29
            available_qp = i;
308
        /* If we have a hold on this lock already, we're good */
309
290
        if (data->thread_qps[i].lock == lock) {
310
0
            data->thread_qps[i].depth++;
311
0
            return;
312
0
        }
313
290
    }
314
315
    /*
316
     * if we get here, then we don't have a hold on this lock yet
317
     */
318
29
    assert(available_qp != -1);
319
320
29
    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
321
29
    data->thread_qps[available_qp].depth = 1;
322
29
    data->thread_qps[available_qp].lock = lock;
323
29
}
324
325
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
326
45
{
327
45
    int i;
328
45
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
329
45
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
330
45
    uint64_t ret;
331
332
45
    assert(data != NULL);
333
334
45
    for (i = 0; i < MAX_QPS; i++) {
335
45
        if (data->thread_qps[i].lock == lock) {
336
            /*
337
             * we have to use __ATOMIC_RELEASE here
338
             * to ensure that all preceding read instructions complete
339
             * before the decrement is visible to ossl_synchronize_rcu
340
             */
341
45
            data->thread_qps[i].depth--;
342
45
            if (data->thread_qps[i].depth == 0) {
343
45
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
344
45
                                       (uint64_t)1, __ATOMIC_RELEASE);
345
45
                OPENSSL_assert(ret != UINT64_MAX);
346
45
                data->thread_qps[i].qp = NULL;
347
45
                data->thread_qps[i].lock = NULL;
348
45
            }
349
45
            return;
350
45
        }
351
45
    }
352
    /*
353
     * If we get here, we're trying to unlock a lock that we never acquired -
354
     * that's fatal.
355
     */
356
45
    assert(0);
357
0
}
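
Taken together, ossl_rcu_read_lock(), ossl_rcu_uptr_deref() and ossl_rcu_read_unlock() are used inside libcrypto roughly as follows. This is an editor's sketch against the internal API only ("internal/rcu.h"); struct cfg and shared_cfg are hypothetical names.

#include "internal/rcu.h"

struct cfg { int value; };

static void *shared_cfg;   /* published by writers via ossl_rcu_assign_uptr() */

static int read_cfg_value(CRYPTO_RCU_LOCK *lock)
{
    struct cfg *c;
    int v;

    ossl_rcu_read_lock(lock);
    /* dereference through the rcu helper so the load carries acquire order */
    c = ossl_rcu_uptr_deref(&shared_cfg);
    v = (c != NULL) ? c->value : -1;
    ossl_rcu_read_unlock(lock);   /* don't use c past this point */
    return v;
}
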
358
359
/*
360
 * Write side allocation routine to get the current qp
361
 * and replace it with a new one
362
 */
363
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
364
916
{
365
916
    uint32_t current_idx;
366
367
916
    pthread_mutex_lock(&lock->alloc_lock);
368
369
    /*
370
     * we need at least one qp to be available with one
371
     * left over, so that readers can start working on
372
     * one that isn't yet being waited on
373
     */
374
916
    while (lock->group_count - lock->writers_alloced < 2)
375
        /* we have to wait for one to be free */
376
0
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
377
378
916
    current_idx = lock->current_alloc_idx;
379
380
    /* Allocate the qp */
381
916
    lock->writers_alloced++;
382
383
    /* increment the allocation index */
384
916
    lock->current_alloc_idx =
385
916
        (lock->current_alloc_idx + 1) % lock->group_count;
386
387
916
    *curr_id = lock->id_ctr;
388
916
    lock->id_ctr++;
389
390
    /*
391
     * make the current state of everything visible by this release
392
     * when get_hold_current_qp acquires the next qp
393
     */
394
916
    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
395
916
                   __ATOMIC_RELEASE);
396
397
    /*
398
     * this should make sure that the new value of reader_idx is visible in
399
     * get_hold_current_qp, directly after incrementing the users count
400
     */
401
916
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
402
916
                     __ATOMIC_RELEASE);
403
404
    /* wake up any waiters */
405
916
    pthread_cond_signal(&lock->alloc_signal);
406
916
    pthread_mutex_unlock(&lock->alloc_lock);
407
916
    return &lock->qp_group[current_idx];
408
916
}
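
The ordering contract between update_qp() and the reader loop can be sketched with C11 atomics (editor's illustration; demo_* names are hypothetical): the writer release-stores the new index, and the no-op release RMW on the old qp mirrors the ATOMIC_ADD_FETCH of zero above.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t demo_reader_idx;
static _Atomic uint64_t demo_users[2];

static void demo_publish_next_idx(uint32_t next_idx, uint32_t current_idx)
{
    /* everything written before this store is visible to acquiring readers */
    atomic_store_explicit(&demo_reader_idx, next_idx, memory_order_release);

    /* no-op add with release order, mirroring ATOMIC_ADD_FETCH(..., 0, ...) */
    atomic_fetch_add_explicit(&demo_users[current_idx], 0,
                              memory_order_release);
}

int main(void)
{
    demo_publish_next_idx(1, 0);
    return 0;
}
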
409
410
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
411
916
{
412
916
    pthread_mutex_lock(&lock->alloc_lock);
413
916
    lock->writers_alloced--;
414
916
    pthread_cond_signal(&lock->alloc_signal);
415
916
    pthread_mutex_unlock(&lock->alloc_lock);
416
916
}
417
418
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
419
                                            uint32_t count)
420
438
{
421
438
    struct rcu_qp *new =
422
438
        OPENSSL_zalloc(sizeof(*new) * count);
423
424
438
    lock->group_count = count;
425
438
    return new;
426
438
}
427
428
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
429
666
{
430
666
    pthread_mutex_lock(&lock->write_lock);
431
666
}
432
433
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
434
666
{
435
666
    pthread_mutex_unlock(&lock->write_lock);
436
666
}
437
438
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
439
916
{
440
916
    struct rcu_qp *qp;
441
916
    uint64_t count;
442
916
    uint32_t curr_id;
443
916
    struct rcu_cb_item *cb_items, *tmpcb;
444
445
916
    pthread_mutex_lock(&lock->write_lock);
446
916
    cb_items = lock->cb_items;
447
916
    lock->cb_items = NULL;
448
916
    pthread_mutex_unlock(&lock->write_lock);
449
450
916
    qp = update_qp(lock, &curr_id);
451
452
    /* retire in order */
453
916
    pthread_mutex_lock(&lock->prior_lock);
454
916
    while (lock->next_to_retire != curr_id)
455
0
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
456
457
    /*
458
     * wait for the reader count to reach zero
459
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
460
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
461
     * is visible prior to our read
462
     * however this is likely just necessary to silence a tsan warning
463
     * because the read side should not do any write operation
464
     * outside the atomic itself
465
     */
466
916
    do {
467
916
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
468
916
    } while (count != (uint64_t)0);
469
470
916
    lock->next_to_retire++;
471
916
    pthread_cond_broadcast(&lock->prior_signal);
472
916
    pthread_mutex_unlock(&lock->prior_lock);
473
474
916
    retire_qp(lock, qp);
475
476
    /* handle any callbacks that we have */
477
1.09k
    while (cb_items != NULL) {
478
179
        tmpcb = cb_items;
479
179
        cb_items = cb_items->next;
480
179
        tmpcb->fn(tmpcb->data);
481
179
        OPENSSL_free(tmpcb);
482
179
    }
483
916
}
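
The matching write-side pattern for the code above is: publish the new object under the write lock, defer the free with ossl_rcu_call(), then let ossl_synchronize_rcu() run the callbacks once the readers drain. Editor's sketch against the internal API; struct cfg, shared_cfg and the other names are hypothetical and carried over from the read-side sketch.

static void free_cfg_cb(void *old)
{
    OPENSSL_free(old);
}

static int update_cfg_value(CRYPTO_RCU_LOCK *lock, int value)
{
    struct cfg *newcfg = OPENSSL_zalloc(sizeof(*newcfg));
    struct cfg *oldcfg;
    int queued;

    if (newcfg == NULL)
        return 0;
    newcfg->value = value;

    ossl_rcu_write_lock(lock);
    oldcfg = ossl_rcu_uptr_deref(&shared_cfg);
    /* release-publish: readers now see either the old or the new pointer */
    ossl_rcu_assign_uptr(&shared_cfg, (void **)&newcfg);
    /* defer the free until every reader holding oldcfg has left its qp */
    queued = ossl_rcu_call(lock, free_cfg_cb, oldcfg);
    ossl_rcu_write_unlock(lock);

    ossl_synchronize_rcu(lock);   /* waits for readers, then runs callbacks */
    if (!queued)
        OPENSSL_free(oldcfg);     /* safe now: all readers have drained */
    return 1;
}
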
484
485
/*
486
 * Note: This call assumes it's made under the protection of
487
 * ossl_rcu_write_lock
488
 */
489
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
490
179
{
491
179
    struct rcu_cb_item *new =
492
179
        OPENSSL_zalloc(sizeof(*new));
493
494
179
    if (new == NULL)
495
0
        return 0;
496
497
179
    new->data = data;
498
179
    new->fn = cb;
499
500
179
    new->next = lock->cb_items;
501
179
    lock->cb_items = new;
502
503
179
    return 1;
504
179
}
505
506
void *ossl_rcu_uptr_deref(void **p)
507
54.9M
{
508
54.9M
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
509
54.9M
}
510
511
void ossl_rcu_assign_uptr(void **p, void **v)
512
28.7k
{
513
28.7k
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
514
28.7k
}
515
516
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
517
438
{
518
438
    struct rcu_lock_st *new;
519
520
    /*
521
     * We need a minimum of 2 qp's
522
     */
523
438
    if (num_writers < 2)
524
438
        num_writers = 2;
525
526
438
    ctx = ossl_lib_ctx_get_concrete(ctx);
527
438
    if (ctx == NULL)
528
0
        return 0;
529
530
438
    new = OPENSSL_zalloc(sizeof(*new));
531
438
    if (new == NULL)
532
0
        return NULL;
533
534
438
    new->ctx = ctx;
535
438
    pthread_mutex_init(&new->write_lock, NULL);
536
438
    pthread_mutex_init(&new->prior_lock, NULL);
537
438
    pthread_mutex_init(&new->alloc_lock, NULL);
538
438
    pthread_cond_init(&new->prior_signal, NULL);
539
438
    pthread_cond_init(&new->alloc_signal, NULL);
540
541
438
    new->qp_group = allocate_new_qp_group(new, num_writers);
542
438
    if (new->qp_group == NULL) {
543
0
        OPENSSL_free(new);
544
0
        new = NULL;
545
0
    }
546
547
438
    return new;
548
438
}
549
550
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
551
350
{
552
350
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
553
554
350
    if (lock == NULL)
555
0
        return;
556
557
    /* make sure we're synchronized */
558
350
    ossl_synchronize_rcu(rlock);
559
560
350
    OPENSSL_free(rlock->qp_group);
561
    /* There should only be a single qp left now */
562
350
    OPENSSL_free(rlock);
563
350
}
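
Lifecycle of the lock, as an editor's sketch (internal API; rcu_lifecycle_demo is a hypothetical name): num_writers sizes the qp_group, and anything below 2 is rounded up to 2.

static int rcu_lifecycle_demo(OSSL_LIB_CTX *libctx)
{
    CRYPTO_RCU_LOCK *lock = ossl_rcu_lock_new(4, libctx);

    if (lock == NULL)
        return 0;

    /* ... readers and writers use the lock here ... */

    ossl_rcu_lock_free(lock);   /* synchronizes one last time, then frees */
    return 1;
}
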
564
565
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
566
9.94M
{
567
9.94M
# ifdef USE_RWLOCK
568
9.94M
    CRYPTO_RWLOCK *lock;
569
570
9.94M
    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
571
        /* Don't set error, to avoid recursion blowup. */
572
0
        return NULL;
573
574
9.94M
    if (pthread_rwlock_init(lock, NULL) != 0) {
575
0
        OPENSSL_free(lock);
576
0
        return NULL;
577
0
    }
578
# else
579
    pthread_mutexattr_t attr;
580
    CRYPTO_RWLOCK *lock;
581
582
    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
583
        /* Don't set error, to avoid recursion blowup. */
584
        return NULL;
585
586
    /*
587
     * We don't use recursive mutexes, but try to catch errors if we do.
588
     */
589
    pthread_mutexattr_init(&attr);
590
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
591
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
592
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
593
#   endif
594
#  else
595
    /* The SPT Thread Library does not define MUTEX attributes. */
596
#  endif
597
598
    if (pthread_mutex_init(lock, &attr) != 0) {
599
        pthread_mutexattr_destroy(&attr);
600
        OPENSSL_free(lock);
601
        return NULL;
602
    }
603
604
    pthread_mutexattr_destroy(&attr);
605
# endif
606
607
9.94M
    return lock;
608
9.94M
}
609
610
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
611
1.09G
{
612
1.09G
# ifdef USE_RWLOCK
613
1.09G
    if (pthread_rwlock_rdlock(lock) != 0)
614
0
        return 0;
615
# else
616
    if (pthread_mutex_lock(lock) != 0) {
617
        assert(errno != EDEADLK && errno != EBUSY);
618
        return 0;
619
    }
620
# endif
621
622
1.09G
    return 1;
623
1.09G
}
624
625
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
626
77.7M
{
627
77.7M
# ifdef USE_RWLOCK
628
77.7M
    if (pthread_rwlock_wrlock(lock) != 0)
629
0
        return 0;
630
# else
631
    if (pthread_mutex_lock(lock) != 0) {
632
        assert(errno != EDEADLK && errno != EBUSY);
633
        return 0;
634
    }
635
# endif
636
637
77.7M
    return 1;
638
77.7M
}
639
640
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
641
1.26G
{
642
1.26G
# ifdef USE_RWLOCK
643
1.26G
    if (pthread_rwlock_unlock(lock) != 0)
644
0
        return 0;
645
# else
646
    if (pthread_mutex_unlock(lock) != 0) {
647
        assert(errno != EPERM);
648
        return 0;
649
    }
650
# endif
651
652
1.26G
    return 1;
653
1.26G
}
654
655
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
656
9.94M
{
657
9.94M
    if (lock == NULL)
658
2.20k
        return;
659
660
9.94M
# ifdef USE_RWLOCK
661
9.94M
    pthread_rwlock_destroy(lock);
662
# else
663
    pthread_mutex_destroy(lock);
664
# endif
665
9.94M
    OPENSSL_free(lock);
666
667
9.94M
    return;
668
9.94M
}
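
Typical use of the public CRYPTO_THREAD rwlock API implemented above, as an editor's sketch with minimal error handling (bump_counter and demo_rwlock are hypothetical names):

#include <openssl/crypto.h>

static int bump_counter(CRYPTO_RWLOCK *lock, int *counter)
{
    if (!CRYPTO_THREAD_write_lock(lock))
        return 0;
    (*counter)++;
    return CRYPTO_THREAD_unlock(lock);
}

static int demo_rwlock(void)
{
    int counter = 0, ok;
    CRYPTO_RWLOCK *lock = CRYPTO_THREAD_lock_new();

    if (lock == NULL)
        return 0;
    ok = bump_counter(lock, &counter);
    CRYPTO_THREAD_lock_free(lock);
    return ok;
}
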
669
670
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
671
2.92G
{
672
2.92G
    if (pthread_once(once, init) != 0)
673
0
        return 0;
674
675
2.92G
    return 1;
676
2.92G
}
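
CRYPTO_THREAD_run_once() wraps pthread_once(); the usual pattern pairs it with a statically initialized CRYPTO_ONCE. Editor's sketch; do_init, ensure_init and init_ok are hypothetical names.

#include <openssl/crypto.h>

static CRYPTO_ONCE once = CRYPTO_ONCE_STATIC_INIT;
static int init_ok;

static void do_init(void)
{
    init_ok = 1;   /* one-time setup goes here */
}

static int ensure_init(void)
{
    return CRYPTO_THREAD_run_once(&once, do_init) && init_ok;
}
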
677
678
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
679
1.89k
{
680
1.89k
    if (pthread_key_create(key, cleanup) != 0)
681
0
        return 0;
682
683
1.89k
    return 1;
684
1.89k
}
685
686
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
687
1.83G
{
688
1.83G
    return pthread_getspecific(*key);
689
1.83G
}
690
691
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
692
1.91k
{
693
1.91k
    if (pthread_setspecific(*key, val) != 0)
694
0
        return 0;
695
696
1.91k
    return 1;
697
1.91k
}
698
699
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
700
1.57k
{
701
1.57k
    if (pthread_key_delete(*key) != 0)
702
0
        return 0;
703
704
1.57k
    return 1;
705
1.57k
}
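
The thread-local helpers above are used the same way ossl_rcu_read_lock() uses them for its per-thread data. Editor's sketch with hypothetical names; note that the cleanup callback registered with CRYPTO_THREAD_init_local() runs at thread exit, not at key deletion.

#include <openssl/crypto.h>

static CRYPTO_THREAD_LOCAL demo_key;

static void demo_cleanup(void *ptr)
{
    OPENSSL_free(ptr);   /* called at thread exit for a non-NULL value */
}

static int demo_tls(void)
{
    int *slot;

    if (!CRYPTO_THREAD_init_local(&demo_key, demo_cleanup))
        return 0;

    slot = OPENSSL_zalloc(sizeof(*slot));
    if (slot == NULL || !CRYPTO_THREAD_set_local(&demo_key, slot)) {
        OPENSSL_free(slot);
        return 0;
    }

    slot = CRYPTO_THREAD_get_local(&demo_key);   /* same pointer back */
    *slot = 42;

    /* demo only: free eagerly, then drop the key itself */
    OPENSSL_free(slot);
    if (!CRYPTO_THREAD_set_local(&demo_key, NULL))
        return 0;
    return CRYPTO_THREAD_cleanup_local(&demo_key);
}
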
706
707
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
708
183k
{
709
183k
    return pthread_self();
710
183k
}
711
712
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
713
11.8k
{
714
11.8k
    return pthread_equal(a, b);
715
11.8k
}
716
717
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
718
10.9M
{
719
10.9M
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
720
10.9M
    if (__atomic_is_lock_free(sizeof(*val), val)) {
721
10.9M
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
722
10.9M
        return 1;
723
10.9M
    }
724
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
725
    /* This will work for all future Solaris versions. */
726
    if (ret != NULL) {
727
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
728
        return 1;
729
    }
730
# endif
731
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
732
0
        return 0;
733
734
0
    *val += amount;
735
0
    *ret  = *val;
736
737
0
    if (!CRYPTO_THREAD_unlock(lock))
738
0
        return 0;
739
740
0
    return 1;
741
0
}
742
743
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
744
                     CRYPTO_RWLOCK *lock)
745
644
{
746
644
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
747
644
    if (__atomic_is_lock_free(sizeof(*val), val)) {
748
644
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
749
644
        return 1;
750
644
    }
751
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
752
    /* This will work for all future Solaris versions. */
753
    if (ret != NULL) {
754
        *ret = atomic_or_64_nv(val, op);
755
        return 1;
756
    }
757
# endif
758
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
759
0
        return 0;
760
0
    *val |= op;
761
0
    *ret  = *val;
762
763
0
    if (!CRYPTO_THREAD_unlock(lock))
764
0
        return 0;
765
766
0
    return 1;
767
0
}
768
769
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
770
2.43G
{
771
2.43G
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
772
2.43G
    if (__atomic_is_lock_free(sizeof(*val), val)) {
773
2.43G
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
774
2.43G
        return 1;
775
2.43G
    }
776
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
777
    /* This will work for all future Solaris versions. */
778
    if (ret != NULL) {
779
        *ret = atomic_or_64_nv(val, 0);
780
        return 1;
781
    }
782
# endif
783
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
784
0
        return 0;
785
0
    *ret  = *val;
786
0
    if (!CRYPTO_THREAD_unlock(lock))
787
0
        return 0;
788
789
0
    return 1;
790
0
}
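
The public atomics above all take a CRYPTO_RWLOCK fallback, which is only touched when the builtin (or Solaris) path is unavailable; passing one keeps the call portable. Editor's sketch; demo_atomics is a hypothetical name.

#include <openssl/crypto.h>
#include <stdint.h>

static int demo_atomics(void)
{
    int refs = 0, newval, ok;
    uint64_t flags = 0, seen;
    CRYPTO_RWLOCK *lock = CRYPTO_THREAD_lock_new();

    if (lock == NULL)
        return 0;

    ok = CRYPTO_atomic_add(&refs, 1, &newval, lock)
        && CRYPTO_atomic_or(&flags, UINT64_C(1) << 3, &seen, lock)
        && CRYPTO_atomic_load(&flags, &seen, lock);

    CRYPTO_THREAD_lock_free(lock);
    return ok;
}
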
791
792
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
793
0
{
794
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
795
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
796
0
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
797
0
        return 1;
798
0
    }
799
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
800
    /* This will work for all future Solaris versions. */
801
    if (ret != NULL) {
802
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
803
        return 1;
804
    }
805
# endif
806
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
807
0
        return 0;
808
0
    *ret  = *val;
809
0
    if (!CRYPTO_THREAD_unlock(lock))
810
0
        return 0;
811
812
0
    return 1;
813
0
}
814
815
# ifndef FIPS_MODULE
816
int openssl_init_fork_handlers(void)
817
0
{
818
0
    return 1;
819
0
}
820
# endif /* FIPS_MODULE */
821
822
int openssl_get_fork_id(void)
823
74.8k
{
824
74.8k
    return getpid();
825
74.8k
}
826
#endif