Coverage Report

Created: 2025-08-28 07:07

/src/openssl33/crypto/threads_pthread.c
 Count | Source
       | /*
       |  * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
       |  *
       |  * Licensed under the Apache License 2.0 (the "License").  You may not use
       |  * this file except in compliance with the License.  You can obtain a copy
       |  * in the file LICENSE in the source distribution or at
       |  * https://www.openssl.org/source/license.html
       |  */
       |
       | /* We need to use the OPENSSL_fork_*() deprecated APIs */
       | #define OPENSSL_SUPPRESS_DEPRECATED
       |
       | #include <openssl/crypto.h>
       | #include <crypto/cryptlib.h>
       | #include "internal/cryptlib.h"
       | #include "internal/rcu.h"
       | #include "rcu_internal.h"
       |
       | #if defined(__sun)
       | # include <atomic.h>
       | #endif
       |
       | #if defined(__apple_build_version__) && __apple_build_version__ < 6000000
       | /*
       |  * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
       |  * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
       |  * rather than two which has signature __atomic_is_lock_free(sizeof(_Atomic(T))).
       |  * All of this makes impossible to use __atomic_is_lock_free here.
       |  *
       |  * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
       |  */
       | # define BROKEN_CLANG_ATOMICS
       | #endif
       |
       | #if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
       |
       | # if defined(OPENSSL_SYS_UNIX)
       | #  include <sys/types.h>
       | #  include <unistd.h>
       | # endif
       |
       | # include <assert.h>
       |
       | # ifdef PTHREAD_RWLOCK_INITIALIZER
       | #  define USE_RWLOCK
       | # endif
       |
       | /*
       |  * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
       |  * other compilers.
       |  *
       |  * Unfortunately, we can't do that with some "generic type", because there's no
       |  * guarantee that the chosen generic type is large enough to cover all cases.
       |  * Therefore, we implement fallbacks for each applicable type, with composed
       |  * names that include the type they handle.
       |  *
       |  * (an anecdote: we previously tried to use |void *| as the generic type, with
       |  * the thought that the pointer itself is the largest type.  However, this is
       |  * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
       |  *
       |  * All applicable ATOMIC_ macros take the intended type as first parameter, so
       |  * they can map to the correct fallback function.  In the GNU/clang case, that
       |  * parameter is simply ignored.
       |  */
       |
       | /*
       |  * Internal types used with the ATOMIC_ macros, to make it possible to compose
       |  * fallback function names.
       |  */
       | typedef void *pvoid;
       |
       | # if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
       |     && !defined(USE_ATOMIC_FALLBACKS)
 52.5M | #  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
   921 | #  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
 28.7k | #  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
   978 | #  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
    57 | #  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
       | # else
       | static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
       |
       | #  define IMPL_fallback_atomic_load_n(t)                        \
       |     static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
       |     {                                                           \
       |         t ret;                                                  \
       |                                                                 \
       |         pthread_mutex_lock(&atomic_sim_lock);                   \
       |         ret = *p;                                               \
       |         pthread_mutex_unlock(&atomic_sim_lock);                 \
       |         return ret;                                             \
       |     }
       | IMPL_fallback_atomic_load_n(uint32_t)
       | IMPL_fallback_atomic_load_n(uint64_t)
       | IMPL_fallback_atomic_load_n(pvoid)
       |
       | #  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
       |
       | #  define IMPL_fallback_atomic_store_n(t)                       \
       |     static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
       |     {                                                           \
       |         t ret;                                                  \
       |                                                                 \
       |         pthread_mutex_lock(&atomic_sim_lock);                   \
       |         ret = *p;                                               \
       |         *p = v;                                                 \
       |         pthread_mutex_unlock(&atomic_sim_lock);                 \
       |         return ret;                                             \
       |     }
       | IMPL_fallback_atomic_store_n(uint32_t)
       |
       | #  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
       |
       | #  define IMPL_fallback_atomic_store(t)                         \
       |     static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
       |     {                                                           \
       |         pthread_mutex_lock(&atomic_sim_lock);                   \
       |         *p = *v;                                                \
       |         pthread_mutex_unlock(&atomic_sim_lock);                 \
       |     }
       | IMPL_fallback_atomic_store(pvoid)
       |
       | #  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
       |
       | /*
       |  * The fallbacks that follow don't need any per type implementation, as
       |  * they are designed for uint64_t only.  If there comes a time when multiple
       |  * types need to be covered, it's relatively easy to refactor them the same
       |  * way as the fallbacks above.
       |  */
       |
       | static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
       | {
       |     uint64_t ret;
       |
       |     pthread_mutex_lock(&atomic_sim_lock);
       |     *p += v;
       |     ret = *p;
       |     pthread_mutex_unlock(&atomic_sim_lock);
       |     return ret;
       | }
       |
       | #  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
       |
       | static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
       | {
       |     uint64_t ret;
       |
       |     pthread_mutex_lock(&atomic_sim_lock);
       |     *p -= v;
       |     ret = *p;
       |     pthread_mutex_unlock(&atomic_sim_lock);
       |     return ret;
       | }
       |
       | #  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
       | # endif
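
The fallback branch above relies on token pasting to compose one fallback function per type, all serialized by a single process-wide mutex, while the memory-order argument is accepted and ignored so the call shape matches the GNU/clang builtins. As a standalone illustration of that pattern (hypothetical demo_* names, not code from this file):

    /* Minimal sketch of the per-type, mutex-serialized fallback pattern. */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t sim_lock = PTHREAD_MUTEX_INITIALIZER;

    #define IMPL_LOAD_N(t)                                   \
        static t demo_load_n_##t(t *p)                       \
        {                                                    \
            t ret;                                           \
                                                             \
            pthread_mutex_lock(&sim_lock);                   \
            ret = *p;                                        \
            pthread_mutex_unlock(&sim_lock);                 \
            return ret;                                      \
        }

    IMPL_LOAD_N(uint32_t)
    IMPL_LOAD_N(uint64_t)

    /* The type parameter selects the fallback; the memory-order argument
     * is unused, just as in the real fallback macros. */
    #define DEMO_LOAD_N(t, p, o) demo_load_n_##t(p)

    int main(void)
    {
        uint32_t a = 7;
        uint64_t b = 42;

        printf("%u %llu\n", (unsigned)DEMO_LOAD_N(uint32_t, &a, 0),
               (unsigned long long)DEMO_LOAD_N(uint64_t, &b, 0));
        return 0;
    }
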
       |
       | /*
       |  * This is the core of an rcu lock. It tracks the readers and writers for the
       |  * current quiescence point for a given lock. Users is the 64 bit value that
       |  * stores the READERS/ID as defined above
       |  *
       |  */
       | struct rcu_qp {
       |     uint64_t users;
       | };
       |
       | struct thread_qp {
       |     struct rcu_qp *qp;
       |     unsigned int depth;
       |     CRYPTO_RCU_LOCK *lock;
       | };
       |
   486 | # define MAX_QPS 10
       | /*
       |  * This is the per thread tracking data
       |  * that is assigned to each thread participating
       |  * in an rcu qp
       |  *
       |  * qp points to the qp that it last acquired
       |  *
       |  */
       | struct rcu_thr_data {
       |     struct thread_qp thread_qps[MAX_QPS];
       | };
       |
       | /*
       |  * This is the internal version of a CRYPTO_RCU_LOCK
       |  * it is cast from CRYPTO_RCU_LOCK
       |  */
       | struct rcu_lock_st {
       |     /* Callbacks to call for next ossl_synchronize_rcu */
       |     struct rcu_cb_item *cb_items;
       |
       |     /* The context we are being created against */
       |     OSSL_LIB_CTX *ctx;
       |
       |     /* Array of quiescent points for synchronization */
       |     struct rcu_qp *qp_group;
       |
       |     /* rcu generation counter for in-order retirement */
       |     uint32_t id_ctr;
       |
       |     /* Number of elements in qp_group array */
       |     uint32_t group_count;
       |
       |     /* Index of the current qp in the qp_group array */
       |     uint32_t reader_idx;
       |
       |     /* value of the next id_ctr value to be retired */
       |     uint32_t next_to_retire;
       |
       |     /* index of the next free rcu_qp in the qp_group */
       |     uint32_t current_alloc_idx;
       |
       |     /* number of qp's in qp_group array currently being retired */
       |     uint32_t writers_alloced;
       |
       |     /* lock protecting write side operations */
       |     pthread_mutex_t write_lock;
       |
       |     /* lock protecting updates to writers_alloced/current_alloc_idx */
       |     pthread_mutex_t alloc_lock;
       |
       |     /* signal to wake threads waiting on alloc_lock */
       |     pthread_cond_t alloc_signal;
       |
       |     /* lock to enforce in-order retirement */
       |     pthread_mutex_t prior_lock;
       |
       |     /* signal to wake threads waiting on prior_lock */
       |     pthread_cond_t prior_signal;
       | };
       |
       | /* Read side acquisition of the current qp */
       | static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
    57 | {
    57 |     uint32_t qp_idx;
       |
       |     /* get the current qp index */
    57 |     for (;;) {
    57 |         qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
       |
       |         /*
       |          * Notes on use of __ATOMIC_ACQUIRE
       |          * We need to ensure the following:
       |          * 1) That subsequent operations aren't optimized by hoisting them above
       |          * this operation.  Specifically, we don't want the below re-load of
       |          * qp_idx to get optimized away
       |          * 2) We want to ensure that any updating of reader_idx on the write side
       |          * of the lock is flushed from a local cpu cache so that we see any
       |          * updates prior to the load.  This is a non-issue on cache coherent
       |          * systems like x86, but is relevant on other arches
       |          */
    57 |         ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
    57 |                          __ATOMIC_ACQUIRE);
       |
       |         /* if the idx hasn't changed, we're good, else try again */
    57 |         if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
    57 |                                     __ATOMIC_RELAXED))
    57 |             break;
       |
     0 |         ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
     0 |                          __ATOMIC_RELAXED);
     0 |     }
       |
    57 |     return &lock->qp_group[qp_idx];
    57 | }
       |
       | static void ossl_rcu_free_local_data(void *arg)
     3 | {
     3 |     OSSL_LIB_CTX *ctx = arg;
     3 |     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
     3 |     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
       |
     3 |     OPENSSL_free(data);
     3 |     CRYPTO_THREAD_set_local(lkey, NULL);
     3 | }
       |
       | void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
    39 | {
    39 |     struct rcu_thr_data *data;
    39 |     int i, available_qp = -1;
    39 |     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
       |
       |     /*
       |      * we're going to access current_qp here so ask the
       |      * processor to fetch it
       |      */
    39 |     data = CRYPTO_THREAD_get_local(lkey);
       |
    39 |     if (data == NULL) {
     2 |         data = OPENSSL_zalloc(sizeof(*data));
     2 |         OPENSSL_assert(data != NULL);
     2 |         CRYPTO_THREAD_set_local(lkey, data);
     2 |         ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
     2 |     }
       |
   429 |     for (i = 0; i < MAX_QPS; i++) {
   390 |         if (data->thread_qps[i].qp == NULL && available_qp == -1)
    39 |             available_qp = i;
       |         /* If we have a hold on this lock already, we're good */
   390 |         if (data->thread_qps[i].lock == lock) {
     0 |             data->thread_qps[i].depth++;
     0 |             return;
     0 |         }
   390 |     }
       |
       |     /*
       |      * if we get here, then we don't have a hold on this lock yet
       |      */
    39 |     assert(available_qp != -1);
       |
    39 |     data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    39 |     data->thread_qps[available_qp].depth = 1;
    39 |     data->thread_qps[available_qp].lock = lock;
    39 | }
       |
       | void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
    57 | {
    57 |     int i;
    57 |     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
    57 |     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    57 |     uint64_t ret;
       |
    57 |     assert(data != NULL);
       |
    57 |     for (i = 0; i < MAX_QPS; i++) {
    57 |         if (data->thread_qps[i].lock == lock) {
       |             /*
       |              * we have to use __ATOMIC_RELEASE here
       |              * to ensure that all preceding read instructions complete
       |              * before the decrement is visible to ossl_synchronize_rcu
       |              */
    57 |             data->thread_qps[i].depth--;
    57 |             if (data->thread_qps[i].depth == 0) {
    57 |                 ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
    57 |                                        (uint64_t)1, __ATOMIC_RELEASE);
    57 |                 OPENSSL_assert(ret != UINT64_MAX);
    57 |                 data->thread_qps[i].qp = NULL;
    57 |                 data->thread_qps[i].lock = NULL;
    57 |             }
    57 |             return;
    57 |         }
    57 |     }
       |     /*
       |      * If we get here, we're trying to unlock a lock that we never acquired -
       |      * that's fatal.
       |      */
     0 |     assert(0);
     0 | }
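
Taken together, ossl_rcu_read_lock(), ossl_rcu_uptr_deref() (defined further down in this file) and ossl_rcu_read_unlock() give the usual RCU read-side pattern. A minimal sketch of a reader, assuming a hypothetical shared pointer shared_cfg published by the write side and the OpenSSL internal headers used above:

    /* Hypothetical reader built on the functions in this file. */
    struct app_config {
        int verbosity;
    };

    static struct app_config *shared_cfg;   /* published by the write side */

    static int read_verbosity(CRYPTO_RCU_LOCK *rcu)
    {
        struct app_config *cfg;
        int v;

        ossl_rcu_read_lock(rcu);
        /* Acquire-load the pointer; the object stays valid until we unlock,
         * because ossl_synchronize_rcu() waits for the reader count to drain. */
        cfg = ossl_rcu_uptr_deref((void **)&shared_cfg);
        v = (cfg != NULL) ? cfg->verbosity : 0;
        ossl_rcu_read_unlock(rcu);

        return v;
    }
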
       |
       | /*
       |  * Write side allocation routine to get the current qp
       |  * and replace it with a new one
       |  */
       | static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
   921 | {
   921 |     uint32_t current_idx;
       |
   921 |     pthread_mutex_lock(&lock->alloc_lock);
       |
       |     /*
       |      * we need at least one qp to be available with one
       |      * left over, so that readers can start working on
       |      * one that isn't yet being waited on
       |      */
   921 |     while (lock->group_count - lock->writers_alloced < 2)
       |         /* we have to wait for one to be free */
     0 |         pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
       |
   921 |     current_idx = lock->current_alloc_idx;
       |
       |     /* Allocate the qp */
   921 |     lock->writers_alloced++;
       |
       |     /* increment the allocation index */
   921 |     lock->current_alloc_idx =
   921 |         (lock->current_alloc_idx + 1) % lock->group_count;
       |
   921 |     *curr_id = lock->id_ctr;
   921 |     lock->id_ctr++;
       |
   921 |     ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
   921 |                    __ATOMIC_RELAXED);
       |
       |     /*
       |      * this should make sure that the new value of reader_idx is visible in
       |      * get_hold_current_qp, directly after incrementing the users count
       |      */
   921 |     ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
   921 |                      __ATOMIC_RELEASE);
       |
       |     /* wake up any waiters */
   921 |     pthread_cond_signal(&lock->alloc_signal);
   921 |     pthread_mutex_unlock(&lock->alloc_lock);
   921 |     return &lock->qp_group[current_idx];
   921 | }
       |
       | static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
   921 | {
   921 |     pthread_mutex_lock(&lock->alloc_lock);
   921 |     lock->writers_alloced--;
   921 |     pthread_cond_signal(&lock->alloc_signal);
   921 |     pthread_mutex_unlock(&lock->alloc_lock);
   921 | }
       |
       | static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
       |                                             uint32_t count)
   438 | {
   438 |     struct rcu_qp *new =
   438 |         OPENSSL_zalloc(sizeof(*new) * count);
       |
   438 |     lock->group_count = count;
   438 |     return new;
   438 | }
       |
       | void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
   670 | {
   670 |     pthread_mutex_lock(&lock->write_lock);
   670 | }
       |
       | void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
   670 | {
   670 |     pthread_mutex_unlock(&lock->write_lock);
   670 | }
       |
       | void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
   921 | {
   921 |     struct rcu_qp *qp;
   921 |     uint64_t count;
   921 |     uint32_t curr_id;
   921 |     struct rcu_cb_item *cb_items, *tmpcb;
       |
   921 |     pthread_mutex_lock(&lock->write_lock);
   921 |     cb_items = lock->cb_items;
   921 |     lock->cb_items = NULL;
   921 |     pthread_mutex_unlock(&lock->write_lock);
       |
   921 |     qp = update_qp(lock, &curr_id);
       |
       |     /* retire in order */
   921 |     pthread_mutex_lock(&lock->prior_lock);
   921 |     while (lock->next_to_retire != curr_id)
     0 |         pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
       |
       |     /*
       |      * wait for the reader count to reach zero
       |      * Note the use of __ATOMIC_ACQUIRE here to ensure that any
       |      * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
       |      * is visible prior to our read
       |      * however this is likely just necessary to silence a tsan warning
       |      * because the read side should not do any write operation
       |      * outside the atomic itself
       |      */
   921 |     do {
   921 |         count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
   921 |     } while (count != (uint64_t)0);
       |
   921 |     lock->next_to_retire++;
   921 |     pthread_cond_broadcast(&lock->prior_signal);
   921 |     pthread_mutex_unlock(&lock->prior_lock);
       |
   921 |     retire_qp(lock, qp);
       |
       |     /* handle any callbacks that we have */
 1.10k |     while (cb_items != NULL) {
   187 |         tmpcb = cb_items;
   187 |         cb_items = cb_items->next;
   187 |         tmpcb->fn(tmpcb->data);
   187 |         OPENSSL_free(tmpcb);
   187 |     }
   921 | }
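
On the write side the pattern is: take the write lock, publish the replacement pointer, drop the write lock, then call ossl_synchronize_rcu() before reclaiming the old object. A sketch reusing the hypothetical shared_cfg and struct app_config from the reader example above (ossl_rcu_assign_uptr() is defined later in this file):

    /* Hypothetical writer: swap in a new config and free the old one only
     * after every in-flight reader has left its read-side section. */
    static int update_verbosity(CRYPTO_RCU_LOCK *rcu, int verbosity)
    {
        struct app_config *newcfg, *oldcfg;

        newcfg = OPENSSL_zalloc(sizeof(*newcfg));
        if (newcfg == NULL)
            return 0;
        newcfg->verbosity = verbosity;

        ossl_rcu_write_lock(rcu);
        oldcfg = shared_cfg;
        /* Release-store: readers that see newcfg also see its contents. */
        ossl_rcu_assign_uptr((void **)&shared_cfg, (void **)&newcfg);
        ossl_rcu_write_unlock(rcu);

        /* Blocks until the current quiescence point has no readers left. */
        ossl_synchronize_rcu(rcu);
        OPENSSL_free(oldcfg);

        return 1;
    }
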
       |
       | /*
       |  * Note: This call assumes its made under the protection of
       |  * ossl_rcu_write_lock
       |  */
       | int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
   187 | {
   187 |     struct rcu_cb_item *new =
   187 |         OPENSSL_zalloc(sizeof(*new));
       |
   187 |     if (new == NULL)
     0 |         return 0;
       |
   187 |     new->data = data;
   187 |     new->fn = cb;
       |
   187 |     new->next = lock->cb_items;
   187 |     lock->cb_items = new;
       |
   187 |     return 1;
   187 | }
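
Instead of blocking in ossl_synchronize_rcu(), a writer that already holds ossl_rcu_write_lock() can queue the cleanup with ossl_rcu_call(); the queued callbacks run at the end of the next ossl_synchronize_rcu(), as seen in the loop above. A sketch of the same hypothetical update using deferred reclamation:

    /* Hypothetical deferred free; ossl_rcu_call() must be made under the
     * write lock, as the comment above it notes. */
    static void free_old_cfg(void *arg)
    {
        OPENSSL_free(arg);
    }

    static int update_verbosity_deferred(CRYPTO_RCU_LOCK *rcu, int verbosity)
    {
        struct app_config *newcfg, *oldcfg;
        int ok;

        newcfg = OPENSSL_zalloc(sizeof(*newcfg));
        if (newcfg == NULL)
            return 0;
        newcfg->verbosity = verbosity;

        ossl_rcu_write_lock(rcu);
        oldcfg = shared_cfg;
        ossl_rcu_assign_uptr((void **)&shared_cfg, (void **)&newcfg);
        /* Queue the old object; it is freed during a later synchronize.
         * (If queueing fails, this sketch simply leaks oldcfg.) */
        ok = (oldcfg == NULL) || ossl_rcu_call(rcu, free_old_cfg, oldcfg);
        ossl_rcu_write_unlock(rcu);

        return ok;
    }
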
       |
       | void *ossl_rcu_uptr_deref(void **p)
 52.5M | {
 52.5M |     return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
 52.5M | }
       |
       | void ossl_rcu_assign_uptr(void **p, void **v)
 28.7k | {
 28.7k |     ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
 28.7k | }
       |
       | CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
   438 | {
   438 |     struct rcu_lock_st *new;
       |
       |     /*
       |      * We need a minimum of 2 qp's
       |      */
   438 |     if (num_writers < 2)
   438 |         num_writers = 2;
       |
   438 |     ctx = ossl_lib_ctx_get_concrete(ctx);
   438 |     if (ctx == NULL)
     0 |         return 0;
       |
   438 |     new = OPENSSL_zalloc(sizeof(*new));
   438 |     if (new == NULL)
     0 |         return NULL;
       |
   438 |     new->ctx = ctx;
   438 |     pthread_mutex_init(&new->write_lock, NULL);
   438 |     pthread_mutex_init(&new->prior_lock, NULL);
   438 |     pthread_mutex_init(&new->alloc_lock, NULL);
   438 |     pthread_cond_init(&new->prior_signal, NULL);
   438 |     pthread_cond_init(&new->alloc_signal, NULL);
       |
   438 |     new->qp_group = allocate_new_qp_group(new, num_writers);
   438 |     if (new->qp_group == NULL) {
     0 |         OPENSSL_free(new);
     0 |         new = NULL;
     0 |     }
       |
   438 |     return new;
   438 | }
       |
       | void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
   350 | {
   350 |     struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
       |
   350 |     if (lock == NULL)
     0 |         return;
       |
       |     /* make sure we're synchronized */
   350 |     ossl_synchronize_rcu(rlock);
       |
   350 |     OPENSSL_free(rlock->qp_group);
       |     /* There should only be a single qp left now */
   350 |     OPENSSL_free(rlock);
   350 | }
       |
       | CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
 9.48M | {
 9.48M | # ifdef USE_RWLOCK
 9.48M |     CRYPTO_RWLOCK *lock;
       |
 9.48M |     if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
       |         /* Don't set error, to avoid recursion blowup. */
     0 |         return NULL;
       |
 9.48M |     if (pthread_rwlock_init(lock, NULL) != 0) {
     0 |         OPENSSL_free(lock);
     0 |         return NULL;
     0 |     }
       | # else
       |     pthread_mutexattr_t attr;
       |     CRYPTO_RWLOCK *lock;
       |
       |     if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
       |         /* Don't set error, to avoid recursion blowup. */
       |         return NULL;
       |
       |     /*
       |      * We don't use recursive mutexes, but try to catch errors if we do.
       |      */
       |     pthread_mutexattr_init(&attr);
       | #  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
       | #   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
       |     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
       | #   endif
       | #  else
       |     /* The SPT Thread Library does not define MUTEX attributes. */
       | #  endif
       |
       |     if (pthread_mutex_init(lock, &attr) != 0) {
       |         pthread_mutexattr_destroy(&attr);
       |         OPENSSL_free(lock);
       |         return NULL;
       |     }
       |
       |     pthread_mutexattr_destroy(&attr);
       | # endif
       |
 9.48M |     return lock;
 9.48M | }
       |
       | __owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
 1.07G | {
 1.07G | # ifdef USE_RWLOCK
 1.07G |     if (pthread_rwlock_rdlock(lock) != 0)
     0 |         return 0;
       | # else
       |     if (pthread_mutex_lock(lock) != 0) {
       |         assert(errno != EDEADLK && errno != EBUSY);
       |         return 0;
       |     }
       | # endif
       |
 1.07G |     return 1;
 1.07G | }
       |
       | __owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
 66.6M | {
 66.6M | # ifdef USE_RWLOCK
 66.6M |     if (pthread_rwlock_wrlock(lock) != 0)
     0 |         return 0;
       | # else
       |     if (pthread_mutex_lock(lock) != 0) {
       |         assert(errno != EDEADLK && errno != EBUSY);
       |         return 0;
       |     }
       | # endif
       |
 66.6M |     return 1;
 66.6M | }
       |
       | int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
 1.23G | {
 1.23G | # ifdef USE_RWLOCK
 1.23G |     if (pthread_rwlock_unlock(lock) != 0)
     0 |         return 0;
       | # else
       |     if (pthread_mutex_unlock(lock) != 0) {
       |         assert(errno != EPERM);
       |         return 0;
       |     }
       | # endif
       |
 1.23G |     return 1;
 1.23G | }
       |
       | void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
 9.47M | {
 9.47M |     if (lock == NULL)
 2.23k |         return;
       |
 9.47M | # ifdef USE_RWLOCK
 9.47M |     pthread_rwlock_destroy(lock);
       | # else
       |     pthread_mutex_destroy(lock);
       | # endif
 9.47M |     OPENSSL_free(lock);
       |
 9.47M |     return;
 9.47M | }
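
The CRYPTO_THREAD_*lock* family wraps a pthread rwlock (or a plain mutex on platforms without PTHREAD_RWLOCK_INITIALIZER, per the # else branches above). A minimal usage sketch with a hypothetical shared counter:

    #include <openssl/crypto.h>

    /* Hypothetical shared state guarded by a CRYPTO_RWLOCK. */
    static CRYPTO_RWLOCK *counter_lock;
    static long counter;

    static int setup(void)
    {
        counter_lock = CRYPTO_THREAD_lock_new();
        return counter_lock != NULL;
    }

    static int bump_counter(void)
    {
        if (!CRYPTO_THREAD_write_lock(counter_lock))
            return 0;
        counter++;
        return CRYPTO_THREAD_unlock(counter_lock);
    }

    static int read_counter(long *out)
    {
        if (!CRYPTO_THREAD_read_lock(counter_lock))
            return 0;
        *out = counter;
        return CRYPTO_THREAD_unlock(counter_lock);
    }

    static void teardown(void)
    {
        CRYPTO_THREAD_lock_free(counter_lock);
        counter_lock = NULL;
    }
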
       |
       | int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
 2.64G | {
 2.64G |     if (pthread_once(once, init) != 0)
     0 |         return 0;
       |
 2.64G |     return 1;
 2.64G | }
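
CRYPTO_THREAD_run_once() is a thin wrapper over pthread_once(); the usual pattern pairs it with a CRYPTO_ONCE initialized to CRYPTO_ONCE_STATIC_INIT. A sketch of a hypothetical lazily created global lock:

    #include <openssl/crypto.h>

    static CRYPTO_ONCE init_once = CRYPTO_ONCE_STATIC_INIT;
    static CRYPTO_RWLOCK *global_lock;

    static void do_init(void)
    {
        global_lock = CRYPTO_THREAD_lock_new();
    }

    static CRYPTO_RWLOCK *get_global_lock(void)
    {
        /* Runs do_init() exactly once, even with concurrent callers. */
        if (!CRYPTO_THREAD_run_once(&init_once, do_init))
            return NULL;
        return global_lock;
    }
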
       |
       | int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
 1.89k | {
 1.89k |     if (pthread_key_create(key, cleanup) != 0)
     0 |         return 0;
       |
 1.89k |     return 1;
 1.89k | }
       |
       | void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
 1.78G | {
 1.78G |     return pthread_getspecific(*key);
 1.78G | }
       |
       | int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
 1.85k | {
 1.85k |     if (pthread_setspecific(*key, val) != 0)
     0 |         return 0;
       |
 1.85k |     return 1;
 1.85k | }
       |
       | int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
 1.57k | {
 1.57k |     if (pthread_key_delete(*key) != 0)
     0 |         return 0;
       |
 1.57k |     return 1;
 1.57k | }
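
The CRYPTO_THREAD_*_local() functions wrap pthread_key_create(), pthread_getspecific() and pthread_setspecific(); the cleanup callback given to CRYPTO_THREAD_init_local() runs when a thread exits with a value still set, which is how ossl_rcu_free_local_data() above gets invoked. A sketch of a hypothetical per-thread scratch buffer:

    #include <openssl/crypto.h>

    /* Hypothetical per-thread buffer keyed by a CRYPTO_THREAD_LOCAL. */
    static CRYPTO_THREAD_LOCAL scratch_key;

    static void scratch_free(void *buf)
    {
        OPENSSL_free(buf);
    }

    static int scratch_setup(void)
    {
        return CRYPTO_THREAD_init_local(&scratch_key, scratch_free);
    }

    static void *scratch_get(size_t len)
    {
        void *buf = CRYPTO_THREAD_get_local(&scratch_key);

        if (buf == NULL) {
            buf = OPENSSL_zalloc(len);
            if (buf != NULL && !CRYPTO_THREAD_set_local(&scratch_key, buf)) {
                OPENSSL_free(buf);
                buf = NULL;
            }
        }
        return buf;
    }

    static void scratch_teardown(void)
    {
        CRYPTO_THREAD_cleanup_local(&scratch_key);
    }
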
       |
       | CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
  176k | {
  176k |     return pthread_self();
  176k | }
       |
       | int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
 11.9k | {
 11.9k |     return pthread_equal(a, b);
 11.9k | }
       |
       | int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
 10.5M | {
 10.5M | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
 10.5M |     if (__atomic_is_lock_free(sizeof(*val), val)) {
 10.5M |         *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
 10.5M |         return 1;
 10.5M |     }
       | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
       |     /* This will work for all future Solaris versions. */
       |     if (ret != NULL) {
       |         *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
       |         return 1;
       |     }
       | # endif
     0 |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
     0 |         return 0;
       |
     0 |     *val += amount;
     0 |     *ret  = *val;
       |
     0 |     if (!CRYPTO_THREAD_unlock(lock))
     0 |         return 0;
       |
     0 |     return 1;
     0 | }
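
CRYPTO_atomic_add() (like the other CRYPTO_atomic_* helpers below) prefers the lock-free builtin and only falls back to the supplied CRYPTO_RWLOCK when no lock-free atomic is available, which is why callers should still pass a lock. A sketch of a hypothetical reference counter:

    #include <openssl/crypto.h>

    /* Hypothetical refcounted object using CRYPTO_atomic_add(). */
    struct refobj {
        int refcount;
        CRYPTO_RWLOCK *lock;   /* only used if atomics are not lock-free */
    };

    static int refobj_up(struct refobj *obj)
    {
        int after;

        return CRYPTO_atomic_add(&obj->refcount, 1, &after, obj->lock);
    }

    static int refobj_down(struct refobj *obj, int *is_zero)
    {
        int after;

        if (!CRYPTO_atomic_add(&obj->refcount, -1, &after, obj->lock))
            return 0;
        *is_zero = (after == 0);
        return 1;
    }
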
       |
       | int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
       |                      CRYPTO_RWLOCK *lock)
   644 | {
   644 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
   644 |     if (__atomic_is_lock_free(sizeof(*val), val)) {
   644 |         *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
   644 |         return 1;
   644 |     }
       | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
       |     /* This will work for all future Solaris versions. */
       |     if (ret != NULL) {
       |         *ret = atomic_or_64_nv(val, op);
       |         return 1;
       |     }
       | # endif
     0 |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
     0 |         return 0;
     0 |     *val |= op;
     0 |     *ret  = *val;
       |
     0 |     if (!CRYPTO_THREAD_unlock(lock))
     0 |         return 0;
       |
     0 |     return 1;
     0 | }
       |
       | int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
 2.55G | {
 2.55G | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
 2.55G |     if (__atomic_is_lock_free(sizeof(*val), val)) {
 2.55G |         __atomic_load(val, ret, __ATOMIC_ACQUIRE);
 2.55G |         return 1;
 2.55G |     }
       | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
       |     /* This will work for all future Solaris versions. */
       |     if (ret != NULL) {
       |         *ret = atomic_or_64_nv(val, 0);
       |         return 1;
       |     }
       | # endif
     0 |     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
     0 |         return 0;
     0 |     *ret  = *val;
     0 |     if (!CRYPTO_THREAD_unlock(lock))
     0 |         return 0;
       |
     0 |     return 1;
     0 | }
       |
       | int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
     0 | {
     0 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
     0 |     if (__atomic_is_lock_free(sizeof(*val), val)) {
     0 |         __atomic_load(val, ret, __ATOMIC_ACQUIRE);
     0 |         return 1;
     0 |     }
       | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
       |     /* This will work for all future Solaris versions. */
       |     if (ret != NULL) {
       |         *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
       |         return 1;
       |     }
       | # endif
     0 |     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
     0 |         return 0;
     0 |     *ret  = *val;
     0 |     if (!CRYPTO_THREAD_unlock(lock))
     0 |         return 0;
       |
     0 |     return 1;
     0 | }
       |
       | # ifndef FIPS_MODULE
       | int openssl_init_fork_handlers(void)
     0 | {
     0 |     return 1;
     0 | }
       | # endif /* FIPS_MODULE */
       |
       | int openssl_get_fork_id(void)
 80.4k | {
 80.4k |     return getpid();
 80.4k | }
       | #endif