Coverage Report

Created: 2025-08-28 07:07

/src/openssl35/crypto/threads_pthread.c
Line |  Count | Source
  1 |        | /*
  2 |        |  * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
  3 |        |  *
  4 |        |  * Licensed under the Apache License 2.0 (the "License").  You may not use
  5 |        |  * this file except in compliance with the License.  You can obtain a copy
  6 |        |  * in the file LICENSE in the source distribution or at
  7 |        |  * https://www.openssl.org/source/license.html
  8 |        |  */
  9 |        |
 10 |        | /* We need to use the OPENSSL_fork_*() deprecated APIs */
 11 |        | #define OPENSSL_SUPPRESS_DEPRECATED
 12 |        |
 13 |        | #include <openssl/crypto.h>
 14 |        | #include <crypto/cryptlib.h>
 15 |        | #include "internal/cryptlib.h"
 16 |        | #include "internal/rcu.h"
 17 |        | #include "rcu_internal.h"
 18 |        |
 19 |        | #if defined(__clang__) && defined(__has_feature)
 20 |        | # if __has_feature(thread_sanitizer)
 21 |        | #  define __SANITIZE_THREAD__
 22 |        | # endif
 23 |        | #endif
 24 |        |
 25 |        | #if defined(__SANITIZE_THREAD__)
 26 |        | # include <sanitizer/tsan_interface.h>
 27 |        | # define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \
 28 |        | __tsan_mutex_post_unlock((x), 0)
 29 |        |
 30 |        | # define TSAN_FAKE_LOCK(x)  __tsan_mutex_pre_lock((x), 0); \
 31 |        | __tsan_mutex_post_lock((x), 0, 0)
 32 |        | #else
 33 |        | # define TSAN_FAKE_UNLOCK(x)
 34 |        | # define TSAN_FAKE_LOCK(x)
 35 |        | #endif
 36 |        |
 37 |        | #if defined(__sun)
 38 |        | # include <atomic.h>
 39 |        | #endif
 40 |        |
 41 |        | #if defined(__apple_build_version__) && __apple_build_version__ < 6000000
 42 |        | /*
 43 |        |  * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
 44 |        |  * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
 45 |        |  * rather than two which has signature __atomic_is_lock_free(sizeof(_Atomic(T))).
 46 |        |  * All of this makes impossible to use __atomic_is_lock_free here.
 47 |        |  *
 48 |        |  * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 49 |        |  */
 50 |        | # define BROKEN_CLANG_ATOMICS
 51 |        | #endif
 52 |        |
 53 |        | #if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
 54 |        |
 55 |        | # if defined(OPENSSL_SYS_UNIX)
 56 |        | #  include <sys/types.h>
 57 |        | #  include <unistd.h>
 58 |        | # endif
 59 |        |
 60 |        | # include <assert.h>
 61 |        |
 62 |        | /*
 63 |        |  * The Non-Stop KLT thread model currently seems broken in its rwlock
 64 |        |  * implementation
 65 |        |  */
 66 |        | # if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_)
 67 |        | #  define USE_RWLOCK
 68 |        | # endif
 69 |        |
 70 |        | /*
 71 |        |  * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
 72 |        |  * other compilers.
 73 |        |
 74 |        |  * Unfortunately, we can't do that with some "generic type", because there's no
 75 |        |  * guarantee that the chosen generic type is large enough to cover all cases.
 76 |        |  * Therefore, we implement fallbacks for each applicable type, with composed
 77 |        |  * names that include the type they handle.
 78 |        |  *
 79 |        |  * (an anecdote: we previously tried to use |void *| as the generic type, with
 80 |        |  * the thought that the pointer itself is the largest type.  However, this is
 81 |        |  * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
 82 |        |  *
 83 |        |  * All applicable ATOMIC_ macros take the intended type as first parameter, so
 84 |        |  * they can map to the correct fallback function.  In the GNU/clang case, that
 85 |        |  * parameter is simply ignored.
 86 |        |  */
 87 |        |
 88 |        | /*
 89 |        |  * Internal types used with the ATOMIC_ macros, to make it possible to compose
 90 |        |  * fallback function names.
 91 |        |  */
 92 |        | typedef void *pvoid;
 93 |        |
 94 |        | # if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
 95 |        |     && !defined(USE_ATOMIC_FALLBACKS)
 96 |  52.5M | #  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
 97 |    921 | #  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
 98 |  28.7k | #  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
 99 |    978 | #  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
100 |     57 | #  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
101 |        | # else
102 |        | static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
103 |        |
104 |        | #  define IMPL_fallback_atomic_load_n(t)                        \
105 |        |     static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
106 |        |     {                                                           \
107 |        |         t ret;                                                  \
108 |        |                                                                 \
109 |        |         pthread_mutex_lock(&atomic_sim_lock);                   \
110 |        |         ret = *p;                                               \
111 |        |         pthread_mutex_unlock(&atomic_sim_lock);                 \
112 |        |         return ret;                                             \
113 |        |     }
114 |        | IMPL_fallback_atomic_load_n(uint32_t)
115 |        | IMPL_fallback_atomic_load_n(uint64_t)
116 |        | IMPL_fallback_atomic_load_n(pvoid)
117 |        |
118 |        | #  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
119 |        |
120 |        | #  define IMPL_fallback_atomic_store_n(t)                       \
121 |        |     static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
122 |        |     {                                                           \
123 |        |         t ret;                                                  \
124 |        |                                                                 \
125 |        |         pthread_mutex_lock(&atomic_sim_lock);                   \
126 |        |         ret = *p;                                               \
127 |        |         *p = v;                                                 \
128 |        |         pthread_mutex_unlock(&atomic_sim_lock);                 \
129 |        |         return ret;                                             \
130 |        |     }
131 |        | IMPL_fallback_atomic_store_n(uint32_t)
132 |        |
133 |        | #  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
134 |        |
135 |        | #  define IMPL_fallback_atomic_store(t)                         \
136 |        |     static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
137 |        |     {                                                           \
138 |        |         pthread_mutex_lock(&atomic_sim_lock);                   \
139 |        |         *p = *v;                                                \
140 |        |         pthread_mutex_unlock(&atomic_sim_lock);                 \
141 |        |     }
142 |        | IMPL_fallback_atomic_store(pvoid)
143 |        |
144 |        | #  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
145 |        |
146 |        | /*
147 |        |  * The fallbacks that follow don't need any per type implementation, as
148 |        |  * they are designed for uint64_t only.  If there comes a time when multiple
149 |        |  * types need to be covered, it's relatively easy to refactor them the same
150 |        |  * way as the fallbacks above.
151 |        |  */
152 |        |
153 |        | static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
154 |        | {
155 |        |     uint64_t ret;
156 |        |
157 |        |     pthread_mutex_lock(&atomic_sim_lock);
158 |        |     *p += v;
159 |        |     ret = *p;
160 |        |     pthread_mutex_unlock(&atomic_sim_lock);
161 |        |     return ret;
162 |        | }
163 |        |
164 |        | #  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
165 |        |
166 |        | static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
167 |        | {
168 |        |     uint64_t ret;
169 |        |
170 |        |     pthread_mutex_lock(&atomic_sim_lock);
171 |        |     *p -= v;
172 |        |     ret = *p;
173 |        |     pthread_mutex_unlock(&atomic_sim_lock);
174 |        |     return ret;
175 |        | }
176 |        |
177 |        | #  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
178 |        | # endif
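
To make the dispatch above concrete, here is a minimal sketch (not part of the measured source) of how a call through ATOMIC_LOAD_N resolves: with the GNU/clang builtins the type parameter is ignored, while on the fallback path it composes the name of the per-type, mutex-protected helper generated by IMPL_fallback_atomic_load_n(). The function name example_load_users is hypothetical.

    /* Hypothetical illustration of the ATOMIC_LOAD_N dispatch. */
    static uint64_t example_load_users(uint64_t *p)
    {
        /* builtin path:  __atomic_load_n(p, __ATOMIC_ACQUIRE)   */
        /* fallback path: fallback_atomic_load_n_uint64_t(p)     */
        return ATOMIC_LOAD_N(uint64_t, p, __ATOMIC_ACQUIRE);
    }
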
179 |        |
180 |        | /*
181 |        |  * This is the core of an rcu lock. It tracks the readers and writers for the
182 |        |  * current quiescence point for a given lock. Users is the 64 bit value that
183 |        |  * stores the READERS/ID as defined above
184 |        |  *
185 |        |  */
186 |        | struct rcu_qp {
187 |        |     uint64_t users;
188 |        | };
189 |        |
190 |        | struct thread_qp {
191 |        |     struct rcu_qp *qp;
192 |        |     unsigned int depth;
193 |        |     CRYPTO_RCU_LOCK *lock;
194 |        | };
195 |        |
196 |    486 | # define MAX_QPS 10
197 |        | /*
198 |        |  * This is the per thread tracking data
199 |        |  * that is assigned to each thread participating
200 |        |  * in an rcu qp
201 |        |  *
202 |        |  * qp points to the qp that it last acquired
203 |        |  *
204 |        |  */
205 |        | struct rcu_thr_data {
206 |        |     struct thread_qp thread_qps[MAX_QPS];
207 |        | };
208 |        |
209 |        | /*
210 |        |  * This is the internal version of a CRYPTO_RCU_LOCK
211 |        |  * it is cast from CRYPTO_RCU_LOCK
212 |        |  */
213 |        | struct rcu_lock_st {
214 |        |     /* Callbacks to call for next ossl_synchronize_rcu */
215 |        |     struct rcu_cb_item *cb_items;
216 |        |
217 |        |     /* The context we are being created against */
218 |        |     OSSL_LIB_CTX *ctx;
219 |        |
220 |        |     /* Array of quiescent points for synchronization */
221 |        |     struct rcu_qp *qp_group;
222 |        |
223 |        |     /* rcu generation counter for in-order retirement */
224 |        |     uint32_t id_ctr;
225 |        |
226 |        |     /* Number of elements in qp_group array */
227 |        |     uint32_t group_count;
228 |        |
229 |        |     /* Index of the current qp in the qp_group array */
230 |        |     uint32_t reader_idx;
231 |        |
232 |        |     /* value of the next id_ctr value to be retired */
233 |        |     uint32_t next_to_retire;
234 |        |
235 |        |     /* index of the next free rcu_qp in the qp_group */
236 |        |     uint32_t current_alloc_idx;
237 |        |
238 |        |     /* number of qp's in qp_group array currently being retired */
239 |        |     uint32_t writers_alloced;
240 |        |
241 |        |     /* lock protecting write side operations */
242 |        |     pthread_mutex_t write_lock;
243 |        |
244 |        |     /* lock protecting updates to writers_alloced/current_alloc_idx */
245 |        |     pthread_mutex_t alloc_lock;
246 |        |
247 |        |     /* signal to wake threads waiting on alloc_lock */
248 |        |     pthread_cond_t alloc_signal;
249 |        |
250 |        |     /* lock to enforce in-order retirement */
251 |        |     pthread_mutex_t prior_lock;
252 |        |
253 |        |     /* signal to wake threads waiting on prior_lock */
254 |        |     pthread_cond_t prior_signal;
255 |        | };
256 |        |
257 |        | /* Read side acquisition of the current qp */
258 |        | static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
259 |     57 | {
260 |     57 |     uint32_t qp_idx;
261 |        |
262 |        |     /* get the current qp index */
263 |     57 |     for (;;) {
264 |     57 |         qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
265 |        |
266 |        |         /*
267 |        |          * Notes on use of __ATOMIC_ACQUIRE
268 |        |          * We need to ensure the following:
269 |        |          * 1) That subsequent operations aren't optimized by hoisting them above
270 |        |          * this operation.  Specifically, we don't want the below re-load of
271 |        |          * qp_idx to get optimized away
272 |        |          * 2) We want to ensure that any updating of reader_idx on the write side
273 |        |          * of the lock is flushed from a local cpu cache so that we see any
274 |        |          * updates prior to the load.  This is a non-issue on cache coherent
275 |        |          * systems like x86, but is relevant on other arches
276 |        |          */
277 |     57 |         ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
278 |     57 |                          __ATOMIC_ACQUIRE);
279 |        |
280 |        |         /* if the idx hasn't changed, we're good, else try again */
281 |     57 |         if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
282 |     57 |                                     __ATOMIC_RELAXED))
283 |     57 |             break;
284 |        |
285 |      0 |         ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
286 |      0 |                          __ATOMIC_RELAXED);
287 |      0 |     }
288 |        |
289 |     57 |     return &lock->qp_group[qp_idx];
290 |     57 | }
291 |        |
292 |        | static void ossl_rcu_free_local_data(void *arg)
293 |      3 | {
294 |      3 |     OSSL_LIB_CTX *ctx = arg;
295 |      3 |     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
296 |      3 |     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
297 |        |
298 |      3 |     OPENSSL_free(data);
299 |      3 |     CRYPTO_THREAD_set_local(lkey, NULL);
300 |      3 | }
301 |        |
302 |        | void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
303 |     39 | {
304 |     39 |     struct rcu_thr_data *data;
305 |     39 |     int i, available_qp = -1;
306 |     39 |     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
307 |        |
308 |        |     /*
309 |        |      * we're going to access current_qp here so ask the
310 |        |      * processor to fetch it
311 |        |      */
312 |     39 |     data = CRYPTO_THREAD_get_local(lkey);
313 |        |
314 |     39 |     if (data == NULL) {
315 |      2 |         data = OPENSSL_zalloc(sizeof(*data));
316 |      2 |         OPENSSL_assert(data != NULL);
317 |      2 |         CRYPTO_THREAD_set_local(lkey, data);
318 |      2 |         ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
319 |      2 |     }
320 |        |
321 |    429 |     for (i = 0; i < MAX_QPS; i++) {
322 |    390 |         if (data->thread_qps[i].qp == NULL && available_qp == -1)
323 |     39 |             available_qp = i;
324 |        |         /* If we have a hold on this lock already, we're good */
325 |    390 |         if (data->thread_qps[i].lock == lock) {
326 |      0 |             data->thread_qps[i].depth++;
327 |      0 |             return;
328 |      0 |         }
329 |    390 |     }
330 |        |
331 |        |     /*
332 |        |      * if we get here, then we don't have a hold on this lock yet
333 |        |      */
334 |     39 |     assert(available_qp != -1);
335 |        |
336 |     39 |     data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
337 |     39 |     data->thread_qps[available_qp].depth = 1;
338 |     39 |     data->thread_qps[available_qp].lock = lock;
339 |     39 | }
340 |        |
341 |        | void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
342 |     57 | {
343 |     57 |     int i;
344 |     57 |     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
345 |     57 |     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
346 |     57 |     uint64_t ret;
347 |        |
348 |     57 |     assert(data != NULL);
349 |        |
350 |     57 |     for (i = 0; i < MAX_QPS; i++) {
351 |     57 |         if (data->thread_qps[i].lock == lock) {
352 |        |             /*
353 |        |              * we have to use __ATOMIC_RELEASE here
354 |        |              * to ensure that all preceding read instructions complete
355 |        |              * before the decrement is visible to ossl_synchronize_rcu
356 |        |              */
357 |     57 |             data->thread_qps[i].depth--;
358 |     57 |             if (data->thread_qps[i].depth == 0) {
359 |     57 |                 ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
360 |     57 |                                        (uint64_t)1, __ATOMIC_RELEASE);
361 |     57 |                 OPENSSL_assert(ret != UINT64_MAX);
362 |     57 |                 data->thread_qps[i].qp = NULL;
363 |     57 |                 data->thread_qps[i].lock = NULL;
364 |     57 |             }
365 |     57 |             return;
366 |     57 |         }
367 |     57 |     }
368 |        |     /*
369 |        |      * If we get here, we're trying to unlock a lock that we never acquired -
370 |        |      * that's fatal.
371 |        |      */
372 |      0 |     assert(0);
373 |      0 | }
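
Taken together, the read side amounts to: register with the current qp, dereference the protected pointer, and deregister. A minimal reader-side sketch follows (not part of the measured source); conf_lock, config and struct app_conf are hypothetical names, and the CRYPTO_RCU_LOCK is assumed to have been created with ossl_rcu_lock_new() and the pointer published with ossl_rcu_assign_uptr().

    #include <openssl/crypto.h>      /* already included at the top of this file */
    #include "internal/rcu.h"

    struct app_conf { int value; };           /* hypothetical shared object   */
    extern CRYPTO_RCU_LOCK *conf_lock;        /* hypothetical RCU lock        */
    extern struct app_conf *config;           /* pointer published by writer  */

    static int read_conf_value(void)
    {
        struct app_conf *c;
        int v;

        ossl_rcu_read_lock(conf_lock);
        c = ossl_rcu_uptr_deref((void **)&config);  /* acquire-load the pointer     */
        v = (c != NULL) ? c->value : 0;             /* dereference only while held  */
        ossl_rcu_read_unlock(conf_lock);
        return v;
    }
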
374 |        |
375 |        | /*
376 |        |  * Write side allocation routine to get the current qp
377 |        |  * and replace it with a new one
378 |        |  */
379 |        | static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
380 |    921 | {
381 |    921 |     uint32_t current_idx;
382 |        |
383 |    921 |     pthread_mutex_lock(&lock->alloc_lock);
384 |        |
385 |        |     /*
386 |        |      * we need at least one qp to be available with one
387 |        |      * left over, so that readers can start working on
388 |        |      * one that isn't yet being waited on
389 |        |      */
390 |    921 |     while (lock->group_count - lock->writers_alloced < 2)
391 |        |         /* we have to wait for one to be free */
392 |      0 |         pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
393 |        |
394 |    921 |     current_idx = lock->current_alloc_idx;
395 |        |
396 |        |     /* Allocate the qp */
397 |    921 |     lock->writers_alloced++;
398 |        |
399 |        |     /* increment the allocation index */
400 |    921 |     lock->current_alloc_idx =
401 |    921 |         (lock->current_alloc_idx + 1) % lock->group_count;
402 |        |
403 |    921 |     *curr_id = lock->id_ctr;
404 |    921 |     lock->id_ctr++;
405 |        |
406 |    921 |     ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
407 |    921 |                    __ATOMIC_RELAXED);
408 |        |
409 |        |     /*
410 |        |      * this should make sure that the new value of reader_idx is visible in
411 |        |      * get_hold_current_qp, directly after incrementing the users count
412 |        |      */
413 |    921 |     ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
414 |    921 |                      __ATOMIC_RELEASE);
415 |        |
416 |        |     /* wake up any waiters */
417 |    921 |     pthread_cond_signal(&lock->alloc_signal);
418 |    921 |     pthread_mutex_unlock(&lock->alloc_lock);
419 |    921 |     return &lock->qp_group[current_idx];
420 |    921 | }
421 |        |
422 |        | static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
423 |    921 | {
424 |    921 |     pthread_mutex_lock(&lock->alloc_lock);
425 |    921 |     lock->writers_alloced--;
426 |    921 |     pthread_cond_signal(&lock->alloc_signal);
427 |    921 |     pthread_mutex_unlock(&lock->alloc_lock);
428 |    921 | }
429 |        |
430 |        | static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
431 |        |                                             uint32_t count)
432 |    438 | {
433 |    438 |     struct rcu_qp *new =
434 |    438 |         OPENSSL_zalloc(sizeof(*new) * count);
435 |        |
436 |    438 |     lock->group_count = count;
437 |    438 |     return new;
438 |    438 | }
439 |        |
440 |        | void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
441 |    670 | {
442 |    670 |     pthread_mutex_lock(&lock->write_lock);
443 |    670 |     TSAN_FAKE_UNLOCK(&lock->write_lock);
444 |    670 | }
445 |        |
446 |        | void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
447 |    670 | {
448 |    670 |     TSAN_FAKE_LOCK(&lock->write_lock);
449 |    670 |     pthread_mutex_unlock(&lock->write_lock);
450 |    670 | }
451 |        |
452 |        | void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
453 |    921 | {
454 |    921 |     struct rcu_qp *qp;
455 |    921 |     uint64_t count;
456 |    921 |     uint32_t curr_id;
457 |    921 |     struct rcu_cb_item *cb_items, *tmpcb;
458 |        |
459 |    921 |     pthread_mutex_lock(&lock->write_lock);
460 |    921 |     cb_items = lock->cb_items;
461 |    921 |     lock->cb_items = NULL;
462 |    921 |     pthread_mutex_unlock(&lock->write_lock);
463 |        |
464 |    921 |     qp = update_qp(lock, &curr_id);
465 |        |
466 |        |     /* retire in order */
467 |    921 |     pthread_mutex_lock(&lock->prior_lock);
468 |    921 |     while (lock->next_to_retire != curr_id)
469 |      0 |         pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
470 |        |
471 |        |     /*
472 |        |      * wait for the reader count to reach zero
473 |        |      * Note the use of __ATOMIC_ACQUIRE here to ensure that any
474 |        |      * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
475 |        |      * is visible prior to our read
476 |        |      * however this is likely just necessary to silence a tsan warning
477 |        |      * because the read side should not do any write operation
478 |        |      * outside the atomic itself
479 |        |      */
480 |    921 |     do {
481 |    921 |         count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
482 |    921 |     } while (count != (uint64_t)0);
483 |        |
484 |    921 |     lock->next_to_retire++;
485 |    921 |     pthread_cond_broadcast(&lock->prior_signal);
486 |    921 |     pthread_mutex_unlock(&lock->prior_lock);
487 |        |
488 |    921 |     retire_qp(lock, qp);
489 |        |
490 |        |     /* handle any callbacks that we have */
491 |  1.10k |     while (cb_items != NULL) {
492 |    187 |         tmpcb = cb_items;
493 |    187 |         cb_items = cb_items->next;
494 |    187 |         tmpcb->fn(tmpcb->data);
495 |    187 |         OPENSSL_free(tmpcb);
496 |    187 |     }
497 |    921 | }
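
On the write side, the typical pattern is publish-then-wait: swap in the new object under the write lock, then call ossl_synchronize_rcu() before freeing the old one. A minimal sketch (not part of the measured source), reusing the hypothetical conf_lock/config names from the reader example above:

    static void update_conf(struct app_conf *newconf)
    {
        struct app_conf *old;

        ossl_rcu_write_lock(conf_lock);
        old = ossl_rcu_uptr_deref((void **)&config);
        ossl_rcu_assign_uptr((void **)&config, (void **)&newconf); /* release-store */
        ossl_rcu_write_unlock(conf_lock);

        /* returns once no reader can still hold a reference to 'old' */
        ossl_synchronize_rcu(conf_lock);
        OPENSSL_free(old);
    }
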
498 |        |
499 |        | /*
500 |        |  * Note: This call assumes its made under the protection of
501 |        |  * ossl_rcu_write_lock
502 |        |  */
503 |        | int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
504 |    187 | {
505 |    187 |     struct rcu_cb_item *new =
506 |    187 |         OPENSSL_zalloc(sizeof(*new));
507 |        |
508 |    187 |     if (new == NULL)
509 |      0 |         return 0;
510 |        |
511 |    187 |     new->data = data;
512 |    187 |     new->fn = cb;
513 |        |
514 |    187 |     new->next = lock->cb_items;
515 |    187 |     lock->cb_items = new;
516 |        |
517 |    187 |     return 1;
518 |    187 | }
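
ossl_rcu_call() offers a non-blocking alternative to the pattern above: instead of waiting, the writer queues a callback that a later ossl_synchronize_rcu() on the same lock will run. A minimal sketch with the same hypothetical names, assuming rcu_cb_fn is a void (*)(void *) callback as the tmpcb->fn(tmpcb->data) call site suggests; note the queuing must happen under ossl_rcu_write_lock():

    static void free_conf_cb(void *arg)
    {
        OPENSSL_free(arg);
    }

    static int update_conf_deferred(struct app_conf *newconf)
    {
        struct app_conf *old;
        int ok;

        ossl_rcu_write_lock(conf_lock);
        old = ossl_rcu_uptr_deref((void **)&config);
        ossl_rcu_assign_uptr((void **)&config, (void **)&newconf);
        ok = ossl_rcu_call(conf_lock, free_conf_cb, old);  /* runs at a later sync */
        ossl_rcu_write_unlock(conf_lock);
        return ok;
    }
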
519 |        |
520 |        | void *ossl_rcu_uptr_deref(void **p)
521 |  52.5M | {
522 |  52.5M |     return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
523 |  52.5M | }
524 |        |
525 |        | void ossl_rcu_assign_uptr(void **p, void **v)
526 |  28.7k | {
527 |  28.7k |     ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
528 |  28.7k | }
529 |        |
530 |        | CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
531 |    438 | {
532 |    438 |     struct rcu_lock_st *new;
533 |        |
534 |        |     /*
535 |        |      * We need a minimum of 2 qp's
536 |        |      */
537 |    438 |     if (num_writers < 2)
538 |    438 |         num_writers = 2;
539 |        |
540 |    438 |     ctx = ossl_lib_ctx_get_concrete(ctx);
541 |    438 |     if (ctx == NULL)
542 |      0 |         return 0;
543 |        |
544 |    438 |     new = OPENSSL_zalloc(sizeof(*new));
545 |    438 |     if (new == NULL)
546 |      0 |         return NULL;
547 |        |
548 |    438 |     new->ctx = ctx;
549 |    438 |     pthread_mutex_init(&new->write_lock, NULL);
550 |    438 |     pthread_mutex_init(&new->prior_lock, NULL);
551 |    438 |     pthread_mutex_init(&new->alloc_lock, NULL);
552 |    438 |     pthread_cond_init(&new->prior_signal, NULL);
553 |    438 |     pthread_cond_init(&new->alloc_signal, NULL);
554 |        |
555 |    438 |     new->qp_group = allocate_new_qp_group(new, num_writers);
556 |    438 |     if (new->qp_group == NULL) {
557 |      0 |         OPENSSL_free(new);
558 |      0 |         new = NULL;
559 |      0 |     }
560 |        |
561 |    438 |     return new;
562 |    438 | }
563 |        |
564 |        | void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
565 |    350 | {
566 |    350 |     struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
567 |        |
568 |    350 |     if (lock == NULL)
569 |      0 |         return;
570 |        |
571 |        |     /* make sure we're synchronized */
572 |    350 |     ossl_synchronize_rcu(rlock);
573 |        |
574 |    350 |     OPENSSL_free(rlock->qp_group);
575 |        |     /* There should only be a single qp left now */
576 |    350 |     OPENSSL_free(rlock);
577 |    350 | }
578 |        |
579 |        | CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
580 |  9.48M | {
581 |  9.48M | # ifdef USE_RWLOCK
582 |  9.48M |     CRYPTO_RWLOCK *lock;
583 |        |
584 |  9.48M |     if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
585 |        |         /* Don't set error, to avoid recursion blowup. */
586 |      0 |         return NULL;
587 |        |
588 |  9.48M |     if (pthread_rwlock_init(lock, NULL) != 0) {
589 |      0 |         OPENSSL_free(lock);
590 |      0 |         return NULL;
591 |      0 |     }
592 |        | # else
593 |        |     pthread_mutexattr_t attr;
594 |        |     CRYPTO_RWLOCK *lock;
595 |        |
596 |        |     if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
597 |        |         /* Don't set error, to avoid recursion blowup. */
598 |        |         return NULL;
599 |        |
600 |        |     /*
601 |        |      * We don't use recursive mutexes, but try to catch errors if we do.
602 |        |      */
603 |        |     pthread_mutexattr_init(&attr);
604 |        | #  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
605 |        | #   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
606 |        |     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
607 |        | #   endif
608 |        | #  else
609 |        |     /* The SPT Thread Library does not define MUTEX attributes. */
610 |        | #  endif
611 |        |
612 |        |     if (pthread_mutex_init(lock, &attr) != 0) {
613 |        |         pthread_mutexattr_destroy(&attr);
614 |        |         OPENSSL_free(lock);
615 |        |         return NULL;
616 |        |     }
617 |        |
618 |        |     pthread_mutexattr_destroy(&attr);
619 |        | # endif
620 |        |
621 |  9.48M |     return lock;
622 |  9.48M | }
623 |        |
624 |        | __owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
625 |  54.7M | {
626 |  54.7M | # ifdef USE_RWLOCK
627 |  54.7M |     if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))
628 |      0 |         return 0;
629 |        | # else
630 |        |     if (pthread_mutex_lock(lock) != 0) {
631 |        |         assert(errno != EDEADLK && errno != EBUSY);
632 |        |         return 0;
633 |        |     }
634 |        | # endif
635 |        |
636 |  54.7M |     return 1;
637 |  54.7M | }
638 |        |
639 |        | __owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
640 |  31.0M | {
641 |  31.0M | # ifdef USE_RWLOCK
642 |  31.0M |     if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))
643 |      0 |         return 0;
644 |        | # else
645 |        |     if (pthread_mutex_lock(lock) != 0) {
646 |        |         assert(errno != EDEADLK && errno != EBUSY);
647 |        |         return 0;
648 |        |     }
649 |        | # endif
650 |        |
651 |  31.0M |     return 1;
652 |  31.0M | }
653 |        |
654 |        | int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
655 |  1.23G | {
656 |  1.23G | # ifdef USE_RWLOCK
657 |  1.23G |     if (pthread_rwlock_unlock(lock) != 0)
658 |      0 |         return 0;
659 |        | # else
660 |        |     if (pthread_mutex_unlock(lock) != 0) {
661 |        |         assert(errno != EPERM);
662 |        |         return 0;
663 |        |     }
664 |        | # endif
665 |        |
666 |  1.23G |     return 1;
667 |  1.23G | }
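
Since CRYPTO_THREAD_read_lock() and CRYPTO_THREAD_write_lock() are marked __owur, callers are expected to check their return values. A minimal usage sketch (not part of the measured source) with a hypothetical shared counter:

    extern CRYPTO_RWLOCK *counter_lock;   /* hypothetical, from CRYPTO_THREAD_lock_new() */
    extern int counter;

    static int bump_counter(void)
    {
        if (!CRYPTO_THREAD_write_lock(counter_lock))
            return 0;
        counter++;
        return CRYPTO_THREAD_unlock(counter_lock);
    }

    static int read_counter(int *out)
    {
        if (!CRYPTO_THREAD_read_lock(counter_lock))
            return 0;
        *out = counter;
        return CRYPTO_THREAD_unlock(counter_lock);
    }
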
668 |        |
669 |        | void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
670 |  9.47M | {
671 |  9.47M |     if (lock == NULL)
672 |  2.23k |         return;
673 |        |
674 |  9.47M | # ifdef USE_RWLOCK
675 |  9.47M |     pthread_rwlock_destroy(lock);
676 |        | # else
677 |        |     pthread_mutex_destroy(lock);
678 |        | # endif
679 |  9.47M |     OPENSSL_free(lock);
680 |        |
681 |  9.47M |     return;
682 |  9.47M | }
683 |        |
684 |        | int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
685 |  2.64G | {
686 |  2.64G |     if (pthread_once(once, init) != 0)
687 |      0 |         return 0;
688 |        |
689 |  2.64G |     return 1;
690 |  2.64G | }
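
A minimal one-time-initialization sketch built on CRYPTO_THREAD_run_once() (not part of the measured source); the names init_once, do_init and get_global_lock are hypothetical, and CRYPTO_ONCE_STATIC_INIT comes from <openssl/crypto.h>:

    static CRYPTO_ONCE init_once = CRYPTO_ONCE_STATIC_INIT;
    static CRYPTO_RWLOCK *global_lock;

    static void do_init(void)
    {
        global_lock = CRYPTO_THREAD_lock_new();
    }

    static CRYPTO_RWLOCK *get_global_lock(void)
    {
        if (!CRYPTO_THREAD_run_once(&init_once, do_init))
            return NULL;
        return global_lock;   /* may still be NULL if the allocation failed */
    }
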
691 |        |
692 |        | int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
693 |  1.89k | {
694 |  1.89k |     if (pthread_key_create(key, cleanup) != 0)
695 |      0 |         return 0;
696 |        |
697 |  1.89k |     return 1;
698 |  1.89k | }
699 |        |
700 |        | void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
701 |  1.78G | {
702 |  1.78G |     return pthread_getspecific(*key);
703 |  1.78G | }
704 |        |
705 |        | int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
706 |  1.85k | {
707 |  1.85k |     if (pthread_setspecific(*key, val) != 0)
708 |      0 |         return 0;
709 |        |
710 |  1.85k |     return 1;
711 |  1.85k | }
712 |        |
713 |        | int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
714 |  1.57k | {
715 |  1.57k |     if (pthread_key_delete(*key) != 0)
716 |      0 |         return 0;
717 |        |
718 |  1.57k |     return 1;
719 |  1.57k | }
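
A minimal thread-local sketch (not part of the measured source; all names hypothetical): on this pthread backend the cleanup function passed to CRYPTO_THREAD_init_local() becomes the pthread key destructor, so it runs at thread exit for any non-NULL value the thread has set.

    static CRYPTO_THREAD_LOCAL buf_key;   /* hypothetical per-thread buffer key */

    static void buf_free(void *p)
    {
        OPENSSL_free(p);
    }

    static int buf_key_init(void)
    {
        return CRYPTO_THREAD_init_local(&buf_key, buf_free);
    }

    static void *get_thread_buf(size_t len)
    {
        void *buf = CRYPTO_THREAD_get_local(&buf_key);

        if (buf == NULL) {
            buf = OPENSSL_zalloc(len);
            if (buf != NULL && !CRYPTO_THREAD_set_local(&buf_key, buf)) {
                OPENSSL_free(buf);
                return NULL;
            }
        }
        return buf;
    }
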
720 |        |
721 |        | CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
722 |   176k | {
723 |   176k |     return pthread_self();
724 |   176k | }
725 |        |
726 |        | int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
727 |  11.9k | {
728 |  11.9k |     return pthread_equal(a, b);
729 |  11.9k | }
730 |        |
731 |        | int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
732 |  10.5M | {
733 |  10.5M | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
734 |  10.5M |     if (__atomic_is_lock_free(sizeof(*val), val)) {
735 |  10.5M |         *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
736 |  10.5M |         return 1;
737 |  10.5M |     }
738 |        | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
739 |        |     /* This will work for all future Solaris versions. */
740 |        |     if (ret != NULL) {
741 |        |         *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
742 |        |         return 1;
743 |        |     }
744 |        | # endif
745 |      0 |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
746 |      0 |         return 0;
747 |        |
748 |      0 |     *val += amount;
749 |      0 |     *ret  = *val;
750 |        |
751 |      0 |     if (!CRYPTO_THREAD_unlock(lock))
752 |      0 |         return 0;
753 |        |
754 |      0 |     return 1;
755 |      0 | }
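
The lock argument to CRYPTO_atomic_add() is only a fallback: it is taken when none of the compile-time lock-free branches apply, so callers that may run on such platforms must pass a real CRYPTO_RWLOCK. A minimal reference-count sketch (not part of the measured source; hypothetical names):

    static int ref_up(int *refcnt, CRYPTO_RWLOCK *lock, int *newcount)
    {
        return CRYPTO_atomic_add(refcnt, 1, newcount, lock);
    }

    static int ref_down(int *refcnt, CRYPTO_RWLOCK *lock, int *newcount)
    {
        return CRYPTO_atomic_add(refcnt, -1, newcount, lock);
    }
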
756 |        |
757 |        | int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
758 |        |                         CRYPTO_RWLOCK *lock)
759 |      0 | {
760 |      0 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
761 |      0 |     if (__atomic_is_lock_free(sizeof(*val), val)) {
762 |      0 |         *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
763 |      0 |         return 1;
764 |      0 |     }
765 |        | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
766 |        |     /* This will work for all future Solaris versions. */
767 |        |     if (ret != NULL) {
768 |        |         *ret = atomic_add_64_nv(val, op);
769 |        |         return 1;
770 |        |     }
771 |        | # endif
772 |      0 |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
773 |      0 |         return 0;
774 |      0 |     *val += op;
775 |      0 |     *ret  = *val;
776 |        |
777 |      0 |     if (!CRYPTO_THREAD_unlock(lock))
778 |      0 |         return 0;
779 |        |
780 |      0 |     return 1;
781 |      0 | }
782 |        |
783 |        | int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
784 |        |                       CRYPTO_RWLOCK *lock)
785 |      0 | {
786 |      0 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
787 |      0 |     if (__atomic_is_lock_free(sizeof(*val), val)) {
788 |      0 |         *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
789 |      0 |         return 1;
790 |      0 |     }
791 |        | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
792 |        |     /* This will work for all future Solaris versions. */
793 |        |     if (ret != NULL) {
794 |        |         *ret = atomic_and_64_nv(val, op);
795 |        |         return 1;
796 |        |     }
797 |        | # endif
798 |      0 |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
799 |      0 |         return 0;
800 |      0 |     *val &= op;
801 |      0 |     *ret  = *val;
802 |        |
803 |      0 |     if (!CRYPTO_THREAD_unlock(lock))
804 |      0 |         return 0;
805 |        |
806 |      0 |     return 1;
807 |      0 | }
808 |        |
809 |        | int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
810 |        |                      CRYPTO_RWLOCK *lock)
811 |    644 | {
812 |    644 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
813 |    644 |     if (__atomic_is_lock_free(sizeof(*val), val)) {
814 |    644 |         *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
815 |    644 |         return 1;
816 |    644 |     }
817 |        | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
818 |        |     /* This will work for all future Solaris versions. */
819 |        |     if (ret != NULL) {
820 |        |         *ret = atomic_or_64_nv(val, op);
821 |        |         return 1;
822 |        |     }
823 |        | # endif
824 |      0 |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
825 |      0 |         return 0;
826 |      0 |     *val |= op;
827 |      0 |     *ret  = *val;
828 |        |
829 |      0 |     if (!CRYPTO_THREAD_unlock(lock))
830 |      0 |         return 0;
831 |        |
832 |      0 |     return 1;
833 |      0 | }
834 |        |
835 |        | int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
836 |  2.55G | {
837 |  2.55G | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
838 |  2.55G |     if (__atomic_is_lock_free(sizeof(*val), val)) {
839 |  2.55G |         __atomic_load(val, ret, __ATOMIC_ACQUIRE);
840 |  2.55G |         return 1;
841 |  2.55G |     }
842 |        | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
843 |        |     /* This will work for all future Solaris versions. */
844 |        |     if (ret != NULL) {
845 |        |         *ret = atomic_or_64_nv(val, 0);
846 |        |         return 1;
847 |        |     }
848 |        | # endif
849 |      0 |     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
850 |      0 |         return 0;
851 |      0 |     *ret  = *val;
852 |      0 |     if (!CRYPTO_THREAD_unlock(lock))
853 |      0 |         return 0;
854 |        |
855 |      0 |     return 1;
856 |      0 | }
857 |        |
858 |        | int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
859 |  28.2k | {
860 |  28.2k | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
861 |  28.2k |     if (__atomic_is_lock_free(sizeof(*dst), dst)) {
862 |  28.2k |         __atomic_store(dst, &val, __ATOMIC_RELEASE);
863 |  28.2k |         return 1;
864 |  28.2k |     }
865 |        | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
866 |        |     /* This will work for all future Solaris versions. */
867 |        |     if (dst != NULL) {
868 |        |         atomic_swap_64(dst, val);
869 |        |         return 1;
870 |        |     }
871 |        | # endif
872 |      0 |     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
873 |      0 |         return 0;
874 |      0 |     *dst  = val;
875 |      0 |     if (!CRYPTO_THREAD_unlock(lock))
876 |      0 |         return 0;
877 |        |
878 |      0 |     return 1;
879 |      0 | }
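
CRYPTO_atomic_load() and CRYPTO_atomic_store() follow the same pattern: lock-free builtins where available, otherwise the supplied CRYPTO_RWLOCK. A minimal sketch for a 64-bit statistic (not part of the measured source; hypothetical names, with stat_lock only used on the fallback path):

    static uint64_t bytes_seen;
    static CRYPTO_RWLOCK *stat_lock;   /* hypothetical; fallback path only */

    static int set_bytes_seen(uint64_t n)
    {
        return CRYPTO_atomic_store(&bytes_seen, n, stat_lock);
    }

    static int get_bytes_seen(uint64_t *out)
    {
        return CRYPTO_atomic_load(&bytes_seen, out, stat_lock);
    }
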
880 |        |
881 |        | int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
882 |      0 | {
883 |      0 | # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
884 |      0 |     if (__atomic_is_lock_free(sizeof(*val), val)) {
885 |      0 |         __atomic_load(val, ret, __ATOMIC_ACQUIRE);
886 |      0 |         return 1;
887 |      0 |     }
888 |        | # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
889 |        |     /* This will work for all future Solaris versions. */
890 |        |     if (ret != NULL) {
891 |        |         *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
892 |        |         return 1;
893 |        |     }
894 |        | # endif
895 |      0 |     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
896 |      0 |         return 0;
897 |      0 |     *ret  = *val;
898 |      0 |     if (!CRYPTO_THREAD_unlock(lock))
899 |      0 |         return 0;
900 |        |
901 |      0 |     return 1;
902 |      0 | }
903 |        |
904 |        | # ifndef FIPS_MODULE
905 |        | int openssl_init_fork_handlers(void)
906 |      0 | {
907 |      0 |     return 1;
908 |      0 | }
909 |        | # endif /* FIPS_MODULE */
910 |        |
911 |        | int openssl_get_fork_id(void)
912 |  80.4k | {
913 |  80.4k |     return getpid();
914 |  80.4k | }
915 |        | #endif