Coverage Report

Created: 2025-06-22 06:56

/src/openssl/crypto/threads_pthread.c
Line | Count | Source
1
/*
2
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License 2.0 (the "License").  You may not use
5
 * this file except in compliance with the License.  You can obtain a copy
6
 * in the file LICENSE in the source distribution or at
7
 * https://www.openssl.org/source/license.html
8
 */
9
10
/* We need to use the OPENSSL_fork_*() deprecated APIs */
11
#define OPENSSL_SUPPRESS_DEPRECATED
12
13
#include <openssl/crypto.h>
14
#include <crypto/cryptlib.h>
15
#include <crypto/sparse_array.h>
16
#include "internal/cryptlib.h"
17
#include "internal/threads_common.h"
18
#include "internal/rcu.h"
19
#include "rcu_internal.h"
20
21
#if defined(__clang__) && defined(__has_feature)
22
# if __has_feature(thread_sanitizer)
23
#  define __SANITIZE_THREAD__
24
# endif
25
#endif
26
27
#if defined(__SANITIZE_THREAD__)
28
# include <sanitizer/tsan_interface.h>
29
# define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \
30
__tsan_mutex_post_unlock((x), 0)
31
32
# define TSAN_FAKE_LOCK(x)  __tsan_mutex_pre_lock((x), 0); \
33
__tsan_mutex_post_lock((x), 0, 0)
34
#else
35
# define TSAN_FAKE_UNLOCK(x)
36
# define TSAN_FAKE_LOCK(x)
37
#endif
38
39
#if defined(__sun)
40
# include <atomic.h>
41
#endif
42
43
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
44
/*
45
 * OS X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
46
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free(),
47
 * rather than two; its signature is __atomic_is_lock_free(sizeof(_Atomic(T))).
48
 * All of this makes it impossible to use __atomic_is_lock_free here.
49
 *
50
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
51
 */
52
# define BROKEN_CLANG_ATOMICS
53
#endif
54
55
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
56
57
# if defined(OPENSSL_SYS_UNIX)
58
#  include <sys/types.h>
59
#  include <unistd.h>
60
# endif
61
62
# include <assert.h>
63
64
/*
65
 * The Non-Stop KLT thread model currently seems broken in its rwlock
66
 * implementation
67
 */
68
# if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_)
69
#  define USE_RWLOCK
70
# endif
71
72
/*
73
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
74
 * other compilers.
75
 *
76
 * Unfortunately, we can't do that with some "generic type", because there's no
77
 * guarantee that the chosen generic type is large enough to cover all cases.
78
 * Therefore, we implement fallbacks for each applicable type, with composed
79
 * names that include the type they handle.
80
 *
81
 * (an anecdote: we previously tried to use |void *| as the generic type, with
82
 * the thought that the pointer itself is the largest type.  However, this is
83
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
84
 *
85
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
86
 * they can map to the correct fallback function.  In the GNU/clang case, that
87
 * parameter is simply ignored.
88
 */
89
90
/*
91
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
92
 * fallback function names.
93
 */
94
typedef void *pvoid;
95
96
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
97
    && !defined(USE_ATOMIC_FALLBACKS)
98
2.13M
#  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
99
15
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
100
1.30k
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
101
15
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
102
0
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
103
# else
104
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
105
106
#  define IMPL_fallback_atomic_load_n(t)                        \
107
    static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
108
    {                                                           \
109
        t ret;                                                  \
110
                                                                \
111
        pthread_mutex_lock(&atomic_sim_lock);                   \
112
        ret = *p;                                               \
113
        pthread_mutex_unlock(&atomic_sim_lock);                 \
114
        return ret;                                             \
115
    }
116
IMPL_fallback_atomic_load_n(uint32_t)
117
IMPL_fallback_atomic_load_n(uint64_t)
118
IMPL_fallback_atomic_load_n(pvoid)
119
120
#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
121
122
#  define IMPL_fallback_atomic_store_n(t)                       \
123
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
124
    {                                                           \
125
        t ret;                                                  \
126
                                                                \
127
        pthread_mutex_lock(&atomic_sim_lock);                   \
128
        ret = *p;                                               \
129
        *p = v;                                                 \
130
        pthread_mutex_unlock(&atomic_sim_lock);                 \
131
        return ret;                                             \
132
    }
133
IMPL_fallback_atomic_store_n(uint32_t)
134
135
#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
136
137
#  define IMPL_fallback_atomic_store(t)                         \
138
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
139
    {                                                           \
140
        pthread_mutex_lock(&atomic_sim_lock);                   \
141
        *p = *v;                                                \
142
        pthread_mutex_unlock(&atomic_sim_lock);                 \
143
    }
144
IMPL_fallback_atomic_store(pvoid)
145
146
#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
147
148
/*
149
 * The fallbacks that follow don't need any per type implementation, as
150
 * they are designed for uint64_t only.  If there comes a time when multiple
151
 * types need to be covered, it's relatively easy to refactor them the same
152
 * way as the fallbacks above.
153
 */
154
155
static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
156
{
157
    uint64_t ret;
158
159
    pthread_mutex_lock(&atomic_sim_lock);
160
    *p += v;
161
    ret = *p;
162
    pthread_mutex_unlock(&atomic_sim_lock);
163
    return ret;
164
}
165
166
#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
167
168
static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
169
{
170
    uint64_t ret;
171
172
    pthread_mutex_lock(&atomic_sim_lock);
173
    *p -= v;
174
    ret = *p;
175
    pthread_mutex_unlock(&atomic_sim_lock);
176
    return ret;
177
}
178
179
#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
180
# endif
181
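The fallback scheme above can be condensed into a short, self-contained sketch (illustrative only, not part of this file): the type argument of the ATOMIC_ macros is ignored on the builtin path and used for token pasting on the mutex-protected fallback path.

/* Illustrative sketch of the name-composition scheme described above. */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t sim_lock = PTHREAD_MUTEX_INITIALIZER;

/* One fallback helper per type; the name embeds the type it handles. */
static uint64_t fallback_atomic_load_n_uint64_t(uint64_t *p)
{
    uint64_t ret;

    pthread_mutex_lock(&sim_lock);
    ret = *p;
    pthread_mutex_unlock(&sim_lock);
    return ret;
}

#if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE)
# define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)          /* t is ignored */
#else
# define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)  /* t picks the helper */
#endif

/* Usage: uint64_t v = ATOMIC_LOAD_N(uint64_t, &counter, __ATOMIC_ACQUIRE); */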
182
/*
183
 * This is the core of an rcu lock. It tracks the readers and writers for the
184
 * current quiescence point for a given lock.  users is the 64-bit counter
185
 * of readers currently holding a reference to this qp
186
 *
187
 */
188
struct rcu_qp {
189
    uint64_t users;
190
};
191
192
struct thread_qp {
193
    struct rcu_qp *qp;
194
    unsigned int depth;
195
    CRYPTO_RCU_LOCK *lock;
196
};
197
198
0
# define MAX_QPS 10
199
/*
200
 * This is the per thread tracking data
201
 * that is assigned to each thread participating
202
 * in an rcu qp
203
 *
204
 * qp points to the qp that it last acquired
205
 *
206
 */
207
struct rcu_thr_data {
208
    struct thread_qp thread_qps[MAX_QPS];
209
};
210
211
/*
212
 * This is the internal version of a CRYPTO_RCU_LOCK;
213
 * it is cast from CRYPTO_RCU_LOCK
214
 */
215
struct rcu_lock_st {
216
    /* Callbacks to call for next ossl_synchronize_rcu */
217
    struct rcu_cb_item *cb_items;
218
219
    /* The context we are being created against */
220
    OSSL_LIB_CTX *ctx;
221
222
    /* Array of quiescent points for synchronization */
223
    struct rcu_qp *qp_group;
224
225
    /* rcu generation counter for in-order retirement */
226
    uint32_t id_ctr;
227
228
    /* Number of elements in qp_group array */
229
    uint32_t group_count;
230
231
    /* Index of the current qp in the qp_group array */
232
    uint32_t reader_idx;
233
234
    /* the next id_ctr value to be retired */
235
    uint32_t next_to_retire;
236
237
    /* index of the next free rcu_qp in the qp_group */
238
    uint32_t current_alloc_idx;
239
240
    /* number of qps in the qp_group array currently being retired */
241
    uint32_t writers_alloced;
242
243
    /* lock protecting write side operations */
244
    pthread_mutex_t write_lock;
245
246
    /* lock protecting updates to writers_alloced/current_alloc_idx */
247
    pthread_mutex_t alloc_lock;
248
249
    /* signal to wake threads waiting on alloc_lock */
250
    pthread_cond_t alloc_signal;
251
252
    /* lock to enforce in-order retirement */
253
    pthread_mutex_t prior_lock;
254
255
    /* signal to wake threads waiting on prior_lock */
256
    pthread_cond_t prior_signal;
257
};
258
259
/* Read side acquisition of the current qp */
260
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
261
0
{
262
0
    uint32_t qp_idx;
263
264
    /* get the current qp index */
265
0
    for (;;) {
266
0
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
267
268
        /*
269
         * Notes on use of __ATOMIC_ACQUIRE
270
         * We need to ensure the following:
271
         * 1) That subsequent operations aren't optimized by hoisting them above
272
         * this operation.  Specifically, we don't want the below re-load of
273
         * qp_idx to get optimized away
274
         * 2) We want to ensure that any updating of reader_idx on the write side
275
         * of the lock is flushed from a local cpu cache so that we see any
276
         * updates prior to the load.  This is a non-issue on cache coherent
277
         * systems like x86, but is relevant on other arches
278
         */
279
0
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
280
0
                         __ATOMIC_ACQUIRE);
281
282
        /* if the idx hasn't changed, we're good, else try again */
283
0
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
284
0
                                    __ATOMIC_RELAXED))
285
0
            break;
286
287
0
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
288
0
                         __ATOMIC_RELAXED);
289
0
    }
290
291
0
    return &lock->qp_group[qp_idx];
292
0
}
293
294
static void ossl_rcu_free_local_data(void *arg)
295
0
{
296
0
    OSSL_LIB_CTX *ctx = arg;
297
0
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx);
298
299
0
    CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx, NULL);
300
0
    OPENSSL_free(data);
301
0
}
302
303
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
304
0
{
305
0
    struct rcu_thr_data *data;
306
0
    int i, available_qp = -1;
307
308
    /*
309
     * we're going to access current_qp here so ask the
310
     * processor to fetch it
311
     */
312
0
    data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx);
313
314
0
    if (data == NULL) {
315
0
        data = OPENSSL_zalloc(sizeof(*data));
316
0
        OPENSSL_assert(data != NULL);
317
0
        CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx, data);
318
0
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
319
0
    }
320
321
0
    for (i = 0; i < MAX_QPS; i++) {
322
0
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
323
0
            available_qp = i;
324
        /* If we have a hold on this lock already, we're good */
325
0
        if (data->thread_qps[i].lock == lock) {
326
0
            data->thread_qps[i].depth++;
327
0
            return;
328
0
        }
329
0
    }
330
331
    /*
332
     * if we get here, then we don't have a hold on this lock yet
333
     */
334
0
    assert(available_qp != -1);
335
336
0
    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
337
0
    data->thread_qps[available_qp].depth = 1;
338
0
    data->thread_qps[available_qp].lock = lock;
339
0
}
340
341
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
342
0
{
343
0
    int i;
344
0
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx);
345
0
    uint64_t ret;
346
347
0
    assert(data != NULL);
348
349
0
    for (i = 0; i < MAX_QPS; i++) {
350
0
        if (data->thread_qps[i].lock == lock) {
351
            /*
352
             * we have to use __ATOMIC_RELEASE here
353
             * to ensure that all preceding read instructions complete
354
             * before the decrement is visible to ossl_synchronize_rcu
355
             */
356
0
            data->thread_qps[i].depth--;
357
0
            if (data->thread_qps[i].depth == 0) {
358
0
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
359
0
                                       (uint64_t)1, __ATOMIC_RELEASE);
360
0
                OPENSSL_assert(ret != UINT64_MAX);
361
0
                data->thread_qps[i].qp = NULL;
362
0
                data->thread_qps[i].lock = NULL;
363
0
            }
364
0
            return;
365
0
        }
366
0
    }
367
    /*
368
     * If we get here, we're trying to unlock a lock that we never acquired -
369
     * that's fatal.
370
     */
371
0
    assert(0);
372
0
}
373
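As a usage sketch of the read side shown above (my_cfg, global_cfg and cfg_lock are hypothetical names, not from this file), a reader brackets its dereference of an RCU-protected pointer with ossl_rcu_read_lock()/ossl_rcu_read_unlock() and reads the pointer through ossl_rcu_uptr_deref():

struct my_cfg { int value; };                  /* hypothetical payload */
static struct my_cfg *global_cfg;              /* published by the write side */
static CRYPTO_RCU_LOCK *cfg_lock;              /* from ossl_rcu_lock_new() */

static int read_cfg_value(void)
{
    struct my_cfg *cfg;
    int v;

    ossl_rcu_read_lock(cfg_lock);
    cfg = ossl_rcu_uptr_deref((void **)&global_cfg);
    v = (cfg != NULL) ? cfg->value : 0;
    ossl_rcu_read_unlock(cfg_lock);
    return v;
}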
374
/*
375
 * Write side allocation routine to get the current qp
376
 * and replace it with a new one
377
 */
378
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
379
15
{
380
15
    uint32_t current_idx;
381
382
15
    pthread_mutex_lock(&lock->alloc_lock);
383
384
    /*
385
     * we need at least one qp to be available with one
386
     * left over, so that readers can start working on
387
     * one that isn't yet being waited on
388
     */
389
15
    while (lock->group_count - lock->writers_alloced < 2)
390
        /* we have to wait for one to be free */
391
0
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
392
393
15
    current_idx = lock->current_alloc_idx;
394
395
    /* Allocate the qp */
396
15
    lock->writers_alloced++;
397
398
    /* increment the allocation index */
399
15
    lock->current_alloc_idx =
400
15
        (lock->current_alloc_idx + 1) % lock->group_count;
401
402
15
    *curr_id = lock->id_ctr;
403
15
    lock->id_ctr++;
404
405
15
    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
406
15
                   __ATOMIC_RELAXED);
407
408
    /*
409
     * this should make sure that the new value of reader_idx is visible in
410
     * get_hold_current_qp, directly after incrementing the users count
411
     */
412
15
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
413
15
                     __ATOMIC_RELEASE);
414
415
    /* wake up any waiters */
416
15
    pthread_cond_signal(&lock->alloc_signal);
417
15
    pthread_mutex_unlock(&lock->alloc_lock);
418
15
    return &lock->qp_group[current_idx];
419
15
}
420
421
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
422
15
{
423
15
    pthread_mutex_lock(&lock->alloc_lock);
424
15
    lock->writers_alloced--;
425
15
    pthread_cond_signal(&lock->alloc_signal);
426
15
    pthread_mutex_unlock(&lock->alloc_lock);
427
15
}
428
429
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
430
                                            uint32_t count)
431
12
{
432
12
    struct rcu_qp *new =
433
12
        OPENSSL_zalloc(sizeof(*new) * count);
434
435
12
    lock->group_count = count;
436
12
    return new;
437
12
}
438
439
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
440
9
{
441
9
    pthread_mutex_lock(&lock->write_lock);
442
9
    TSAN_FAKE_UNLOCK(&lock->write_lock);
443
9
}
444
445
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
446
9
{
447
9
    TSAN_FAKE_LOCK(&lock->write_lock);
448
9
    pthread_mutex_unlock(&lock->write_lock);
449
9
}
450
451
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
452
15
{
453
15
    struct rcu_qp *qp;
454
15
    uint64_t count;
455
15
    uint32_t curr_id;
456
15
    struct rcu_cb_item *cb_items, *tmpcb;
457
458
15
    pthread_mutex_lock(&lock->write_lock);
459
15
    cb_items = lock->cb_items;
460
15
    lock->cb_items = NULL;
461
15
    pthread_mutex_unlock(&lock->write_lock);
462
463
15
    qp = update_qp(lock, &curr_id);
464
465
    /* retire in order */
466
15
    pthread_mutex_lock(&lock->prior_lock);
467
15
    while (lock->next_to_retire != curr_id)
468
0
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
469
470
    /*
471
     * wait for the reader count to reach zero
472
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
473
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
474
 * is visible prior to our read.
475
 * However, this is likely only necessary to silence a tsan warning,
476
 * because the read side should not do any write operation
477
     * outside the atomic itself
478
     */
479
15
    do {
480
15
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
481
15
    } while (count != (uint64_t)0);
482
483
15
    lock->next_to_retire++;
484
15
    pthread_cond_broadcast(&lock->prior_signal);
485
15
    pthread_mutex_unlock(&lock->prior_lock);
486
487
15
    retire_qp(lock, qp);
488
489
    /* handle any callbacks that we have */
490
18
    while (cb_items != NULL) {
491
3
        tmpcb = cb_items;
492
3
        cb_items = cb_items->next;
493
3
        tmpcb->fn(tmpcb->data);
494
3
        OPENSSL_free(tmpcb);
495
3
    }
496
15
}
497
498
/*
499
 * Note: This call assumes it's made under the protection of
500
 * ossl_rcu_write_lock
501
 */
502
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
503
3
{
504
3
    struct rcu_cb_item *new =
505
3
        OPENSSL_zalloc(sizeof(*new));
506
507
3
    if (new == NULL)
508
0
        return 0;
509
510
3
    new->data = data;
511
3
    new->fn = cb;
512
513
3
    new->next = lock->cb_items;
514
3
    lock->cb_items = new;
515
516
3
    return 1;
517
3
}
518
519
void *ossl_rcu_uptr_deref(void **p)
520
2.13M
{
521
2.13M
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
522
2.13M
}
523
524
void ossl_rcu_assign_uptr(void **p, void **v)
525
1.30k
{
526
1.30k
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
527
1.30k
}
528
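Continuing the hypothetical my_cfg/global_cfg/cfg_lock example from the reader-side sketch above (free_cfg_cb would likewise be hypothetical), a writer publishes a replacement object under ossl_rcu_write_lock(), then either waits with ossl_synchronize_rcu() before freeing the old object or defers the free with ossl_rcu_call():

static void publish_cfg_value(int v)
{
    struct my_cfg *newcfg = OPENSSL_zalloc(sizeof(*newcfg));
    struct my_cfg *oldcfg;

    if (newcfg == NULL)
        return;
    newcfg->value = v;

    ossl_rcu_write_lock(cfg_lock);
    oldcfg = global_cfg;                       /* safe: we hold the write lock */
    ossl_rcu_assign_uptr((void **)&global_cfg, (void **)&newcfg);
    /* Alternatively, queue the old object for deferred freeing while still
     * holding the write lock: ossl_rcu_call(cfg_lock, free_cfg_cb, oldcfg); */
    ossl_rcu_write_unlock(cfg_lock);

    /* Wait until no reader can still hold a reference to oldcfg. */
    ossl_synchronize_rcu(cfg_lock);
    OPENSSL_free(oldcfg);
}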
529
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
530
12
{
531
12
    struct rcu_lock_st *new;
532
533
    /*
534
     * We need a minimum of 2 qps
535
     */
536
12
    if (num_writers < 2)
537
12
        num_writers = 2;
538
539
12
    ctx = ossl_lib_ctx_get_concrete(ctx);
540
12
    if (ctx == NULL)
541
0
        return 0;
542
543
12
    new = OPENSSL_zalloc(sizeof(*new));
544
12
    if (new == NULL)
545
0
        return NULL;
546
547
12
    new->ctx = ctx;
548
12
    pthread_mutex_init(&new->write_lock, NULL);
549
12
    pthread_mutex_init(&new->prior_lock, NULL);
550
12
    pthread_mutex_init(&new->alloc_lock, NULL);
551
12
    pthread_cond_init(&new->prior_signal, NULL);
552
12
    pthread_cond_init(&new->alloc_signal, NULL);
553
554
12
    new->qp_group = allocate_new_qp_group(new, num_writers);
555
12
    if (new->qp_group == NULL) {
556
0
        OPENSSL_free(new);
557
0
        new = NULL;
558
0
    }
559
560
12
    return new;
561
12
}
562
563
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
564
6
{
565
6
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
566
567
6
    if (lock == NULL)
568
0
        return;
569
570
    /* make sure we're synchronized */
571
6
    ossl_synchronize_rcu(rlock);
572
573
6
    OPENSSL_free(rlock->qp_group);
574
    /* There should only be a single qp left now */
575
6
    OPENSSL_free(rlock);
576
6
}
577
578
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
579
228
{
580
228
# ifdef USE_RWLOCK
581
228
    CRYPTO_RWLOCK *lock;
582
583
228
    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
584
        /* Don't set error, to avoid recursion blowup. */
585
0
        return NULL;
586
587
228
    if (pthread_rwlock_init(lock, NULL) != 0) {
588
0
        OPENSSL_free(lock);
589
0
        return NULL;
590
0
    }
591
# else
592
    pthread_mutexattr_t attr;
593
    CRYPTO_RWLOCK *lock;
594
595
    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
596
        /* Don't set error, to avoid recursion blowup. */
597
        return NULL;
598
599
    /*
600
     * We don't use recursive mutexes, but try to catch errors if we do.
601
     */
602
    pthread_mutexattr_init(&attr);
603
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
604
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
605
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
606
#   endif
607
#  else
608
    /* The SPT Thread Library does not define MUTEX attributes. */
609
#  endif
610
611
    if (pthread_mutex_init(lock, &attr) != 0) {
612
        pthread_mutexattr_destroy(&attr);
613
        OPENSSL_free(lock);
614
        return NULL;
615
    }
616
617
    pthread_mutexattr_destroy(&attr);
618
# endif
619
620
228
    return lock;
621
228
}
622
623
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
624
1.06M
{
625
1.06M
# ifdef USE_RWLOCK
626
1.06M
    if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))
627
0
        return 0;
628
# else
629
    if (pthread_mutex_lock(lock) != 0) {
630
        assert(errno != EDEADLK && errno != EBUSY);
631
        return 0;
632
    }
633
# endif
634
635
1.06M
    return 1;
636
1.06M
}
637
638
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
639
7.57k
{
640
7.57k
# ifdef USE_RWLOCK
641
7.57k
    if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))
642
0
        return 0;
643
# else
644
    if (pthread_mutex_lock(lock) != 0) {
645
        assert(errno != EDEADLK && errno != EBUSY);
646
        return 0;
647
    }
648
# endif
649
650
7.57k
    return 1;
651
7.57k
}
652
653
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
654
1.07M
{
655
1.07M
# ifdef USE_RWLOCK
656
1.07M
    if (pthread_rwlock_unlock(lock) != 0)
657
0
        return 0;
658
# else
659
    if (pthread_mutex_unlock(lock) != 0) {
660
        assert(errno != EPERM);
661
        return 0;
662
    }
663
# endif
664
665
1.07M
    return 1;
666
1.07M
}
667
668
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
669
96
{
670
96
    if (lock == NULL)
671
21
        return;
672
673
75
# ifdef USE_RWLOCK
674
75
    pthread_rwlock_destroy(lock);
675
# else
676
    pthread_mutex_destroy(lock);
677
# endif
678
75
    OPENSSL_free(lock);
679
680
75
    return;
681
96
}
682
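A brief usage sketch of the CRYPTO_RWLOCK API above (count_lock and shared_count are hypothetical): any number of readers may hold the lock concurrently, writers get exclusive access, and every path releases with CRYPTO_THREAD_unlock():

static CRYPTO_RWLOCK *count_lock;              /* from CRYPTO_THREAD_lock_new() */
static int shared_count;

static int bump_count(void)
{
    if (!CRYPTO_THREAD_write_lock(count_lock))
        return 0;
    shared_count++;
    return CRYPTO_THREAD_unlock(count_lock);
}

static int get_count(int *out)
{
    if (!CRYPTO_THREAD_read_lock(count_lock))
        return 0;
    *out = shared_count;
    return CRYPTO_THREAD_unlock(count_lock);
}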
683
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
684
13.3k
{
685
13.3k
    if (pthread_once(once, init) != 0)
686
0
        return 0;
687
688
13.3k
    return 1;
689
13.3k
}
690
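A common pattern with CRYPTO_THREAD_run_once() (do_init, init_ok and ensure_init are hypothetical) is a statically initialised CRYPTO_ONCE plus an init routine that records its own success, since the callback itself cannot return a value:

static CRYPTO_ONCE init_once = CRYPTO_ONCE_STATIC_INIT;
static int init_ok = 0;

static void do_init(void)
{
    /* perform the one-time setup here and record whether it worked */
    init_ok = 1;
}

static int ensure_init(void)
{
    if (!CRYPTO_THREAD_run_once(&init_once, do_init))
        return 0;
    return init_ok;
}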
691
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
692
12
{
693
12
    if (pthread_key_create(key, cleanup) != 0)
694
0
        return 0;
695
696
12
    return 1;
697
12
}
698
699
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
700
5.95k
{
701
5.95k
    return pthread_getspecific(*key);
702
5.95k
}
703
704
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
705
12
{
706
12
    if (pthread_setspecific(*key, val) != 0)
707
0
        return 0;
708
709
12
    return 1;
710
12
}
711
712
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
713
9
{
714
9
    if (pthread_key_delete(*key) != 0)
715
0
        return 0;
716
717
9
    return 1;
718
9
}
719
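A usage sketch for the thread-local helpers above (buf_key, buf_cleanup and get_thread_buf are hypothetical): the key is created once, each thread lazily installs its own value, and the cleanup callback frees that value when the thread exits:

static CRYPTO_THREAD_LOCAL buf_key;            /* created once, e.g. via CRYPTO_THREAD_run_once */

static void buf_cleanup(void *p)
{
    OPENSSL_free(p);
}

static int buf_key_init(void)
{
    return CRYPTO_THREAD_init_local(&buf_key, buf_cleanup);
}

static void *get_thread_buf(size_t len)
{
    void *buf = CRYPTO_THREAD_get_local(&buf_key);

    if (buf == NULL) {
        buf = OPENSSL_zalloc(len);
        if (buf != NULL && !CRYPTO_THREAD_set_local(&buf_key, buf)) {
            OPENSSL_free(buf);
            buf = NULL;
        }
    }
    return buf;
}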
720
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
721
0
{
722
0
    return pthread_self();
723
0
}
724
725
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
726
0
{
727
0
    return pthread_equal(a, b);
728
0
}
729
730
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
731
3.95k
{
732
3.95k
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
733
3.95k
    if (__atomic_is_lock_free(sizeof(*val), val)) {
734
3.95k
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
735
3.95k
        return 1;
736
3.95k
    }
737
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
738
    /* This will work for all future Solaris versions. */
739
    if (ret != NULL) {
740
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
741
        return 1;
742
    }
743
# endif
744
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
745
0
        return 0;
746
747
0
    *val += amount;
748
0
    *ret  = *val;
749
750
0
    if (!CRYPTO_THREAD_unlock(lock))
751
0
        return 0;
752
753
0
    return 1;
754
0
}
755
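A usage sketch for CRYPTO_atomic_add() (refcount and ref_lock are hypothetical): on the common path the addition is a lock-free atomic and the lock argument is unused, but callers still supply a lock so the mutex-based fallback works on platforms without lock-free atomics:

static int refcount = 1;
static CRYPTO_RWLOCK *ref_lock;                /* from CRYPTO_THREAD_lock_new() */

static int my_obj_up_ref(void)
{
    int new_count;

    return CRYPTO_atomic_add(&refcount, 1, &new_count, ref_lock);
}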
756
int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
757
                        CRYPTO_RWLOCK *lock)
758
0
{
759
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
760
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
761
0
        *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
762
0
        return 1;
763
0
    }
764
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
765
    /* This will work for all future Solaris versions. */
766
    if (ret != NULL) {
767
        *ret = atomic_add_64_nv(val, op);
768
        return 1;
769
    }
770
# endif
771
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
772
0
        return 0;
773
0
    *val += op;
774
0
    *ret  = *val;
775
776
0
    if (!CRYPTO_THREAD_unlock(lock))
777
0
        return 0;
778
779
0
    return 1;
780
0
}
781
782
int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
783
                      CRYPTO_RWLOCK *lock)
784
0
{
785
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
786
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
787
0
        *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
788
0
        return 1;
789
0
    }
790
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
791
    /* This will work for all future Solaris versions. */
792
    if (ret != NULL) {
793
        *ret = atomic_and_64_nv(val, op);
794
        return 1;
795
    }
796
# endif
797
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
798
0
        return 0;
799
0
    *val &= op;
800
0
    *ret  = *val;
801
802
0
    if (!CRYPTO_THREAD_unlock(lock))
803
0
        return 0;
804
805
0
    return 1;
806
0
}
807
808
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
809
                     CRYPTO_RWLOCK *lock)
810
9
{
811
9
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
812
9
    if (__atomic_is_lock_free(sizeof(*val), val)) {
813
9
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
814
9
        return 1;
815
9
    }
816
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
817
    /* This will work for all future Solaris versions. */
818
    if (ret != NULL) {
819
        *ret = atomic_or_64_nv(val, op);
820
        return 1;
821
    }
822
# endif
823
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
824
0
        return 0;
825
0
    *val |= op;
826
0
    *ret  = *val;
827
828
0
    if (!CRYPTO_THREAD_unlock(lock))
829
0
        return 0;
830
831
0
    return 1;
832
0
}
833
834
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
835
1.10M
{
836
1.10M
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
837
1.10M
    if (__atomic_is_lock_free(sizeof(*val), val)) {
838
1.10M
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
839
1.10M
        return 1;
840
1.10M
    }
841
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
842
    /* This will work for all future Solaris versions. */
843
    if (ret != NULL) {
844
        *ret = atomic_or_64_nv(val, 0);
845
        return 1;
846
    }
847
# endif
848
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
849
0
        return 0;
850
0
    *ret  = *val;
851
0
    if (!CRYPTO_THREAD_unlock(lock))
852
0
        return 0;
853
854
0
    return 1;
855
0
}
856
857
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
858
1.29k
{
859
1.29k
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
860
1.29k
    if (__atomic_is_lock_free(sizeof(*dst), dst)) {
861
1.29k
        __atomic_store(dst, &val, __ATOMIC_RELEASE);
862
1.29k
        return 1;
863
1.29k
    }
864
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
865
    /* This will work for all future Solaris versions. */
866
    if (dst != NULL) {
867
        atomic_swap_64(dst, val);
868
        return 1;
869
    }
870
# endif
871
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
872
0
        return 0;
873
0
    *dst  = val;
874
0
    if (!CRYPTO_THREAD_unlock(lock))
875
0
        return 0;
876
877
0
    return 1;
878
0
}
879
880
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
881
0
{
882
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
883
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
884
0
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
885
0
        return 1;
886
0
    }
887
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
888
    /* This will work for all future Solaris versions. */
889
    if (ret != NULL) {
890
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
891
        return 1;
892
    }
893
# endif
894
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
895
0
        return 0;
896
0
    *ret  = *val;
897
0
    if (!CRYPTO_THREAD_unlock(lock))
898
0
        return 0;
899
900
0
    return 1;
901
0
}
902
903
# ifndef FIPS_MODULE
904
int openssl_init_fork_handlers(void)
905
0
{
906
0
    return 1;
907
0
}
908
# endif /* FIPS_MODULE */
909
910
int openssl_get_fork_id(void)
911
0
{
912
0
    return getpid();
913
0
}
914
#endif