Coverage Report

Created: 2025-11-16 06:40

/src/openssl35/crypto/threads_pthread.c
Line | Count | Source
1
/*
2
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License 2.0 (the "License").  You may not use
5
 * this file except in compliance with the License.  You can obtain a copy
6
 * in the file LICENSE in the source distribution or at
7
 * https://www.openssl.org/source/license.html
8
 */
9
10
/* We need to use the OPENSSL_fork_*() deprecated APIs */
11
#define OPENSSL_SUPPRESS_DEPRECATED
12
13
#include <openssl/crypto.h>
14
#include <crypto/cryptlib.h>
15
#include "internal/cryptlib.h"
16
#include "internal/rcu.h"
17
#include "rcu_internal.h"
18
19
#if defined(__clang__) && defined(__has_feature)
20
# if __has_feature(thread_sanitizer)
21
#  define __SANITIZE_THREAD__
22
# endif
23
#endif
24
25
#if defined(__SANITIZE_THREAD__)
26
# include <sanitizer/tsan_interface.h>
27
# define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \
28
__tsan_mutex_post_unlock((x), 0)
29
30
# define TSAN_FAKE_LOCK(x)  __tsan_mutex_pre_lock((x), 0); \
31
__tsan_mutex_post_lock((x), 0, 0)
32
#else
33
# define TSAN_FAKE_UNLOCK(x)
34
# define TSAN_FAKE_LOCK(x)
35
#endif
36
37
#if defined(__sun)
38
# include <atomic.h>
39
#endif
40
41
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
42
/*
43
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
44
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free(),
45
 * i.e. the signature __atomic_is_lock_free(sizeof(_Atomic(T))) rather than the usual two.
46
 * All of this makes it impossible to use __atomic_is_lock_free here.
47
 *
48
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
49
 */
50
# define BROKEN_CLANG_ATOMICS
51
#endif
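
A minimal sketch (not part of the measured source) of the signature difference described above: the standard GNU/clang builtin takes a size and a pointer, which is how the CRYPTO_atomic_* helpers later in this file call it, while the quirky Apple clang accepted only the size. The guard and the function name sketch_can_use_lockfree are illustrative assumptions.

#if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
# include <stdint.h>

static int sketch_can_use_lockfree(uint64_t *val)
{
    /* standard two-argument form: (size, pointer) */
    return __atomic_is_lock_free(sizeof(*val), val);
}
#endif
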
52
53
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
54
55
# if defined(OPENSSL_SYS_UNIX)
56
#  include <sys/types.h>
57
#  include <unistd.h>
58
# endif
59
60
# include <assert.h>
61
62
/*
63
 * The Non-Stop KLT thread model currently seems broken in its rwlock
64
 * implementation.
65
 * Likewise, there is a problem with the glibc implementation on riscv.
66
 */
67
# if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \
68
                                         && !defined(__riscv)
69
#  define USE_RWLOCK
70
# endif
71
72
/*
73
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
74
 * other compilers.
75
 *
76
 * Unfortunately, we can't do that with some "generic type", because there's no
77
 * guarantee that the chosen generic type is large enough to cover all cases.
78
 * Therefore, we implement fallbacks for each applicable type, with composed
79
 * names that include the type they handle.
80
 *
81
 * (an anecdote: we previously tried to use |void *| as the generic type, with
82
 * the thought that the pointer itself is the largest type.  However, this is
83
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
84
 *
85
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
86
 * they can map to the correct fallback function.  In the GNU/clang case, that
87
 * parameter is simply ignored.
88
 */
89
90
/*
91
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
92
 * fallback function names.
93
 */
94
typedef void *pvoid;
95
96
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
97
    && !defined(USE_ATOMIC_FALLBACKS)
98
54.9M
#  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
99
916
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
100
28.7k
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
101
961
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
102
45
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
103
# else
104
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
105
106
#  define IMPL_fallback_atomic_load_n(t)                        \
107
    static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
108
    {                                                           \
109
        t ret;                                                  \
110
                                                                \
111
        pthread_mutex_lock(&atomic_sim_lock);                   \
112
        ret = *p;                                               \
113
        pthread_mutex_unlock(&atomic_sim_lock);                 \
114
        return ret;                                             \
115
    }
116
IMPL_fallback_atomic_load_n(uint32_t)
117
IMPL_fallback_atomic_load_n(uint64_t)
118
IMPL_fallback_atomic_load_n(pvoid)
119
120
#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
121
122
#  define IMPL_fallback_atomic_store_n(t)                       \
123
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
124
    {                                                           \
125
        t ret;                                                  \
126
                                                                \
127
        pthread_mutex_lock(&atomic_sim_lock);                   \
128
        ret = *p;                                               \
129
        *p = v;                                                 \
130
        pthread_mutex_unlock(&atomic_sim_lock);                 \
131
        return ret;                                             \
132
    }
133
IMPL_fallback_atomic_store_n(uint32_t)
134
135
#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
136
137
#  define IMPL_fallback_atomic_store(t)                         \
138
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
139
    {                                                           \
140
        pthread_mutex_lock(&atomic_sim_lock);                   \
141
        *p = *v;                                                \
142
        pthread_mutex_unlock(&atomic_sim_lock);                 \
143
    }
144
IMPL_fallback_atomic_store(pvoid)
145
146
#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
147
148
/*
149
 * The fallbacks that follow don't need any per type implementation, as
150
 * they are designed for uint64_t only.  If there comes a time when multiple
151
 * types need to be covered, it's relatively easy to refactor them the same
152
 * way as the fallbacks above.
153
 */
154
155
static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
156
{
157
    uint64_t ret;
158
159
    pthread_mutex_lock(&atomic_sim_lock);
160
    *p += v;
161
    ret = *p;
162
    pthread_mutex_unlock(&atomic_sim_lock);
163
    return ret;
164
}
165
166
#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
167
168
static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
169
{
170
    uint64_t ret;
171
172
    pthread_mutex_lock(&atomic_sim_lock);
173
    *p -= v;
174
    ret = *p;
175
    pthread_mutex_unlock(&atomic_sim_lock);
176
    return ret;
177
}
178
179
#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
180
# endif
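
A minimal sketch (not part of the measured source) of the name-composition scheme described in the comment above the ATOMIC_ macros. The variable sketch_ctr and the function sketch_load_ctr are hypothetical; the macro expansions shown in the comment follow directly from the definitions above.

static uint64_t sketch_ctr;

static uint64_t sketch_load_ctr(void)
{
    /*
     * On the GNU/clang branch this expands to
     *     __atomic_load_n(&sketch_ctr, __ATOMIC_RELAXED);
     * on the fallback branch, token pasting of the first parameter gives
     *     fallback_atomic_load_n_uint64_t(&sketch_ctr);
     */
    return ATOMIC_LOAD_N(uint64_t, &sketch_ctr, __ATOMIC_RELAXED);
}
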
181
182
/*
183
 * This is the core of an rcu lock. It tracks the readers and writers for the
184
 * current quiescence point for a given lock. The users field is the 64-bit
185
 * value that stores the READERS/ID as defined above.
186
 *
187
 */
188
struct rcu_qp {
189
    uint64_t users;
190
};
191
192
struct thread_qp {
193
    struct rcu_qp *qp;
194
    unsigned int depth;
195
    CRYPTO_RCU_LOCK *lock;
196
};
197
198
364
# define MAX_QPS 10
199
/*
200
 * This is the per-thread tracking data
201
 * that is assigned to each thread participating
202
 * in an rcu qp.
203
 *
204
 * qp points to the qp that it last acquired.
205
 *
206
 */
207
struct rcu_thr_data {
208
    struct thread_qp thread_qps[MAX_QPS];
209
};
210
211
/*
212
 * This is the internal version of a CRYPTO_RCU_LOCK;
213
 * it is cast from CRYPTO_RCU_LOCK.
214
 */
215
struct rcu_lock_st {
216
    /* Callbacks to call for next ossl_synchronize_rcu */
217
    struct rcu_cb_item *cb_items;
218
219
    /* The context we are being created against */
220
    OSSL_LIB_CTX *ctx;
221
222
    /* Array of quiescent points for synchronization */
223
    struct rcu_qp *qp_group;
224
225
    /* rcu generation counter for in-order retirement */
226
    uint32_t id_ctr;
227
228
    /* Number of elements in qp_group array */
229
    uint32_t group_count;
230
231
    /* Index of the current qp in the qp_group array */
232
    uint32_t reader_idx;
233
234
    /* value of the next id_ctr to be retired */
235
    uint32_t next_to_retire;
236
237
    /* index of the next free rcu_qp in the qp_group */
238
    uint32_t current_alloc_idx;
239
240
    /* number of qps in the qp_group array currently being retired */
241
    uint32_t writers_alloced;
242
243
    /* lock protecting write side operations */
244
    pthread_mutex_t write_lock;
245
246
    /* lock protecting updates to writers_alloced/current_alloc_idx */
247
    pthread_mutex_t alloc_lock;
248
249
    /* signal to wake threads waiting on alloc_lock */
250
    pthread_cond_t alloc_signal;
251
252
    /* lock to enforce in-order retirement */
253
    pthread_mutex_t prior_lock;
254
255
    /* signal to wake threads waiting on prior_lock */
256
    pthread_cond_t prior_signal;
257
};
258
259
/* Read side acquisition of the current qp */
260
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
261
45
{
262
45
    uint32_t qp_idx;
263
264
    /* get the current qp index */
265
45
    for (;;) {
266
45
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
267
268
        /*
269
         * Notes on use of __ATOMIC_ACQUIRE
270
         * We need to ensure the following:
271
         * 1) That subsequent operations aren't optimized by hoisting them above
272
         * this operation.  Specifically, we don't want the below re-load of
273
         * qp_idx to get optimized away
274
         * 2) We want to ensure that any updating of reader_idx on the write side
275
         * of the lock is flushed from a local cpu cache so that we see any
276
         * updates prior to the load.  This is a non-issue on cache coherent
277
         * systems like x86, but is relevant on other arches
278
         */
279
45
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
280
45
                         __ATOMIC_ACQUIRE);
281
282
        /* if the idx hasn't changed, we're good, else try again */
283
45
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
284
45
                                    __ATOMIC_ACQUIRE))
285
45
            break;
286
287
0
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
288
0
                         __ATOMIC_RELAXED);
289
0
    }
290
291
45
    return &lock->qp_group[qp_idx];
292
45
}
293
294
static void ossl_rcu_free_local_data(void *arg)
295
3
{
296
3
    OSSL_LIB_CTX *ctx = arg;
297
3
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
298
3
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
299
300
3
    OPENSSL_free(data);
301
3
    CRYPTO_THREAD_set_local(lkey, NULL);
302
3
}
303
304
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
305
29
{
306
29
    struct rcu_thr_data *data;
307
29
    int i, available_qp = -1;
308
29
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
309
310
    /*
311
     * we're going to access current_qp here so ask the
312
     * processor to fetch it
313
     */
314
29
    data = CRYPTO_THREAD_get_local(lkey);
315
316
29
    if (data == NULL) {
317
2
        data = OPENSSL_zalloc(sizeof(*data));
318
2
        OPENSSL_assert(data != NULL);
319
2
        CRYPTO_THREAD_set_local(lkey, data);
320
2
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
321
2
    }
322
323
319
    for (i = 0; i < MAX_QPS; i++) {
324
290
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
325
29
            available_qp = i;
326
        /* If we have a hold on this lock already, we're good */
327
290
        if (data->thread_qps[i].lock == lock) {
328
0
            data->thread_qps[i].depth++;
329
0
            return;
330
0
        }
331
290
    }
332
333
    /*
334
     * if we get here, then we don't have a hold on this lock yet
335
     */
336
29
    assert(available_qp != -1);
337
338
29
    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
339
29
    data->thread_qps[available_qp].depth = 1;
340
29
    data->thread_qps[available_qp].lock = lock;
341
29
}
342
343
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
344
45
{
345
45
    int i;
346
45
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
347
45
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
348
45
    uint64_t ret;
349
350
45
    assert(data != NULL);
351
352
45
    for (i = 0; i < MAX_QPS; i++) {
353
45
        if (data->thread_qps[i].lock == lock) {
354
            /*
355
             * we have to use __ATOMIC_RELEASE here
356
             * to ensure that all preceding read instructions complete
357
             * before the decrement is visible to ossl_synchronize_rcu
358
             */
359
45
            data->thread_qps[i].depth--;
360
45
            if (data->thread_qps[i].depth == 0) {
361
45
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
362
45
                                       (uint64_t)1, __ATOMIC_RELEASE);
363
45
                OPENSSL_assert(ret != UINT64_MAX);
364
45
                data->thread_qps[i].qp = NULL;
365
45
                data->thread_qps[i].lock = NULL;
366
45
            }
367
45
            return;
368
45
        }
369
45
    }
370
    /*
371
     * If we get here, we're trying to unlock a lock that we never acquired -
372
     * that's fatal.
373
     */
374
45
    assert(0);
375
0
}
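
A minimal read-side usage sketch (not part of the measured source), pairing ossl_rcu_read_lock/ossl_rcu_read_unlock above with ossl_rcu_uptr_deref defined later in this file. The pointer global_cfg and the function sketch_reader are hypothetical.

static void *global_cfg;                       /* hypothetical RCU-protected pointer */

static void sketch_reader(CRYPTO_RCU_LOCK *lock)
{
    void *cfg;

    ossl_rcu_read_lock(lock);
    cfg = ossl_rcu_uptr_deref(&global_cfg);    /* acquire-load of the published pointer */
    /* ... read through cfg; it remains valid until the matching unlock ... */
    (void)cfg;
    ossl_rcu_read_unlock(lock);
}
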
376
377
/*
378
 * Write side allocation routine to get the current qp
379
 * and replace it with a new one
380
 */
381
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
382
916
{
383
916
    uint32_t current_idx;
384
385
916
    pthread_mutex_lock(&lock->alloc_lock);
386
387
    /*
388
     * we need at least one qp to be available with one
389
     * left over, so that readers can start working on
390
     * one that isn't yet being waited on
391
     */
392
916
    while (lock->group_count - lock->writers_alloced < 2)
393
        /* we have to wait for one to be free */
394
0
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
395
396
916
    current_idx = lock->current_alloc_idx;
397
398
    /* Allocate the qp */
399
916
    lock->writers_alloced++;
400
401
    /* increment the allocation index */
402
916
    lock->current_alloc_idx =
403
916
        (lock->current_alloc_idx + 1) % lock->group_count;
404
405
916
    *curr_id = lock->id_ctr;
406
916
    lock->id_ctr++;
407
408
    /*
409
     * make the current state of everything visible by this release
410
     * when get_hold_current_qp acquires the next qp
411
     */
412
916
    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
413
916
                   __ATOMIC_RELEASE);
414
415
    /*
416
     * this should make sure that the new value of reader_idx is visible in
417
     * get_hold_current_qp, directly after incrementing the users count
418
     */
419
916
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
420
916
                     __ATOMIC_RELEASE);
421
422
    /* wake up any waiters */
423
916
    pthread_cond_signal(&lock->alloc_signal);
424
916
    pthread_mutex_unlock(&lock->alloc_lock);
425
916
    return &lock->qp_group[current_idx];
426
916
}
427
428
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
429
916
{
430
916
    pthread_mutex_lock(&lock->alloc_lock);
431
916
    lock->writers_alloced--;
432
916
    pthread_cond_signal(&lock->alloc_signal);
433
916
    pthread_mutex_unlock(&lock->alloc_lock);
434
916
}
435
436
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
437
                                            uint32_t count)
438
438
{
439
438
    struct rcu_qp *new =
440
438
        OPENSSL_zalloc(sizeof(*new) * count);
441
442
438
    lock->group_count = count;
443
438
    return new;
444
438
}
445
446
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
447
666
{
448
666
    pthread_mutex_lock(&lock->write_lock);
449
666
    TSAN_FAKE_UNLOCK(&lock->write_lock);
450
666
}
451
452
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
453
666
{
454
666
    TSAN_FAKE_LOCK(&lock->write_lock);
455
666
    pthread_mutex_unlock(&lock->write_lock);
456
666
}
457
458
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
459
916
{
460
916
    struct rcu_qp *qp;
461
916
    uint64_t count;
462
916
    uint32_t curr_id;
463
916
    struct rcu_cb_item *cb_items, *tmpcb;
464
465
916
    pthread_mutex_lock(&lock->write_lock);
466
916
    cb_items = lock->cb_items;
467
916
    lock->cb_items = NULL;
468
916
    pthread_mutex_unlock(&lock->write_lock);
469
470
916
    qp = update_qp(lock, &curr_id);
471
472
    /* retire in order */
473
916
    pthread_mutex_lock(&lock->prior_lock);
474
916
    while (lock->next_to_retire != curr_id)
475
0
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
476
477
    /*
478
     * wait for the reader count to reach zero
479
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
480
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
481
     * is visible prior to our read.
482
     * However, this is likely just necessary to silence a tsan warning,
483
     * because the read side should not do any write operation
484
     * outside the atomic itself.
485
     */
486
916
    do {
487
916
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
488
916
    } while (count != (uint64_t)0);
489
490
916
    lock->next_to_retire++;
491
916
    pthread_cond_broadcast(&lock->prior_signal);
492
916
    pthread_mutex_unlock(&lock->prior_lock);
493
494
916
    retire_qp(lock, qp);
495
496
    /* handle any callbacks that we have */
497
1.09k
    while (cb_items != NULL) {
498
179
        tmpcb = cb_items;
499
179
        cb_items = cb_items->next;
500
179
        tmpcb->fn(tmpcb->data);
501
179
        OPENSSL_free(tmpcb);
502
179
    }
503
916
}
504
505
/*
506
 * Note: This call assumes it's made under the protection of
507
 * ossl_rcu_write_lock
508
 */
509
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
510
179
{
511
179
    struct rcu_cb_item *new =
512
179
        OPENSSL_zalloc(sizeof(*new));
513
514
179
    if (new == NULL)
515
0
        return 0;
516
517
179
    new->data = data;
518
179
    new->fn = cb;
519
520
179
    new->next = lock->cb_items;
521
179
    lock->cb_items = new;
522
523
179
    return 1;
524
179
}
525
526
void *ossl_rcu_uptr_deref(void **p)
527
54.9M
{
528
54.9M
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
529
54.9M
}
530
531
void ossl_rcu_assign_uptr(void **p, void **v)
532
28.7k
{
533
28.7k
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
534
28.7k
}
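
A minimal write-side usage sketch (not part of the measured source), combining the write lock, ossl_rcu_assign_uptr, ossl_rcu_call and ossl_synchronize_rcu above; it reuses the hypothetical global_cfg from the read-side sketch. make_cfg, free_cfg and sketch_writer are hypothetical, with free_cfg assumed to match the single-void*-argument callback that ossl_rcu_call expects.

static void *make_cfg(void)
{
    return OPENSSL_zalloc(64);                  /* hypothetical new configuration object */
}

static void free_cfg(void *cfg)
{
    OPENSSL_free(cfg);
}

static void sketch_writer(CRYPTO_RCU_LOCK *lock)
{
    void *newcfg = make_cfg();
    void *oldcfg;

    ossl_rcu_write_lock(lock);
    oldcfg = global_cfg;                        /* writers are serialized by write_lock */
    ossl_rcu_assign_uptr(&global_cfg, &newcfg); /* release-store of the new pointer */
    ossl_rcu_call(lock, free_cfg, oldcfg);      /* defer freeing until readers are done */
    ossl_rcu_write_unlock(lock);

    ossl_synchronize_rcu(lock);                 /* waits for readers, then runs free_cfg(oldcfg) */
}
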
535
536
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
537
438
{
538
438
    struct rcu_lock_st *new;
539
540
    /*
541
     * We need a minimum of 2 qps
542
     */
543
438
    if (num_writers < 2)
544
438
        num_writers = 2;
545
546
438
    ctx = ossl_lib_ctx_get_concrete(ctx);
547
438
    if (ctx == NULL)
548
0
        return 0;
549
550
438
    new = OPENSSL_zalloc(sizeof(*new));
551
438
    if (new == NULL)
552
0
        return NULL;
553
554
438
    new->ctx = ctx;
555
438
    pthread_mutex_init(&new->write_lock, NULL);
556
438
    pthread_mutex_init(&new->prior_lock, NULL);
557
438
    pthread_mutex_init(&new->alloc_lock, NULL);
558
438
    pthread_cond_init(&new->prior_signal, NULL);
559
438
    pthread_cond_init(&new->alloc_signal, NULL);
560
561
438
    new->qp_group = allocate_new_qp_group(new, num_writers);
562
438
    if (new->qp_group == NULL) {
563
0
        OPENSSL_free(new);
564
0
        new = NULL;
565
0
    }
566
567
438
    return new;
568
438
}
569
570
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
571
350
{
572
350
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
573
574
350
    if (lock == NULL)
575
0
        return;
576
577
    /* make sure we're synchronized */
578
350
    ossl_synchronize_rcu(rlock);
579
580
350
    OPENSSL_free(rlock->qp_group);
581
    /* There should only be a single qp left now */
582
350
    OPENSSL_free(rlock);
583
350
}
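
A minimal lifecycle sketch (not part of the measured source) for ossl_rcu_lock_new/ossl_rcu_lock_free above; the NULL context selects the default library context via ossl_lib_ctx_get_concrete, and sketch_lock_lifecycle is hypothetical.

static void sketch_lock_lifecycle(void)
{
    CRYPTO_RCU_LOCK *lock = ossl_rcu_lock_new(1, NULL);  /* rounded up to 2 qps by the code above */

    if (lock == NULL)
        return;
    /* ... read and write sides use the lock as in the sketches above ... */
    ossl_rcu_lock_free(lock);                            /* synchronizes once more, then frees */
}
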
584
585
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
586
9.94M
{
587
9.94M
# ifdef USE_RWLOCK
588
9.94M
    CRYPTO_RWLOCK *lock;
589
590
9.94M
    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
591
        /* Don't set error, to avoid recursion blowup. */
592
0
        return NULL;
593
594
9.94M
    if (pthread_rwlock_init(lock, NULL) != 0) {
595
0
        OPENSSL_free(lock);
596
0
        return NULL;
597
0
    }
598
# else
599
    pthread_mutexattr_t attr;
600
    CRYPTO_RWLOCK *lock;
601
602
    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
603
        /* Don't set error, to avoid recursion blowup. */
604
        return NULL;
605
606
    /*
607
     * We don't use recursive mutexes, but try to catch errors if we do.
608
     */
609
    pthread_mutexattr_init(&attr);
610
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
611
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
612
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
613
#   endif
614
#  else
615
    /* The SPT Thread Library does not define MUTEX attributes. */
616
#  endif
617
618
    if (pthread_mutex_init(lock, &attr) != 0) {
619
        pthread_mutexattr_destroy(&attr);
620
        OPENSSL_free(lock);
621
        return NULL;
622
    }
623
624
    pthread_mutexattr_destroy(&attr);
625
# endif
626
627
9.94M
    return lock;
628
9.94M
}
629
630
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
631
59.8M
{
632
59.8M
# ifdef USE_RWLOCK
633
59.8M
    if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))
634
0
        return 0;
635
# else
636
    if (pthread_mutex_lock(lock) != 0) {
637
        assert(errno != EDEADLK && errno != EBUSY);
638
        return 0;
639
    }
640
# endif
641
642
59.8M
    return 1;
643
59.8M
}
644
645
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
646
29.9M
{
647
29.9M
# ifdef USE_RWLOCK
648
29.9M
    if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))
649
0
        return 0;
650
# else
651
    if (pthread_mutex_lock(lock) != 0) {
652
        assert(errno != EDEADLK && errno != EBUSY);
653
        return 0;
654
    }
655
# endif
656
657
29.9M
    return 1;
658
29.9M
}
659
660
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
661
1.26G
{
662
1.26G
# ifdef USE_RWLOCK
663
1.26G
    if (pthread_rwlock_unlock(lock) != 0)
664
0
        return 0;
665
# else
666
    if (pthread_mutex_unlock(lock) != 0) {
667
        assert(errno != EPERM);
668
        return 0;
669
    }
670
# endif
671
672
1.26G
    return 1;
673
1.26G
}
674
675
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
676
9.94M
{
677
9.94M
    if (lock == NULL)
678
2.20k
        return;
679
680
9.94M
# ifdef USE_RWLOCK
681
9.94M
    pthread_rwlock_destroy(lock);
682
# else
683
    pthread_mutex_destroy(lock);
684
# endif
685
9.94M
    OPENSSL_free(lock);
686
687
9.94M
    return;
688
9.94M
}
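
A minimal usage sketch (not part of the measured source) of the public CRYPTO_THREAD lock API wrapped above. The counter and the function sketch_bump are hypothetical; the lock itself would come from CRYPTO_THREAD_lock_new() and later go to CRYPTO_THREAD_lock_free().

static int sketch_bump(CRYPTO_RWLOCK *lock, int *counter)
{
    if (!CRYPTO_THREAD_write_lock(lock))       /* __owur: the return value must be checked */
        return 0;
    (*counter)++;
    return CRYPTO_THREAD_unlock(lock);
}
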
689
690
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
691
2.92G
{
692
2.92G
    if (pthread_once(once, init) != 0)
693
0
        return 0;
694
695
2.92G
    return 1;
696
2.92G
}
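
A minimal usage sketch (not part of the measured source) of one-time initialization through CRYPTO_THREAD_run_once above, assuming the public CRYPTO_ONCE_STATIC_INIT initializer from <openssl/crypto.h>; the sketch_* names are hypothetical.

static CRYPTO_ONCE sketch_once = CRYPTO_ONCE_STATIC_INIT;
static int sketch_init_ok = 0;

static void sketch_do_init(void)
{
    sketch_init_ok = 1;                        /* perform the real one-time setup here */
}

static int sketch_ensure_init(void)
{
    return CRYPTO_THREAD_run_once(&sketch_once, sketch_do_init) && sketch_init_ok;
}
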
697
698
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
699
1.89k
{
700
1.89k
    if (pthread_key_create(key, cleanup) != 0)
701
0
        return 0;
702
703
1.89k
    return 1;
704
1.89k
}
705
706
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
707
1.83G
{
708
1.83G
    return pthread_getspecific(*key);
709
1.83G
}
710
711
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
712
1.91k
{
713
1.91k
    if (pthread_setspecific(*key, val) != 0)
714
0
        return 0;
715
716
1.91k
    return 1;
717
1.91k
}
718
719
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
720
1.57k
{
721
1.57k
    if (pthread_key_delete(*key) != 0)
722
0
        return 0;
723
724
1.57k
    return 1;
725
1.57k
}
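
A minimal usage sketch (not part of the measured source) of per-thread data through the thread-local wrappers above; on this pthread backend the cleanup callback runs at thread exit. The sketch_* names are hypothetical.

static CRYPTO_THREAD_LOCAL sketch_key;

static void sketch_key_cleanup(void *p)
{
    OPENSSL_free(p);
}

static int sketch_key_setup(void)
{
    /* call once, e.g. under CRYPTO_THREAD_run_once() */
    return CRYPTO_THREAD_init_local(&sketch_key, sketch_key_cleanup);
}

static int sketch_stash(void *per_thread_value)
{
    if (CRYPTO_THREAD_get_local(&sketch_key) != NULL)
        return 1;                              /* already set for this thread */
    return CRYPTO_THREAD_set_local(&sketch_key, per_thread_value);
}
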
726
727
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
728
183k
{
729
183k
    return pthread_self();
730
183k
}
731
732
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
733
11.8k
{
734
11.8k
    return pthread_equal(a, b);
735
11.8k
}
736
737
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
738
10.9M
{
739
10.9M
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
740
10.9M
    if (__atomic_is_lock_free(sizeof(*val), val)) {
741
10.9M
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
742
10.9M
        return 1;
743
10.9M
    }
744
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
745
    /* This will work for all future Solaris versions. */
746
    if (ret != NULL) {
747
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
748
        return 1;
749
    }
750
# endif
751
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
752
0
        return 0;
753
754
0
    *val += amount;
755
0
    *ret  = *val;
756
757
0
    if (!CRYPTO_THREAD_unlock(lock))
758
0
        return 0;
759
760
0
    return 1;
761
0
}
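
A minimal usage sketch (not part of the measured source) of CRYPTO_atomic_add above. As the code shows, the lock argument is only used on the mutex fallback path, where passing NULL makes the call fail; sketch_count_up is hypothetical.

static int sketch_count_up(int *counter, CRYPTO_RWLOCK *lock)
{
    int newval;

    if (!CRYPTO_atomic_add(counter, 1, &newval, lock))
        return -1;                             /* no usable atomics and no lock */
    return newval;
}
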
762
763
int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
764
                        CRYPTO_RWLOCK *lock)
765
0
{
766
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
767
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
768
0
        *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
769
0
        return 1;
770
0
    }
771
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
772
    /* This will work for all future Solaris versions. */
773
    if (ret != NULL) {
774
        *ret = atomic_add_64_nv(val, op);
775
        return 1;
776
    }
777
# endif
778
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
779
0
        return 0;
780
0
    *val += op;
781
0
    *ret  = *val;
782
783
0
    if (!CRYPTO_THREAD_unlock(lock))
784
0
        return 0;
785
786
0
    return 1;
787
0
}
788
789
int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
790
                      CRYPTO_RWLOCK *lock)
791
0
{
792
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
793
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
794
0
        *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
795
0
        return 1;
796
0
    }
797
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
798
    /* This will work for all future Solaris versions. */
799
    if (ret != NULL) {
800
        *ret = atomic_and_64_nv(val, op);
801
        return 1;
802
    }
803
# endif
804
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
805
0
        return 0;
806
0
    *val &= op;
807
0
    *ret  = *val;
808
809
0
    if (!CRYPTO_THREAD_unlock(lock))
810
0
        return 0;
811
812
0
    return 1;
813
0
}
814
815
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
816
                     CRYPTO_RWLOCK *lock)
817
644
{
818
644
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
819
644
    if (__atomic_is_lock_free(sizeof(*val), val)) {
820
644
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
821
644
        return 1;
822
644
    }
823
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
824
    /* This will work for all future Solaris versions. */
825
    if (ret != NULL) {
826
        *ret = atomic_or_64_nv(val, op);
827
        return 1;
828
    }
829
# endif
830
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
831
0
        return 0;
832
0
    *val |= op;
833
0
    *ret  = *val;
834
835
0
    if (!CRYPTO_THREAD_unlock(lock))
836
0
        return 0;
837
838
0
    return 1;
839
0
}
840
841
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
842
2.43G
{
843
2.43G
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
844
2.43G
    if (__atomic_is_lock_free(sizeof(*val), val)) {
845
2.43G
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
846
2.43G
        return 1;
847
2.43G
    }
848
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
849
    /* This will work for all future Solaris versions. */
850
    if (ret != NULL) {
851
        *ret = atomic_or_64_nv(val, 0);
852
        return 1;
853
    }
854
# endif
855
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
856
0
        return 0;
857
0
    *ret  = *val;
858
0
    if (!CRYPTO_THREAD_unlock(lock))
859
0
        return 0;
860
861
0
    return 1;
862
0
}
863
864
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
865
28.2k
{
866
28.2k
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
867
28.2k
    if (__atomic_is_lock_free(sizeof(*dst), dst)) {
868
28.2k
        __atomic_store(dst, &val, __ATOMIC_RELEASE);
869
28.2k
        return 1;
870
28.2k
    }
871
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
872
    /* This will work for all future Solaris versions. */
873
    if (dst != NULL) {
874
        atomic_swap_64(dst, val);
875
        return 1;
876
    }
877
# endif
878
0
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
879
0
        return 0;
880
0
    *dst  = val;
881
0
    if (!CRYPTO_THREAD_unlock(lock))
882
0
        return 0;
883
884
0
    return 1;
885
0
}
886
887
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
888
0
{
889
0
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
890
0
    if (__atomic_is_lock_free(sizeof(*val), val)) {
891
0
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
892
0
        return 1;
893
0
    }
894
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
895
    /* This will work for all future Solaris versions. */
896
    if (ret != NULL) {
897
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
898
        return 1;
899
    }
900
# endif
901
0
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
902
0
        return 0;
903
0
    *ret  = *val;
904
0
    if (!CRYPTO_THREAD_unlock(lock))
905
0
        return 0;
906
907
0
    return 1;
908
0
}
909
910
# ifndef FIPS_MODULE
911
int openssl_init_fork_handlers(void)
912
0
{
913
0
    return 1;
914
0
}
915
# endif /* FIPS_MODULE */
916
917
int openssl_get_fork_id(void)
918
74.8k
{
919
74.8k
    return getpid();
920
74.8k
}
921
#endif