Coverage Report

Created: 2025-11-28 06:44

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/rust/registry/src/index.crates.io-1949cf8c6b5b557f/lock_api-0.4.14/src/remutex.rs
Line
Count
Source
1
// Copyright 2018 Amanieu d'Antras
2
//
3
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
// copied, modified, or distributed except according to those terms.
7
8
use crate::{
9
    mutex::{RawMutex, RawMutexFair, RawMutexTimed},
10
    GuardNoSend,
11
};
12
use core::{
13
    cell::{Cell, UnsafeCell},
14
    fmt,
15
    marker::PhantomData,
16
    mem,
17
    num::NonZeroUsize,
18
    ops::Deref,
19
    sync::atomic::{AtomicUsize, Ordering},
20
};
21
22
#[cfg(feature = "arc_lock")]
23
use alloc::sync::Arc;
24
#[cfg(feature = "arc_lock")]
25
use core::mem::ManuallyDrop;
26
#[cfg(feature = "arc_lock")]
27
use core::ptr;
28
29
#[cfg(feature = "owning_ref")]
30
use owning_ref::StableAddress;
31
32
#[cfg(feature = "serde")]
33
use serde::{Deserialize, Deserializer, Serialize, Serializer};
34
35
/// Helper trait which returns a non-zero thread ID.
36
///
37
/// The simplest way to implement this trait is to return the address of a
38
/// thread-local variable.
39
///
40
/// # Safety
41
///
42
/// Implementations of this trait must ensure that no two active threads share
43
/// the same thread ID. However the ID of a thread that has exited can be
44
/// re-used since that thread is no longer active.
45
pub unsafe trait GetThreadId {
46
    /// Initial value.
47
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
48
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
49
    #[allow(clippy::declare_interior_mutable_const)]
50
    const INIT: Self;
51
52
    /// Returns a non-zero thread ID which identifies the current thread of
53
    /// execution.
54
    fn nonzero_thread_id(&self) -> NonZeroUsize;
55
}
56
57
/// A raw mutex type that wraps another raw mutex to provide reentrancy.
58
///
59
/// Although this has the same methods as the [`RawMutex`] trait, it does
60
/// not implement it, and should not be used in the same way, since this
61
/// mutex can successfully acquire a lock multiple times in the same thread.
62
/// Only use this when you know you want a raw mutex that can be locked
63
/// reentrantly; you probably want [`ReentrantMutex`] instead.
64
pub struct RawReentrantMutex<R, G> {
65
    owner: AtomicUsize,
66
    lock_count: Cell<usize>,
67
    mutex: R,
68
    get_thread_id: G,
69
}
70
71
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
72
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}
73
74
impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
75
    /// Initial value for an unlocked mutex.
76
    #[allow(clippy::declare_interior_mutable_const)]
77
    pub const INIT: Self = RawReentrantMutex {
78
        owner: AtomicUsize::new(0),
79
        lock_count: Cell::new(0),
80
        mutex: R::INIT,
81
        get_thread_id: G::INIT,
82
    };
83
84
    #[inline]
85
0
    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
86
0
        let id = self.get_thread_id.nonzero_thread_id().get();
87
0
        if self.owner.load(Ordering::Relaxed) == id {
88
0
            self.lock_count.set(
89
0
                self.lock_count
90
0
                    .get()
91
0
                    .checked_add(1)
92
0
                    .expect("ReentrantMutex lock count overflow"),
93
0
            );
94
0
        } else {
95
0
            if !try_lock() {
96
0
                return false;
97
0
            }
98
0
            self.owner.store(id, Ordering::Relaxed);
99
0
            debug_assert_eq!(self.lock_count.get(), 0);
100
0
            self.lock_count.set(1);
101
        }
102
0
        true
103
0
    }
104
105
    /// Acquires this mutex, blocking if it's held by another thread.
106
    #[inline]
107
0
    pub fn lock(&self) {
108
0
        self.lock_internal(|| {
109
0
            self.mutex.lock();
110
0
            true
111
0
        });
112
0
    }
113
114
    /// Attempts to acquire this mutex without blocking. Returns `true`
115
    /// if the lock was successfully acquired and `false` otherwise.
116
    #[inline]
117
0
    pub fn try_lock(&self) -> bool {
118
0
        self.lock_internal(|| self.mutex.try_lock())
119
0
    }
120
121
    /// Unlocks this mutex. The inner mutex may not be unlocked if
122
    /// this mutex was acquired previously in the current thread.
123
    ///
124
    /// # Safety
125
    ///
126
    /// This method may only be called if the mutex is held by the current thread.
127
    #[inline]
128
0
    pub unsafe fn unlock(&self) {
129
0
        let lock_count = self.lock_count.get() - 1;
130
0
        self.lock_count.set(lock_count);
131
0
        if lock_count == 0 {
132
0
            self.owner.store(0, Ordering::Relaxed);
133
0
            self.mutex.unlock();
134
0
        }
135
0
    }
136
137
    /// Checks whether the mutex is currently locked.
138
    #[inline]
139
0
    pub fn is_locked(&self) -> bool {
140
0
        self.mutex.is_locked()
141
0
    }
142
143
    /// Checks whether the mutex is currently held by the current thread.
144
    #[inline]
145
0
    pub fn is_owned_by_current_thread(&self) -> bool {
146
0
        let id = self.get_thread_id.nonzero_thread_id().get();
147
0
        self.owner.load(Ordering::Relaxed) == id
148
0
    }
149
}
150
151
impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
152
    /// Unlocks this mutex using a fair unlock protocol. The inner mutex
153
    /// may not be unlocked if this mutex was acquired previously in the
154
    /// current thread.
155
    ///
156
    /// # Safety
157
    ///
158
    /// This method may only be called if the mutex is held by the current thread.
159
    #[inline]
160
0
    pub unsafe fn unlock_fair(&self) {
161
0
        let lock_count = self.lock_count.get() - 1;
162
0
        self.lock_count.set(lock_count);
163
0
        if lock_count == 0 {
164
0
            self.owner.store(0, Ordering::Relaxed);
165
0
            self.mutex.unlock_fair();
166
0
        }
167
0
    }
168
169
    /// Temporarily yields the mutex to a waiting thread if there is one.
170
    ///
171
    /// This method is functionally equivalent to calling `unlock_fair` followed
172
    /// by `lock`, however it can be much more efficient in the case where there
173
    /// are no waiting threads.
174
    ///
175
    /// # Safety
176
    ///
177
    /// This method may only be called if the mutex is held by the current thread.
178
    #[inline]
179
0
    pub unsafe fn bump(&self) {
180
0
        if self.lock_count.get() == 1 {
181
0
            let id = self.owner.load(Ordering::Relaxed);
182
0
            self.owner.store(0, Ordering::Relaxed);
183
0
            self.lock_count.set(0);
184
0
            self.mutex.bump();
185
0
            self.owner.store(id, Ordering::Relaxed);
186
0
            self.lock_count.set(1);
187
0
        }
188
0
    }
189
}
190
191
impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
192
    /// Attempts to acquire this lock until a timeout is reached.
193
    #[inline]
194
0
    pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
195
0
        self.lock_internal(|| self.mutex.try_lock_until(timeout))
196
0
    }
197
198
    /// Attempts to acquire this lock until a timeout is reached.
199
    #[inline]
200
0
    pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
201
0
        self.lock_internal(|| self.mutex.try_lock_for(timeout))
202
0
    }
203
}
204
205
/// A mutex which can be recursively locked by a single thread.
206
///
207
/// This type is identical to `Mutex` except for the following points:
208
///
209
/// - Locking multiple times from the same thread will work correctly instead of
210
///   deadlocking.
211
/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
212
///   Use a `RefCell` if you need this.
213
///
214
/// See [`Mutex`](crate::Mutex) for more details about the underlying mutex
215
/// primitive.
216
pub struct ReentrantMutex<R, G, T: ?Sized> {
217
    raw: RawReentrantMutex<R, G>,
218
    data: UnsafeCell<T>,
219
}
220
221
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
222
    for ReentrantMutex<R, G, T>
223
{
224
}
225
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
226
    for ReentrantMutex<R, G, T>
227
{
228
}
229
230
impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
231
    /// Creates a new reentrant mutex in an unlocked state ready for use.
232
    #[inline]
233
0
    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
234
0
        ReentrantMutex {
235
0
            data: UnsafeCell::new(val),
236
0
            raw: RawReentrantMutex {
237
0
                owner: AtomicUsize::new(0),
238
0
                lock_count: Cell::new(0),
239
0
                mutex: R::INIT,
240
0
                get_thread_id: G::INIT,
241
0
            },
242
0
        }
243
0
    }
244
245
    /// Consumes this mutex, returning the underlying data.
246
    #[inline]
247
0
    pub fn into_inner(self) -> T {
248
0
        self.data.into_inner()
249
0
    }
250
}
251
252
impl<R, G, T> ReentrantMutex<R, G, T> {
253
    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
254
    /// helper to get the thread ID.
255
    #[inline]
256
0
    pub const fn from_raw(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
257
0
        ReentrantMutex {
258
0
            data: UnsafeCell::new(val),
259
0
            raw: RawReentrantMutex {
260
0
                owner: AtomicUsize::new(0),
261
0
                lock_count: Cell::new(0),
262
0
                mutex: raw_mutex,
263
0
                get_thread_id,
264
0
            },
265
0
        }
266
0
    }
267
268
    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
269
    /// helper to get the thread ID.
270
    ///
271
    /// This allows creating a reentrant mutex in a constant context on stable
272
    /// Rust.
273
    ///
274
    /// This method is a legacy alias for [`from_raw`](Self::from_raw).
275
    #[inline]
276
0
    pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
277
0
        Self::from_raw(raw_mutex, get_thread_id, val)
278
0
    }
279
}
280
281
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
282
    /// Creates a new `ReentrantMutexGuard` without checking if the lock is held.
283
    ///
284
    /// # Safety
285
    ///
286
    /// This method must only be called if the thread logically holds the lock.
287
    ///
288
    /// Calling this function when a guard has already been produced is undefined behaviour unless
289
    /// the guard was forgotten with `mem::forget`.
290
    #[inline]
291
0
    pub unsafe fn make_guard_unchecked(&self) -> ReentrantMutexGuard<'_, R, G, T> {
292
0
        ReentrantMutexGuard {
293
0
            remutex: &self,
294
0
            marker: PhantomData,
295
0
        }
296
0
    }
297
298
    /// Acquires a reentrant mutex, blocking the current thread until it is able
299
    /// to do so.
300
    ///
301
    /// If the mutex is held by another thread then this function will block the
302
    /// local thread until it is available to acquire the mutex. If the mutex is
303
    /// already held by the current thread then this function will increment the
304
    /// lock reference count and return immediately. Upon returning,
305
    /// the thread is the only thread with the mutex held. An RAII guard is
306
    /// returned to allow scoped unlock of the lock. When the guard goes out of
307
    /// scope, the mutex will be unlocked.
308
    #[inline]
309
    #[track_caller]
310
0
    pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
311
0
        self.raw.lock();
312
        // SAFETY: The lock is held, as required.
313
0
        unsafe { self.make_guard_unchecked() }
314
0
    }
315
316
    /// Attempts to acquire this lock.
317
    ///
318
    /// If the lock could not be acquired at this time, then `None` is returned.
319
    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
320
    /// guard is dropped.
321
    ///
322
    /// This function does not block.
323
    #[inline]
324
    #[track_caller]
325
0
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
326
0
        if self.raw.try_lock() {
327
            // SAFETY: The lock is held, as required.
328
0
            Some(unsafe { self.make_guard_unchecked() })
329
        } else {
330
0
            None
331
        }
332
0
    }
333
334
    /// Returns a mutable reference to the underlying data.
335
    ///
336
    /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
337
    /// take place---the mutable borrow statically guarantees no locks exist.
338
    #[inline]
339
0
    pub fn get_mut(&mut self) -> &mut T {
340
0
        unsafe { &mut *self.data.get() }
341
0
    }
342
343
    /// Checks whether the mutex is currently locked.
344
    #[inline]
345
    #[track_caller]
346
0
    pub fn is_locked(&self) -> bool {
347
0
        self.raw.is_locked()
348
0
    }
349
350
    /// Checks whether the mutex is currently held by the current thread.
351
    #[inline]
352
    #[track_caller]
353
0
    pub fn is_owned_by_current_thread(&self) -> bool {
354
0
        self.raw.is_owned_by_current_thread()
355
0
    }
356
357
    /// Forcibly unlocks the mutex.
358
    ///
359
    /// This is useful when combined with `mem::forget` to hold a lock without
360
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
361
    /// dealing with FFI.
362
    ///
363
    /// # Safety
364
    ///
365
    /// This method must only be called if the current thread logically owns a
366
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
367
    /// Behavior is undefined if a mutex is unlocked when not locked.
368
    #[inline]
369
    #[track_caller]
370
0
    pub unsafe fn force_unlock(&self) {
371
0
        self.raw.unlock();
372
0
    }
373
374
    /// Returns the underlying raw mutex object.
375
    ///
376
    /// Note that you will most likely need to import the `RawMutex` trait from
377
    /// `lock_api` to be able to call functions on the raw mutex.
378
    ///
379
    /// # Safety
380
    ///
381
    /// This method is unsafe because it allows unlocking a mutex while
382
    /// still holding a reference to a `ReentrantMutexGuard`.
383
    #[inline]
384
0
    pub unsafe fn raw(&self) -> &R {
385
0
        &self.raw.mutex
386
0
    }
387
388
    /// Returns a raw pointer to the underlying data.
389
    ///
390
    /// This is useful when combined with `mem::forget` to hold a lock without
391
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example
392
    /// when dealing with FFI.
393
    ///
394
    /// # Safety
395
    ///
396
    /// You must ensure that there are no data races when dereferencing the
397
    /// returned pointer, for example if the current thread logically owns a
398
    /// `ReentrantMutexGuard` but that guard has been discarded using
399
    /// `mem::forget`.
400
    #[inline]
401
0
    pub fn data_ptr(&self) -> *mut T {
402
0
        self.data.get()
403
0
    }
404
405
    /// Creates a new `ArcReentrantMutexGuard` without checking if the lock is held.
406
    ///
407
    /// # Safety
408
    ///
409
    /// This method must only be called if the thread logically holds the lock.
410
    ///
411
    /// Calling this function when a guard has already been produced is undefined behaviour unless
412
    /// the guard was forgotten with `mem::forget`.
413
    #[cfg(feature = "arc_lock")]
414
    #[inline]
415
    pub unsafe fn make_arc_guard_unchecked(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
416
        ArcReentrantMutexGuard {
417
            remutex: self.clone(),
418
            marker: PhantomData,
419
        }
420
    }
421
422
    /// Acquires a reentrant mutex through an `Arc`.
423
    ///
424
    /// This method is similar to the `lock` method; however, it requires the `ReentrantMutex` to be inside of an
425
    /// `Arc` and the resulting mutex guard has no lifetime requirements.
426
    #[cfg(feature = "arc_lock")]
427
    #[inline]
428
    #[track_caller]
429
    pub fn lock_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
430
        self.raw.lock();
431
        // SAFETY: locking guarantee is upheld
432
        unsafe { self.make_arc_guard_unchecked() }
433
    }
434
435
    /// Attempts to acquire a reentrant mutex through an `Arc`.
436
    ///
437
    /// This method is similar to the `try_lock` method; however, it requires the `ReentrantMutex` to be inside
438
    /// of an `Arc` and the resulting mutex guard has no lifetime requirements.
439
    #[cfg(feature = "arc_lock")]
440
    #[inline]
441
    #[track_caller]
442
    pub fn try_lock_arc(self: &Arc<Self>) -> Option<ArcReentrantMutexGuard<R, G, T>> {
443
        if self.raw.try_lock() {
444
            // SAFETY: locking guarantee is upheld
445
            Some(unsafe { self.make_arc_guard_unchecked() })
446
        } else {
447
            None
448
        }
449
    }
450
}
451
452
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
453
    /// Forcibly unlocks the mutex using a fair unlock protocol.
454
    ///
455
    /// This is useful when combined with `mem::forget` to hold a lock without
456
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
457
    /// dealing with FFI.
458
    ///
459
    /// # Safety
460
    ///
461
    /// This method must only be called if the current thread logically owns a
462
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
463
    /// Behavior is undefined if a mutex is unlocked when not locked.
464
    #[inline]
465
    #[track_caller]
466
0
    pub unsafe fn force_unlock_fair(&self) {
467
0
        self.raw.unlock_fair();
468
0
    }
469
}
470
471
impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
472
    /// Attempts to acquire this lock until a timeout is reached.
473
    ///
474
    /// If the lock could not be acquired before the timeout expired, then
475
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
476
    /// be unlocked when the guard is dropped.
477
    #[inline]
478
    #[track_caller]
479
0
    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
480
0
        if self.raw.try_lock_for(timeout) {
481
            // SAFETY: The lock is held, as required.
482
0
            Some(unsafe { self.make_guard_unchecked() })
483
        } else {
484
0
            None
485
        }
486
0
    }
487
488
    /// Attempts to acquire this lock until a timeout is reached.
489
    ///
490
    /// If the lock could not be acquired before the timeout expired, then
491
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
492
    /// be unlocked when the guard is dropped.
493
    #[inline]
494
    #[track_caller]
495
0
    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
496
0
        if self.raw.try_lock_until(timeout) {
497
            // SAFETY: The lock is held, as required.
498
0
            Some(unsafe { self.make_guard_unchecked() })
499
        } else {
500
0
            None
501
        }
502
0
    }
503
504
    /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
505
    ///
506
    /// This method is similar to the `try_lock_for` method; however, it requires the `ReentrantMutex` to be
507
    /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
508
    #[cfg(feature = "arc_lock")]
509
    #[inline]
510
    #[track_caller]
511
    pub fn try_lock_arc_for(
512
        self: &Arc<Self>,
513
        timeout: R::Duration,
514
    ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
515
        if self.raw.try_lock_for(timeout) {
516
            // SAFETY: locking guarantee is upheld
517
            Some(unsafe { self.make_arc_guard_unchecked() })
518
        } else {
519
            None
520
        }
521
    }
522
523
    /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
524
    ///
525
    /// This method is similar to the `try_lock_until` method; however, it requires the `ReentrantMutex` to be
526
    /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
527
    #[cfg(feature = "arc_lock")]
528
    #[inline]
529
    #[track_caller]
530
    pub fn try_lock_arc_until(
531
        self: &Arc<Self>,
532
        timeout: R::Instant,
533
    ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
534
        if self.raw.try_lock_until(timeout) {
535
            // SAFETY: locking guarantee is upheld
536
            Some(unsafe { self.make_arc_guard_unchecked() })
537
        } else {
538
            None
539
        }
540
    }
541
}
542
543
impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
544
    #[inline]
545
0
    fn default() -> ReentrantMutex<R, G, T> {
546
0
        ReentrantMutex::new(Default::default())
547
0
    }
548
}
549
550
impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
551
    #[inline]
552
0
    fn from(t: T) -> ReentrantMutex<R, G, T> {
553
0
        ReentrantMutex::new(t)
554
0
    }
555
}
556
557
impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
558
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
559
0
        match self.try_lock() {
560
0
            Some(guard) => f
561
0
                .debug_struct("ReentrantMutex")
562
0
                .field("data", &&*guard)
563
0
                .finish(),
564
            None => {
565
                struct LockedPlaceholder;
566
                impl fmt::Debug for LockedPlaceholder {
567
0
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
568
0
                        f.write_str("<locked>")
569
0
                    }
570
                }
571
572
0
                f.debug_struct("ReentrantMutex")
573
0
                    .field("data", &LockedPlaceholder)
574
0
                    .finish()
575
            }
576
        }
577
0
    }
578
}
579
580
// Copied and modified from serde
581
#[cfg(feature = "serde")]
582
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
583
where
584
    R: RawMutex,
585
    G: GetThreadId,
586
    T: Serialize + ?Sized,
587
{
588
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
589
    where
590
        S: Serializer,
591
    {
592
        self.lock().serialize(serializer)
593
    }
594
}
595
596
#[cfg(feature = "serde")]
597
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
598
where
599
    R: RawMutex,
600
    G: GetThreadId,
601
    T: Deserialize<'de> + ?Sized,
602
{
603
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
604
    where
605
        D: Deserializer<'de>,
606
    {
607
        Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
608
    }
609
}
610
611
/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
612
/// is dropped (falls out of scope), the lock will be unlocked.
613
///
614
/// The data protected by the mutex can be accessed through this guard via its
615
/// `Deref` implementation.
616
#[clippy::has_significant_drop]
617
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
618
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
619
    remutex: &'a ReentrantMutex<R, G, T>,
620
    marker: PhantomData<(&'a T, GuardNoSend)>,
621
}
622
623
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
624
    for ReentrantMutexGuard<'a, R, G, T>
625
{
626
}
627
628
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
629
    /// Returns a reference to the original `ReentrantMutex` object.
630
0
    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
631
0
        s.remutex
632
0
    }
633
634
    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
635
    ///
636
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
637
    /// in already locked the mutex.
638
    ///
639
    /// This is an associated function that needs to be
640
    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
641
    /// the same name on the contents of the locked data.
642
    #[inline]
643
0
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
644
0
    where
645
0
        F: FnOnce(&T) -> &U,
646
    {
647
0
        let raw = &s.remutex.raw;
648
0
        let data = f(unsafe { &*s.remutex.data.get() });
649
0
        mem::forget(s);
650
0
        MappedReentrantMutexGuard {
651
0
            raw,
652
0
            data,
653
0
            marker: PhantomData,
654
0
        }
655
0
    }
656
657
    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
658
    /// locked data. The original guard is returned if the closure returns `None`.
659
    ///
660
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
661
    /// in already locked the mutex.
662
    ///
663
    /// This is an associated function that needs to be
664
    /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
665
    /// the same name on the contents of the locked data.
666
    #[inline]
667
0
    pub fn try_map<U: ?Sized, F>(
668
0
        s: Self,
669
0
        f: F,
670
0
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
671
0
    where
672
0
        F: FnOnce(&T) -> Option<&U>,
673
    {
674
0
        let raw = &s.remutex.raw;
675
0
        let data = match f(unsafe { &*s.remutex.data.get() }) {
676
0
            Some(data) => data,
677
0
            None => return Err(s),
678
        };
679
0
        mem::forget(s);
680
0
        Ok(MappedReentrantMutexGuard {
681
0
            raw,
682
0
            data,
683
0
            marker: PhantomData,
684
0
        })
685
0
    }
686
687
    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
688
    /// locked data. The original guard is returned alongside arbitrary user data
689
    /// if the closure returns `Err`.
690
    ///
691
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
692
    /// in already locked the mutex.
693
    ///
694
    /// This is an associated function that needs to be
695
    /// used as `ReentrantMutexGuard::try_map_or_err(...)`. A method would interfere with methods of
696
    /// the same name on the contents of the locked data.
697
    #[inline]
698
0
    pub fn try_map_or_err<U: ?Sized, F, E>(
699
0
        s: Self,
700
0
        f: F,
701
0
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, (Self, E)>
702
0
    where
703
0
        F: FnOnce(&T) -> Result<&U, E>,
704
    {
705
0
        let raw = &s.remutex.raw;
706
0
        let data = match f(unsafe { &*s.remutex.data.get() }) {
707
0
            Ok(data) => data,
708
0
            Err(e) => return Err((s, e)),
709
        };
710
0
        mem::forget(s);
711
0
        Ok(MappedReentrantMutexGuard {
712
0
            raw,
713
0
            data,
714
0
            marker: PhantomData,
715
0
        })
716
0
    }
717
718
    /// Temporarily unlocks the mutex to execute the given function.
719
    ///
720
    /// This is safe because `&mut` guarantees that there exist no other
721
    /// references to the data protected by the mutex.
722
    #[inline]
723
    #[track_caller]
724
0
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
725
0
    where
726
0
        F: FnOnce() -> U,
727
    {
728
        // Safety: A ReentrantMutexGuard always holds the lock.
729
0
        unsafe {
730
0
            s.remutex.raw.unlock();
731
0
        }
732
0
        defer!(s.remutex.raw.lock());
733
0
        f()
734
0
    }
735
}
736
737
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
738
    ReentrantMutexGuard<'a, R, G, T>
739
{
740
    /// Unlocks the mutex using a fair unlock protocol.
741
    ///
742
    /// By default, mutexes are unfair and allow the current thread to re-lock
743
    /// the mutex before another has the chance to acquire the lock, even if
744
    /// that thread has been blocked on the mutex for a long time. This is the
745
    /// default because it allows much higher throughput as it avoids forcing a
746
    /// context switch on every mutex unlock. This can result in one thread
747
    /// acquiring a mutex many more times than other threads.
748
    ///
749
    /// However in some cases it can be beneficial to ensure fairness by forcing
750
    /// the lock to pass on to a waiting thread if there is one. This is done by
751
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
752
    #[inline]
753
    #[track_caller]
754
0
    pub fn unlock_fair(s: Self) {
755
        // Safety: A ReentrantMutexGuard always holds the lock
756
0
        unsafe {
757
0
            s.remutex.raw.unlock_fair();
758
0
        }
759
0
        mem::forget(s);
760
0
    }
761
762
    /// Temporarily unlocks the mutex to execute the given function.
763
    ///
764
    /// The mutex is unlocked using a fair unlock protocol.
765
    ///
766
    /// This is safe because `&mut` guarantees that there exist no other
767
    /// references to the data protected by the mutex.
768
    #[inline]
769
    #[track_caller]
770
0
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
771
0
    where
772
0
        F: FnOnce() -> U,
773
    {
774
        // Safety: A ReentrantMutexGuard always holds the lock
775
0
        unsafe {
776
0
            s.remutex.raw.unlock_fair();
777
0
        }
778
0
        defer!(s.remutex.raw.lock());
779
0
        f()
780
0
    }
781
782
    /// Temporarily yields the mutex to a waiting thread if there is one.
783
    ///
784
    /// This method is functionally equivalent to calling `unlock_fair` followed
785
    /// by `lock`, however it can be much more efficient in the case where there
786
    /// are no waiting threads.
787
    #[inline]
788
    #[track_caller]
789
0
    pub fn bump(s: &mut Self) {
790
        // Safety: A ReentrantMutexGuard always holds the lock
791
0
        unsafe {
792
0
            s.remutex.raw.bump();
793
0
        }
794
0
    }
795
}
796
797
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
798
    for ReentrantMutexGuard<'a, R, G, T>
799
{
800
    type Target = T;
801
    #[inline]
802
0
    fn deref(&self) -> &T {
803
0
        unsafe { &*self.remutex.data.get() }
804
0
    }
805
}
806
807
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
808
    for ReentrantMutexGuard<'a, R, G, T>
809
{
810
    #[inline]
811
0
    fn drop(&mut self) {
812
        // Safety: A ReentrantMutexGuard always holds the lock.
813
0
        unsafe {
814
0
            self.remutex.raw.unlock();
815
0
        }
816
0
    }
817
}
818
819
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
820
    for ReentrantMutexGuard<'a, R, G, T>
821
{
822
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
823
0
        fmt::Debug::fmt(&**self, f)
824
0
    }
825
}
826
827
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
828
    for ReentrantMutexGuard<'a, R, G, T>
829
{
830
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
831
0
        (**self).fmt(f)
832
0
    }
833
}
834
835
#[cfg(feature = "owning_ref")]
836
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
837
    for ReentrantMutexGuard<'a, R, G, T>
838
{
839
}
840
841
/// An RAII mutex guard returned by the `Arc` locking operations on `ReentrantMutex`.
///
/// This is similar to the `ReentrantMutexGuard` struct, except instead of using a reference to unlock the
/// `Mutex` it uses an `Arc<ReentrantMutex>`. This has several advantages, most notably that it has an `'static`
/// lifetime.
#[cfg(feature = "arc_lock")]
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ArcReentrantMutexGuard<R: RawMutex, G: GetThreadId, T: ?Sized> {
    // Shared ownership of the mutex keeps it alive for as long as the guard
    // exists, which is what makes the `'static` lifetime possible.
    remutex: Arc<ReentrantMutex<R, G, T>>,
    // Zero-sized marker that makes the guard non-`Send` (see `GuardNoSend`).
    marker: PhantomData<GuardNoSend>,
}
853
854
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Returns a reference to the `ReentrantMutex` this object is guarding, contained in its `Arc`.
    pub fn remutex(s: &Self) -> &Arc<ReentrantMutex<R, G, T>> {
        &s.remutex
    }

    /// Unlocks the mutex and returns the `Arc` that was held by the [`ArcReentrantMutexGuard`].
    #[inline]
    pub fn into_arc(s: Self) -> Arc<ReentrantMutex<R, G, T>> {
        // SAFETY: Skip our Drop impl and manually unlock the mutex.
        let s = ManuallyDrop::new(s);
        unsafe {
            s.remutex.raw.unlock();
            // Move the `Arc` out of the guard. `ManuallyDrop` above guarantees
            // the guard's destructor will not also run (which would unlock a
            // second time and double-drop the `Arc`).
            ptr::read(&s.remutex)
        }
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    #[track_caller]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        // Re-acquire the lock when this scope exits — even if `f` panics —
        // so the guard's invariant (lock held) is restored before it is used
        // or dropped.
        defer!(s.remutex.raw.lock());
        f()
    }
}
890
891
#[cfg(feature = "arc_lock")]
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// This is functionally identical to the `unlock_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    #[track_caller]
    pub fn unlock_fair(s: Self) {
        // Delegate to `into_arc_fair` and immediately drop the returned `Arc`;
        // the fair unlock happens inside `into_arc_fair`.
        drop(Self::into_arc_fair(s));
    }

    /// Unlocks the mutex using a fair unlock protocol and returns the `Arc` that was held by the [`ArcReentrantMutexGuard`].
    #[inline]
    pub fn into_arc_fair(s: Self) -> Arc<ReentrantMutex<R, G, T>> {
        // SAFETY: Skip our Drop impl and manually unlock the mutex.
        let s = ManuallyDrop::new(s);
        unsafe {
            s.remutex.raw.unlock_fair();
            // Move the `Arc` out; `ManuallyDrop` prevents the destructor from
            // unlocking again or double-dropping the `Arc`.
            ptr::read(&s.remutex)
        }
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is functionally identical to the `unlocked_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    #[track_caller]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // Re-acquire on scope exit (including panic in `f`) so the guard's
        // invariant is restored.
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This is functionally equivalent to the `bump` method on [`ReentrantMutexGuard`].
    #[inline]
    #[track_caller]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}
942
943
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Deref for ArcReentrantMutexGuard<R, G, T> {
    type Target = T;

    /// Borrows the data protected by the mutex.
    #[inline]
    fn deref(&self) -> &T {
        // The guard holds the lock for its lifetime, so a shared reference
        // into the mutex's `UnsafeCell` is sound here.
        let data_ptr = self.remutex.data.get();
        unsafe { &*data_ptr }
    }
}
951
952
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Drop for ArcReentrantMutexGuard<R, G, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: A ReentrantMutexGuard always holds the lock.
        // NOTE(review): `raw` is the `RawReentrantMutex`; its `unlock`
        // presumably releases one level of the recursive acquisition —
        // confirm against its definition earlier in this file.
        unsafe {
            self.remutex.raw.unlock();
        }
    }
}
962
963
/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // Borrow of the raw mutex; used to unlock it when the guard is dropped.
    raw: &'a RawReentrantMutex<R, G>,
    // Pointer to the mapped component of the protected data; kept valid by
    // the lock being held for the guard's lifetime.
    data: *const T,
    // Ties the guard to the `'a` borrow of the data it points into.
    marker: PhantomData<&'a T>,
}
977
978
// Safety: the guard only hands out `&T` (see its `Deref` impl below), so it
// may be shared between threads when `T: Sync`; `R` and `G` are also required
// to be `Sync` because the guard holds a shared borrow of the
// `RawReentrantMutex<R, G>`.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}
982
983
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        // Safety: the guard holds the lock, so `s.data` is valid to dereference.
        let data = f(unsafe { &*s.data });
        // Skip `s`'s destructor so the lock is not released here; ownership of
        // the lock transfers to the guard constructed below.
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        // Safety: the guard holds the lock, so `s.data` is valid to dereference.
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            // Hand the intact guard — still holding the lock — back to the caller.
            None => return Err(s),
        };
        // Skip `s`'s destructor; the lock now belongs to the returned guard.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned alongside arbitrary user data
    /// if the closure returns `Err`.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::try_map_or_err(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map_or_err<U: ?Sized, F, E>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, (Self, E)>
    where
        F: FnOnce(&T) -> Result<&U, E>,
    {
        let raw = s.raw;
        // Safety: the guard holds the lock, so `s.data` is valid to dereference.
        let data = match f(unsafe { &*s.data }) {
            Ok(data) => data,
            // Return the intact guard together with the caller's error value.
            Err(e) => return Err((s, e)),
        };
        // Skip `s`'s destructor; the lock now belongs to the returned guard.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}
1070
1071
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    #[track_caller]
    pub fn unlock_fair(s: Self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock
        unsafe {
            s.raw.unlock_fair();
        }
        // The mutex was already unlocked (fairly) above, so skip the guard's
        // destructor to avoid a second, unfair unlock.
        mem::forget(s);
    }
}
1096
1097
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
1098
    for MappedReentrantMutexGuard<'a, R, G, T>
1099
{
1100
    type Target = T;
1101
    #[inline]
1102
0
    fn deref(&self) -> &T {
1103
0
        unsafe { &*self.data }
1104
0
    }
1105
}
1106
1107
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    #[inline]
    fn drop(&mut self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock.
        // NOTE(review): `raw` is the borrowed `RawReentrantMutex`; its
        // `unlock` presumably releases one level of the recursive
        // acquisition — confirm against its definition earlier in this file.
        unsafe {
            self.raw.unlock();
        }
    }
}
1118
1119
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
1120
    for MappedReentrantMutexGuard<'a, R, G, T>
1121
{
1122
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1123
0
        fmt::Debug::fmt(&**self, f)
1124
0
    }
1125
}
1126
1127
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1128
    for MappedReentrantMutexGuard<'a, R, G, T>
1129
{
1130
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1131
0
        (**self).fmt(f)
1132
0
    }
1133
}
1134
1135
#[cfg(feature = "owning_ref")]
// Safety: `deref` returns the stored `data` pointer, which never changes for
// the lifetime of the guard, so the address handed out is stable as
// `StableAddress` requires.
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}