Coverage Report

Created: 2025-10-10 07:05

/rust/registry/src/index.crates.io-1949cf8c6b5b557f/lock_api-0.4.14/src/rwlock.rs
Line
Count
Source
1
// Copyright 2016 Amanieu d'Antras
2
//
3
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
// copied, modified, or distributed except according to those terms.
7
8
use core::cell::UnsafeCell;
9
use core::fmt;
10
use core::marker::PhantomData;
11
use core::mem;
12
use core::ops::{Deref, DerefMut};
13
14
#[cfg(feature = "arc_lock")]
15
use alloc::sync::Arc;
16
#[cfg(feature = "arc_lock")]
17
use core::mem::ManuallyDrop;
18
#[cfg(feature = "arc_lock")]
19
use core::ptr;
20
21
#[cfg(feature = "owning_ref")]
22
use owning_ref::StableAddress;
23
24
#[cfg(feature = "serde")]
25
use serde::{Deserialize, Deserializer, Serialize, Serializer};
26
27
/// Basic operations for a reader-writer lock.
28
///
29
/// Types implementing this trait can be used by `RwLock` to form a safe and
30
/// fully-functioning `RwLock` type.
31
///
32
/// # Safety
33
///
34
/// Implementations of this trait must ensure that the `RwLock` is actually
35
/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
36
/// lock exists, and a shared lock can't be acquired while an exclusive lock
37
/// exists.
38
pub unsafe trait RawRwLock {
39
    /// Initial value for an unlocked `RwLock`.
40
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
41
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
42
    #[allow(clippy::declare_interior_mutable_const)]
43
    const INIT: Self;
44
45
    /// Marker type which determines whether a lock guard should be `Send`. Use
46
    /// one of the `GuardSend` or `GuardNoSend` helper types here.
47
    type GuardMarker;
48
49
    /// Acquires a shared lock, blocking the current thread until it is able to do so.
50
    fn lock_shared(&self);
51
52
    /// Attempts to acquire a shared lock without blocking.
53
    fn try_lock_shared(&self) -> bool;
54
55
    /// Releases a shared lock.
56
    ///
57
    /// # Safety
58
    ///
59
    /// This method may only be called if a shared lock is held in the current context.
60
    unsafe fn unlock_shared(&self);
61
62
    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
63
    fn lock_exclusive(&self);
64
65
    /// Attempts to acquire an exclusive lock without blocking.
66
    fn try_lock_exclusive(&self) -> bool;
67
68
    /// Releases an exclusive lock.
69
    ///
70
    /// # Safety
71
    ///
72
    /// This method may only be called if an exclusive lock is held in the current context.
73
    unsafe fn unlock_exclusive(&self);
74
75
    /// Checks if this `RwLock` is currently locked in any way.
76
    #[inline]
77
0
    fn is_locked(&self) -> bool {
78
0
        let acquired_lock = self.try_lock_exclusive();
79
0
        if acquired_lock {
80
            // Safety: A lock was successfully acquired above.
81
0
            unsafe {
82
0
                self.unlock_exclusive();
83
0
            }
84
0
        }
85
0
        !acquired_lock
86
0
    }
87
88
    /// Checks if this `RwLock` is currently exclusively locked.
89
0
    fn is_locked_exclusive(&self) -> bool {
90
0
        let acquired_lock = self.try_lock_shared();
91
0
        if acquired_lock {
92
            // Safety: A shared lock was successfully acquired above.
93
0
            unsafe {
94
0
                self.unlock_shared();
95
0
            }
96
0
        }
97
0
        !acquired_lock
98
0
    }
99
}
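To make the contract above concrete, here is a minimal, spin-based implementation of `RawRwLock` (a sketch only, assuming `lock_api` as a dependency; real backends such as parking_lot park waiting threads instead of spinning):

use std::sync::atomic::{AtomicUsize, Ordering};

use lock_api::GuardSend;

// Sentinel marking the exclusively-locked state; any other non-zero value is
// the number of active readers (assumes the count never approaches usize::MAX).
const WRITER: usize = usize::MAX;

pub struct RawSpinRwLock {
    state: AtomicUsize,
}

unsafe impl lock_api::RawRwLock for RawSpinRwLock {
    const INIT: Self = RawSpinRwLock { state: AtomicUsize::new(0) };

    // Guards of this lock may safely be sent to other threads.
    type GuardMarker = GuardSend;

    fn lock_shared(&self) {
        while !self.try_lock_shared() {
            std::hint::spin_loop();
        }
    }

    fn try_lock_shared(&self) -> bool {
        let state = self.state.load(Ordering::Relaxed);
        state != WRITER
            && self
                .state
                .compare_exchange(state, state + 1, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
    }

    unsafe fn unlock_shared(&self) {
        // Caller guarantees a shared lock is held, so the count is non-zero.
        self.state.fetch_sub(1, Ordering::Release);
    }

    fn lock_exclusive(&self) {
        while !self.try_lock_exclusive() {
            std::hint::spin_loop();
        }
    }

    fn try_lock_exclusive(&self) -> bool {
        self.state
            .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock_exclusive(&self) {
        // Caller guarantees the exclusive lock is held.
        self.state.store(0, Ordering::Release);
    }
}

// The generic RwLock defined in this file then provides the full API on top:
pub type SpinRwLock<T> = lock_api::RwLock<RawSpinRwLock, T>;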
100
101
/// Additional methods for `RwLock`s which support fair unlocking.
102
///
103
/// Fair unlocking means that a lock is handed directly over to the next waiting
104
/// thread if there is one, without giving other threads the opportunity to
105
/// "steal" the lock in the meantime. This is typically slower than unfair
106
/// unlocking, but may be necessary in certain circumstances.
107
pub unsafe trait RawRwLockFair: RawRwLock {
108
    /// Releases a shared lock using a fair unlock protocol.
109
    ///
110
    /// # Safety
111
    ///
112
    /// This method may only be called if a shared lock is held in the current context.
113
    unsafe fn unlock_shared_fair(&self);
114
115
    /// Releases an exclusive lock using a fair unlock protocol.
116
    ///
117
    /// # Safety
118
    ///
119
    /// This method may only be called if an exclusive lock is held in the current context.
120
    unsafe fn unlock_exclusive_fair(&self);
121
122
    /// Temporarily yields a shared lock to a waiting thread if there is one.
123
    ///
124
    /// This method is functionally equivalent to calling `unlock_shared_fair` followed
125
    /// by `lock_shared`; however, it can be much more efficient in the case where there
126
    /// are no waiting threads.
127
    ///
128
    /// # Safety
129
    ///
130
    /// This method may only be called if a shared lock is held in the current context.
131
0
    unsafe fn bump_shared(&self) {
132
0
        self.unlock_shared_fair();
133
0
        self.lock_shared();
134
0
    }
135
136
    /// Temporarily yields an exclusive lock to a waiting thread if there is one.
137
    ///
138
    /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
139
    /// by `lock_exclusive`; however, it can be much more efficient in the case where there
140
    /// are no waiting threads.
141
    ///
142
    /// # Safety
143
    ///
144
    /// This method may only be called if an exclusive lock is held in the current context.
145
0
    unsafe fn bump_exclusive(&self) {
146
0
        self.unlock_exclusive_fair();
147
0
        self.lock_exclusive();
148
0
    }
149
}
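Fair unlocking is reached through the guard types rather than the raw trait; a brief usage sketch, assuming the parking_lot backend (whose raw lock implements `RawRwLockFair`):

use std::sync::Arc;
use std::thread;

use parking_lot::{RwLock, RwLockReadGuard};

fn main() {
    let lock = Arc::new(RwLock::new(0u32));

    let reader = {
        let lock = Arc::clone(&lock);
        thread::spawn(move || {
            let guard = lock.read();
            // Hand the lock directly to the next waiting thread instead of
            // letting this thread race to re-acquire it.
            RwLockReadGuard::unlock_fair(guard);
        })
    };

    *lock.write() += 1;
    reader.join().unwrap();
}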
150
151
/// Additional methods for `RwLock`s which support atomically downgrading an
152
/// exclusive lock to a shared lock.
153
pub unsafe trait RawRwLockDowngrade: RawRwLock {
154
    /// Atomically downgrades an exclusive lock into a shared lock without
155
    /// allowing any thread to take an exclusive lock in the meantime.
156
    ///
157
    /// # Safety
158
    ///
159
    /// This method may only be called if an exclusive lock is held in the current context.
160
    unsafe fn downgrade(&self);
161
}
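Downgrading is likewise exposed through the write guard; a minimal sketch, assuming parking_lot as the backend (its raw lock implements `RawRwLockDowngrade`):

use parking_lot::{RwLock, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new(String::new());

    let mut w = lock.write();
    w.push_str("initialized");
    // No other writer can sneak in between releasing the exclusive lock and
    // taking the shared one: the downgrade happens atomically.
    let r = RwLockWriteGuard::downgrade(w);
    assert_eq!(&*r, "initialized");
}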
162
163
/// Additional methods for `RwLock`s which support locking with timeouts.
164
///
165
/// The `Duration` and `Instant` types are specified as associated types so that
166
/// this trait is usable even in `no_std` environments.
167
pub unsafe trait RawRwLockTimed: RawRwLock {
168
    /// Duration type used for `try_lock_for`.
169
    type Duration;
170
171
    /// Instant type used for `try_lock_until`.
172
    type Instant;
173
174
    /// Attempts to acquire a shared lock until a timeout is reached.
175
    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;
176
177
    /// Attempts to acquire a shared lock until a timeout is reached.
178
    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;
179
180
    /// Attempts to acquire an exclusive lock until a timeout is reached.
181
    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;
182
183
    /// Attempts to acquire an exclusive lock until a timeout is reached.
184
    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
185
}
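A short sketch of the timeout-based API as surfaced on `RwLock`, assuming parking_lot as the backend (which uses `std::time::Duration` and `Instant` for these associated types):

use std::time::Duration;

use parking_lot::RwLock;

fn main() {
    let lock = RwLock::new(0u64);

    let _read = lock.read();
    // A writer gives up after 10ms instead of blocking indefinitely.
    match lock.try_write_for(Duration::from_millis(10)) {
        Some(mut guard) => *guard += 1,
        None => println!("timed out waiting for exclusive access"),
    }
}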
186
187
/// Additional methods for `RwLock`s which support recursive read locks.
188
///
189
/// These are guaranteed to succeed without blocking if
190
/// another read lock is held at the time of the call. This allows a thread
191
/// to recursively lock a `RwLock`. However, using these methods can cause
192
/// writers to starve since readers no longer block if a writer is waiting
193
/// for the lock.
194
pub unsafe trait RawRwLockRecursive: RawRwLock {
195
    /// Acquires a shared lock without deadlocking in case of a recursive lock.
196
    fn lock_shared_recursive(&self);
197
198
    /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock.
199
    fn try_lock_shared_recursive(&self) -> bool;
200
}
201
202
/// Additional methods for `RwLock`s which support recursive read locks and timeouts.
203
pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
204
    /// Attempts to acquire a shared lock until a timeout is reached, without
205
    /// deadlocking in case of a recursive lock.
206
    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;
207
208
    /// Attempts to acquire a shared lock until a timeout is reached, without
209
    /// deadlocking in case of a recursive lock.
210
    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
211
}
212
213
/// Additional methods for `RwLock`s which support atomically upgrading a shared
214
/// lock to an exclusive lock.
215
///
216
/// This requires acquiring a special "upgradable read lock" instead of a
217
/// normal shared lock. There may only be one upgradable lock at any time,
218
/// otherwise deadlocks could occur when upgrading.
219
pub unsafe trait RawRwLockUpgrade: RawRwLock {
220
    /// Acquires an upgradable lock, blocking the current thread until it is able to do so.
221
    fn lock_upgradable(&self);
222
223
    /// Attempts to acquire an upgradable lock without blocking.
224
    fn try_lock_upgradable(&self) -> bool;
225
226
    /// Releases an upgradable lock.
227
    ///
228
    /// # Safety
229
    ///
230
    /// This method may only be called if an upgradable lock is held in the current context.
231
    unsafe fn unlock_upgradable(&self);
232
233
    /// Upgrades an upgradable lock to an exclusive lock.
234
    ///
235
    /// # Safety
236
    ///
237
    /// This method may only be called if an upgradable lock is held in the current context.
238
    unsafe fn upgrade(&self);
239
240
    /// Attempts to upgrade an upgradable lock to an exclusive lock without
241
    /// blocking.
242
    ///
243
    /// # Safety
244
    ///
245
    /// This method may only be called if an upgradable lock is held in the current context.
246
    unsafe fn try_upgrade(&self) -> bool;
247
}
248
249
/// Additional methods for `RwLock`s which support upgradable locks and fair
250
/// unlocking.
251
pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
252
    /// Releases an upgradable lock using a fair unlock protocol.
253
    ///
254
    /// # Safety
255
    ///
256
    /// This method may only be called if an upgradable lock is held in the current context.
257
    unsafe fn unlock_upgradable_fair(&self);
258
259
    /// Temporarily yields an upgradable lock to a waiting thread if there is one.
260
    ///
261
    /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
262
    /// by `lock_upgradable`; however, it can be much more efficient in the case where there
263
    /// are no waiting threads.
264
    ///
265
    /// # Safety
266
    ///
267
    /// This method may only be called if an upgradable lock is held in the current context.
268
0
    unsafe fn bump_upgradable(&self) {
269
0
        self.unlock_upgradable_fair();
270
0
        self.lock_upgradable();
271
0
    }
272
}
273
274
/// Additional methods for `RwLock`s which support upgradable locks and lock
275
/// downgrading.
276
pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
277
    /// Downgrades an upgradable lock to a shared lock.
278
    ///
279
    /// # Safety
280
    ///
281
    /// This method may only be called if an upgradable lock is held in the current context.
282
    unsafe fn downgrade_upgradable(&self);
283
284
    /// Downgrades an exclusive lock to an upgradable lock.
285
    ///
286
    /// # Safety
287
    ///
288
    /// This method may only be called if an exclusive lock is held in the current context.
289
    unsafe fn downgrade_to_upgradable(&self);
290
}
291
292
/// Additional methods for `RwLock`s which support upgradable locks and locking
293
/// with timeouts.
294
pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
295
    /// Attempts to acquire an upgradable lock until a timeout is reached.
296
    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;
297
298
    /// Attempts to acquire an upgradable lock until a timeout is reached.
299
    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;
300
301
    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
302
    /// timeout is reached.
303
    ///
304
    /// # Safety
305
    ///
306
    /// This method may only be called if an upgradable lock is held in the current context.
307
    unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
308
309
    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
310
    /// timeout is reached.
311
    ///
312
    /// # Safety
313
    ///
314
    /// This method may only be called if an upgradable lock is held in the current context.
315
    unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
316
}
317
318
/// A reader-writer lock
319
///
320
/// This type of lock allows a number of readers or at most one writer at any
321
/// point in time. The write portion of this lock typically allows modification
322
/// of the underlying data (exclusive access) and the read portion of this lock
323
/// typically allows for read-only access (shared access).
324
///
325
/// The type parameter `T` represents the data that this lock protects. It is
326
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
327
/// allow concurrent access through readers. The RAII guards returned from the
328
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
329
/// to allow access to the contents of the lock.
330
pub struct RwLock<R, T: ?Sized> {
331
    raw: R,
332
    data: UnsafeCell<T>,
333
}
334
335
// Copied and modified from serde
336
#[cfg(feature = "serde")]
337
impl<R, T> Serialize for RwLock<R, T>
338
where
339
    R: RawRwLock,
340
    T: Serialize + ?Sized,
341
{
342
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
343
    where
344
        S: Serializer,
345
    {
346
        self.read().serialize(serializer)
347
    }
348
}
349
350
#[cfg(feature = "serde")]
351
impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
352
where
353
    R: RawRwLock,
354
    T: Deserialize<'de> + ?Sized,
355
{
356
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
357
    where
358
        D: Deserializer<'de>,
359
    {
360
        Deserialize::deserialize(deserializer).map(RwLock::new)
361
    }
362
}
363
364
unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
365
unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}
366
367
impl<R: RawRwLock, T> RwLock<R, T> {
368
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
369
    #[inline]
370
130
    pub const fn new(val: T) -> RwLock<R, T> {
371
130
        RwLock {
372
130
            data: UnsafeCell::new(val),
373
130
            raw: R::INIT,
374
130
        }
375
130
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, usize>>::new
<lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::new
Line
Count
Source
370
130
    pub const fn new(val: T) -> RwLock<R, T> {
371
130
        RwLock {
372
130
            data: UnsafeCell::new(val),
373
130
            raw: R::INIT,
374
130
        }
375
130
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::new
376
377
    /// Consumes this `RwLock`, returning the underlying data.
378
    #[inline]
379
    #[allow(unused_unsafe)]
380
0
    pub fn into_inner(self) -> T {
381
0
        unsafe { self.data.into_inner() }
382
0
    }
383
}
384
385
impl<R, T> RwLock<R, T> {
386
    /// Creates a new instance of an `RwLock<T>` based on a pre-existing
387
    /// `RawRwLock<T>`.
388
    #[inline]
389
0
    pub const fn from_raw(raw_rwlock: R, val: T) -> RwLock<R, T> {
390
0
        RwLock {
391
0
            data: UnsafeCell::new(val),
392
0
            raw: raw_rwlock,
393
0
        }
394
0
    }
395
396
    /// Creates a new instance of an `RwLock<T>` based on a pre-existing
397
    /// `RawRwLock<T>`.
398
    ///
399
    /// This allows creating a `RwLock<T>` in a constant context on stable
400
    /// Rust.
401
    ///
402
    /// This method is a legacy alias for [`from_raw`](Self::from_raw).
403
    #[inline]
404
0
    pub const fn const_new(raw_rwlock: R, val: T) -> RwLock<R, T> {
405
0
        Self::from_raw(raw_rwlock, val)
406
0
    }
407
}
408
409
impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
410
    /// Creates a new `RwLockReadGuard` without checking if the lock is held.
411
    ///
412
    /// # Safety
413
    ///
414
    /// This method must only be called if the thread logically holds a read lock.
415
    ///
416
    /// This function does not increment the read count of the lock. Calling this function when a
417
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
418
    /// with `mem::forget`.
419
    #[inline]
420
0
    pub unsafe fn make_read_guard_unchecked(&self) -> RwLockReadGuard<'_, R, T> {
421
0
        RwLockReadGuard {
422
0
            rwlock: self,
423
0
            marker: PhantomData,
424
0
        }
425
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::make_read_guard_unchecked
426
427
    /// Creates a new `RwLockWriteGuard` without checking if the lock is held.
428
    ///
429
    /// # Safety
430
    ///
431
    /// This method must only be called if the thread logically holds a write lock.
432
    ///
433
    /// Calling this function when a guard has already been produced is undefined behaviour unless
434
    /// the guard was forgotten with `mem::forget`.
435
    #[inline]
436
0
    pub unsafe fn make_write_guard_unchecked(&self) -> RwLockWriteGuard<'_, R, T> {
437
0
        RwLockWriteGuard {
438
0
            rwlock: self,
439
0
            marker: PhantomData,
440
0
        }
441
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::make_write_guard_unchecked
442
443
    /// Locks this `RwLock` with shared read access, blocking the current thread
444
    /// until it can be acquired.
445
    ///
446
    /// The calling thread will be blocked until there are no more writers which
447
    /// hold the lock. There may be other readers currently inside the lock when
448
    /// this method returns.
449
    ///
450
    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
451
    /// the current thread already holds one may result in a deadlock.
452
    ///
453
    /// Returns an RAII guard which will release this thread's shared access
454
    /// once it is dropped.
455
    #[inline]
456
    #[track_caller]
457
0
    pub fn read(&self) -> RwLockReadGuard<'_, R, T> {
458
0
        self.raw.lock_shared();
459
        // SAFETY: The lock is held, as required.
460
0
        unsafe { self.make_read_guard_unchecked() }
461
0
    }
462
463
    /// Attempts to acquire this `RwLock` with shared read access.
464
    ///
465
    /// If the access could not be granted at this time, then `None` is returned.
466
    /// Otherwise, an RAII guard is returned which will release the shared access
467
    /// when it is dropped.
468
    ///
469
    /// This function does not block.
470
    #[inline]
471
    #[track_caller]
472
0
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> {
473
0
        if self.raw.try_lock_shared() {
474
            // SAFETY: The lock is held, as required.
475
0
            Some(unsafe { self.make_read_guard_unchecked() })
476
        } else {
477
0
            None
478
        }
479
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::try_read
480
481
    /// Locks this `RwLock` with exclusive write access, blocking the current
482
    /// thread until it can be acquired.
483
    ///
484
    /// This function will not return while other writers or other readers
485
    /// currently have access to the lock.
486
    ///
487
    /// Returns an RAII guard which will drop the write access of this `RwLock`
488
    /// when dropped.
489
    #[inline]
490
    #[track_caller]
491
0
    pub fn write(&self) -> RwLockWriteGuard<'_, R, T> {
492
0
        self.raw.lock_exclusive();
493
        // SAFETY: The lock is held, as required.
494
0
        unsafe { self.make_write_guard_unchecked() }
495
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::write
496
497
    /// Attempts to lock this `RwLock` with exclusive write access.
498
    ///
499
    /// If the lock could not be acquired at this time, then `None` is returned.
500
    /// Otherwise, an RAII guard is returned which will release the lock when
501
    /// it is dropped.
502
    ///
503
    /// This function does not block.
504
    #[inline]
505
    #[track_caller]
506
0
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> {
507
0
        if self.raw.try_lock_exclusive() {
508
            // SAFETY: The lock is held, as required.
509
0
            Some(unsafe { self.make_write_guard_unchecked() })
510
        } else {
511
0
            None
512
        }
513
0
    }
514
515
    /// Returns a mutable reference to the underlying data.
516
    ///
517
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
518
    /// take place---the mutable borrow statically guarantees no locks exist.
519
    #[inline]
520
0
    pub fn get_mut(&mut self) -> &mut T {
521
0
        unsafe { &mut *self.data.get() }
522
0
    }
523
524
    /// Checks whether this `RwLock` is currently locked in any way.
525
    #[inline]
526
    #[track_caller]
527
0
    pub fn is_locked(&self) -> bool {
528
0
        self.raw.is_locked()
529
0
    }
530
531
    /// Checks if this `RwLock` is currently exclusively locked.
532
    #[inline]
533
    #[track_caller]
534
0
    pub fn is_locked_exclusive(&self) -> bool {
535
0
        self.raw.is_locked_exclusive()
536
0
    }
537
538
    /// Forcibly unlocks a read lock.
539
    ///
540
    /// This is useful when combined with `mem::forget` to hold a lock without
541
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
542
    /// dealing with FFI.
543
    ///
544
    /// # Safety
545
    ///
546
    /// This method must only be called if the current thread logically owns a
547
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
548
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
549
    #[inline]
550
    #[track_caller]
551
0
    pub unsafe fn force_unlock_read(&self) {
552
0
        self.raw.unlock_shared();
553
0
    }
554
555
    /// Forcibly unlocks a write lock.
556
    ///
557
    /// This is useful when combined with `mem::forget` to hold a lock without
558
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
559
    /// dealing with FFI.
560
    ///
561
    /// # Safety
562
    ///
563
    /// This method must only be called if the current thread logically owns a
564
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
565
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
566
    #[inline]
567
    #[track_caller]
568
0
    pub unsafe fn force_unlock_write(&self) {
569
0
        self.raw.unlock_exclusive();
570
0
    }
571
572
    /// Returns the underlying raw reader-writer lock object.
573
    ///
574
    /// Note that you will most likely need to import the `RawRwLock` trait from
575
    /// `lock_api` to be able to call functions on the raw
576
    /// reader-writer lock.
577
    ///
578
    /// # Safety
579
    ///
580
    /// This method is unsafe because it allows unlocking the lock while
581
    /// still holding a reference to a lock guard.
582
0
    pub unsafe fn raw(&self) -> &R {
583
0
        &self.raw
584
0
    }
585
586
    /// Returns a raw pointer to the underlying data.
587
    ///
588
    /// This is useful when combined with `mem::forget` to hold a lock without
589
    /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object
590
    /// alive, for example when dealing with FFI.
591
    ///
592
    /// # Safety
593
    ///
594
    /// You must ensure that there are no data races when dereferencing the
595
    /// returned pointer, for example if the current thread logically owns a
596
    /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded
597
    /// using `mem::forget`.
598
    #[inline]
599
0
    pub fn data_ptr(&self) -> *mut T {
600
0
        self.data.get()
601
0
    }
602
603
    /// Creates a new `RwLockReadGuard` without checking if the lock is held.
604
    ///
605
    /// # Safety
606
    ///
607
    /// This method must only be called if the thread logically holds a read lock.
608
    ///
609
    /// This function does not increment the read count of the lock. Calling this function when a
610
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
611
    /// with `mem::forget`.
612
    #[cfg(feature = "arc_lock")]
613
    #[inline]
614
    pub unsafe fn make_arc_read_guard_unchecked(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
615
        ArcRwLockReadGuard {
616
            rwlock: self.clone(),
617
            marker: PhantomData,
618
        }
619
    }
620
621
    /// Creates a new `RwLockWriteGuard` without checking if the lock is held.
622
    ///
623
    /// # Safety
624
    ///
625
    /// This method must only be called if the thread logically holds a write lock.
626
    ///
627
    /// Calling this function when a guard has already been produced is undefined behaviour unless
628
    /// the guard was forgotten with `mem::forget`.
629
    #[cfg(feature = "arc_lock")]
630
    #[inline]
631
    pub unsafe fn make_arc_write_guard_unchecked(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> {
632
        ArcRwLockWriteGuard {
633
            rwlock: self.clone(),
634
            marker: PhantomData,
635
        }
636
    }
637
638
    /// Locks this `RwLock` with read access, through an `Arc`.
639
    ///
640
    /// This method is similar to the `read` method; however, it requires the `RwLock` to be inside of an `Arc`
641
    /// and the resulting read guard has no lifetime requirements.
642
    #[cfg(feature = "arc_lock")]
643
    #[inline]
644
    #[track_caller]
645
    pub fn read_arc(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
646
        self.raw.lock_shared();
647
        // SAFETY: locking guarantee is upheld
648
        unsafe { self.make_arc_read_guard_unchecked() }
649
    }
650
651
    /// Attempts to lock this `RwLock` with read access, through an `Arc`.
652
    ///
653
    /// This method is similar to the `try_read` method; however, it requires the `RwLock` to be inside of an
654
    /// `Arc` and the resulting read guard has no lifetime requirements.
655
    #[cfg(feature = "arc_lock")]
656
    #[inline]
657
    #[track_caller]
658
    pub fn try_read_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> {
659
        if self.raw.try_lock_shared() {
660
            // SAFETY: locking guarantee is upheld
661
            Some(unsafe { self.make_arc_read_guard_unchecked() })
662
        } else {
663
            None
664
        }
665
    }
666
667
    /// Locks this `RwLock` with write access, through an `Arc`.
668
    ///
669
    /// This method is similar to the `write` method; however, it requires the `RwLock` to be inside of an `Arc`
670
    /// and the resulting write guard has no lifetime requirements.
671
    #[cfg(feature = "arc_lock")]
672
    #[inline]
673
    #[track_caller]
674
    pub fn write_arc(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> {
675
        self.raw.lock_exclusive();
676
        // SAFETY: locking guarantee is upheld
677
        unsafe { self.make_arc_write_guard_unchecked() }
678
    }
679
680
    /// Attempts to lock this `RwLock` with write access, through an `Arc`.
681
    ///
682
    /// This method is similar to the `try_write` method; however, it requires the `RwLock` to be inside of an
683
    /// `Arc` and the resulting write guard has no lifetime requirements.
684
    #[cfg(feature = "arc_lock")]
685
    #[inline]
686
    #[track_caller]
687
    pub fn try_write_arc(self: &Arc<Self>) -> Option<ArcRwLockWriteGuard<R, T>> {
688
        if self.raw.try_lock_exclusive() {
689
            // SAFETY: locking guarantee is upheld
690
            Some(unsafe { self.make_arc_write_guard_unchecked() })
691
        } else {
692
            None
693
        }
694
    }
695
}
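A brief usage sketch of the blocking and non-blocking accessors defined above, assuming `parking_lot::RwLock` (that is, `lock_api::RwLock<parking_lot::RawRwLock, T>`) as the concrete type:

use parking_lot::RwLock;

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);

    {
        let readers = (lock.read(), lock.read()); // any number of shared guards
        assert_eq!(readers.0.len(), 3);
        assert!(lock.is_locked());
        assert!(!lock.is_locked_exclusive());
        assert!(lock.try_write().is_none()); // writer excluded while readers exist
    } // shared guards dropped here, releasing the lock

    lock.write().push(4); // exclusive access through DerefMut
    assert_eq!(lock.read().len(), 4);
}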
696
697
impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
698
    /// Forcibly unlocks a read lock using a fair unlock protocol.
699
    ///
700
    /// This is useful when combined with `mem::forget` to hold a lock without
701
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
702
    /// dealing with FFI.
703
    ///
704
    /// # Safety
705
    ///
706
    /// This method must only be called if the current thread logically owns a
707
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
708
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
709
    #[inline]
710
    #[track_caller]
711
0
    pub unsafe fn force_unlock_read_fair(&self) {
712
0
        self.raw.unlock_shared_fair();
713
0
    }
714
715
    /// Forcibly unlocks a write lock using a fair unlock protocol.
716
    ///
717
    /// This is useful when combined with `mem::forget` to hold a lock without
718
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
719
    /// dealing with FFI.
720
    ///
721
    /// # Safety
722
    ///
723
    /// This method must only be called if the current thread logically owns a
724
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
725
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
726
    #[inline]
727
    #[track_caller]
728
0
    pub unsafe fn force_unlock_write_fair(&self) {
729
0
        self.raw.unlock_exclusive_fair();
730
0
    }
731
}
732
733
impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
734
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
735
    /// is reached.
736
    ///
737
    /// If the access could not be granted before the timeout expires, then
738
    /// `None` is returned. Otherwise, an RAII guard is returned which will
739
    /// release the shared access when it is dropped.
740
    #[inline]
741
    #[track_caller]
742
0
    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> {
743
0
        if self.raw.try_lock_shared_for(timeout) {
744
            // SAFETY: The lock is held, as required.
745
0
            Some(unsafe { self.make_read_guard_unchecked() })
746
        } else {
747
0
            None
748
        }
749
0
    }
750
751
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
752
    /// is reached.
753
    ///
754
    /// If the access could not be granted before the timeout expires, then
755
    /// `None` is returned. Otherwise, an RAII guard is returned which will
756
    /// release the shared access when it is dropped.
757
    #[inline]
758
    #[track_caller]
759
0
    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> {
760
0
        if self.raw.try_lock_shared_until(timeout) {
761
            // SAFETY: The lock is held, as required.
762
0
            Some(unsafe { self.make_read_guard_unchecked() })
763
        } else {
764
0
            None
765
        }
766
0
    }
767
768
    /// Attempts to acquire this `RwLock` with exclusive write access until a
769
    /// timeout is reached.
770
    ///
771
    /// If the access could not be granted before the timeout expires, then
772
    /// `None` is returned. Otherwise, an RAII guard is returned which will
773
    /// release the exclusive access when it is dropped.
774
    #[inline]
775
    #[track_caller]
776
0
    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> {
777
0
        if self.raw.try_lock_exclusive_for(timeout) {
778
            // SAFETY: The lock is held, as required.
779
0
            Some(unsafe { self.make_write_guard_unchecked() })
780
        } else {
781
0
            None
782
        }
783
0
    }
784
785
    /// Attempts to acquire this `RwLock` with exclusive write access until a
786
    /// timeout is reached.
787
    ///
788
    /// If the access could not be granted before the timeout expires, then
789
    /// `None` is returned. Otherwise, an RAII guard is returned which will
790
    /// release the exclusive access when it is dropped.
791
    #[inline]
792
    #[track_caller]
793
0
    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> {
794
0
        if self.raw.try_lock_exclusive_until(timeout) {
795
            // SAFETY: The lock is held, as required.
796
0
            Some(unsafe { self.make_write_guard_unchecked() })
797
        } else {
798
0
            None
799
        }
800
0
    }
801
802
    /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`.
803
    ///
804
    /// This method is similar to the `try_read_for` method; however, it requires the `RwLock` to be inside of an
805
    /// `Arc` and the resulting read guard has no lifetime requirements.
806
    #[cfg(feature = "arc_lock")]
807
    #[inline]
808
    #[track_caller]
809
    pub fn try_read_arc_for(
810
        self: &Arc<Self>,
811
        timeout: R::Duration,
812
    ) -> Option<ArcRwLockReadGuard<R, T>> {
813
        if self.raw.try_lock_shared_for(timeout) {
814
            // SAFETY: locking guarantee is upheld
815
            Some(unsafe { self.make_arc_read_guard_unchecked() })
816
        } else {
817
            None
818
        }
819
    }
820
821
    /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`.
822
    ///
823
    /// This method is similar to the `try_read_until` method; however, it requires the `RwLock` to be inside of
824
    /// an `Arc` and the resulting read guard has no lifetime requirements.
825
    #[cfg(feature = "arc_lock")]
826
    #[inline]
827
    #[track_caller]
828
    pub fn try_read_arc_until(
829
        self: &Arc<Self>,
830
        timeout: R::Instant,
831
    ) -> Option<ArcRwLockReadGuard<R, T>> {
832
        if self.raw.try_lock_shared_until(timeout) {
833
            // SAFETY: locking guarantee is upheld
834
            Some(unsafe { self.make_arc_read_guard_unchecked() })
835
        } else {
836
            None
837
        }
838
    }
839
840
    /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`.
841
    ///
842
    /// This method is similar to the `try_write_for` method; however, it requires the `RwLock` to be inside of
843
    /// an `Arc` and the resulting write guard has no lifetime requirements.
844
    #[cfg(feature = "arc_lock")]
845
    #[inline]
846
    #[track_caller]
847
    pub fn try_write_arc_for(
848
        self: &Arc<Self>,
849
        timeout: R::Duration,
850
    ) -> Option<ArcRwLockWriteGuard<R, T>> {
851
        if self.raw.try_lock_exclusive_for(timeout) {
852
            // SAFETY: locking guarantee is upheld
853
            Some(unsafe { self.make_arc_write_guard_unchecked() })
854
        } else {
855
            None
856
        }
857
    }
858
859
    /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`.
860
    ///
861
    /// This method is similar to the `try_write_until` method; however, it requires the `RwLock` to be inside of
862
    /// an `Arc` and the resulting write guard has no lifetime requirements.
863
    #[cfg(feature = "arc_lock")]
864
    #[inline]
865
    #[track_caller]
866
    pub fn try_write_arc_until(
867
        self: &Arc<Self>,
868
        timeout: R::Instant,
869
    ) -> Option<ArcRwLockWriteGuard<R, T>> {
870
        if self.raw.try_lock_exclusive_until(timeout) {
871
            // SAFETY: locking guarantee is upheld
872
            Some(unsafe { self.make_arc_write_guard_unchecked() })
873
        } else {
874
            None
875
        }
876
    }
877
}
878
879
impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
880
    /// Locks this `RwLock` with shared read access, blocking the current thread
881
    /// until it can be acquired.
882
    ///
883
    /// The calling thread will be blocked until there are no more writers which
884
    /// hold the lock. There may be other readers currently inside the lock when
885
    /// this method returns.
886
    ///
887
    /// Unlike `read`, this method is guaranteed to succeed without blocking if
888
    /// another read lock is held at the time of the call. This allows a thread
889
    /// to recursively lock a `RwLock`. However, using this method can cause
890
    /// writers to starve since readers no longer block if a writer is waiting
891
    /// for the lock.
892
    ///
893
    /// Returns an RAII guard which will release this thread's shared access
894
    /// once it is dropped.
895
    #[inline]
896
    #[track_caller]
897
0
    pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> {
898
0
        self.raw.lock_shared_recursive();
899
        // SAFETY: The lock is held, as required.
900
0
        unsafe { self.make_read_guard_unchecked() }
901
0
    }
902
903
    /// Attempts to acquire this `RwLock` with shared read access.
904
    ///
905
    /// If the access could not be granted at this time, then `None` is returned.
906
    /// Otherwise, an RAII guard is returned which will release the shared access
907
    /// when it is dropped.
908
    ///
909
    /// This method is guaranteed to succeed if another read lock is held at the
910
    /// time of the call. See the documentation for `read_recursive` for details.
911
    ///
912
    /// This function does not block.
913
    #[inline]
914
    #[track_caller]
915
0
    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> {
916
0
        if self.raw.try_lock_shared_recursive() {
917
            // SAFETY: The lock is held, as required.
918
0
            Some(unsafe { self.make_read_guard_unchecked() })
919
        } else {
920
0
            None
921
        }
922
0
    }
923
924
    /// Locks this `RwLock` with shared read access, through an `Arc`.
925
    ///
926
    /// This method is similar to the `read_recursive` method; however, it requires the `RwLock` to be inside of
927
    /// an `Arc` and the resulting read guard has no lifetime requirements.
928
    #[cfg(feature = "arc_lock")]
929
    #[inline]
930
    #[track_caller]
931
    pub fn read_arc_recursive(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
932
        self.raw.lock_shared_recursive();
933
        // SAFETY: locking guarantee is upheld
934
        unsafe { self.make_arc_read_guard_unchecked() }
935
    }
936
937
    /// Attempts to lock this `RwLock` with shared read access, through an `Arc`.
938
    ///
939
    /// This method is similar to the `try_read_recursive` method; however, it requires the `RwLock` to be inside
940
    /// of an `Arc` and the resulting read guard has no lifetime requirements.
941
    #[cfg(feature = "arc_lock")]
942
    #[inline]
943
    #[track_caller]
944
    pub fn try_read_recursive_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> {
945
        if self.raw.try_lock_shared_recursive() {
946
            // SAFETY: locking guarantee is upheld
947
            Some(unsafe { self.make_arc_read_guard_unchecked() })
948
        } else {
949
            None
950
        }
951
    }
952
}
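A sketch of recursive read locking, again assuming parking_lot as the backend (its raw lock implements `RawRwLockRecursive`):

use parking_lot::RwLock;

fn main() {
    let lock = RwLock::new(1u32);

    let outer = lock.read_recursive();
    // A second recursive read on the same thread is guaranteed not to block,
    // even if a writer is queued; a plain `read()` here could deadlock instead.
    let inner = lock.read_recursive();
    assert_eq!(*outer + *inner, 2);
}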
953
954
impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
955
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
956
    /// is reached.
957
    ///
958
    /// If the access could not be granted before the timeout expires, then
959
    /// `None` is returned. Otherwise, an RAII guard is returned which will
960
    /// release the shared access when it is dropped.
961
    ///
962
    /// This method is guaranteed to succeed without blocking if another read
963
    /// lock is held at the time of the call. See the documentation for
964
    /// `read_recursive` for details.
965
    #[inline]
966
    #[track_caller]
967
0
    pub fn try_read_recursive_for(
968
0
        &self,
969
0
        timeout: R::Duration,
970
0
    ) -> Option<RwLockReadGuard<'_, R, T>> {
971
0
        if self.raw.try_lock_shared_recursive_for(timeout) {
972
            // SAFETY: The lock is held, as required.
973
0
            Some(unsafe { self.make_read_guard_unchecked() })
974
        } else {
975
0
            None
976
        }
977
0
    }
978
979
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
980
    /// is reached.
981
    ///
982
    /// If the access could not be granted before the timeout expires, then
983
    /// `None` is returned. Otherwise, an RAII guard is returned which will
984
    /// release the shared access when it is dropped.
985
    #[inline]
986
    #[track_caller]
987
0
    pub fn try_read_recursive_until(
988
0
        &self,
989
0
        timeout: R::Instant,
990
0
    ) -> Option<RwLockReadGuard<'_, R, T>> {
991
0
        if self.raw.try_lock_shared_recursive_until(timeout) {
992
            // SAFETY: The lock is held, as required.
993
0
            Some(unsafe { self.make_read_guard_unchecked() })
994
        } else {
995
0
            None
996
        }
997
0
    }
998
999
    /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`.
1000
    ///
1001
    /// This method is similar to the `try_read_recursive_for` method; however, it requires the `RwLock` to be
1002
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1003
    #[cfg(feature = "arc_lock")]
1004
    #[inline]
1005
    #[track_caller]
1006
    pub fn try_read_arc_recursive_for(
1007
        self: &Arc<Self>,
1008
        timeout: R::Duration,
1009
    ) -> Option<ArcRwLockReadGuard<R, T>> {
1010
        if self.raw.try_lock_shared_recursive_for(timeout) {
1011
            // SAFETY: locking guarantee is upheld
1012
            Some(unsafe { self.make_arc_read_guard_unchecked() })
1013
        } else {
1014
            None
1015
        }
1016
    }
1017
1018
    /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`.
1019
    ///
1020
    /// This method is similar to the `try_read_recursive_until` method; however, it requires the `RwLock` to be
1021
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1022
    #[cfg(feature = "arc_lock")]
1023
    #[inline]
1024
    #[track_caller]
1025
    pub fn try_read_arc_recursive_until(
1026
        self: &Arc<Self>,
1027
        timeout: R::Instant,
1028
    ) -> Option<ArcRwLockReadGuard<R, T>> {
1029
        if self.raw.try_lock_shared_recursive_until(timeout) {
1030
            // SAFETY: locking guarantee is upheld
1031
            Some(unsafe { self.make_arc_read_guard_unchecked() })
1032
        } else {
1033
            None
1034
        }
1035
    }
1036
}
1037
1038
impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
1039
    /// Creates a new `RwLockUpgradableReadGuard` without checking if the lock is held.
1040
    ///
1041
    /// # Safety
1042
    ///
1043
    /// This method must only be called if the thread logically holds an upgradable read lock.
1044
    ///
1045
    /// This function does not increment the read count of the lock. Calling this function when a
1046
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
1047
    /// with `mem::forget`.
1048
    #[inline]
1049
0
    pub unsafe fn make_upgradable_guard_unchecked(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
1050
0
        RwLockUpgradableReadGuard {
1051
0
            rwlock: self,
1052
0
            marker: PhantomData,
1053
0
        }
1054
0
    }
1055
1056
    /// Locks this `RwLock` with upgradable read access, blocking the current thread
1057
    /// until it can be acquired.
1058
    ///
1059
    /// The calling thread will be blocked until there are no more writers or other
1060
    /// upgradable reads which hold the lock. There may be other readers currently
1061
    /// inside the lock when this method returns.
1062
    ///
1063
    /// Returns an RAII guard which will release this thread's shared access
1064
    /// once it is dropped.
1065
    #[inline]
1066
    #[track_caller]
1067
0
    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
1068
0
        self.raw.lock_upgradable();
1069
        // SAFETY: The lock is held, as required.
1070
0
        unsafe { self.make_upgradable_guard_unchecked() }
1071
0
    }
1072
1073
    /// Attempts to acquire this `RwLock` with upgradable read access.
1074
    ///
1075
    /// If the access could not be granted at this time, then `None` is returned.
1076
    /// Otherwise, an RAII guard is returned which will release the shared access
1077
    /// when it is dropped.
1078
    ///
1079
    /// This function does not block.
1080
    #[inline]
1081
    #[track_caller]
1082
0
    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
1083
0
        if self.raw.try_lock_upgradable() {
1084
            // SAFETY: The lock is held, as required.
1085
0
            Some(unsafe { self.make_upgradable_guard_unchecked() })
1086
        } else {
1087
0
            None
1088
        }
1089
0
    }
1090
1091
    /// Creates a new `ArcRwLockUpgradableReadGuard` without checking if the lock is held.
1092
    ///
1093
    /// # Safety
1094
    ///
1095
    /// This method must only be called if the thread logically holds an upgradable read lock.
1096
    ///
1097
    /// This function does not increment the read count of the lock. Calling this function when a
1098
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
1099
    /// with `mem::forget`.
1100
    #[cfg(feature = "arc_lock")]
1101
    #[inline]
1102
    pub unsafe fn make_upgradable_arc_guard_unchecked(
1103
        self: &Arc<Self>,
1104
    ) -> ArcRwLockUpgradableReadGuard<R, T> {
1105
        ArcRwLockUpgradableReadGuard {
1106
            rwlock: self.clone(),
1107
            marker: PhantomData,
1108
        }
1109
    }
1110
1111
    /// Locks this `RwLock` with upgradable read access, through an `Arc`.
1112
    ///
1113
    /// This method is similar to the `upgradable_read` method; however, it requires the `RwLock` to be
1114
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1115
    #[cfg(feature = "arc_lock")]
1116
    #[inline]
1117
    #[track_caller]
1118
    pub fn upgradable_read_arc(self: &Arc<Self>) -> ArcRwLockUpgradableReadGuard<R, T> {
1119
        self.raw.lock_upgradable();
1120
        // SAFETY: locking guarantee is upheld
1121
        unsafe { self.make_upgradable_arc_guard_unchecked() }
1122
    }
1123
1124
    /// Attempts to lock this `RwLock` with upgradable read access, through an `Arc`.
1125
    ///
1126
    /// This method is similar to the `try_upgradable_read` method; however, it requires the `RwLock` to be
1127
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1128
    #[cfg(feature = "arc_lock")]
1129
    #[inline]
1130
    #[track_caller]
1131
    pub fn try_upgradable_read_arc(self: &Arc<Self>) -> Option<ArcRwLockUpgradableReadGuard<R, T>> {
1132
        if self.raw.try_lock_upgradable() {
1133
            // SAFETY: locking guarantee is upheld
1134
            Some(unsafe { self.make_upgradable_arc_guard_unchecked() })
1135
        } else {
1136
            None
1137
        }
1138
    }
1139
}
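A sketch of the upgradable-read workflow, assuming parking_lot as the backend:

use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn main() {
    let lock = RwLock::new(Vec::<u32>::new());

    // Only one upgradable guard may exist at a time, but it coexists with
    // plain readers.
    let upgradable = lock.upgradable_read();
    if upgradable.is_empty() {
        // Atomically trade the upgradable guard for an exclusive one.
        let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
        writer.push(42);
    }

    assert_eq!(lock.read().first(), Some(&42));
}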
1140
1141
impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
1142
    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
1143
    /// is reached.
1144
    ///
1145
    /// If the access could not be granted before the timeout expires, then
1146
    /// `None` is returned. Otherwise, an RAII guard is returned which will
1147
    /// release the shared access when it is dropped.
1148
    #[inline]
1149
    #[track_caller]
1150
0
    pub fn try_upgradable_read_for(
1151
0
        &self,
1152
0
        timeout: R::Duration,
1153
0
    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
1154
0
        if self.raw.try_lock_upgradable_for(timeout) {
1155
            // SAFETY: The lock is held, as required.
1156
0
            Some(unsafe { self.make_upgradable_guard_unchecked() })
1157
        } else {
1158
0
            None
1159
        }
1160
0
    }
1161
1162
    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
1163
    /// is reached.
1164
    ///
1165
    /// If the access could not be granted before the timeout expires, then
1166
    /// `None` is returned. Otherwise, an RAII guard is returned which will
1167
    /// release the shared access when it is dropped.
1168
    #[inline]
1169
    #[track_caller]
1170
0
    pub fn try_upgradable_read_until(
1171
0
        &self,
1172
0
        timeout: R::Instant,
1173
0
    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
1174
0
        if self.raw.try_lock_upgradable_until(timeout) {
1175
            // SAFETY: The lock is held, as required.
1176
0
            Some(unsafe { self.make_upgradable_guard_unchecked() })
1177
        } else {
1178
0
            None
1179
        }
1180
0
    }
1181
1182
    /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`.
1183
    ///
1184
    /// This method is similar to the `try_upgradable_read_for` method; however, it requires the `RwLock` to be
1185
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1186
    #[cfg(feature = "arc_lock")]
1187
    #[inline]
1188
    #[track_caller]
1189
    pub fn try_upgradable_read_arc_for(
1190
        self: &Arc<Self>,
1191
        timeout: R::Duration,
1192
    ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> {
1193
        if self.raw.try_lock_upgradable_for(timeout) {
1194
            // SAFETY: locking guarantee is upheld
1195
            Some(unsafe { self.make_upgradable_arc_guard_unchecked() })
1196
        } else {
1197
            None
1198
        }
1199
    }
1200
1201
    /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`.
1202
    ///
1203
    /// This method is similar to the `try_upgradable_read_until` method; however, it requires the `RwLock` to be
1204
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1205
    #[cfg(feature = "arc_lock")]
1206
    #[inline]
1207
    #[track_caller]
1208
    pub fn try_upgradable_read_arc_until(
1209
        self: &Arc<Self>,
1210
        timeout: R::Instant,
1211
    ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> {
1212
        if self.raw.try_lock_upgradable_until(timeout) {
1213
            // SAFETY: locking guarantee is upheld
1214
            Some(unsafe { self.make_upgradable_arc_guard_unchecked() })
1215
        } else {
1216
            None
1217
        }
1218
    }
1219
}
1220
1221
impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> {
1222
    #[inline]
1223
0
    fn default() -> RwLock<R, T> {
1224
0
        RwLock::new(Default::default())
1225
0
    }
1226
}
1227
1228
impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
1229
    #[inline]
1230
0
    fn from(t: T) -> RwLock<R, T> {
1231
0
        RwLock::new(t)
1232
0
    }
1233
}
1234
1235
impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
1236
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1237
0
        let mut d = f.debug_struct("RwLock");
1238
0
        match self.try_read() {
1239
0
            Some(guard) => d.field("data", &&*guard),
1240
            None => {
1241
                // Additional format_args! here is to remove quotes around <locked> in debug output.
1242
0
                d.field("data", &format_args!("<locked>"))
1243
            }
1244
        };
1245
0
        d.finish()
1246
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _> as core::fmt::Debug>::fmt
1247
}
1248
1249
/// RAII structure used to release the shared read access of a lock when
1250
/// dropped.
1251
#[clippy::has_significant_drop]
1252
#[must_use = "if unused the RwLock will immediately unlock"]
1253
pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
1254
    rwlock: &'a RwLock<R, T>,
1255
    marker: PhantomData<(&'a T, R::GuardMarker)>,
1256
}
1257
1258
unsafe impl<R: RawRwLock + Sync, T: Sync + ?Sized> Sync for RwLockReadGuard<'_, R, T> {}
1259
1260
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
1261
    /// Returns a reference to the original reader-writer lock object.
1262
0
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
1263
0
        s.rwlock
1264
0
    }
1265
1266
    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
1267
    ///
1268
    /// This operation cannot fail as the `RwLockReadGuard` passed
1269
    /// in already locked the data.
1270
    ///
1271
    /// This is an associated function that needs to be
1272
    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
1273
    /// the same name on the contents of the locked data.
1274
    #[inline]
1275
0
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
1276
0
    where
1277
0
        F: FnOnce(&T) -> &U,
1278
    {
1279
0
        let raw = &s.rwlock.raw;
1280
0
        let data = f(unsafe { &*s.rwlock.data.get() });
1281
0
        mem::forget(s);
1282
0
        MappedRwLockReadGuard {
1283
0
            raw,
1284
0
            data,
1285
0
            marker: PhantomData,
1286
0
        }
1287
0
    }
1288
1289
    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
1290
    /// locked data. Returns the original guard if the closure returns `None`.
1291
    ///
1292
    /// This operation cannot fail as the `RwLockReadGuard` passed
1293
    /// in already locked the data.
1294
    ///
1295
    /// This is an associated function that needs to be
1296
    /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of
1297
    /// the same name on the contents of the locked data.
1298
    #[inline]
1299
0
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
1300
0
    where
1301
0
        F: FnOnce(&T) -> Option<&U>,
1302
    {
1303
0
        let raw = &s.rwlock.raw;
1304
0
        let data = match f(unsafe { &*s.rwlock.data.get() }) {
1305
0
            Some(data) => data,
1306
0
            None => return Err(s),
1307
        };
1308
0
        mem::forget(s);
1309
0
        Ok(MappedRwLockReadGuard {
1310
0
            raw,
1311
0
            data,
1312
0
            marker: PhantomData,
1313
0
        })
1314
0
    }
1315
1316
    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
1317
    /// locked data. The original guard is returned alongside arbitrary user data
1318
    /// if the closure returns `Err`.
1319
    ///
1320
    /// This operation cannot fail as the `RwLockReadGuard` passed
1321
    /// in already locked the data.
1322
    ///
1323
    /// This is an associated function that needs to be
1324
    /// used as `RwLockReadGuard::try_map_or_err(...)`. A method would interfere with methods of
1325
    /// the same name on the contents of the locked data.
1326
    #[inline]
1327
0
    pub fn try_map_or_err<U: ?Sized, F, E>(
1328
0
        s: Self,
1329
0
        f: F,
1330
0
    ) -> Result<MappedRwLockReadGuard<'a, R, U>, (Self, E)>
1331
0
    where
1332
0
        F: FnOnce(&T) -> Result<&U, E>,
1333
    {
1334
0
        let raw = &s.rwlock.raw;
1335
0
        let data = match f(unsafe { &*s.rwlock.data.get() }) {
1336
0
            Ok(data) => data,
1337
0
            Err(e) => return Err((s, e)),
1338
        };
1339
0
        mem::forget(s);
1340
0
        Ok(MappedRwLockReadGuard {
1341
0
            raw,
1342
0
            data,
1343
0
            marker: PhantomData,
1344
0
        })
1345
0
    }
1346
1347
    /// Temporarily unlocks the `RwLock` to execute the given function.
1348
    ///
1349
    /// This is safe because `&mut` guarantees that there exist no other
1350
    /// references to the data protected by the `RwLock`.
1351
    #[inline]
1352
    #[track_caller]
1353
0
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1354
0
    where
1355
0
        F: FnOnce() -> U,
1356
    {
1357
        // Safety: An RwLockReadGuard always holds a shared lock.
1358
0
        unsafe {
1359
0
            s.rwlock.raw.unlock_shared();
1360
0
        }
1361
0
        defer!(s.rwlock.raw.lock_shared());
1362
0
        f()
1363
0
    }
1364
}
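As a usage illustration of the mapping helpers above, here is a minimal sketch of `RwLockReadGuard::map`, assuming the `parking_lot` crate (whose `RwLock` is built from these lock_api guards) as the concrete lock; the `Config` type and field names are illustrative only.

use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};

struct Config {
    name: String,
    retries: u32,
}

fn name_of(cfg: &RwLock<Config>) -> MappedRwLockReadGuard<'_, String> {
    // The mapped guard keeps the shared lock held until it is dropped.
    RwLockReadGuard::map(cfg.read(), |c| &c.name)
}

fn main() {
    let cfg = RwLock::new(Config { name: "db".into(), retries: 3 });
    assert_eq!(*name_of(&cfg), "db");
    assert_eq!(cfg.read().retries, 3);
}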
1365
1366
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
1367
    /// Unlocks the `RwLock` using a fair unlock protocol.
1368
    ///
1369
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
1370
    /// the `RwLock` before another has the chance to acquire the lock, even if
1371
    /// that thread has been blocked on the `RwLock` for a long time. This is
1372
    /// the default because it allows much higher throughput as it avoids
1373
    /// forcing a context switch on every `RwLock` unlock. This can result in one
1374
    /// thread acquiring a `RwLock` many more times than other threads.
1375
    ///
1376
    /// However in some cases it can be beneficial to ensure fairness by forcing
1377
    /// the lock to pass on to a waiting thread if there is one. This is done by
1378
    /// using this method instead of dropping the `RwLockReadGuard` normally.
1379
    #[inline]
1380
    #[track_caller]
1381
0
    pub fn unlock_fair(s: Self) {
1382
        // Safety: An RwLockReadGuard always holds a shared lock.
1383
0
        unsafe {
1384
0
            s.rwlock.raw.unlock_shared_fair();
1385
0
        }
1386
0
        mem::forget(s);
1387
0
    }
1388
1389
    /// Temporarily unlocks the `RwLock` to execute the given function.
1390
    ///
1391
    /// The `RwLock` is unlocked using a fair unlock protocol.
1392
    ///
1393
    /// This is safe because `&mut` guarantees that there exist no other
1394
    /// references to the data protected by the `RwLock`.
1395
    #[inline]
1396
    #[track_caller]
1397
0
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1398
0
    where
1399
0
        F: FnOnce() -> U,
1400
    {
1401
        // Safety: An RwLockReadGuard always holds a shared lock.
1402
0
        unsafe {
1403
0
            s.rwlock.raw.unlock_shared_fair();
1404
0
        }
1405
0
        defer!(s.rwlock.raw.lock_shared());
1406
0
        f()
1407
0
    }
1408
1409
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1410
    ///
1411
    /// This method is functionally equivalent to calling `unlock_fair` followed
1412
    /// by `read`; however, it can be much more efficient in the case where there
1413
    /// are no waiting threads.
1414
    #[inline]
1415
    #[track_caller]
1416
0
    pub fn bump(s: &mut Self) {
1417
        // Safety: An RwLockReadGuard always holds a shared lock.
1418
0
        unsafe {
1419
0
            s.rwlock.raw.bump_shared();
1420
0
        }
1421
0
    }
1422
}
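A minimal sketch of the fair-unlock path described above, again assuming `parking_lot`, whose raw lock implements `RawRwLockFair`; the function name and data are illustrative only.

use parking_lot::{RwLock, RwLockReadGuard};

fn sum_once(samples: &RwLock<Vec<u64>>) -> u64 {
    let guard = samples.read();
    let total = guard.iter().sum();
    // Fair unlock: if another thread is queued, it gets the lock next
    // instead of this thread racing to re-acquire it.
    RwLockReadGuard::unlock_fair(guard);
    total
}

fn main() {
    let samples = RwLock::new(vec![1, 2, 3]);
    assert_eq!(sum_once(&samples), 6);
}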
1423
1424
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> {
1425
    type Target = T;
1426
    #[inline]
1427
0
    fn deref(&self) -> &T {
1428
0
        unsafe { &*self.rwlock.data.get() }
1429
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, ()> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<_, _> as core::ops::deref::Deref>::deref
1430
}
1431
1432
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
1433
    #[inline]
1434
0
    fn drop(&mut self) {
1435
        // Safety: An RwLockReadGuard always holds a shared lock.
1436
0
        unsafe {
1437
0
            self.rwlock.raw.unlock_shared();
1438
0
        }
1439
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, ()> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<_, _> as core::ops::drop::Drop>::drop
1440
}
1441
1442
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> {
1443
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1444
0
        fmt::Debug::fmt(&**self, f)
1445
0
    }
1446
}
1447
1448
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1449
    for RwLockReadGuard<'a, R, T>
1450
{
1451
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1452
0
        (**self).fmt(f)
1453
0
    }
1454
}
1455
1456
#[cfg(feature = "owning_ref")]
1457
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}
1458
1459
/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`.
1460
///
1461
/// This is similar to the `RwLockReadGuard` struct, except instead of using a reference to unlock the `RwLock`
1462
/// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has a `'static` lifetime.
1463
#[cfg(feature = "arc_lock")]
1464
#[clippy::has_significant_drop]
1465
#[must_use = "if unused the RwLock will immediately unlock"]
1466
pub struct ArcRwLockReadGuard<R: RawRwLock, T: ?Sized> {
1467
    rwlock: Arc<RwLock<R, T>>,
1468
    marker: PhantomData<R::GuardMarker>,
1469
}
1470
1471
#[cfg(feature = "arc_lock")]
1472
impl<R: RawRwLock, T: ?Sized> ArcRwLockReadGuard<R, T> {
1473
    /// Returns a reference to the rwlock, contained in its `Arc`.
1474
    pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> {
1475
        &s.rwlock
1476
    }
1477
1478
    /// Unlocks the `RwLock` and returns the `Arc` that was held by the [`ArcRwLockReadGuard`].
1479
    #[inline]
1480
    pub fn into_arc(s: Self) -> Arc<RwLock<R, T>> {
1481
        // SAFETY: Skip our Drop impl and manually unlock the rwlock.
1482
        let s = ManuallyDrop::new(s);
1483
        unsafe {
1484
            s.rwlock.raw.unlock_shared();
1485
            ptr::read(&s.rwlock)
1486
        }
1487
    }
1488
1489
    /// Temporarily unlocks the `RwLock` to execute the given function.
1490
    ///
1491
    /// This is functionally identical to the `unlocked` method on [`RwLockReadGuard`].
1492
    #[inline]
1493
    #[track_caller]
1494
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1495
    where
1496
        F: FnOnce() -> U,
1497
    {
1498
        // Safety: An RwLockReadGuard always holds a shared lock.
1499
        unsafe {
1500
            s.rwlock.raw.unlock_shared();
1501
        }
1502
        defer!(s.rwlock.raw.lock_shared());
1503
        f()
1504
    }
1505
}
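A minimal sketch of the `Arc`-based read guard, assuming `parking_lot` built with its `arc_lock` feature so that `read_arc` is available; the point is that the guard is not tied to any borrow of the original handle.

use std::sync::Arc;
use parking_lot::RwLock;

fn main() {
    let data = Arc::new(RwLock::new(vec![1u32, 2, 3]));
    // read_arc clones the Arc internally, so the guard is not tied to any
    // borrow of `data` and could be stored in a struct or returned.
    let guard = data.read_arc();
    drop(data); // the guard alone keeps the RwLock alive
    assert_eq!(guard.len(), 3);
}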
1506
1507
#[cfg(feature = "arc_lock")]
1508
impl<R: RawRwLockFair, T: ?Sized> ArcRwLockReadGuard<R, T> {
1509
    /// Unlocks the `RwLock` using a fair unlock protocol.
1510
    ///
1511
    /// This is functionally identical to the `unlock_fair` method on [`RwLockReadGuard`].
1512
    #[inline]
1513
    #[track_caller]
1514
    pub fn unlock_fair(s: Self) {
1515
        drop(Self::into_arc_fair(s));
1516
    }
1517
1518
    /// Unlocks the `RwLock` using a fair unlock protocol and returns the `Arc` that was held by the [`ArcRwLockReadGuard`].
1519
    #[inline]
1520
    pub fn into_arc_fair(s: Self) -> Arc<RwLock<R, T>> {
1521
        // SAFETY: Skip our Drop impl and manually unlock the rwlock.
1522
        let s = ManuallyDrop::new(s);
1523
        unsafe {
1524
            s.rwlock.raw.unlock_shared_fair();
1525
            ptr::read(&s.rwlock)
1526
        }
1527
    }
1528
1529
    /// Temporarily unlocks the `RwLock` to execute the given function.
1530
    ///
1531
    /// This is functionally identical to the `unlocked_fair` method on [`RwLockReadGuard`].
1532
    #[inline]
1533
    #[track_caller]
1534
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1535
    where
1536
        F: FnOnce() -> U,
1537
    {
1538
        // Safety: An RwLockReadGuard always holds a shared lock.
1539
        unsafe {
1540
            s.rwlock.raw.unlock_shared_fair();
1541
        }
1542
        defer!(s.rwlock.raw.lock_shared());
1543
        f()
1544
    }
1545
1546
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1547
    ///
1548
    /// This is functionally identical to the `bump` method on [`RwLockReadGuard`].
1549
    #[inline]
1550
    #[track_caller]
1551
    pub fn bump(s: &mut Self) {
1552
        // Safety: An RwLockReadGuard always holds a shared lock.
1553
        unsafe {
1554
            s.rwlock.raw.bump_shared();
1555
        }
1556
    }
1557
}
1558
1559
#[cfg(feature = "arc_lock")]
1560
impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockReadGuard<R, T> {
1561
    type Target = T;
1562
    #[inline]
1563
    fn deref(&self) -> &T {
1564
        unsafe { &*self.rwlock.data.get() }
1565
    }
1566
}
1567
1568
#[cfg(feature = "arc_lock")]
1569
impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockReadGuard<R, T> {
1570
    #[inline]
1571
    fn drop(&mut self) {
1572
        // Safety: An RwLockReadGuard always holds a shared lock.
1573
        unsafe {
1574
            self.rwlock.raw.unlock_shared();
1575
        }
1576
    }
1577
}
1578
1579
#[cfg(feature = "arc_lock")]
1580
impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockReadGuard<R, T> {
1581
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1582
        fmt::Debug::fmt(&**self, f)
1583
    }
1584
}
1585
1586
#[cfg(feature = "arc_lock")]
1587
impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display for ArcRwLockReadGuard<R, T> {
1588
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1589
        (**self).fmt(f)
1590
    }
1591
}
1592
1593
/// RAII structure used to release the exclusive write access of a lock when
1594
/// dropped.
1595
#[clippy::has_significant_drop]
1596
#[must_use = "if unused the RwLock will immediately unlock"]
1597
pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
1598
    rwlock: &'a RwLock<R, T>,
1599
    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
1600
}
1601
1602
unsafe impl<R: RawRwLock + Sync, T: Sync + ?Sized> Sync for RwLockWriteGuard<'_, R, T> {}
1603
1604
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1605
    /// Returns a reference to the original reader-writer lock object.
1606
0
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
1607
0
        s.rwlock
1608
0
    }
1609
1610
    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
1611
    ///
1612
    /// This operation cannot fail as the `RwLockWriteGuard` passed
1613
    /// in already locked the data.
1614
    ///
1615
    /// This is an associated function that needs to be
1616
    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
1617
    /// the same name on the contents of the locked data.
1618
    #[inline]
1619
0
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
1620
0
    where
1621
0
        F: FnOnce(&mut T) -> &mut U,
1622
    {
1623
0
        let raw = &s.rwlock.raw;
1624
0
        let data = f(unsafe { &mut *s.rwlock.data.get() });
1625
0
        mem::forget(s);
1626
0
        MappedRwLockWriteGuard {
1627
0
            raw,
1628
0
            data,
1629
0
            marker: PhantomData,
1630
0
        }
1631
0
    }
1632
1633
    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
1634
    /// locked data. The original guard is returned if the closure returns `None`.
1635
    ///
1636
    /// This operation cannot fail as the `RwLockWriteGuard` passed
1637
    /// in already locked the data.
1638
    ///
1639
    /// This is an associated function that needs to be
1640
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
1641
    /// the same name on the contents of the locked data.
1642
    #[inline]
1643
0
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
1644
0
    where
1645
0
        F: FnOnce(&mut T) -> Option<&mut U>,
1646
    {
1647
0
        let raw = &s.rwlock.raw;
1648
0
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
1649
0
            Some(data) => data,
1650
0
            None => return Err(s),
1651
        };
1652
0
        mem::forget(s);
1653
0
        Ok(MappedRwLockWriteGuard {
1654
0
            raw,
1655
0
            data,
1656
0
            marker: PhantomData,
1657
0
        })
1658
0
    }
1659
1660
    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
1661
    /// locked data. The original guard is returned alongside arbitrary user data
1662
    /// if the closure returns `Err`.
1663
    ///
1664
    /// This operation cannot fail as the `RwLockWriteGuard` passed
1665
    /// in already locked the data.
1666
    ///
1667
    /// This is an associated function that needs to be
1668
    /// used as `RwLockWriteGuard::try_map_or_err(...)`. A method would interfere with methods of
1669
    /// the same name on the contents of the locked data.
1670
    #[inline]
1671
0
    pub fn try_map_or_err<U: ?Sized, F, E>(
1672
0
        s: Self,
1673
0
        f: F,
1674
0
    ) -> Result<MappedRwLockWriteGuard<'a, R, U>, (Self, E)>
1675
0
    where
1676
0
        F: FnOnce(&mut T) -> Result<&mut U, E>,
1677
    {
1678
0
        let raw = &s.rwlock.raw;
1679
0
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
1680
0
            Ok(data) => data,
1681
0
            Err(e) => return Err((s, e)),
1682
        };
1683
0
        mem::forget(s);
1684
0
        Ok(MappedRwLockWriteGuard {
1685
0
            raw,
1686
0
            data,
1687
0
            marker: PhantomData,
1688
0
        })
1689
0
    }
1690
1691
    /// Temporarily unlocks the `RwLock` to execute the given function.
1692
    ///
1693
    /// This is safe because `&mut` guarantees that there exist no other
1694
    /// references to the data protected by the `RwLock`.
1695
    #[inline]
1696
    #[track_caller]
1697
0
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1698
0
    where
1699
0
        F: FnOnce() -> U,
1700
    {
1701
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1702
0
        unsafe {
1703
0
            s.rwlock.raw.unlock_exclusive();
1704
0
        }
1705
0
        defer!(s.rwlock.raw.lock_exclusive());
1706
0
        f()
1707
0
    }
1708
}
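The write-side mapping works the same way; here is a minimal sketch of `RwLockWriteGuard::map`, again assuming `parking_lot`, with an illustrative `Stats` type.

use parking_lot::{MappedRwLockWriteGuard, RwLock, RwLockWriteGuard};

struct Stats {
    hits: u64,
    misses: u64,
}

fn hits_mut(stats: &RwLock<Stats>) -> MappedRwLockWriteGuard<'_, u64> {
    // The exclusive lock stays held by the mapped guard.
    RwLockWriteGuard::map(stats.write(), |s| &mut s.hits)
}

fn main() {
    let stats = RwLock::new(Stats { hits: 0, misses: 0 });
    *hits_mut(&stats) += 1;
    let snapshot = stats.read();
    assert_eq!((snapshot.hits, snapshot.misses), (1, 0));
}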
1709
1710
impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1711
    /// Atomically downgrades a write lock into a read lock without allowing any
1712
    /// writers to take exclusive access of the lock in the meantime.
1713
    ///
1714
    /// Note that if there are any writers currently waiting to take the lock
1715
    /// then other readers may not be able to acquire the lock even if it was
1716
    /// downgraded.
1717
    #[track_caller]
1718
0
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
1719
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1720
0
        unsafe {
1721
0
            s.rwlock.raw.downgrade();
1722
0
        }
1723
0
        let rwlock = s.rwlock;
1724
0
        mem::forget(s);
1725
0
        RwLockReadGuard {
1726
0
            rwlock,
1727
0
            marker: PhantomData,
1728
0
        }
1729
0
    }
1730
}
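A minimal sketch of the atomic downgrade described above, assuming `parking_lot`, whose raw lock implements `RawRwLockDowngrade`; the publish/"ready" wording is illustrative only.

use parking_lot::{RwLock, RwLockWriteGuard};

fn publish(slot: &RwLock<Option<String>>) {
    let mut w = slot.write();
    *w = Some("ready".to_string());
    // Atomically trade the exclusive lock for a shared one; no other writer
    // can slip in between the write above and the read below.
    let r = RwLockWriteGuard::downgrade(w);
    assert_eq!(r.as_deref(), Some("ready"));
}

fn main() {
    publish(&RwLock::new(None));
}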
1731
1732
impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1733
    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
1734
    /// writers to take exclusive access of the lock in the meantime.
1735
    ///
1736
    /// Note that if there are any writers currently waiting to take the lock
1737
    /// then other readers may not be able to acquire the lock even if it was
1738
    /// downgraded.
1739
    #[track_caller]
1740
0
    pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
1741
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1742
0
        unsafe {
1743
0
            s.rwlock.raw.downgrade_to_upgradable();
1744
0
        }
1745
0
        let rwlock = s.rwlock;
1746
0
        mem::forget(s);
1747
0
        RwLockUpgradableReadGuard {
1748
0
            rwlock,
1749
0
            marker: PhantomData,
1750
0
        }
1751
0
    }
1752
}
1753
1754
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1755
    /// Unlocks the `RwLock` using a fair unlock protocol.
1756
    ///
1757
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
1758
    /// the `RwLock` before another has the chance to acquire the lock, even if
1759
    /// that thread has been blocked on the `RwLock` for a long time. This is
1760
    /// the default because it allows much higher throughput as it avoids
1761
    /// forcing a context switch on every `RwLock` unlock. This can result in one
1762
    /// thread acquiring a `RwLock` many more times than other threads.
1763
    ///
1764
    /// However in some cases it can be beneficial to ensure fairness by forcing
1765
    /// the lock to pass on to a waiting thread if there is one. This is done by
1766
    /// using this method instead of dropping the `RwLockWriteGuard` normally.
1767
    #[inline]
1768
    #[track_caller]
1769
0
    pub fn unlock_fair(s: Self) {
1770
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1771
0
        unsafe {
1772
0
            s.rwlock.raw.unlock_exclusive_fair();
1773
0
        }
1774
0
        mem::forget(s);
1775
0
    }
1776
1777
    /// Temporarily unlocks the `RwLock` to execute the given function.
1778
    ///
1779
    /// The `RwLock` is unlocked using a fair unlock protocol.
1780
    ///
1781
    /// This is safe because `&mut` guarantees that there exist no other
1782
    /// references to the data protected by the `RwLock`.
1783
    #[inline]
1784
    #[track_caller]
1785
0
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1786
0
    where
1787
0
        F: FnOnce() -> U,
1788
    {
1789
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1790
0
        unsafe {
1791
0
            s.rwlock.raw.unlock_exclusive_fair();
1792
0
        }
1793
0
        defer!(s.rwlock.raw.lock_exclusive());
1794
0
        f()
1795
0
    }
1796
1797
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1798
    ///
1799
    /// This method is functionally equivalent to calling `unlock_fair` followed
1800
    /// by `write`; however, it can be much more efficient in the case where there
1801
    /// are no waiting threads.
1802
    #[inline]
1803
    #[track_caller]
1804
0
    pub fn bump(s: &mut Self) {
1805
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1806
0
        unsafe {
1807
0
            s.rwlock.raw.bump_exclusive();
1808
0
        }
1809
0
    }
1810
}
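A minimal sketch of `bump` on a write guard, assuming `parking_lot`: in a loop that holds the lock for a long time, `bump` periodically hands the lock to any queued thread without restructuring the loop.

use parking_lot::{RwLock, RwLockWriteGuard};

fn drain(queue: &RwLock<Vec<u32>>) {
    let mut w = queue.write();
    while let Some(_item) = w.pop() {
        // If another thread is waiting, hand the lock over and re-acquire it;
        // with no waiters this is close to a no-op.
        RwLockWriteGuard::bump(&mut w);
    }
}

fn main() {
    let queue = RwLock::new(vec![1, 2, 3]);
    drain(&queue);
    assert!(queue.read().is_empty());
}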
1811
1812
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
1813
    type Target = T;
1814
    #[inline]
1815
0
    fn deref(&self) -> &T {
1816
0
        unsafe { &*self.rwlock.data.get() }
1817
0
    }
1818
}
1819
1820
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
1821
    #[inline]
1822
0
    fn deref_mut(&mut self) -> &mut T {
1823
0
        unsafe { &mut *self.rwlock.data.get() }
1824
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, ()> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<_, _> as core::ops::deref::DerefMut>::deref_mut
1825
}
1826
1827
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
1828
    #[inline]
1829
0
    fn drop(&mut self) {
1830
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1831
0
        unsafe {
1832
0
            self.rwlock.raw.unlock_exclusive();
1833
0
        }
1834
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, ()> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<_, _> as core::ops::drop::Drop>::drop
1835
}
1836
1837
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
1838
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1839
0
        fmt::Debug::fmt(&**self, f)
1840
0
    }
1841
}
1842
1843
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1844
    for RwLockWriteGuard<'a, R, T>
1845
{
1846
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1847
0
        (**self).fmt(f)
1848
0
    }
1849
}
1850
1851
#[cfg(feature = "owning_ref")]
1852
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
1853
1854
/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`.
1855
/// This is similar to the `RwLockWriteGuard` struct, except instead of using a reference to unlock the `RwLock`
1856
/// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has a `'static` lifetime.
1857
#[cfg(feature = "arc_lock")]
1858
#[clippy::has_significant_drop]
1859
#[must_use = "if unused the RwLock will immediately unlock"]
1860
pub struct ArcRwLockWriteGuard<R: RawRwLock, T: ?Sized> {
1861
    rwlock: Arc<RwLock<R, T>>,
1862
    marker: PhantomData<R::GuardMarker>,
1863
}
1864
1865
#[cfg(feature = "arc_lock")]
1866
impl<R: RawRwLock, T: ?Sized> ArcRwLockWriteGuard<R, T> {
1867
    /// Returns a reference to the rwlock, contained in its `Arc`.
1868
    pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> {
1869
        &s.rwlock
1870
    }
1871
1872
    /// Unlocks the `RwLock` and returns the `Arc` that was held by the [`ArcRwLockWriteGuard`].
1873
    #[inline]
1874
    pub fn into_arc(s: Self) -> Arc<RwLock<R, T>> {
1875
        // SAFETY: Skip our Drop impl and manually unlock the rwlock.
1876
        let s = ManuallyDrop::new(s);
1877
        unsafe {
1878
            s.rwlock.raw.unlock_exclusive();
1879
            ptr::read(&s.rwlock)
1880
        }
1881
    }
1882
1883
    /// Temporarily unlocks the `RwLock` to execute the given function.
1884
    ///
1885
    /// This is functionally equivalent to the `unlocked` method on [`RwLockWriteGuard`].
1886
    #[inline]
1887
    #[track_caller]
1888
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1889
    where
1890
        F: FnOnce() -> U,
1891
    {
1892
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1893
        unsafe {
1894
            s.rwlock.raw.unlock_exclusive();
1895
        }
1896
        defer!(s.rwlock.raw.lock_exclusive());
1897
        f()
1898
    }
1899
}
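A minimal sketch of `into_arc` on the `Arc`-based write guard, assuming `parking_lot` with its `arc_lock` feature plus `lock_api` as a direct dependency so the guard's associated function can be named by path.

use std::sync::Arc;
use parking_lot::RwLock;

fn main() {
    let lock = Arc::new(RwLock::new(0u32));
    let mut guard = lock.write_arc();
    *guard += 1;
    // into_arc unlocks the RwLock and hands back the Arc the guard was holding.
    let lock_again = lock_api::ArcRwLockWriteGuard::into_arc(guard);
    assert_eq!(*lock_again.read(), 1);
}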
1900
1901
#[cfg(feature = "arc_lock")]
1902
impl<R: RawRwLockDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> {
1903
    /// Atomically downgrades a write lock into a read lock without allowing any
1904
    /// writers to take exclusive access of the lock in the meantime.
1905
    ///
1906
    /// This is functionally equivalent to the `downgrade` method on [`RwLockWriteGuard`].
1907
    #[track_caller]
1908
    pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> {
1909
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1910
        unsafe {
1911
            s.rwlock.raw.downgrade();
1912
        }
1913
1914
        // SAFETY: prevent the arc's refcount from changing using ManuallyDrop and ptr::read
1915
        let s = ManuallyDrop::new(s);
1916
        let rwlock = unsafe { ptr::read(&s.rwlock) };
1917
1918
        ArcRwLockReadGuard {
1919
            rwlock,
1920
            marker: PhantomData,
1921
        }
1922
    }
1923
}
1924
1925
#[cfg(feature = "arc_lock")]
1926
impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> {
1927
    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
1928
    /// writers to take exclusive access of the lock in the meantime.
1929
    ///
1930
    /// This is functionally identical to the `downgrade_to_upgradable` method on [`RwLockWriteGuard`].
1931
    #[track_caller]
1932
    pub fn downgrade_to_upgradable(s: Self) -> ArcRwLockUpgradableReadGuard<R, T> {
1933
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1934
        unsafe {
1935
            s.rwlock.raw.downgrade_to_upgradable();
1936
        }
1937
1938
        // SAFETY: same as above
1939
        let s = ManuallyDrop::new(s);
1940
        let rwlock = unsafe { ptr::read(&s.rwlock) };
1941
1942
        ArcRwLockUpgradableReadGuard {
1943
            rwlock,
1944
            marker: PhantomData,
1945
        }
1946
    }
1947
}
1948
1949
#[cfg(feature = "arc_lock")]
1950
impl<R: RawRwLockFair, T: ?Sized> ArcRwLockWriteGuard<R, T> {
1951
    /// Unlocks the `RwLock` using a fair unlock protocol.
1952
    ///
1953
    /// This is functionally equivalent to the `unlock_fair` method on [`RwLockWriteGuard`].
1954
    #[inline]
1955
    #[track_caller]
1956
    pub fn unlock_fair(s: Self) {
1957
        drop(Self::into_arc_fair(s));
1958
    }
1959
1960
    /// Unlocks the `RwLock` using a fair unlock protocol and returns the `Arc` that was held by the [`ArcRwLockWriteGuard`].
1961
    #[inline]
1962
    pub fn into_arc_fair(s: Self) -> Arc<RwLock<R, T>> {
1963
        // SAFETY: Skip our Drop impl and manually unlock the rwlock.
1964
        let s = ManuallyDrop::new(s);
1965
        unsafe {
1966
            s.rwlock.raw.unlock_exclusive_fair();
1967
            ptr::read(&s.rwlock)
1968
        }
1969
    }
1970
1971
    /// Temporarily unlocks the `RwLock` to execute the given function.
1972
    ///
1973
    /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockWriteGuard`].
1974
    #[inline]
1975
    #[track_caller]
1976
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1977
    where
1978
        F: FnOnce() -> U,
1979
    {
1980
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1981
        unsafe {
1982
            s.rwlock.raw.unlock_exclusive_fair();
1983
        }
1984
        defer!(s.rwlock.raw.lock_exclusive());
1985
        f()
1986
    }
1987
1988
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1989
    ///
1990
    /// This method is functionally equivalent to the `bump` method on [`RwLockWriteGuard`].
1991
    #[inline]
1992
    #[track_caller]
1993
    pub fn bump(s: &mut Self) {
1994
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1995
        unsafe {
1996
            s.rwlock.raw.bump_exclusive();
1997
        }
1998
    }
1999
}
2000
2001
#[cfg(feature = "arc_lock")]
2002
impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockWriteGuard<R, T> {
2003
    type Target = T;
2004
    #[inline]
2005
    fn deref(&self) -> &T {
2006
        unsafe { &*self.rwlock.data.get() }
2007
    }
2008
}
2009
2010
#[cfg(feature = "arc_lock")]
2011
impl<R: RawRwLock, T: ?Sized> DerefMut for ArcRwLockWriteGuard<R, T> {
2012
    #[inline]
2013
    fn deref_mut(&mut self) -> &mut T {
2014
        unsafe { &mut *self.rwlock.data.get() }
2015
    }
2016
}
2017
2018
#[cfg(feature = "arc_lock")]
2019
impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockWriteGuard<R, T> {
2020
    #[inline]
2021
    fn drop(&mut self) {
2022
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
2023
        unsafe {
2024
            self.rwlock.raw.unlock_exclusive();
2025
        }
2026
    }
2027
}
2028
2029
#[cfg(feature = "arc_lock")]
2030
impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockWriteGuard<R, T> {
2031
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2032
        fmt::Debug::fmt(&**self, f)
2033
    }
2034
}
2035
2036
#[cfg(feature = "arc_lock")]
2037
impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display for ArcRwLockWriteGuard<R, T> {
2038
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2039
        (**self).fmt(f)
2040
    }
2041
}
2042
2043
/// RAII structure used to release the upgradable read access of a lock when
2044
/// dropped.
2045
#[clippy::has_significant_drop]
2046
#[must_use = "if unused the RwLock will immediately unlock"]
2047
pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> {
2048
    rwlock: &'a RwLock<R, T>,
2049
    marker: PhantomData<(&'a T, R::GuardMarker)>,
2050
}
2051
2052
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
2053
    for RwLockUpgradableReadGuard<'a, R, T>
2054
{
2055
}
2056
2057
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
2058
    /// Returns a reference to the original reader-writer lock object.
2059
0
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
2060
0
        s.rwlock
2061
0
    }
2062
2063
    /// Temporarily unlocks the `RwLock` to execute the given function.
2064
    ///
2065
    /// This is safe because `&mut` guarantees that there exist no other
2066
    /// references to the data protected by the `RwLock`.
2067
    #[inline]
2068
    #[track_caller]
2069
0
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
2070
0
    where
2071
0
        F: FnOnce() -> U,
2072
    {
2073
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2074
0
        unsafe {
2075
0
            s.rwlock.raw.unlock_upgradable();
2076
0
        }
2077
0
        defer!(s.rwlock.raw.lock_upgradable());
2078
0
        f()
2079
0
    }
2080
2081
    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
2082
    /// blocking the current thread until it can be acquired.
2083
    #[track_caller]
2084
0
    pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
2085
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2086
0
        unsafe {
2087
0
            s.rwlock.raw.upgrade();
2088
0
        }
2089
0
        let rwlock = s.rwlock;
2090
0
        mem::forget(s);
2091
0
        RwLockWriteGuard {
2092
0
            rwlock,
2093
0
            marker: PhantomData,
2094
0
        }
2095
0
    }
2096
2097
    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
2098
    ///
2099
    /// If the access could not be granted at this time, then the current guard is returned.
2100
    #[track_caller]
2101
0
    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
2102
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2103
0
        if unsafe { s.rwlock.raw.try_upgrade() } {
2104
0
            let rwlock = s.rwlock;
2105
0
            mem::forget(s);
2106
0
            Ok(RwLockWriteGuard {
2107
0
                rwlock,
2108
0
                marker: PhantomData,
2109
0
            })
2110
        } else {
2111
0
            Err(s)
2112
        }
2113
0
    }
2114
}
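A minimal sketch of the check-then-upgrade pattern these methods enable, assuming `parking_lot`, which exposes `upgradable_read`; the cache contents are illustrative only.

use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn insert_if_missing(cache: &RwLock<Vec<u32>>, value: u32) {
    let r = cache.upgradable_read();
    if !r.contains(&value) {
        // Only one upgradable guard exists at a time, so no other writer can
        // insert `value` between the check above and the upgrade here.
        let mut w = RwLockUpgradableReadGuard::upgrade(r);
        w.push(value);
    }
}

fn main() {
    let cache = RwLock::new(vec![1, 2]);
    insert_if_missing(&cache, 3);
    assert_eq!(cache.read().len(), 3);
}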
2115
2116
impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
2117
    /// Unlocks the `RwLock` using a fair unlock protocol.
2118
    ///
2119
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
2120
    /// the `RwLock` before another has the chance to acquire the lock, even if
2121
    /// that thread has been blocked on the `RwLock` for a long time. This is
2122
    /// the default because it allows much higher throughput as it avoids
2123
    /// forcing a context switch on every `RwLock` unlock. This can result in one
2124
    /// thread acquiring a `RwLock` many more times than other threads.
2125
    ///
2126
    /// However in some cases it can be beneficial to ensure fairness by forcing
2127
    /// the lock to pass on to a waiting thread if there is one. This is done by
2128
    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
2129
    #[inline]
2130
    #[track_caller]
2131
0
    pub fn unlock_fair(s: Self) {
2132
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2133
0
        unsafe {
2134
0
            s.rwlock.raw.unlock_upgradable_fair();
2135
0
        }
2136
0
        mem::forget(s);
2137
0
    }
2138
2139
    /// Temporarily unlocks the `RwLock` to execute the given function.
2140
    ///
2141
    /// The `RwLock` is unlocked using a fair unlock protocol.
2142
    ///
2143
    /// This is safe because `&mut` guarantees that there exist no other
2144
    /// references to the data protected by the `RwLock`.
2145
    #[inline]
2146
    #[track_caller]
2147
0
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
2148
0
    where
2149
0
        F: FnOnce() -> U,
2150
    {
2151
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2152
0
        unsafe {
2153
0
            s.rwlock.raw.unlock_upgradable_fair();
2154
0
        }
2155
0
        defer!(s.rwlock.raw.lock_upgradable());
2156
0
        f()
2157
0
    }
2158
2159
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
2160
    ///
2161
    /// This method is functionally equivalent to calling `unlock_fair` followed
2162
    /// by `upgradable_read`; however, it can be much more efficient in the case where there
2163
    /// are no waiting threads.
2164
    #[inline]
2165
    #[track_caller]
2166
0
    pub fn bump(s: &mut Self) {
2167
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2168
0
        unsafe {
2169
0
            s.rwlock.raw.bump_upgradable();
2170
0
        }
2171
0
    }
2172
}
2173
2174
impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
2175
    /// Atomically downgrades an upgradable read lock into a shared read lock
2176
    /// without allowing any writers to take exclusive access of the lock in the
2177
    /// meantime.
2178
    ///
2179
    /// Note that if there are any writers currently waiting to take the lock
2180
    /// then other readers may not be able to acquire the lock even if it was
2181
    /// downgraded.
2182
    #[track_caller]
2183
0
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
2184
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2185
0
        unsafe {
2186
0
            s.rwlock.raw.downgrade_upgradable();
2187
0
        }
2188
0
        let rwlock = s.rwlock;
2189
0
        mem::forget(s);
2190
0
        RwLockReadGuard {
2191
0
            rwlock,
2192
0
            marker: PhantomData,
2193
0
        }
2194
0
    }
2195
2196
    /// First, atomically upgrades an upgradable read lock into an exclusive write lock,
2197
    /// blocking the current thread until it can be acquired.
2198
    ///
2199
    /// Then, calls the provided closure with an exclusive reference to the lock's data.
2200
    ///
2201
    /// Finally, atomically downgrades the lock back to an upgradable read lock.
2202
    /// The closure's return value is wrapped in `Some` and returned.
2203
    ///
2204
    /// This function only requires a mutable reference to the guard, unlike
2205
    /// `upgrade` which takes the guard by value.
2206
    #[track_caller]
2207
0
    pub fn with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret {
2208
0
        unsafe {
2209
0
            self.rwlock.raw.upgrade();
2210
0
        }
2211
2212
        // Safety: We just upgraded the lock, so we have mutable access to the data.
2213
        // This will restore the state the lock was in at the start of the function.
2214
0
        defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2215
2216
        // Safety: We upgraded the lock, so we have mutable access to the data.
2217
        // When this function returns, whether by drop or panic,
2218
        // the drop guard will downgrade it back to an upgradeable lock.
2219
0
        f(unsafe { &mut *self.rwlock.data.get() })
2220
0
    }
2221
2222
    /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock.
2223
    ///
2224
    /// If the access could not be granted at this time, then `None` is returned.
2225
    ///
2226
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2227
    /// and finally downgrades the lock back to an upgradable read lock.
2228
    /// The closure's return value is wrapped in `Some` and returned.
2229
    ///
2230
    /// This function only requires a mutable reference to the guard, unlike
2231
    /// `try_upgrade` which takes the guard by value.
2232
    #[track_caller]
2233
0
    pub fn try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret> {
2234
0
        if unsafe { self.rwlock.raw.try_upgrade() } {
2235
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2236
            // This will restore the state the lock was in at the start of the function.
2237
0
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2238
2239
            // Safety: We upgraded the lock, so we have mutable access to the data.
2240
            // When this function returns, whether by drop or panic,
2241
            // the drop guard will downgrade it back to an upgradeable lock.
2242
0
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2243
        } else {
2244
0
            None
2245
        }
2246
0
    }
2247
}
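A minimal sketch of `with_upgraded`, assuming `parking_lot`: unlike `upgrade`, the guard stays usable afterwards because the lock is atomically downgraded back to the upgradable state when the closure returns.

use parking_lot::RwLock;

fn bump_counter(counter: &RwLock<u64>) -> u64 {
    let mut guard = counter.upgradable_read();
    // The closure runs with exclusive access; when it returns, the guard is
    // back to being an upgradable read guard.
    guard.with_upgraded(|v| {
        *v += 1;
        *v
    })
}

fn main() {
    let counter = RwLock::new(0);
    assert_eq!(bump_counter(&counter), 1);
}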
2248
2249
impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
2250
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2251
    /// write lock, until a timeout is reached.
2252
    ///
2253
    /// If the access could not be granted before the timeout expires, then
2254
    /// the current guard is returned.
2255
    #[track_caller]
2256
0
    pub fn try_upgrade_for(
2257
0
        s: Self,
2258
0
        timeout: R::Duration,
2259
0
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
2260
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2261
0
        if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
2262
0
            let rwlock = s.rwlock;
2263
0
            mem::forget(s);
2264
0
            Ok(RwLockWriteGuard {
2265
0
                rwlock,
2266
0
                marker: PhantomData,
2267
0
            })
2268
        } else {
2269
0
            Err(s)
2270
        }
2271
0
    }
2272
2273
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2274
    /// write lock, until a timeout is reached.
2275
    ///
2276
    /// If the access could not be granted before the timeout expires, then
2277
    /// the current guard is returned.
2278
    #[inline]
2279
    #[track_caller]
2280
0
    pub fn try_upgrade_until(
2281
0
        s: Self,
2282
0
        timeout: R::Instant,
2283
0
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
2284
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2285
0
        if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
2286
0
            let rwlock = s.rwlock;
2287
0
            mem::forget(s);
2288
0
            Ok(RwLockWriteGuard {
2289
0
                rwlock,
2290
0
                marker: PhantomData,
2291
0
            })
2292
        } else {
2293
0
            Err(s)
2294
        }
2295
0
    }
2296
}
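A minimal sketch of the timed upgrade, assuming `parking_lot`, whose raw lock implements the timed upgrade trait with `std::time::Duration` as the timeout type.

use std::time::Duration;
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn try_flush(buf: &RwLock<Vec<u8>>) -> bool {
    let r = buf.upgradable_read();
    match RwLockUpgradableReadGuard::try_upgrade_for(r, Duration::from_millis(10)) {
        Ok(mut w) => {
            w.clear();
            true
        }
        // Timed out: the upgradable guard comes back unchanged in the Err.
        Err(_r) => false,
    }
}

fn main() {
    let buf = RwLock::new(vec![0u8; 4]);
    assert!(try_flush(&buf));
    assert!(buf.read().is_empty());
}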
2297
2298
impl<'a, R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a>
2299
    RwLockUpgradableReadGuard<'a, R, T>
2300
{
2301
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2302
    /// write lock, until a timeout is reached.
2303
    ///
2304
    /// If the access could not be granted before the timeout expires, then
2305
    /// `None` is returned.
2306
    ///
2307
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2308
    /// and finally downgrades the lock back to an upgradable read lock.
2309
    /// The closure's return value is wrapped in `Some` and returned.
2310
    ///
2311
    /// This function only requires a mutable reference to the guard, unlike
2312
    /// `try_upgrade_for` which takes the guard by value.
2313
    #[track_caller]
2314
0
    pub fn try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>(
2315
0
        &mut self,
2316
0
        timeout: R::Duration,
2317
0
        f: F,
2318
0
    ) -> Option<Ret> {
2319
0
        if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } {
2320
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2321
            // This will restore the state the lock was in at the start of the function.
2322
0
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2323
2324
            // Safety: We upgraded the lock, so we have mutable access to the data.
2325
            // When this function returns, whether by drop or panic,
2326
            // the drop guard will downgrade it back to an upgradeable lock.
2327
0
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2328
        } else {
2329
0
            None
2330
        }
2331
0
    }
2332
2333
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2334
    /// write lock, until a timeout is reached.
2335
    ///
2336
    /// If the access could not be granted before the timeout expires, then
2337
    /// `None` is returned.
2338
    ///
2339
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2340
    /// and finally downgrades the lock back to an upgradable read lock.
2341
    /// The closure's return value is wrapped in `Some` and returned.
2342
    ///
2343
    /// This function only requires a mutable reference to the guard, unlike
2344
    /// `try_upgrade_until` which takes the guard by value.
2345
    #[track_caller]
2346
0
    pub fn try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>(
2347
0
        &mut self,
2348
0
        timeout: R::Instant,
2349
0
        f: F,
2350
0
    ) -> Option<Ret> {
2351
0
        if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } {
2352
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2353
            // This will restore the state the lock was in at the start of the function.
2354
0
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2355
2356
            // Safety: We upgraded the lock, so we have mutable access to the data.
2357
            // When this function returns, whether by drop or panic,
2358
            // the drop guard will downgrade it back to an upgradeable lock.
2359
0
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2360
        } else {
2361
0
            None
2362
        }
2363
0
    }
2364
}
2365
2366
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
2367
    type Target = T;
2368
    #[inline]
2369
0
    fn deref(&self) -> &T {
2370
0
        unsafe { &*self.rwlock.data.get() }
2371
0
    }
2372
}
2373
2374
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
2375
    #[inline]
2376
0
    fn drop(&mut self) {
2377
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2378
0
        unsafe {
2379
0
            self.rwlock.raw.unlock_upgradable();
2380
0
        }
2381
0
    }
2382
}
2383
2384
impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
2385
    for RwLockUpgradableReadGuard<'a, R, T>
2386
{
2387
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2388
0
        fmt::Debug::fmt(&**self, f)
2389
0
    }
2390
}
2391
2392
impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
2393
    for RwLockUpgradableReadGuard<'a, R, T>
2394
{
2395
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2396
0
        (**self).fmt(f)
2397
0
    }
2398
}
2399
2400
#[cfg(feature = "owning_ref")]
2401
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
2402
    for RwLockUpgradableReadGuard<'a, R, T>
2403
{
2404
}
2405
2406
/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`.
2407
/// This is similar to the `RwLockUpgradableReadGuard` struct, except instead of using a reference to unlock the
2408
/// `RwLock` it uses an `Arc<RwLock>`. This has several advantages, most notably that it has a `'static`
2409
/// lifetime.
2410
#[cfg(feature = "arc_lock")]
2411
#[clippy::has_significant_drop]
2412
#[must_use = "if unused the RwLock will immediately unlock"]
2413
pub struct ArcRwLockUpgradableReadGuard<R: RawRwLockUpgrade, T: ?Sized> {
2414
    rwlock: Arc<RwLock<R, T>>,
2415
    marker: PhantomData<R::GuardMarker>,
2416
}
2417
2418
#[cfg(feature = "arc_lock")]
2419
impl<R: RawRwLockUpgrade, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
2420
    /// Returns a reference to the rwlock, contained in its original `Arc`.
2421
    pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> {
2422
        &s.rwlock
2423
    }
2424
2425
    /// Unlocks the `RwLock` and returns the `Arc` that was held by the [`ArcRwLockUpgradableReadGuard`].
2426
    #[inline]
2427
    pub fn into_arc(s: Self) -> Arc<RwLock<R, T>> {
2428
        // SAFETY: Skip our Drop impl and manually unlock the rwlock.
2429
        let s = ManuallyDrop::new(s);
2430
        unsafe {
2431
            s.rwlock.raw.unlock_upgradable();
2432
            ptr::read(&s.rwlock)
2433
        }
2434
    }
2435
2436
    /// Temporarily unlocks the `RwLock` to execute the given function.
2437
    ///
2438
    /// This is functionally identical to the `unlocked` method on [`RwLockUpgradableReadGuard`].
2439
    #[inline]
2440
    #[track_caller]
2441
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
2442
    where
2443
        F: FnOnce() -> U,
2444
    {
2445
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2446
        unsafe {
2447
            s.rwlock.raw.unlock_upgradable();
2448
        }
2449
        defer!(s.rwlock.raw.lock_upgradable());
2450
        f()
2451
    }
2452
2453
    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
2454
    /// blocking the current thread until it can be acquired.
2455
    #[track_caller]
2456
    pub fn upgrade(s: Self) -> ArcRwLockWriteGuard<R, T> {
2457
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2458
        unsafe {
2459
            s.rwlock.raw.upgrade();
2460
        }
2461
2462
        // SAFETY: avoid incrementing or decrementing the refcount using ManuallyDrop and reading the Arc out
2463
        //         of the struct
2464
        let s = ManuallyDrop::new(s);
2465
        let rwlock = unsafe { ptr::read(&s.rwlock) };
2466
2467
        ArcRwLockWriteGuard {
2468
            rwlock,
2469
            marker: PhantomData,
2470
        }
2471
    }
2472
2473
    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
2474
    ///
2475
    /// If the access could not be granted at this time, then the current guard is returned.
2476
    #[track_caller]
2477
    pub fn try_upgrade(s: Self) -> Result<ArcRwLockWriteGuard<R, T>, Self> {
2478
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2479
        if unsafe { s.rwlock.raw.try_upgrade() } {
2480
            // SAFETY: same as above
2481
            let s = ManuallyDrop::new(s);
2482
            let rwlock = unsafe { ptr::read(&s.rwlock) };
2483
2484
            Ok(ArcRwLockWriteGuard {
2485
                rwlock,
2486
                marker: PhantomData,
2487
            })
2488
        } else {
2489
            Err(s)
2490
        }
2491
    }
2492
}
2493
2494
#[cfg(feature = "arc_lock")]
2495
impl<R: RawRwLockUpgradeFair, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
2496
    /// Unlocks the `RwLock` using a fair unlock protocol.
2497
    ///
2498
    /// This is functionally identical to the `unlock_fair` method on [`RwLockUpgradableReadGuard`].
2499
    #[inline]
2500
    #[track_caller]
2501
    pub fn unlock_fair(s: Self) {
2502
        drop(Self::into_arc_fair(s));
2503
    }
2504
2505
    /// Unlocks the `RwLock` using a fair unlock protocol and returns the `Arc` that was held by the [`ArcRwLockUpgradableReadGuard`].
2506
    #[inline]
2507
    pub fn into_arc_fair(s: Self) -> Arc<RwLock<R, T>> {
2508
        // SAFETY: Skip our Drop impl and manually unlock the rwlock.
2509
        let s = ManuallyDrop::new(s);
2510
        unsafe {
2511
            s.rwlock.raw.unlock_upgradable_fair();
2512
            ptr::read(&s.rwlock)
2513
        }
2514
    }
2515
2516
    /// Temporarily unlocks the `RwLock` to execute the given function.
2517
    ///
2518
    /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockUpgradableReadGuard`].
2519
    #[inline]
2520
    #[track_caller]
2521
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
2522
    where
2523
        F: FnOnce() -> U,
2524
    {
2525
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2526
        unsafe {
2527
            s.rwlock.raw.unlock_upgradable_fair();
2528
        }
2529
        defer!(s.rwlock.raw.lock_upgradable());
2530
        f()
2531
    }
2532
2533
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
2534
    ///
2535
    /// This method is functionally equivalent to calling `bump` on [`RwLockUpgradableReadGuard`].
2536
    #[inline]
2537
    #[track_caller]
2538
    pub fn bump(s: &mut Self) {
2539
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2540
        unsafe {
2541
            s.rwlock.raw.bump_upgradable();
2542
        }
2543
    }
2544
}
2545
2546
#[cfg(feature = "arc_lock")]
2547
impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
2548
    /// Atomically downgrades an upgradable read lock into a shared read lock
2549
    /// without allowing any writers to take exclusive access of the lock in the
2550
    /// meantime.
2551
    ///
2552
    /// Note that if there are any writers currently waiting to take the lock
2553
    /// then other readers may not be able to acquire the lock even if it was
2554
    /// downgraded.
2555
    #[track_caller]
2556
    pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> {
2557
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2558
        unsafe {
2559
            s.rwlock.raw.downgrade_upgradable();
2560
        }
2561
2562
        // SAFETY: use ManuallyDrop and ptr::read to ensure the refcount is not changed
2563
        let s = ManuallyDrop::new(s);
2564
        let rwlock = unsafe { ptr::read(&s.rwlock) };
2565
2566
        ArcRwLockReadGuard {
2567
            rwlock,
2568
            marker: PhantomData,
2569
        }
2570
    }
2571
2572
    /// First, atomically upgrades an upgradable read lock into an exclusive write lock,
2573
    /// blocking the current thread until it can be acquired.
2574
    ///
2575
    /// Then, calls the provided closure with an exclusive reference to the lock's data.
2576
    ///
2577
    /// Finally, atomically downgrades the lock back to an upgradable read lock.
2578
    /// The closure's return value is returned.
2579
    ///
2580
    /// This function only requires a mutable reference to the guard, unlike
2581
    /// `upgrade` which takes the guard by value.
2582
    #[track_caller]
2583
    pub fn with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret {
2584
        unsafe {
2585
            self.rwlock.raw.upgrade();
2586
        }
2587
2588
        // Safety: We just upgraded the lock, so we have mutable access to the data.
2589
        // This will restore the state the lock was in at the start of the function.
2590
        defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2591
2592
        // Safety: We upgraded the lock, so we have mutable access to the data.
2593
        // When this function returns, whether by drop or panic,
2594
        // the drop guard will downgrade it back to an upgradeable lock.
2595
        f(unsafe { &mut *self.rwlock.data.get() })
2596
    }
2597
2598
    /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock.
2599
    ///
2600
    /// If the access could not be granted at this time, then `None` is returned.
2601
    ///
2602
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2603
    /// and finally downgrades the lock back to an upgradable read lock.
2604
    /// The closure's return value is wrapped in `Some` and returned.
2605
    ///
2606
    /// This function only requires a mutable reference to the guard, unlike
2607
    /// `try_upgrade` which takes the guard by value.
2608
    #[track_caller]
2609
    pub fn try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret> {
2610
        if unsafe { self.rwlock.raw.try_upgrade() } {
2611
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2612
            // This will restore the state the lock was in at the start of the function.
2613
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2614
2615
            // Safety: We upgraded the lock, so we have mutable access to the data.
2616
            // When this function returns, whether by drop or panic,
2617
            // the drop guard will downgrade it back to an upgradeable lock.
2618
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2619
        } else {
2620
            None
2621
        }
2622
    }
2623
}
2624
2625
#[cfg(feature = "arc_lock")]
2626
impl<R: RawRwLockUpgradeTimed, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
2627
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2628
    /// write lock, until a timeout is reached.
2629
    ///
2630
    /// If the access could not be granted before the timeout expires, then
2631
    /// the current guard is returned.
2632
    #[track_caller]
2633
    pub fn try_upgrade_for(
2634
        s: Self,
2635
        timeout: R::Duration,
2636
    ) -> Result<ArcRwLockWriteGuard<R, T>, Self> {
2637
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2638
        if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
2639
            // SAFETY: same as above
2640
            let s = ManuallyDrop::new(s);
2641
            let rwlock = unsafe { ptr::read(&s.rwlock) };
2642
2643
            Ok(ArcRwLockWriteGuard {
2644
                rwlock,
2645
                marker: PhantomData,
2646
            })
2647
        } else {
2648
            Err(s)
2649
        }
2650
    }
2651
2652
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2653
    /// write lock, until a timeout is reached.
2654
    ///
2655
    /// If the access could not be granted before the timeout expires, then
2656
    /// the current guard is returned.
2657
    #[inline]
2658
    #[track_caller]
2659
    pub fn try_upgrade_until(
2660
        s: Self,
2661
        timeout: R::Instant,
2662
    ) -> Result<ArcRwLockWriteGuard<R, T>, Self> {
2663
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2664
        if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
2665
            // SAFETY: same as above
2666
            let s = ManuallyDrop::new(s);
2667
            let rwlock = unsafe { ptr::read(&s.rwlock) };
2668
2669
            Ok(ArcRwLockWriteGuard {
2670
                rwlock,
2671
                marker: PhantomData,
2672
            })
2673
        } else {
2674
            Err(s)
2675
        }
2676
    }
2677
}
2678
2679
#[cfg(feature = "arc_lock")]
2680
impl<R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade, T: ?Sized>
2681
    ArcRwLockUpgradableReadGuard<R, T>
2682
{
2683
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2684
    /// write lock, until a timeout is reached.
2685
    ///
2686
    /// If the access could not be granted before the timeout expires, then
2687
    /// `None` is returned.
2688
    ///
2689
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2690
    /// and finally downgrades the lock back to an upgradable read lock.
2691
    /// The closure's return value is wrapped in `Some` and returned.
2692
    ///
2693
    /// This function only requires a mutable reference to the guard, unlike
2694
    /// `try_upgrade_for` which takes the guard by value.
2695
    #[track_caller]
2696
    pub fn try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>(
2697
        &mut self,
2698
        timeout: R::Duration,
2699
        f: F,
2700
    ) -> Option<Ret> {
2701
        if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } {
2702
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2703
            // This will restore the state the lock was in at the start of the function.
2704
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2705
2706
            // Safety: We upgraded the lock, so we have mutable access to the data.
2707
            // When this function returns, whether by drop or panic,
2708
            // the drop guard will downgrade it back to an upgradable lock.
2709
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2710
        } else {
2711
            None
2712
        }
2713
    }
2714
2715
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2716
    /// write lock, until a timeout is reached.
2717
    ///
2718
    /// If the access could not be granted before the timeout expires, then
2719
    /// `None` is returned.
2720
    ///
2721
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2722
    /// and finally downgrades the lock back to an upgradable read lock.
2723
    /// The closure's return value is wrapped in `Some` and returned.
2724
    ///
2725
    /// This function only requires a mutable reference to the guard, unlike
2726
    /// `try_upgrade_until` which takes the guard by value.
2727
    #[track_caller]
2728
    pub fn try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>(
2729
        &mut self,
2730
        timeout: R::Instant,
2731
        f: F,
2732
    ) -> Option<Ret> {
2733
        if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } {
2734
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2735
            // This will restore the state the lock was in at the start of the function.
2736
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2737
2738
            // Safety: We upgraded the lock, so we have mutable access to the data.
2739
            // When this function returns, whether by drop or panic,
2740
            // the drop guard will downgrade it back to an upgradable lock.
2741
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2742
        } else {
2743
            None
2744
        }
2745
    }
2746
}
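// Illustrative sketch, not part of rwlock.rs: the closure-based timed upgrade
// above, which needs only `&mut` on the guard and so can be retried. Assumes
// the `arc_lock` feature; `try_record` is an invented name.

use std::sync::Arc;

use lock_api::{RawRwLockUpgradeDowngrade, RawRwLockUpgradeTimed, RwLock};

fn try_record<R>(lock: &Arc<RwLock<R, Vec<u64>>>, value: u64, timeout: R::Duration) -> bool
where
    R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade,
{
    let mut guard = lock.upgradable_read_arc();
    // Some(()) if the upgrade happened within `timeout`; in both cases the
    // guard still holds an upgradable read lock until it is dropped at the
    // end of this function.
    guard.try_with_upgraded_for(timeout, |v| v.push(value)).is_some()
}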
2747
2748
#[cfg(feature = "arc_lock")]
2749
impl<R: RawRwLockUpgrade, T: ?Sized> Deref for ArcRwLockUpgradableReadGuard<R, T> {
2750
    type Target = T;
2751
    #[inline]
2752
    fn deref(&self) -> &T {
2753
        unsafe { &*self.rwlock.data.get() }
2754
    }
2755
}
2756
2757
#[cfg(feature = "arc_lock")]
2758
impl<R: RawRwLockUpgrade, T: ?Sized> Drop for ArcRwLockUpgradableReadGuard<R, T> {
2759
    #[inline]
2760
    fn drop(&mut self) {
2761
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2762
        unsafe {
2763
            self.rwlock.raw.unlock_upgradable();
2764
        }
2765
    }
2766
}
2767
2768
#[cfg(feature = "arc_lock")]
2769
impl<R: RawRwLockUpgrade, T: fmt::Debug + ?Sized> fmt::Debug
2770
    for ArcRwLockUpgradableReadGuard<R, T>
2771
{
2772
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2773
        fmt::Debug::fmt(&**self, f)
2774
    }
2775
}
2776
2777
#[cfg(feature = "arc_lock")]
2778
impl<R: RawRwLockUpgrade, T: fmt::Display + ?Sized> fmt::Display
2779
    for ArcRwLockUpgradableReadGuard<R, T>
2780
{
2781
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2782
        (**self).fmt(f)
2783
    }
2784
}
2785
2786
/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
2787
/// subfield of the protected data.
2788
///
2789
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
2790
/// former doesn't support temporarily unlocking and re-locking, since that
2791
/// could introduce soundness issues if the locked object is modified by another
2792
/// thread.
2793
#[clippy::has_significant_drop]
2794
#[must_use = "if unused the RwLock will immediately unlock"]
2795
pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
2796
    raw: &'a R,
2797
    data: *const T,
2798
    marker: PhantomData<&'a T>,
2799
}
2800
2801
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
2802
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
2803
    R::GuardMarker: Send
2804
{
2805
}
2806
2807
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
2808
    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
2809
    ///
2810
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
2811
    /// in already locked the data.
2812
    ///
2813
    /// This is an associated function that needs to be
2814
    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
2815
    /// the same name on the contents of the locked data.
2816
    #[inline]
2817
0
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
2818
0
    where
2819
0
        F: FnOnce(&T) -> &U,
2820
    {
2821
0
        let raw = s.raw;
2822
0
        let data = f(unsafe { &*s.data });
2823
0
        mem::forget(s);
2824
0
        MappedRwLockReadGuard {
2825
0
            raw,
2826
0
            data,
2827
0
            marker: PhantomData,
2828
0
        }
2829
0
    }
2830
2831
    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
2832
    /// locked data. The original guard is returned if the closure returns `None`.
2833
    ///
2834
    /// The `RwLock` remains locked for the whole call, as the `MappedRwLockReadGuard`
2835
    /// passed in already locked the data.
2836
    ///
2837
    /// This is an associated function that needs to be
2838
    /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of
2839
    /// the same name on the contents of the locked data.
2840
    #[inline]
2841
0
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
2842
0
    where
2843
0
        F: FnOnce(&T) -> Option<&U>,
2844
    {
2845
0
        let raw = s.raw;
2846
0
        let data = match f(unsafe { &*s.data }) {
2847
0
            Some(data) => data,
2848
0
            None => return Err(s),
2849
        };
2850
0
        mem::forget(s);
2851
0
        Ok(MappedRwLockReadGuard {
2852
0
            raw,
2853
0
            data,
2854
0
            marker: PhantomData,
2855
0
        })
2856
0
    }
2857
2858
    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
2859
    /// locked data. The original guard is returned alongside arbitrary user data
2860
    /// if the closure returns `Err`.
2861
    ///
2862
    /// The `RwLock` remains locked for the whole call, as the `MappedRwLockReadGuard`
2863
    /// passed in already locked the data.
2864
    ///
2865
    /// This is an associated function that needs to be
2866
    /// used as `MappedRwLockReadGuard::try_map_or_else(...)`. A method would interfere with methods of
2867
    /// the same name on the contents of the locked data.
2868
    #[inline]
2869
0
    pub fn try_map_or_else<U: ?Sized, F, E>(
2870
0
        s: Self,
2871
0
        f: F,
2872
0
    ) -> Result<MappedRwLockReadGuard<'a, R, U>, (Self, E)>
2873
0
    where
2874
0
        F: FnOnce(&T) -> Result<&U, E>,
2875
    {
2876
0
        let raw = s.raw;
2877
0
        let data = match f(unsafe { &*s.data }) {
2878
0
            Ok(data) => data,
2879
0
            Err(e) => return Err((s, e)),
2880
        };
2881
0
        mem::forget(s);
2882
0
        Ok(MappedRwLockReadGuard {
2883
0
            raw,
2884
0
            data,
2885
0
            marker: PhantomData,
2886
0
        })
2887
0
    }
2888
}
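// Illustrative sketch, not part of rwlock.rs: mapping a read guard down to a
// component of the data. `Config`, `name_guard` and `first_byte` are invented
// names; `RwLockReadGuard::map`, defined earlier in this file, supplies the
// initial mapped guard.

use lock_api::{MappedRwLockReadGuard, RawRwLock, RwLock, RwLockReadGuard};

struct Config {
    name: String,
}

// Narrow a read guard to the `name` field; the shared lock stays held by the
// returned mapped guard.
fn name_guard<R: RawRwLock>(lock: &RwLock<R, Config>) -> MappedRwLockReadGuard<'_, R, str> {
    RwLockReadGuard::map(lock.read(), |c| c.name.as_str())
}

// Narrow an already-mapped guard further; `try_map` hands the original guard
// back in `Err` if the closure returns `None`.
fn first_byte<'a, R: RawRwLock>(
    guard: MappedRwLockReadGuard<'a, R, str>,
) -> Result<MappedRwLockReadGuard<'a, R, u8>, MappedRwLockReadGuard<'a, R, str>> {
    MappedRwLockReadGuard::try_map(guard, |s| s.as_bytes().first())
}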
2889
2890
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
2891
    /// Unlocks the `RwLock` using a fair unlock protocol.
2892
    ///
2893
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
2894
    /// the `RwLock` before another has the chance to acquire the lock, even if
2895
    /// that thread has been blocked on the `RwLock` for a long time. This is
2896
    /// the default because it allows much higher throughput as it avoids
2897
    /// forcing a context switch on every `RwLock` unlock. This can result in one
2898
    /// thread acquiring a `RwLock` many more times than other threads.
2899
    ///
2900
    /// However in some cases it can be beneficial to ensure fairness by forcing
2901
    /// the lock to pass on to a waiting thread if there is one. This is done by
2902
    /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
2903
    #[inline]
2904
    #[track_caller]
2905
0
    pub fn unlock_fair(s: Self) {
2906
        // Safety: A MappedRwLockReadGuard always holds a shared lock.
2907
0
        unsafe {
2908
0
            s.raw.unlock_shared_fair();
2909
0
        }
2910
0
        mem::forget(s);
2911
0
    }
2912
}
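// Illustrative sketch, not part of rwlock.rs: releasing a mapped read guard
// with the fair protocol instead of a plain drop; `release_fairly` is an
// invented name.

use lock_api::{MappedRwLockReadGuard, RawRwLockFair};

fn release_fairly<R: RawRwLockFair, T: ?Sized>(guard: MappedRwLockReadGuard<'_, R, T>) {
    // Associated-function call, since a method could clash with an
    // `unlock_fair` method on T.
    MappedRwLockReadGuard::unlock_fair(guard);
}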
2913
2914
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> {
2915
    type Target = T;
2916
    #[inline]
2917
0
    fn deref(&self) -> &T {
2918
0
        unsafe { &*self.data }
2919
0
    }
2920
}
2921
2922
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
2923
    #[inline]
2924
0
    fn drop(&mut self) {
2925
        // Safety: A MappedRwLockReadGuard always holds a shared lock.
2926
0
        unsafe {
2927
0
            self.raw.unlock_shared();
2928
0
        }
2929
0
    }
2930
}
2931
2932
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
2933
    for MappedRwLockReadGuard<'a, R, T>
2934
{
2935
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2936
0
        fmt::Debug::fmt(&**self, f)
2937
0
    }
2938
}
2939
2940
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
2941
    for MappedRwLockReadGuard<'a, R, T>
2942
{
2943
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2944
0
        (**self).fmt(f)
2945
0
    }
2946
}
2947
2948
#[cfg(feature = "owning_ref")]
2949
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
2950
    for MappedRwLockReadGuard<'a, R, T>
2951
{
2952
}
2953
2954
/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
2955
/// subfield of the protected data.
2956
///
2957
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
2958
/// former doesn't support temporarily unlocking and re-locking, since that
2959
/// could introduce soundness issues if the locked object is modified by another
2960
/// thread.
2961
#[clippy::has_significant_drop]
2962
#[must_use = "if unused the RwLock will immediately unlock"]
2963
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
2964
    raw: &'a R,
2965
    data: *mut T,
2966
    marker: PhantomData<&'a mut T>,
2967
}
2968
2969
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
2970
    for MappedRwLockWriteGuard<'a, R, T>
2971
{
2972
}
2973
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Send + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
2974
    R::GuardMarker: Send
2975
{
2976
}
2977
2978
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
2979
    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
2980
    ///
2981
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
2982
    /// in already locked the data.
2983
    ///
2984
    /// This is an associated function that needs to be
2985
    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
2986
    /// the same name on the contents of the locked data.
2987
    #[inline]
2988
0
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
2989
0
    where
2990
0
        F: FnOnce(&mut T) -> &mut U,
2991
    {
2992
0
        let raw = s.raw;
2993
0
        let data = f(unsafe { &mut *s.data });
2994
0
        mem::forget(s);
2995
0
        MappedRwLockWriteGuard {
2996
0
            raw,
2997
0
            data,
2998
0
            marker: PhantomData,
2999
0
        }
3000
0
    }
3001
3002
    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
3003
    /// locked data. The original guard is returned if the closure returns `None`.
3004
    ///
3005
    /// The `RwLock` remains locked for the whole call, as the `MappedRwLockWriteGuard`
3006
    /// passed in already locked the data.
3007
    ///
3008
    /// This is an associated function that needs to be
3009
    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
3010
    /// the same name on the contents of the locked data.
3011
    #[inline]
3012
0
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
3013
0
    where
3014
0
        F: FnOnce(&mut T) -> Option<&mut U>,
3015
    {
3016
0
        let raw = s.raw;
3017
0
        let data = match f(unsafe { &mut *s.data }) {
3018
0
            Some(data) => data,
3019
0
            None => return Err(s),
3020
        };
3021
0
        mem::forget(s);
3022
0
        Ok(MappedRwLockWriteGuard {
3023
0
            raw,
3024
0
            data,
3025
0
            marker: PhantomData,
3026
0
        })
3027
0
    }
3028
3029
    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
3030
    /// locked data. The original guard is returned alongside arbitrary user data
3031
    /// if the closure returns `Err`.
3032
    ///
3033
    /// The `RwLock` remains locked for the whole call, as the `MappedRwLockWriteGuard`
3034
    /// passed in already locked the data.
3035
    ///
3036
    /// This is an associated function that needs to be
3037
    /// used as `MappedRwLockWriteGuard::try_map_or_err(...)`. A method would interfere with methods of
3038
    /// the same name on the contents of the locked data.
3039
    #[inline]
3040
0
    pub fn try_map_or_err<U: ?Sized, F, E>(
3041
0
        s: Self,
3042
0
        f: F,
3043
0
    ) -> Result<MappedRwLockWriteGuard<'a, R, U>, (Self, E)>
3044
0
    where
3045
0
        F: FnOnce(&mut T) -> Result<&mut U, E>,
3046
    {
3047
0
        let raw = s.raw;
3048
0
        let data = match f(unsafe { &mut *s.data }) {
3049
0
            Ok(data) => data,
3050
0
            Err(e) => return Err((s, e)),
3051
        };
3052
0
        mem::forget(s);
3053
0
        Ok(MappedRwLockWriteGuard {
3054
0
            raw,
3055
0
            data,
3056
0
            marker: PhantomData,
3057
0
        })
3058
0
    }
3059
}
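// Illustrative sketch, not part of rwlock.rs: the write-side counterpart of
// the mapping API. `Counters`, `hits_mut` and `misses_if_any` are invented
// names; `RwLockWriteGuard::map`, defined earlier in this file, supplies the
// initial mapped guard.

use lock_api::{MappedRwLockWriteGuard, RawRwLock, RwLock, RwLockWriteGuard};

struct Counters {
    hits: u64,
    misses: u64,
}

// Narrow a write guard to the `hits` field; the exclusive lock stays held by
// the returned mapped guard.
fn hits_mut<R: RawRwLock>(lock: &RwLock<R, Counters>) -> MappedRwLockWriteGuard<'_, R, u64> {
    RwLockWriteGuard::map(lock.write(), |c| &mut c.hits)
}

// Narrow further only when `misses` is non-zero; otherwise the original
// mapped guard is returned untouched in `Err`.
fn misses_if_any<'a, R: RawRwLock>(
    guard: MappedRwLockWriteGuard<'a, R, Counters>,
) -> Result<MappedRwLockWriteGuard<'a, R, u64>, MappedRwLockWriteGuard<'a, R, Counters>> {
    MappedRwLockWriteGuard::try_map(guard, |c| {
        if c.misses != 0 {
            Some(&mut c.misses)
        } else {
            None
        }
    })
}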
3060
3061
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
3062
    /// Unlocks the `RwLock` using a fair unlock protocol.
3063
    ///
3064
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
3065
    /// the `RwLock` before another has the chance to acquire the lock, even if
3066
    /// that thread has been blocked on the `RwLock` for a long time. This is
3067
    /// the default because it allows much higher throughput as it avoids
3068
    /// forcing a context switch on every `RwLock` unlock. This can result in one
3069
    /// thread acquiring a `RwLock` many more times than other threads.
3070
    ///
3071
    /// However in some cases it can be beneficial to ensure fairness by forcing
3072
    /// the lock to pass on to a waiting thread if there is one. This is done by
3073
    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
3074
    #[inline]
3075
    #[track_caller]
3076
0
    pub fn unlock_fair(s: Self) {
3077
        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
3078
0
        unsafe {
3079
0
            s.raw.unlock_exclusive_fair();
3080
0
        }
3081
0
        mem::forget(s);
3082
0
    }
3083
}
3084
3085
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
3086
    type Target = T;
3087
    #[inline]
3088
0
    fn deref(&self) -> &T {
3089
0
        unsafe { &*self.data }
3090
0
    }
3091
}
3092
3093
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
3094
    #[inline]
3095
0
    fn deref_mut(&mut self) -> &mut T {
3096
0
        unsafe { &mut *self.data }
3097
0
    }
3098
}
3099
3100
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
3101
    #[inline]
3102
0
    fn drop(&mut self) {
3103
        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
3104
0
        unsafe {
3105
0
            self.raw.unlock_exclusive();
3106
0
        }
3107
0
    }
3108
}
3109
3110
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
3111
    for MappedRwLockWriteGuard<'a, R, T>
3112
{
3113
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3114
0
        fmt::Debug::fmt(&**self, f)
3115
0
    }
3116
}
3117
3118
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
3119
    for MappedRwLockWriteGuard<'a, R, T>
3120
{
3121
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3122
0
        (**self).fmt(f)
3123
0
    }
3124
}
3125
3126
#[cfg(feature = "owning_ref")]
3127
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
3128
    for MappedRwLockWriteGuard<'a, R, T>
3129
{
3130
}