Coverage Report

Created: 2025-10-29 07:05

/rust/registry/src/index.crates.io-1949cf8c6b5b557f/lock_api-0.4.12/src/rwlock.rs
Line
Count
Source
1
// Copyright 2016 Amanieu d'Antras
2
//
3
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5
// http://opensource.org/licenses/MIT>, at your option. This file may not be
6
// copied, modified, or distributed except according to those terms.
7
8
use core::cell::UnsafeCell;
9
use core::fmt;
10
use core::marker::PhantomData;
11
use core::mem;
12
use core::ops::{Deref, DerefMut};
13
14
#[cfg(feature = "arc_lock")]
15
use alloc::sync::Arc;
16
#[cfg(feature = "arc_lock")]
17
use core::mem::ManuallyDrop;
18
#[cfg(feature = "arc_lock")]
19
use core::ptr;
20
21
#[cfg(feature = "owning_ref")]
22
use owning_ref::StableAddress;
23
24
#[cfg(feature = "serde")]
25
use serde::{Deserialize, Deserializer, Serialize, Serializer};
26
27
/// Basic operations for a reader-writer lock.
28
///
29
/// Types implementing this trait can be used by `RwLock` to form a safe and
30
/// fully-functioning `RwLock` type.
31
///
32
/// # Safety
33
///
34
/// Implementations of this trait must ensure that the `RwLock` is actually
35
/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
36
/// lock exists, and a shared lock can't be acquired while an exclusive lock
37
/// exists.
38
pub unsafe trait RawRwLock {
39
    /// Initial value for an unlocked `RwLock`.
40
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
41
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
42
    #[allow(clippy::declare_interior_mutable_const)]
43
    const INIT: Self;
44
45
    /// Marker type which determines whether a lock guard should be `Send`. Use
46
    /// one of the `GuardSend` or `GuardNoSend` helper types here.
47
    type GuardMarker;
48
49
    /// Acquires a shared lock, blocking the current thread until it is able to do so.
50
    fn lock_shared(&self);
51
52
    /// Attempts to acquire a shared lock without blocking.
53
    fn try_lock_shared(&self) -> bool;
54
55
    /// Releases a shared lock.
56
    ///
57
    /// # Safety
58
    ///
59
    /// This method may only be called if a shared lock is held in the current context.
60
    unsafe fn unlock_shared(&self);
61
62
    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
63
    fn lock_exclusive(&self);
64
65
    /// Attempts to acquire an exclusive lock without blocking.
66
    fn try_lock_exclusive(&self) -> bool;
67
68
    /// Releases an exclusive lock.
69
    ///
70
    /// # Safety
71
    ///
72
    /// This method may only be called if an exclusive lock is held in the current context.
73
    unsafe fn unlock_exclusive(&self);
74
75
    /// Checks if this `RwLock` is currently locked in any way.
76
    #[inline]
77
0
    fn is_locked(&self) -> bool {
78
0
        let acquired_lock = self.try_lock_exclusive();
79
0
        if acquired_lock {
80
            // Safety: A lock was successfully acquired above.
81
0
            unsafe {
82
0
                self.unlock_exclusive();
83
0
            }
84
0
        }
85
0
        !acquired_lock
86
0
    }
87
88
    /// Checks if this `RwLock` is currently exclusively locked.
89
0
    fn is_locked_exclusive(&self) -> bool {
90
0
        let acquired_lock = self.try_lock_shared();
91
0
        if acquired_lock {
92
            // Safety: A shared lock was successfully acquired above.
93
0
            unsafe {
94
0
                self.unlock_shared();
95
0
            }
96
0
        }
97
0
        !acquired_lock
98
0
    }
99
}
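
The trait above is the extension point of lock_api: any type that upholds the safety contract can back the generic RwLock. Below is a minimal, illustrative sketch of a spin-based implementation; the RawSpinRwLock name and the spinning strategy are my own assumptions (parking_lot provides the production implementation used throughout this report) and the sketch ignores fairness and reader-count overflow.

use std::sync::atomic::{AtomicUsize, Ordering};
use lock_api::GuardSend;

// usize::MAX marks an exclusive lock; any other value is the reader count.
const WRITER: usize = usize::MAX;

pub struct RawSpinRwLock {
    state: AtomicUsize,
}

unsafe impl lock_api::RawRwLock for RawSpinRwLock {
    const INIT: Self = RawSpinRwLock { state: AtomicUsize::new(0) };
    type GuardMarker = GuardSend;

    fn lock_shared(&self) {
        while !self.try_lock_shared() {
            std::hint::spin_loop();
        }
    }

    fn try_lock_shared(&self) -> bool {
        let s = self.state.load(Ordering::Relaxed);
        s != WRITER
            && self
                .state
                .compare_exchange(s, s + 1, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
    }

    unsafe fn unlock_shared(&self) {
        self.state.fetch_sub(1, Ordering::Release);
    }

    fn lock_exclusive(&self) {
        while !self.try_lock_exclusive() {
            std::hint::spin_loop();
        }
    }

    fn try_lock_exclusive(&self) -> bool {
        self.state
            .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock_exclusive(&self) {
        self.state.store(0, Ordering::Release);
    }
}

// The generic lock defined later in this file can then be instantiated with it:
pub type SpinRwLock<T> = lock_api::RwLock<RawSpinRwLock, T>;
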
100
101
/// Additional methods for `RwLock`s which support fair unlocking.
102
///
103
/// Fair unlocking means that a lock is handed directly over to the next waiting
104
/// thread if there is one, without giving other threads the opportunity to
105
/// "steal" the lock in the meantime. This is typically slower than unfair
106
/// unlocking, but may be necessary in certain circumstances.
107
pub unsafe trait RawRwLockFair: RawRwLock {
108
    /// Releases a shared lock using a fair unlock protocol.
109
    ///
110
    /// # Safety
111
    ///
112
    /// This method may only be called if a shared lock is held in the current context.
113
    unsafe fn unlock_shared_fair(&self);
114
115
    /// Releases an exclusive lock using a fair unlock protocol.
116
    ///
117
    /// # Safety
118
    ///
119
    /// This method may only be called if an exclusive lock is held in the current context.
120
    unsafe fn unlock_exclusive_fair(&self);
121
122
    /// Temporarily yields a shared lock to a waiting thread if there is one.
123
    ///
124
    /// This method is functionally equivalent to calling `unlock_shared_fair` followed
125
    /// by `lock_shared`; however, it can be much more efficient in the case where there
126
    /// are no waiting threads.
127
    ///
128
    /// # Safety
129
    ///
130
    /// This method may only be called if a shared lock is held in the current context.
131
0
    unsafe fn bump_shared(&self) {
132
0
        self.unlock_shared_fair();
133
0
        self.lock_shared();
134
0
    }
135
136
    /// Temporarily yields an exclusive lock to a waiting thread if there is one.
137
    ///
138
    /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
139
    /// by `lock_exclusive`; however, it can be much more efficient in the case where there
140
    /// are no waiting threads.
141
    ///
142
    /// # Safety
143
    ///
144
    /// This method may only be called if an exclusive lock is held in the current context.
145
0
    unsafe fn bump_exclusive(&self) {
146
0
        self.unlock_exclusive_fair();
147
0
        self.lock_exclusive();
148
0
    }
149
}
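
At the RwLock level these raw methods surface through the guards. A sketch assuming the parking_lot crate (whose raw lock implements RawRwLockFair); the function names are hypothetical.

use parking_lot::{RwLock, RwLockWriteGuard};

fn fair_handoff(lock: &RwLock<Vec<u32>>) {
    let mut guard = lock.write();
    guard.push(1);
    // Hand the lock directly to the next waiting thread instead of letting
    // another thread "steal" it (maps onto unlock_exclusive_fair).
    RwLockWriteGuard::unlock_fair(guard);
}

fn long_write(lock: &RwLock<Vec<u32>>) {
    let mut guard = lock.write();
    for i in 0..1_000u32 {
        guard.push(i);
        if i % 100 == 0 {
            // Temporarily yield to waiting threads (maps onto bump_exclusive).
            RwLockWriteGuard::bump(&mut guard);
        }
    }
}
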
150
151
/// Additional methods for `RwLock`s which support atomically downgrading an
152
/// exclusive lock to a shared lock.
153
pub unsafe trait RawRwLockDowngrade: RawRwLock {
154
    /// Atomically downgrades an exclusive lock into a shared lock without
155
    /// allowing any thread to take an exclusive lock in the meantime.
156
    ///
157
    /// # Safety
158
    ///
159
    /// This method may only be called if an exclusive lock is held in the current context.
160
    unsafe fn downgrade(&self);
161
}
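
A guard-level sketch of the downgrade path, assuming parking_lot; the init_then_share helper is made up for illustration.

use parking_lot::{RwLock, RwLockWriteGuard};

fn init_then_share(lock: &RwLock<Option<String>>) -> usize {
    let mut w = lock.write();
    if w.is_none() {
        *w = Some("configured".to_string());
    }
    // Atomically become a reader; no other writer can acquire the lock in between.
    let r = RwLockWriteGuard::downgrade(w);
    r.as_ref().map(String::len).unwrap_or(0)
}
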
162
163
/// Additional methods for `RwLock`s which support locking with timeouts.
164
///
165
/// The `Duration` and `Instant` types are specified as associated types so that
166
/// this trait is usable even in `no_std` environments.
167
pub unsafe trait RawRwLockTimed: RawRwLock {
168
    /// Duration type used for `try_lock_for`.
169
    type Duration;
170
171
    /// Instant type used for `try_lock_until`.
172
    type Instant;
173
174
    /// Attempts to acquire a shared lock until a timeout is reached.
175
    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;
176
177
    /// Attempts to acquire a shared lock until a timeout is reached.
178
    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;
179
180
    /// Attempts to acquire an exclusive lock until a timeout is reached.
181
    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;
182
183
    /// Attempts to acquire an exclusive lock until a timeout is reached.
184
    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
185
}
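
With parking_lot, whose associated types are std::time's Duration and Instant, this trait backs try_read_for/try_write_for on the lock. A small sketch (hypothetical function name):

use std::time::Duration;
use parking_lot::RwLock;

fn sample(lock: &RwLock<u64>) -> Option<u64> {
    // Backed by RawRwLockTimed::try_lock_shared_for; None on timeout.
    lock.try_read_for(Duration::from_millis(50)).map(|guard| *guard)
}
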
186
187
/// Additional methods for `RwLock`s which support recursive read locks.
188
///
189
/// These are guaranteed to succeed without blocking if
190
/// another read lock is held at the time of the call. This allows a thread
191
/// to recursively lock a `RwLock`. However using this method can cause
192
/// writers to starve since readers no longer block if a writer is waiting
193
/// for the lock.
194
pub unsafe trait RawRwLockRecursive: RawRwLock {
195
    /// Acquires a shared lock without deadlocking in case of a recursive lock.
196
    fn lock_shared_recursive(&self);
197
198
    /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock.
199
    fn try_lock_shared_recursive(&self) -> bool;
200
}
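
A sketch of why the recursive variant exists, assuming parking_lot (which implements RawRwLockRecursive): with plain read, the second acquisition below could deadlock if a writer queued up between the two reads.

use parking_lot::RwLock;

fn outer(lock: &RwLock<u32>) -> u32 {
    let a = lock.read_recursive();
    // Re-entering on the same thread is guaranteed not to block, even if a
    // writer is waiting; plain `read` makes no such guarantee.
    let b = lock.read_recursive();
    *a + *b
}
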
201
202
/// Additional methods for `RwLock`s which support recursive read locks and timeouts.
203
pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
204
    /// Attempts to acquire a shared lock until a timeout is reached, without
205
    /// deadlocking in case of a recursive lock.
206
    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;
207
208
    /// Attempts to acquire a shared lock until a timeout is reached, without
209
    /// deadlocking in case of a recursive lock.
210
    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
211
}
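
Combined with timeouts, the same guarantee is available through try_read_recursive_for (sketch, assuming parking_lot):

use std::time::Duration;
use parking_lot::RwLock;

fn poll(lock: &RwLock<u32>) -> Option<u32> {
    // Backed by RawRwLockRecursiveTimed::try_lock_shared_recursive_for.
    lock.try_read_recursive_for(Duration::from_millis(10))
        .map(|guard| *guard)
}
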
212
213
/// Additional methods for `RwLock`s which support atomically upgrading a shared
214
/// lock to an exclusive lock.
215
///
216
/// This requires acquiring a special "upgradable read lock" instead of a
217
/// normal shared lock. There may only be one upgradable lock at any time,
218
/// otherwise deadlocks could occur when upgrading.
219
pub unsafe trait RawRwLockUpgrade: RawRwLock {
220
    /// Acquires an upgradable lock, blocking the current thread until it is able to do so.
221
    fn lock_upgradable(&self);
222
223
    /// Attempts to acquire an upgradable lock without blocking.
224
    fn try_lock_upgradable(&self) -> bool;
225
226
    /// Releases an upgradable lock.
227
    ///
228
    /// # Safety
229
    ///
230
    /// This method may only be called if an upgradable lock is held in the current context.
231
    unsafe fn unlock_upgradable(&self);
232
233
    /// Upgrades an upgradable lock to an exclusive lock.
234
    ///
235
    /// # Safety
236
    ///
237
    /// This method may only be called if an upgradable lock is held in the current context.
238
    unsafe fn upgrade(&self);
239
240
    /// Attempts to upgrade an upgradable lock to an exclusive lock without
241
    /// blocking.
242
    ///
243
    /// # Safety
244
    ///
245
    /// This method may only be called if an upgradable lock is held in the current context.
246
    unsafe fn try_upgrade(&self) -> bool;
247
}
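
The upgradable protocol appears in the public API as upgradable_read. A sketch assuming parking_lot; the get_or_insert helper is hypothetical.

use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn get_or_insert(lock: &RwLock<Option<u64>>, fallback: u64) -> u64 {
    // Only one upgradable guard can exist at a time, so the upgrade below
    // cannot deadlock against another upgrader.
    let guard = lock.upgradable_read();
    if let Some(v) = *guard {
        return v;
    }
    let mut writer = RwLockUpgradableReadGuard::upgrade(guard);
    *writer = Some(fallback);
    fallback
}
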
248
249
/// Additional methods for `RwLock`s which support upgradable locks and fair
250
/// unlocking.
251
pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
252
    /// Releases an upgradable lock using a fair unlock protocol.
253
    ///
254
    /// # Safety
255
    ///
256
    /// This method may only be called if an upgradable lock is held in the current context.
257
    unsafe fn unlock_upgradable_fair(&self);
258
259
    /// Temporarily yields an upgradable lock to a waiting thread if there is one.
260
    ///
261
    /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
262
    /// by `lock_upgradable`; however, it can be much more efficient in the case where there
263
    /// are no waiting threads.
264
    ///
265
    /// # Safety
266
    ///
267
    /// This method may only be called if an upgradable lock is held in the current context.
268
0
    unsafe fn bump_upgradable(&self) {
269
0
        self.unlock_upgradable_fair();
270
0
        self.lock_upgradable();
271
0
    }
272
}
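
A small sketch of the fair path for upgradable guards, assuming parking_lot:

use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn inspect_fairly(lock: &RwLock<u32>) -> u32 {
    let guard = lock.upgradable_read();
    let value = *guard;
    // Fair release: hand the lock to the next waiter rather than racing for it
    // (maps onto unlock_upgradable_fair).
    RwLockUpgradableReadGuard::unlock_fair(guard);
    value
}
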
273
274
/// Additional methods for `RwLock`s which support upgradable locks and lock
275
/// downgrading.
276
pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
277
    /// Downgrades an upgradable lock to a shared lock.
278
    ///
279
    /// # Safety
280
    ///
281
    /// This method may only be called if an upgradable lock is held in the current context.
282
    unsafe fn downgrade_upgradable(&self);
283
284
    /// Downgrades an exclusive lock to an upgradable lock.
285
    ///
286
    /// # Safety
287
    ///
288
    /// This method may only be called if an exclusive lock is held in the current context.
289
    unsafe fn downgrade_to_upgradable(&self);
290
}
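
A guard-level sketch of both downgrade directions, assuming parking_lot:

use parking_lot::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};

fn write_then_watch(lock: &RwLock<u32>) {
    let mut w = lock.write();
    *w += 1;
    // Exclusive -> upgradable (maps onto downgrade_to_upgradable) ...
    let u = RwLockWriteGuard::downgrade_to_upgradable(w);
    // ... and upgradable -> shared (maps onto downgrade_upgradable).
    let r = RwLockUpgradableReadGuard::downgrade(u);
    assert!(*r >= 1);
}
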
291
292
/// Additional methods for `RwLock`s which support upgradable locks and locking
293
/// with timeouts.
294
pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
295
    /// Attempts to acquire an upgradable lock until a timeout is reached.
296
    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;
297
298
    /// Attempts to acquire an upgradable lock until a timeout is reached.
299
    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;
300
301
    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
302
    /// timeout is reached.
303
    ///
304
    /// # Safety
305
    ///
306
    /// This method may only be called if an upgradable lock is held in the current context.
307
    unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
308
309
    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
310
    /// timeout is reached.
311
    ///
312
    /// # Safety
313
    ///
314
    /// This method may only be called if an upgradable lock is held in the current context.
315
    unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
316
}
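
Timed acquisition of the upgradable lock, sketched against parking_lot (hypothetical function name):

use std::time::Duration;
use parking_lot::RwLock;

fn peek_with_timeout(lock: &RwLock<u32>) -> Option<u32> {
    // Backed by RawRwLockUpgradeTimed::try_lock_upgradable_for.
    lock.try_upgradable_read_for(Duration::from_millis(5))
        .map(|guard| *guard)
}
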
317
318
/// A reader-writer lock
319
///
320
/// This type of lock allows a number of readers or at most one writer at any
321
/// point in time. The write portion of this lock typically allows modification
322
/// of the underlying data (exclusive access) and the read portion of this lock
323
/// typically allows for read-only access (shared access).
324
///
325
/// The type parameter `T` represents the data that this lock protects. It is
326
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
327
/// allow concurrent access through readers. The RAII guards returned from the
328
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
329
/// to allow access to the contents of the lock.
330
pub struct RwLock<R, T: ?Sized> {
331
    raw: R,
332
    data: UnsafeCell<T>,
333
}
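
Basic usage of this generic lock through the parking_lot type alias (parking_lot::RwLock<T> is lock_api::RwLock<parking_lot::RawRwLock, T>, as the "Unexecuted instantiation" entries later in this report show). A minimal sketch:

use parking_lot::RwLock;

fn demo() {
    let lock = RwLock::new(5u32);
    {
        // Any number of readers may coexist.
        let r1 = lock.read();
        let r2 = lock.read();
        assert_eq!(*r1 + *r2, 10);
    } // read guards dropped here
    {
        // Exactly one writer, and only while no readers are active.
        let mut w = lock.write();
        *w += 1;
    }
    assert_eq!(*lock.read(), 6);
}
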
334
335
// Copied and modified from serde
336
#[cfg(feature = "serde")]
337
impl<R, T> Serialize for RwLock<R, T>
338
where
339
    R: RawRwLock,
340
    T: Serialize + ?Sized,
341
{
342
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
343
    where
344
        S: Serializer,
345
    {
346
        self.read().serialize(serializer)
347
    }
348
}
349
350
#[cfg(feature = "serde")]
351
impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
352
where
353
    R: RawRwLock,
354
    T: Deserialize<'de> + ?Sized,
355
{
356
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
357
    where
358
        D: Deserializer<'de>,
359
    {
360
        Deserialize::deserialize(deserializer).map(RwLock::new)
361
    }
362
}
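
With the serde feature enabled, the lock serializes transparently as its contents (the impl above takes a read lock to serialize). A sketch assuming parking_lot with its serde feature plus serde_json:

use parking_lot::RwLock;

fn round_trip() -> Result<(), serde_json::Error> {
    let lock = RwLock::new(vec![1u32, 2, 3]);
    // Serializes as the inner value, not as a wrapper object.
    let json = serde_json::to_string(&lock)?;
    assert_eq!(json, "[1,2,3]");
    let back: RwLock<Vec<u32>> = serde_json::from_str(&json)?;
    assert_eq!(*back.read(), vec![1u32, 2, 3]);
    Ok(())
}
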
363
364
unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
365
unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}
366
367
impl<R: RawRwLock, T> RwLock<R, T> {
368
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
369
    #[cfg(has_const_fn_trait_bound)]
370
    #[inline]
371
0
    pub const fn new(val: T) -> RwLock<R, T> {
372
0
        RwLock {
373
0
            data: UnsafeCell::new(val),
374
0
            raw: R::INIT,
375
0
        }
376
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, thread_local::ThreadLocal<core::cell::RefCell<lru::LruCache<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<ztunnel::drain::internal::DrainMode>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ztunnel::identity::manager::CertState>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, bool>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<hyper_util::client::legacy::connect::Connected>>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::common::frequency_sketch::FrequencySketch>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, tokio::runtime::time::ShardedWheel>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, usize>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::new
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::new
377
378
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
379
    #[cfg(not(has_const_fn_trait_bound))]
380
    #[inline]
381
    pub fn new(val: T) -> RwLock<R, T> {
382
        RwLock {
383
            data: UnsafeCell::new(val),
384
            raw: R::INIT,
385
        }
386
    }
387
388
    /// Consumes this `RwLock`, returning the underlying data.
389
    #[inline]
390
    #[allow(unused_unsafe)]
391
0
    pub fn into_inner(self) -> T {
392
0
        unsafe { self.data.into_inner() }
393
0
    }
394
}
395
396
impl<R, T> RwLock<R, T> {
397
    /// Creates a new instance of an `RwLock<T>` based on a pre-existing
398
    /// `RawRwLock<T>`.
399
    #[inline]
400
0
    pub const fn from_raw(raw_rwlock: R, val: T) -> RwLock<R, T> {
401
0
        RwLock {
402
0
            data: UnsafeCell::new(val),
403
0
            raw: raw_rwlock,
404
0
        }
405
0
    }
406
407
    /// Creates a new instance of an `RwLock<T>` based on a pre-existing
408
    /// `RawRwLock<T>`.
409
    ///
410
    /// This allows creating a `RwLock<T>` in a constant context on stable
411
    /// Rust.
412
    ///
413
    /// This method is a legacy alias for [`from_raw`](Self::from_raw).
414
    #[inline]
415
0
    pub const fn const_new(raw_rwlock: R, val: T) -> RwLock<R, T> {
416
0
        Self::from_raw(raw_rwlock, val)
417
0
    }
418
}
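
from_raw/const_new exist mainly so a lock can live in a static without lazy initialization. A sketch assuming parking_lot; the SETTINGS static is hypothetical.

use lock_api::RawRwLock as _; // brings the INIT associated const into scope
use parking_lot::{RawRwLock, RwLock};

// No lazy_static/OnceLock needed; the lock is built in a const context.
static SETTINGS: RwLock<Option<u32>> = RwLock::const_new(RawRwLock::INIT, None);

fn set(v: u32) {
    *SETTINGS.write() = Some(v);
}
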
419
420
impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
421
    /// Creates a new `RwLockReadGuard` without checking if the lock is held.
422
    ///
423
    /// # Safety
424
    ///
425
    /// This method must only be called if the thread logically holds a read lock.
426
    ///
427
    /// This function does not increment the read count of the lock. Calling this function when a
428
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
429
    /// with `mem::forget`.
430
    #[inline]
431
0
    pub unsafe fn make_read_guard_unchecked(&self) -> RwLockReadGuard<'_, R, T> {
432
0
        RwLockReadGuard {
433
0
            rwlock: self,
434
0
            marker: PhantomData,
435
0
        }
436
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, thread_local::ThreadLocal<core::cell::RefCell<lru::LruCache<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<ztunnel::drain::internal::DrainMode>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ztunnel::identity::manager::CertState>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<hyper_util::client::legacy::connect::Connected>>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::common::frequency_sketch::FrequencySketch>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, tokio::runtime::time::ShardedWheel>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::make_read_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::make_read_guard_unchecked
437
438
    /// Creates a new `RwLockReadGuard` without checking if the lock is held.
439
    ///
440
    /// # Safety
441
    ///
442
    /// This method must only be called if the thread logically holds a write lock.
443
    ///
444
    /// Calling this function when a guard has already been produced is undefined behaviour unless
445
    /// the guard was forgotten with `mem::forget`.
446
    #[inline]
447
0
    pub unsafe fn make_write_guard_unchecked(&self) -> RwLockWriteGuard<'_, R, T> {
448
0
        RwLockWriteGuard {
449
0
            rwlock: self,
450
0
            marker: PhantomData,
451
0
        }
452
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<ztunnel::drain::internal::DrainMode>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ztunnel::identity::manager::CertState>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, bool>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<hyper_util::client::legacy::connect::Connected>>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::common::frequency_sketch::FrequencySketch>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, tokio::runtime::time::ShardedWheel>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::make_write_guard_unchecked
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::make_write_guard_unchecked
453
454
    /// Locks this `RwLock` with shared read access, blocking the current thread
455
    /// until it can be acquired.
456
    ///
457
    /// The calling thread will be blocked until there are no more writers which
458
    /// hold the lock. There may be other readers currently inside the lock when
459
    /// this method returns.
460
    ///
461
    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
462
    /// the current thread already holds one may result in a deadlock.
463
    ///
464
    /// Returns an RAII guard which will release this thread's shared access
465
    /// once it is dropped.
466
    #[inline]
467
0
    pub fn read(&self) -> RwLockReadGuard<'_, R, T> {
468
0
        self.raw.lock_shared();
469
        // SAFETY: The lock is held, as required.
470
0
        unsafe { self.make_read_guard_unchecked() }
471
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, thread_local::ThreadLocal<core::cell::RefCell<lru::LruCache<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<ztunnel::drain::internal::DrainMode>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ztunnel::identity::manager::CertState>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<hyper_util::client::legacy::connect::Connected>>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::common::frequency_sketch::FrequencySketch>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, tokio::runtime::time::ShardedWheel>>::read
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::read
472
473
    /// Attempts to acquire this `RwLock` with shared read access.
474
    ///
475
    /// If the access could not be granted at this time, then `None` is returned.
476
    /// Otherwise, an RAII guard is returned which will release the shared access
477
    /// when it is dropped.
478
    ///
479
    /// This function does not block.
480
    #[inline]
481
0
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> {
482
0
        if self.raw.try_lock_shared() {
483
            // SAFETY: The lock is held, as required.
484
0
            Some(unsafe { self.make_read_guard_unchecked() })
485
        } else {
486
0
            None
487
        }
488
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::try_read
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::try_read
489
490
    /// Locks this `RwLock` with exclusive write access, blocking the current
491
    /// thread until it can be acquired.
492
    ///
493
    /// This function will not return while other writers or other readers
494
    /// currently have access to the lock.
495
    ///
496
    /// Returns an RAII guard which will drop the write access of this `RwLock`
497
    /// when dropped.
498
    #[inline]
499
0
    pub fn write(&self) -> RwLockWriteGuard<'_, R, T> {
500
0
        self.raw.lock_exclusive();
501
        // SAFETY: The lock is held, as required.
502
0
        unsafe { self.make_write_guard_unchecked() }
503
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<ztunnel::drain::internal::DrainMode>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ztunnel::identity::manager::CertState>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, bool>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, core::option::Option<hyper_util::client::legacy::connect::Connected>>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::common::frequency_sketch::FrequencySketch>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, tokio::runtime::time::ShardedWheel>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()>>::write
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _>>::write
504
505
    /// Attempts to lock this `RwLock` with exclusive write access.
506
    ///
507
    /// If the lock could not be acquired at this time, then `None` is returned.
508
    /// Otherwise, an RAII guard is returned which will release the lock when
509
    /// it is dropped.
510
    ///
511
    /// This function does not block.
512
    #[inline]
513
0
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> {
514
0
        if self.raw.try_lock_exclusive() {
515
            // SAFETY: The lock is held, as required.
516
0
            Some(unsafe { self.make_write_guard_unchecked() })
517
        } else {
518
0
            None
519
        }
520
0
    }
521
522
    /// Returns a mutable reference to the underlying data.
523
    ///
524
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
525
    /// take place---the mutable borrow statically guarantees no locks exist.
526
    #[inline]
527
0
    pub fn get_mut(&mut self) -> &mut T {
528
0
        unsafe { &mut *self.data.get() }
529
0
    }
530
531
    /// Checks whether this `RwLock` is currently locked in any way.
532
    #[inline]
533
0
    pub fn is_locked(&self) -> bool {
534
0
        self.raw.is_locked()
535
0
    }
536
537
    /// Checks if this `RwLock` is currently exclusively locked.
538
    #[inline]
539
0
    pub fn is_locked_exclusive(&self) -> bool {
540
0
        self.raw.is_locked_exclusive()
541
0
    }
542
543
    /// Forcibly unlocks a read lock.
544
    ///
545
    /// This is useful when combined with `mem::forget` to hold a lock without
546
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
547
    /// dealing with FFI.
548
    ///
549
    /// # Safety
550
    ///
551
    /// This method must only be called if the current thread logically owns a
552
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
553
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
554
    #[inline]
555
0
    pub unsafe fn force_unlock_read(&self) {
556
0
        self.raw.unlock_shared();
557
0
    }
558
559
    /// Forcibly unlocks a write lock.
560
    ///
561
    /// This is useful when combined with `mem::forget` to hold a lock without
562
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
563
    /// dealing with FFI.
564
    ///
565
    /// # Safety
566
    ///
567
    /// This method must only be called if the current thread logically owns a
568
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
569
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
570
    #[inline]
571
0
    pub unsafe fn force_unlock_write(&self) {
572
0
        self.raw.unlock_exclusive();
573
0
    }
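
The mem::forget pattern described by the two methods above, sketched with parking_lot (hypothetical function name):

use std::mem;
use parking_lot::RwLock;

fn hold_across_ffi(lock: &RwLock<u32>) {
    let guard = lock.read();
    // Keep the shared lock held with no guard object to keep alive
    // (e.g. while a raw pointer is handed to C code).
    mem::forget(guard);

    // ... foreign code runs here ...

    // SAFETY: this thread logically owns the read lock forgotten above.
    unsafe { lock.force_unlock_read() };
}
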
574
575
    /// Returns the underlying raw reader-writer lock object.
576
    ///
577
    /// Note that you will most likely need to import the `RawRwLock` trait from
578
    /// `lock_api` to be able to call functions on the raw
579
    /// reader-writer lock.
580
    ///
581
    /// # Safety
582
    ///
583
    /// This method is unsafe because it allows unlocking the lock while
584
    /// still holding a reference to a lock guard.
585
0
    pub unsafe fn raw(&self) -> &R {
586
0
        &self.raw
587
0
    }
588
589
    /// Returns a raw pointer to the underlying data.
590
    ///
591
    /// This is useful when combined with `mem::forget` to hold a lock without
592
    /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object
593
    /// alive, for example when dealing with FFI.
594
    ///
595
    /// # Safety
596
    ///
597
    /// You must ensure that there are no data races when dereferencing the
598
    /// returned pointer, for example if the current thread logically owns a
599
    /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded
600
    /// using `mem::forget`.
601
    #[inline]
602
0
    pub fn data_ptr(&self) -> *mut T {
603
0
        self.data.get()
604
0
    }
605
606
    /// Creates a new `RwLockReadGuard` without checking if the lock is held.
607
    ///
608
    /// # Safety
609
    ///
610
    /// This method must only be called if the thread logically holds a read lock.
611
    ///
612
    /// This function does not increment the read count of the lock. Calling this function when a
613
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
614
    /// with `mem::forget`.
615
    #[cfg(feature = "arc_lock")]
616
    #[inline]
617
    pub unsafe fn make_arc_read_guard_unchecked(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
618
        ArcRwLockReadGuard {
619
            rwlock: self.clone(),
620
            marker: PhantomData,
621
        }
622
    }
623
624
    /// Creates a new `RwLockWriteGuard` without checking if the lock is held.
625
    ///
626
    /// # Safety
627
    ///
628
    /// This method must only be called if the thread logically holds a write lock.
629
    ///
630
    /// Calling this function when a guard has already been produced is undefined behaviour unless
631
    /// the guard was forgotten with `mem::forget`.
632
    #[cfg(feature = "arc_lock")]
633
    #[inline]
634
    pub unsafe fn make_arc_write_guard_unchecked(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> {
635
        ArcRwLockWriteGuard {
636
            rwlock: self.clone(),
637
            marker: PhantomData,
638
        }
639
    }
640
641
    /// Locks this `RwLock` with read access, through an `Arc`.
642
    ///
643
    /// This method is similar to the `read` method; however, it requires the `RwLock` to be inside of an `Arc`
644
    /// and the resulting read guard has no lifetime requirements.
645
    #[cfg(feature = "arc_lock")]
646
    #[inline]
647
    pub fn read_arc(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
648
        self.raw.lock_shared();
649
        // SAFETY: locking guarantee is upheld
650
        unsafe { self.make_arc_read_guard_unchecked() }
651
    }
652
653
    /// Attempts to lock this `RwLock` with read access, through an `Arc`.
654
    ///
655
    /// This method is similar to the `try_read` method; however, it requires the `RwLock` to be inside of an
656
    /// `Arc` and the resulting read guard has no lifetime requirements.
657
    #[cfg(feature = "arc_lock")]
658
    #[inline]
659
    pub fn try_read_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> {
660
        if self.raw.try_lock_shared() {
661
            // SAFETY: locking guarantee is upheld
662
            Some(unsafe { self.make_arc_read_guard_unchecked() })
663
        } else {
664
            None
665
        }
666
    }
667
668
    /// Locks this `RwLock` with write access, through an `Arc`.
669
    ///
670
    /// This method is similar to the `write` method; however, it requires the `RwLock` to be inside of an `Arc`
671
    /// and the resulting write guard has no lifetime requirements.
672
    #[cfg(feature = "arc_lock")]
673
    #[inline]
674
    pub fn write_arc(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> {
675
        self.raw.lock_exclusive();
676
        // SAFETY: locking guarantee is upheld
677
        unsafe { self.make_arc_write_guard_unchecked() }
678
    }
679
680
    /// Attempts to lock this `RwLock` with write access, through an `Arc`.
681
    ///
682
    /// This method is similar to the `try_write` method; however, it requires the `RwLock` to be inside of an
683
    /// `Arc` and the resulting write guard has no lifetime requirements.
684
    #[cfg(feature = "arc_lock")]
685
    #[inline]
686
    pub fn try_write_arc(self: &Arc<Self>) -> Option<ArcRwLockWriteGuard<R, T>> {
687
        if self.raw.try_lock_exclusive() {
688
            // SAFETY: locking guarantee is upheld
689
            Some(unsafe { self.make_arc_write_guard_unchecked() })
690
        } else {
691
            None
692
        }
693
    }
694
}
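
The _arc methods return guards that own an Arc clone instead of borrowing the lock, so they have no lifetime parameter. A sketch assuming parking_lot built with its arc_lock feature:

use std::sync::Arc;
use parking_lot::RwLock;

fn read_then_release(lock: Arc<RwLock<Vec<u8>>>) -> usize {
    // The guard keeps its own Arc alive, so it is not tied to a borrow of `lock`.
    let guard = lock.read_arc();
    drop(lock); // the original handle can go away while the guard lives
    guard.len()
}
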
695
696
impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
697
    /// Forcibly unlocks a read lock using a fair unlock protocol.
698
    ///
699
    /// This is useful when combined with `mem::forget` to hold a lock without
700
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
701
    /// dealing with FFI.
702
    ///
703
    /// # Safety
704
    ///
705
    /// This method must only be called if the current thread logically owns a
706
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
707
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
708
    #[inline]
709
0
    pub unsafe fn force_unlock_read_fair(&self) {
710
0
        self.raw.unlock_shared_fair();
711
0
    }
712
713
    /// Forcibly unlocks a write lock using a fair unlock protocol.
714
    ///
715
    /// This is useful when combined with `mem::forget` to hold a lock without
716
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
717
    /// dealing with FFI.
718
    ///
719
    /// # Safety
720
    ///
721
    /// This method must only be called if the current thread logically owns a
722
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
723
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
724
    #[inline]
725
0
    pub unsafe fn force_unlock_write_fair(&self) {
726
0
        self.raw.unlock_exclusive_fair();
727
0
    }
728
}
729
730
impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
731
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
732
    /// is reached.
733
    ///
734
    /// If the access could not be granted before the timeout expires, then
735
    /// `None` is returned. Otherwise, an RAII guard is returned which will
736
    /// release the shared access when it is dropped.
737
    #[inline]
738
0
    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> {
739
0
        if self.raw.try_lock_shared_for(timeout) {
740
            // SAFETY: The lock is held, as required.
741
0
            Some(unsafe { self.make_read_guard_unchecked() })
742
        } else {
743
0
            None
744
        }
745
0
    }
746
747
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
748
    /// is reached.
749
    ///
750
    /// If the access could not be granted before the timeout expires, then
751
    /// `None` is returned. Otherwise, an RAII guard is returned which will
752
    /// release the shared access when it is dropped.
753
    #[inline]
754
0
    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> {
755
0
        if self.raw.try_lock_shared_until(timeout) {
756
            // SAFETY: The lock is held, as required.
757
0
            Some(unsafe { self.make_read_guard_unchecked() })
758
        } else {
759
0
            None
760
        }
761
0
    }
762
763
    /// Attempts to acquire this `RwLock` with exclusive write access until a
764
    /// timeout is reached.
765
    ///
766
    /// If the access could not be granted before the timeout expires, then
767
    /// `None` is returned. Otherwise, an RAII guard is returned which will
768
    /// release the exclusive access when it is dropped.
769
    #[inline]
770
0
    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> {
771
0
        if self.raw.try_lock_exclusive_for(timeout) {
772
            // SAFETY: The lock is held, as required.
773
0
            Some(unsafe { self.make_write_guard_unchecked() })
774
        } else {
775
0
            None
776
        }
777
0
    }
778
779
    /// Attempts to acquire this `RwLock` with exclusive write access until a
780
    /// timeout is reached.
781
    ///
782
    /// If the access could not be granted before the timeout expires, then
783
    /// `None` is returned. Otherwise, an RAII guard is returned which will
784
    /// release the exclusive access when it is dropped.
785
    #[inline]
786
0
    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> {
787
0
        if self.raw.try_lock_exclusive_until(timeout) {
788
            // SAFETY: The lock is held, as required.
789
0
            Some(unsafe { self.make_write_guard_unchecked() })
790
        } else {
791
0
            None
792
        }
793
0
    }
794
795
    /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`.
796
    ///
797
    /// This method is similar to the `try_read_for` method; however, it requires the `RwLock` to be inside of an
798
    /// `Arc` and the resulting read guard has no lifetime requirements.
799
    #[cfg(feature = "arc_lock")]
800
    #[inline]
801
    pub fn try_read_arc_for(
802
        self: &Arc<Self>,
803
        timeout: R::Duration,
804
    ) -> Option<ArcRwLockReadGuard<R, T>> {
805
        if self.raw.try_lock_shared_for(timeout) {
806
            // SAFETY: locking guarantee is upheld
807
            Some(unsafe { self.make_arc_read_guard_unchecked() })
808
        } else {
809
            None
810
        }
811
    }
812
813
    /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`.
814
    ///
815
    /// This method is similar to the `try_read_until` method; however, it requires the `RwLock` to be inside of
816
    /// an `Arc` and the resulting read guard has no lifetime requirements.
817
    #[cfg(feature = "arc_lock")]
818
    #[inline]
819
    pub fn try_read_arc_until(
820
        self: &Arc<Self>,
821
        timeout: R::Instant,
822
    ) -> Option<ArcRwLockReadGuard<R, T>> {
823
        if self.raw.try_lock_shared_until(timeout) {
824
            // SAFETY: locking guarantee is upheld
825
            Some(unsafe { self.make_arc_read_guard_unchecked() })
826
        } else {
827
            None
828
        }
829
    }
830
831
    /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`.
832
    ///
833
    /// This method is similar to the `try_write_for` method; however, it requires the `RwLock` to be inside of
834
    /// an `Arc` and the resulting write guard has no lifetime requirements.
835
    #[cfg(feature = "arc_lock")]
836
    #[inline]
837
    pub fn try_write_arc_for(
838
        self: &Arc<Self>,
839
        timeout: R::Duration,
840
    ) -> Option<ArcRwLockWriteGuard<R, T>> {
841
        if self.raw.try_lock_exclusive_for(timeout) {
842
            // SAFETY: locking guarantee is upheld
843
            Some(unsafe { self.make_arc_write_guard_unchecked() })
844
        } else {
845
            None
846
        }
847
    }
848
849
    /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`.
850
    ///
851
    /// This method is similar to the `try_write_until` method; however, it requires the `RwLock` to be inside of
852
    /// an `Arc` and the resulting write guard has no lifetime requirements.
853
    #[cfg(feature = "arc_lock")]
854
    #[inline]
855
    pub fn try_write_arc_until(
856
        self: &Arc<Self>,
857
        timeout: R::Instant,
858
    ) -> Option<ArcRwLockWriteGuard<R, T>> {
859
        if self.raw.try_lock_exclusive_until(timeout) {
860
            // SAFETY: locking guarantee is upheld
861
            Some(unsafe { self.make_arc_write_guard_unchecked() })
862
        } else {
863
            None
864
        }
865
    }
866
}
867
868
impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
869
    /// Locks this `RwLock` with shared read access, blocking the current thread
870
    /// until it can be acquired.
871
    ///
872
    /// The calling thread will be blocked until there are no more writers which
873
    /// hold the lock. There may be other readers currently inside the lock when
874
    /// this method returns.
875
    ///
876
    /// Unlike `read`, this method is guaranteed to succeed without blocking if
877
    /// another read lock is held at the time of the call. This allows a thread
878
    /// to recursively lock a `RwLock`. However using this method can cause
879
    /// writers to starve since readers no longer block if a writer is waiting
880
    /// for the lock.
881
    ///
882
    /// Returns an RAII guard which will release this thread's shared access
883
    /// once it is dropped.
884
    #[inline]
885
0
    pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> {
886
0
        self.raw.lock_shared_recursive();
887
        // SAFETY: The lock is held, as required.
888
0
        unsafe { self.make_read_guard_unchecked() }
889
0
    }
890
891
    /// Attempts to acquire this `RwLock` with shared read access.
892
    ///
893
    /// If the access could not be granted at this time, then `None` is returned.
894
    /// Otherwise, an RAII guard is returned which will release the shared access
895
    /// when it is dropped.
896
    ///
897
    /// This method is guaranteed to succeed if another read lock is held at the
898
    /// time of the call. See the documentation for `read_recursive` for details.
899
    ///
900
    /// This function does not block.
901
    #[inline]
902
0
    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> {
903
0
        if self.raw.try_lock_shared_recursive() {
904
            // SAFETY: The lock is held, as required.
905
0
            Some(unsafe { self.make_read_guard_unchecked() })
906
        } else {
907
0
            None
908
        }
909
0
    }
910
911
    /// Locks this `RwLock` with shared read access, through an `Arc`.
912
    ///
913
    /// This method is similar to the `read_recursive` method; however, it requires the `RwLock` to be inside of
914
    /// an `Arc` and the resulting read guard has no lifetime requirements.
915
    #[cfg(feature = "arc_lock")]
916
    #[inline]
917
    pub fn read_arc_recursive(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
918
        self.raw.lock_shared_recursive();
919
        // SAFETY: locking guarantee is upheld
920
        unsafe { self.make_arc_read_guard_unchecked() }
921
    }
922
923
    /// Attempts to lock this `RwLock` with shared read access, through an `Arc`.
924
    ///
925
    /// This method is similar to the `try_read_recursive` method; however, it requires the `RwLock` to be inside
926
    /// of an `Arc` and the resulting read guard has no lifetime requirements.
927
    #[cfg(feature = "arc_lock")]
928
    #[inline]
929
    pub fn try_read_recursive_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> {
930
        if self.raw.try_lock_shared_recursive() {
931
            // SAFETY: locking guarantee is upheld
932
            Some(unsafe { self.make_arc_read_guard_unchecked() })
933
        } else {
934
            None
935
        }
936
    }
937
}
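A sketch of recursive read locking with the same assumed `parking_lot` instantiation; unlike `read`, the nested acquisition below cannot deadlock even if a writer is already queued behind the outer guard:

use parking_lot::RwLock;

fn sum_twice(lock: &RwLock<Vec<i32>>) -> i32 {
    let outer = lock.read_recursive();
    // Guaranteed to succeed without blocking because a read lock is held.
    let inner = lock.read_recursive();
    outer.iter().sum::<i32>() + inner.iter().sum::<i32>()
}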
938
939
impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
940
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
941
    /// is reached.
942
    ///
943
    /// If the access could not be granted before the timeout expires, then
944
    /// `None` is returned. Otherwise, an RAII guard is returned which will
945
    /// release the shared access when it is dropped.
946
    ///
947
    /// This method is guaranteed to succeed without blocking if another read
948
    /// lock is held at the time of the call. See the documentation for
949
    /// `read_recursive` for details.
950
    #[inline]
951
0
    pub fn try_read_recursive_for(
952
0
        &self,
953
0
        timeout: R::Duration,
954
0
    ) -> Option<RwLockReadGuard<'_, R, T>> {
955
0
        if self.raw.try_lock_shared_recursive_for(timeout) {
956
            // SAFETY: The lock is held, as required.
957
0
            Some(unsafe { self.make_read_guard_unchecked() })
958
        } else {
959
0
            None
960
        }
961
0
    }
962
963
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
964
    /// is reached.
965
    ///
966
    /// If the access could not be granted before the timeout expires, then
967
    /// `None` is returned. Otherwise, an RAII guard is returned which will
968
    /// release the shared access when it is dropped.
969
    #[inline]
970
0
    pub fn try_read_recursive_until(
971
0
        &self,
972
0
        timeout: R::Instant,
973
0
    ) -> Option<RwLockReadGuard<'_, R, T>> {
974
0
        if self.raw.try_lock_shared_recursive_until(timeout) {
975
            // SAFETY: The lock is held, as required.
976
0
            Some(unsafe { self.make_read_guard_unchecked() })
977
        } else {
978
0
            None
979
        }
980
0
    }
981
982
    /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`.
983
    ///
984
    /// This method is similar to the `try_read_recursive_for` method; however, it requires the `RwLock` to be
985
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
986
    #[cfg(feature = "arc_lock")]
987
    #[inline]
988
    pub fn try_read_arc_recursive_for(
989
        self: &Arc<Self>,
990
        timeout: R::Duration,
991
    ) -> Option<ArcRwLockReadGuard<R, T>> {
992
        if self.raw.try_lock_shared_recursive_for(timeout) {
993
            // SAFETY: locking guarantee is upheld
994
            Some(unsafe { self.make_arc_read_guard_unchecked() })
995
        } else {
996
            None
997
        }
998
    }
999
1000
    /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`.
1001
    ///
1002
    /// This method is similar to the `try_read_recursive_until` method; however, it requires the `RwLock` to be
1003
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1004
    #[cfg(feature = "arc_lock")]
1005
    #[inline]
1006
    pub fn try_read_arc_recursive_until(
1007
        self: &Arc<Self>,
1008
        timeout: R::Instant,
1009
    ) -> Option<ArcRwLockReadGuard<R, T>> {
1010
        if self.raw.try_lock_shared_recursive_until(timeout) {
1011
            // SAFETY: locking guarantee is upheld
1012
            Some(unsafe { self.make_arc_read_guard_unchecked() })
1013
        } else {
1014
            None
1015
        }
1016
    }
1017
}
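The timed variants combine the recursive guarantee with a bound on how long the caller waits; a minimal sketch, again assuming `parking_lot`:

use std::time::Duration;
use parking_lot::RwLock;

fn peek_len(lock: &RwLock<String>) -> Option<usize> {
    // None if the shared lock could not be taken within 10ms.
    lock.try_read_recursive_for(Duration::from_millis(10))
        .map(|guard| guard.len())
}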
1018
1019
impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
1020
    /// Creates a new `RwLockUpgradableReadGuard` without checking if the lock is held.
1021
    ///
1022
    /// # Safety
1023
    ///
1024
    /// This method must only be called if the thread logically holds an upgradable read lock.
1025
    ///
1026
    /// This function does not increment the read count of the lock. Calling this function when a
1027
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
1028
    /// with `mem::forget`.
1029
    #[inline]
1030
0
    pub unsafe fn make_upgradable_guard_unchecked(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
1031
0
        RwLockUpgradableReadGuard {
1032
0
            rwlock: self,
1033
0
            marker: PhantomData,
1034
0
        }
1035
0
    }
1036
1037
    /// Locks this `RwLock` with upgradable read access, blocking the current thread
1038
    /// until it can be acquired.
1039
    ///
1040
    /// The calling thread will be blocked until there are no more writers or other
1041
    /// upgradable reads which hold the lock. There may be other readers currently
1042
    /// inside the lock when this method returns.
1043
    ///
1044
    /// Returns an RAII guard which will release this thread's shared access
1045
    /// once it is dropped.
1046
    #[inline]
1047
0
    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
1048
0
        self.raw.lock_upgradable();
1049
        // SAFETY: The lock is held, as required.
1050
0
        unsafe { self.make_upgradable_guard_unchecked() }
1051
0
    }
1052
1053
    /// Attempts to acquire this `RwLock` with upgradable read access.
1054
    ///
1055
    /// If the access could not be granted at this time, then `None` is returned.
1056
    /// Otherwise, an RAII guard is returned which will release the shared access
1057
    /// when it is dropped.
1058
    ///
1059
    /// This function does not block.
1060
    #[inline]
1061
0
    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
1062
0
        if self.raw.try_lock_upgradable() {
1063
            // SAFETY: The lock is held, as required.
1064
0
            Some(unsafe { self.make_upgradable_guard_unchecked() })
1065
        } else {
1066
0
            None
1067
        }
1068
0
    }
1069
1070
    /// Creates a new `ArcRwLockUpgradableReadGuard` without checking if the lock is held.
1071
    ///
1072
    /// # Safety
1073
    ///
1074
    /// This method must only be called if the thread logically holds an upgradable read lock.
1075
    ///
1076
    /// This function does not increment the read count of the lock. Calling this function when a
1077
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
1078
    /// with `mem::forget`.
1079
    #[cfg(feature = "arc_lock")]
1080
    #[inline]
1081
    pub unsafe fn make_upgradable_arc_guard_unchecked(
1082
        self: &Arc<Self>,
1083
    ) -> ArcRwLockUpgradableReadGuard<R, T> {
1084
        ArcRwLockUpgradableReadGuard {
1085
            rwlock: self.clone(),
1086
            marker: PhantomData,
1087
        }
1088
    }
1089
1090
    /// Locks this `RwLock` with upgradable read access, through an `Arc`.
1091
    ///
1092
    /// This method is similar to the `upgradable_read` method; however, it requires the `RwLock` to be
1093
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1094
    #[cfg(feature = "arc_lock")]
1095
    #[inline]
1096
    pub fn upgradable_read_arc(self: &Arc<Self>) -> ArcRwLockUpgradableReadGuard<R, T> {
1097
        self.raw.lock_upgradable();
1098
        // SAFETY: locking guarantee is upheld
1099
        unsafe { self.make_upgradable_arc_guard_unchecked() }
1100
    }
1101
1102
    /// Attempts to lock this `RwLock` with upgradable read access, through an `Arc`.
1103
    ///
1104
    /// This method is similar to the `try_upgradable_read` method; however, it requires the `RwLock` to be
1105
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1106
    #[cfg(feature = "arc_lock")]
1107
    #[inline]
1108
    pub fn try_upgradable_read_arc(self: &Arc<Self>) -> Option<ArcRwLockUpgradableReadGuard<R, T>> {
1109
        if self.raw.try_lock_upgradable() {
1110
            // SAFETY: locking guarantee is upheld
1111
            Some(unsafe { self.make_upgradable_arc_guard_unchecked() })
1112
        } else {
1113
            None
1114
        }
1115
    }
1116
}
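Upgradable reads give a check-then-modify pattern with no window for another writer between the check and the write. A sketch assuming `parking_lot`, where `RwLockUpgradableReadGuard::upgrade` (defined on the guard type later in this crate) performs the atomic upgrade:

use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn insert_unique(lock: &RwLock<Vec<i32>>, value: i32) {
    // Only one upgradable reader at a time, but plain readers may coexist.
    let guard = lock.upgradable_read();
    if !guard.contains(&value) {
        // Atomically trade upgradable access for exclusive access.
        let mut write = RwLockUpgradableReadGuard::upgrade(guard);
        write.push(value);
    }
}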
1117
1118
impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
1119
    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
1120
    /// is reached.
1121
    ///
1122
    /// If the access could not be granted before the timeout expires, then
1123
    /// `None` is returned. Otherwise, an RAII guard is returned which will
1124
    /// release the shared access when it is dropped.
1125
    #[inline]
1126
0
    pub fn try_upgradable_read_for(
1127
0
        &self,
1128
0
        timeout: R::Duration,
1129
0
    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
1130
0
        if self.raw.try_lock_upgradable_for(timeout) {
1131
            // SAFETY: The lock is held, as required.
1132
0
            Some(unsafe { self.make_upgradable_guard_unchecked() })
1133
        } else {
1134
0
            None
1135
        }
1136
0
    }
1137
1138
    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
1139
    /// is reached.
1140
    ///
1141
    /// If the access could not be granted before the timeout expires, then
1142
    /// `None` is returned. Otherwise, an RAII guard is returned which will
1143
    /// release the shared access when it is dropped.
1144
    #[inline]
1145
0
    pub fn try_upgradable_read_until(
1146
0
        &self,
1147
0
        timeout: R::Instant,
1148
0
    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
1149
0
        if self.raw.try_lock_upgradable_until(timeout) {
1150
            // SAFETY: The lock is held, as required.
1151
0
            Some(unsafe { self.make_upgradable_guard_unchecked() })
1152
        } else {
1153
0
            None
1154
        }
1155
0
    }
1156
1157
    /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`.
1158
    ///
1159
    /// This method is similar to the `try_upgradable_read_for` method; however, it requires the `RwLock` to be
1160
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1161
    #[cfg(feature = "arc_lock")]
1162
    #[inline]
1163
    pub fn try_upgradable_read_arc_for(
1164
        self: &Arc<Self>,
1165
        timeout: R::Duration,
1166
    ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> {
1167
        if self.raw.try_lock_upgradable_for(timeout) {
1168
            // SAFETY: locking guarantee is upheld
1169
            Some(unsafe { self.make_upgradable_arc_guard_unchecked() })
1170
        } else {
1171
            None
1172
        }
1173
    }
1174
1175
    /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`.
1176
    ///
1177
    /// This method is similar to the `try_upgradable_read_until` method; however, it requires the `RwLock` to be
1178
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
1179
    #[cfg(feature = "arc_lock")]
1180
    #[inline]
1181
    pub fn try_upgradable_read_arc_until(
1182
        self: &Arc<Self>,
1183
        timeout: R::Instant,
1184
    ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> {
1185
        if self.raw.try_lock_upgradable_until(timeout) {
1186
            // SAFETY: locking guarantee is upheld
1187
            Some(unsafe { self.make_upgradable_arc_guard_unchecked() })
1188
        } else {
1189
            None
1190
        }
1191
    }
1192
}
1193
1194
impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> {
1195
    #[inline]
1196
0
    fn default() -> RwLock<R, T> {
1197
0
        RwLock::new(Default::default())
1198
0
    }
1199
}
1200
1201
impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
1202
    #[inline]
1203
0
    fn from(t: T) -> RwLock<R, T> {
1204
0
        RwLock::new(t)
1205
0
    }
1206
}
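Both impls above simply wrap `RwLock::new`; for example, with the assumed `parking_lot` alias:

use parking_lot::RwLock;

fn constructors() {
    // Default::default() of the inner type, wrapped in an unlocked lock.
    let empty: RwLock<Vec<u8>> = RwLock::default();
    // From<T> is equivalent to RwLock::new(t).
    let seven: RwLock<u32> = RwLock::from(7);
    assert!(empty.read().is_empty());
    assert_eq!(*seven.read(), 7);
}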
1207
1208
impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
1209
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1210
0
        let mut d = f.debug_struct("RwLock");
1211
0
        match self.try_read() {
1212
0
            Some(guard) => d.field("data", &&*guard),
1213
            None => {
1214
                // Additional format_args! here is to remove quotes around <locked> in debug output.
1215
0
                d.field("data", &format_args!("<locked>"))
1216
            }
1217
        };
1218
0
        d.finish()
1219
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, ()> as core::fmt::Debug>::fmt
Unexecuted instantiation: <lock_api::rwlock::RwLock<_, _> as core::fmt::Debug>::fmt
1220
}
1221
1222
/// RAII structure used to release the shared read access of a lock when
1223
/// dropped.
1224
#[clippy::has_significant_drop]
1225
#[must_use = "if unused the RwLock will immediately unlock"]
1226
pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
1227
    rwlock: &'a RwLock<R, T>,
1228
    marker: PhantomData<(&'a T, R::GuardMarker)>,
1229
}
1230
1231
unsafe impl<R: RawRwLock + Sync, T: Sync + ?Sized> Sync for RwLockReadGuard<'_, R, T> {}
1232
1233
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
1234
    /// Returns a reference to the original reader-writer lock object.
1235
0
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
1236
0
        s.rwlock
1237
0
    }
1238
1239
    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
1240
    ///
1241
    /// This operation cannot fail as the `RwLockReadGuard` passed
1242
    /// in already locked the data.
1243
    ///
1244
    /// This is an associated function that needs to be
1245
    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
1246
    /// the same name on the contents of the locked data.
1247
    #[inline]
1248
0
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
1249
0
    where
1250
0
        F: FnOnce(&T) -> &U,
1251
    {
1252
0
        let raw = &s.rwlock.raw;
1253
0
        let data = f(unsafe { &*s.rwlock.data.get() });
1254
0
        mem::forget(s);
1255
0
        MappedRwLockReadGuard {
1256
0
            raw,
1257
0
            data,
1258
0
            marker: PhantomData,
1259
0
        }
1260
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>>::map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>::get_or_create::{closure#1}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>>::map::<prometheus_client::metrics::histogram::Histogram, <prometheus_client::metrics::family::Family<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>::get_or_create::{closure#1}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>>::map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>::get_or_create::{closure#1}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>>::map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>::get_or_create::{closure#1}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>>::map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>::get_or_create::{closure#1}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>>::map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>::get_or_create::{closure#1}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>>::map::<prometheus_client::metrics::gauge::Gauge, <prometheus_client::metrics::family::Family<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>::get_or_create::{closure#1}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner>>::map::<alloc::vec::Vec<(f64, u64)>, <prometheus_client::metrics::histogram::Histogram>::get::{closure#0}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<_, _>>::map::<_, _>
1261
1262
    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
1263
    /// locked data. Returns the original guard if the closure returns `None`.
1264
    ///
1265
    /// This operation cannot fail as the `RwLockReadGuard` passed
1266
    /// in already locked the data.
1267
    ///
1268
    /// This is an associated function that needs to be
1269
    /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of
1270
    /// the same name on the contents of the locked data.
1271
    #[inline]
1272
0
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
1273
0
    where
1274
0
        F: FnOnce(&T) -> Option<&U>,
1275
    {
1276
0
        let raw = &s.rwlock.raw;
1277
0
        let data = match f(unsafe { &*s.rwlock.data.get() }) {
1278
0
            Some(data) => data,
1279
0
            None => return Err(s),
1280
        };
1281
0
        mem::forget(s);
1282
0
        Ok(MappedRwLockReadGuard {
1283
0
            raw,
1284
0
            data,
1285
0
            marker: PhantomData,
1286
0
        })
1287
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>>::try_map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>::get::{closure#0}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>>::try_map::<prometheus_client::metrics::histogram::Histogram, <prometheus_client::metrics::family::Family<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>::get::{closure#0}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>>::try_map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>::get::{closure#0}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>>::try_map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>::get::{closure#0}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>>::try_map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>::get::{closure#0}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>>::try_map::<prometheus_client::metrics::counter::Counter, <prometheus_client::metrics::family::Family<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>::get::{closure#0}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>>::try_map::<prometheus_client::metrics::gauge::Gauge, <prometheus_client::metrics::family::Family<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>::get::{closure#0}>
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<_, _>>::try_map::<_, _>
1288
1289
    /// Temporarily unlocks the `RwLock` to execute the given function.
1290
    ///
1291
    /// This is safe because `&mut` guarantees that there exist no other
1292
    /// references to the data protected by the `RwLock`.
1293
    #[inline]
1294
0
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1295
0
    where
1296
0
        F: FnOnce() -> U,
1297
    {
1298
        // Safety: An RwLockReadGuard always holds a shared lock.
1299
0
        unsafe {
1300
0
            s.rwlock.raw.unlock_shared();
1301
0
        }
1302
0
        defer!(s.rwlock.raw.lock_shared());
1303
0
        f()
1304
0
    }
1305
}
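`map` and `try_map` narrow a held read guard to part of the protected data while keeping the shared lock. A sketch with a hypothetical `Config` type, using the `parking_lot` aliases:

use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};

struct Config {
    name: String,
    retries: u32,
}

fn name_of(lock: &RwLock<Config>) -> MappedRwLockReadGuard<'_, String> {
    // The shared lock stays held; only the visible component is narrowed.
    RwLockReadGuard::map(lock.read(), |cfg| &cfg.name)
}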
1306
1307
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
1308
    /// Unlocks the `RwLock` using a fair unlock protocol.
1309
    ///
1310
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
1311
    /// the `RwLock` before another has the chance to acquire the lock, even if
1312
    /// that thread has been blocked on the `RwLock` for a long time. This is
1313
    /// the default because it allows much higher throughput as it avoids
1314
    /// forcing a context switch on every `RwLock` unlock. This can result in one
1315
    /// thread acquiring a `RwLock` many more times than other threads.
1316
    ///
1317
    /// However in some cases it can be beneficial to ensure fairness by forcing
1318
    /// the lock to pass on to a waiting thread if there is one. This is done by
1319
    /// using this method instead of dropping the `RwLockReadGuard` normally.
1320
    #[inline]
1321
0
    pub fn unlock_fair(s: Self) {
1322
        // Safety: An RwLockReadGuard always holds a shared lock.
1323
0
        unsafe {
1324
0
            s.rwlock.raw.unlock_shared_fair();
1325
0
        }
1326
0
        mem::forget(s);
1327
0
    }
1328
1329
    /// Temporarily unlocks the `RwLock` to execute the given function.
1330
    ///
1331
    /// The `RwLock` is unlocked using a fair unlock protocol.
1332
    ///
1333
    /// This is safe because `&mut` guarantees that there exist no other
1334
    /// references to the data protected by the `RwLock`.
1335
    #[inline]
1336
0
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1337
0
    where
1338
0
        F: FnOnce() -> U,
1339
    {
1340
        // Safety: An RwLockReadGuard always holds a shared lock.
1341
0
        unsafe {
1342
0
            s.rwlock.raw.unlock_shared_fair();
1343
0
        }
1344
0
        defer!(s.rwlock.raw.lock_shared());
1345
0
        f()
1346
0
    }
1347
1348
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1349
    ///
1350
    /// This method is functionally equivalent to calling `unlock_fair` followed
1351
    /// by `read`; however, it can be much more efficient in the case where there
1352
    /// are no waiting threads.
1353
    #[inline]
1354
0
    pub fn bump(s: &mut Self) {
1355
        // Safety: An RwLockReadGuard always holds a shared lock.
1356
0
        unsafe {
1357
0
            s.rwlock.raw.bump_shared();
1358
0
        }
1359
0
    }
1360
}
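A sketch of the fair-unlock helpers from a long-running reader, under the same `parking_lot` assumption:

use parking_lot::{RwLock, RwLockReadGuard};

fn scan_in_passes(lock: &RwLock<Vec<u64>>) -> u64 {
    let mut guard = lock.read();
    let mut total = 0;
    for _ in 0..4 {
        total += guard.iter().sum::<u64>();
        // Hand the lock to a waiting writer between passes, if there is one.
        RwLockReadGuard::bump(&mut guard);
    }
    // Release with the fair protocol instead of a plain drop.
    RwLockReadGuard::unlock_fair(guard);
    total
}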
1361
1362
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> {
1363
    type Target = T;
1364
    #[inline]
1365
0
    fn deref(&self) -> &T {
1366
0
        unsafe { &*self.rwlock.data.get() }
1367
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, thread_local::ThreadLocal<core::cell::RefCell<lru::LruCache<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, core::option::Option<ztunnel::drain::internal::DrainMode>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, ztunnel::identity::manager::CertState> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, moka::common::frequency_sketch::FrequencySketch> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, tokio::runtime::time::ShardedWheel> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, ()> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<_, _> as core::ops::deref::Deref>::deref
1368
}
1369
1370
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
1371
    #[inline]
1372
0
    fn drop(&mut self) {
1373
        // Safety: An RwLockReadGuard always holds a shared lock.
1374
0
        unsafe {
1375
0
            self.rwlock.raw.unlock_shared();
1376
0
        }
1377
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, thread_local::ThreadLocal<core::cell::RefCell<lru::LruCache<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, core::option::Option<ztunnel::drain::internal::DrainMode>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, ztunnel::identity::manager::CertState> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, core::option::Option<hyper_util::client::legacy::connect::Connected>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, moka::common::frequency_sketch::FrequencySketch> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, tokio::runtime::time::ShardedWheel> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, ()> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockReadGuard<_, _> as core::ops::drop::Drop>::drop
1378
}
1379
1380
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> {
1381
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1382
0
        fmt::Debug::fmt(&**self, f)
1383
0
    }
1384
}
1385
1386
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1387
    for RwLockReadGuard<'a, R, T>
1388
{
1389
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1390
0
        (**self).fmt(f)
1391
0
    }
1392
}
1393
1394
#[cfg(feature = "owning_ref")]
1395
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}
1396
1397
/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`.
1398
///
1399
/// This is similar to the `RwLockReadGuard` struct, except instead of using a reference to unlock the `RwLock`
1400
/// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has a `'static` lifetime.
1401
#[cfg(feature = "arc_lock")]
1402
#[clippy::has_significant_drop]
1403
#[must_use = "if unused the RwLock will immediately unlock"]
1404
pub struct ArcRwLockReadGuard<R: RawRwLock, T: ?Sized> {
1405
    rwlock: Arc<RwLock<R, T>>,
1406
    marker: PhantomData<R::GuardMarker>,
1407
}
1408
1409
#[cfg(feature = "arc_lock")]
1410
impl<R: RawRwLock, T: ?Sized> ArcRwLockReadGuard<R, T> {
1411
    /// Returns a reference to the rwlock, contained in its `Arc`.
1412
    pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> {
1413
        &s.rwlock
1414
    }
1415
1416
    /// Temporarily unlocks the `RwLock` to execute the given function.
1417
    ///
1418
    /// This is functionally identical to the `unlocked` method on [`RwLockReadGuard`].
1419
    #[inline]
1420
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1421
    where
1422
        F: FnOnce() -> U,
1423
    {
1424
        // Safety: An RwLockReadGuard always holds a shared lock.
1425
        unsafe {
1426
            s.rwlock.raw.unlock_shared();
1427
        }
1428
        defer!(s.rwlock.raw.lock_shared());
1429
        f()
1430
    }
1431
}
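Because the guard owns an `Arc`, it is not tied to any borrow of the lock. A sketch assuming the `read_arc` constructor defined earlier in this file and the `arc_lock` feature:

use std::ops::Deref;
use std::sync::Arc;
use parking_lot::RwLock;

fn snapshot(text: String) -> impl Deref<Target = String> {
    let lock = Arc::new(RwLock::new(text));
    // The guard holds its own Arc clone, so it may outlive `lock` and be
    // returned from this function.
    lock.read_arc()
}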
1432
1433
#[cfg(feature = "arc_lock")]
1434
impl<R: RawRwLockFair, T: ?Sized> ArcRwLockReadGuard<R, T> {
1435
    /// Unlocks the `RwLock` using a fair unlock protocol.
1436
    ///
1437
    /// This is functionally identical to the `unlock_fair` method on [`RwLockReadGuard`].
1438
    #[inline]
1439
    pub fn unlock_fair(s: Self) {
1440
        // Safety: An RwLockReadGuard always holds a shared lock.
1441
        unsafe {
1442
            s.rwlock.raw.unlock_shared_fair();
1443
        }
1444
1445
        // SAFETY: ensure the Arc has its refcount decremented
1446
        let mut s = ManuallyDrop::new(s);
1447
        unsafe { ptr::drop_in_place(&mut s.rwlock) };
1448
    }
1449
1450
    /// Temporarily unlocks the `RwLock` to execute the given function.
1451
    ///
1452
    /// This is functionally identical to the `unlocked_fair` method on [`RwLockReadGuard`].
1453
    #[inline]
1454
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1455
    where
1456
        F: FnOnce() -> U,
1457
    {
1458
        // Safety: An RwLockReadGuard always holds a shared lock.
1459
        unsafe {
1460
            s.rwlock.raw.unlock_shared_fair();
1461
        }
1462
        defer!(s.rwlock.raw.lock_shared());
1463
        f()
1464
    }
1465
1466
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1467
    ///
1468
    /// This is functionally identical to the `bump` method on [`RwLockReadGuard`].
1469
    #[inline]
1470
    pub fn bump(s: &mut Self) {
1471
        // Safety: An RwLockReadGuard always holds a shared lock.
1472
        unsafe {
1473
            s.rwlock.raw.bump_shared();
1474
        }
1475
    }
1476
}
1477
1478
#[cfg(feature = "arc_lock")]
1479
impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockReadGuard<R, T> {
1480
    type Target = T;
1481
    #[inline]
1482
    fn deref(&self) -> &T {
1483
        unsafe { &*self.rwlock.data.get() }
1484
    }
1485
}
1486
1487
#[cfg(feature = "arc_lock")]
1488
impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockReadGuard<R, T> {
1489
    #[inline]
1490
    fn drop(&mut self) {
1491
        // Safety: An RwLockReadGuard always holds a shared lock.
1492
        unsafe {
1493
            self.rwlock.raw.unlock_shared();
1494
        }
1495
    }
1496
}
1497
1498
#[cfg(feature = "arc_lock")]
1499
impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockReadGuard<R, T> {
1500
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1501
        fmt::Debug::fmt(&**self, f)
1502
    }
1503
}
1504
1505
#[cfg(feature = "arc_lock")]
1506
impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display for ArcRwLockReadGuard<R, T> {
1507
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1508
        (**self).fmt(f)
1509
    }
1510
}
1511
1512
/// RAII structure used to release the exclusive write access of a lock when
1513
/// dropped.
1514
#[clippy::has_significant_drop]
1515
#[must_use = "if unused the RwLock will immediately unlock"]
1516
pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
1517
    rwlock: &'a RwLock<R, T>,
1518
    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
1519
}
1520
1521
unsafe impl<R: RawRwLock + Sync, T: Sync + ?Sized> Sync for RwLockWriteGuard<'_, R, T> {}
1522
1523
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1524
    /// Returns a reference to the original reader-writer lock object.
1525
0
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
1526
0
        s.rwlock
1527
0
    }
1528
1529
    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
1530
    ///
1531
    /// This operation cannot fail as the `RwLockWriteGuard` passed
1532
    /// in already locked the data.
1533
    ///
1534
    /// This is an associated function that needs to be
1535
    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
1536
    /// the same name on the contents of the locked data.
1537
    #[inline]
1538
0
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
1539
0
    where
1540
0
        F: FnOnce(&mut T) -> &mut U,
1541
    {
1542
0
        let raw = &s.rwlock.raw;
1543
0
        let data = f(unsafe { &mut *s.rwlock.data.get() });
1544
0
        mem::forget(s);
1545
0
        MappedRwLockWriteGuard {
1546
0
            raw,
1547
0
            data,
1548
0
            marker: PhantomData,
1549
0
        }
1550
0
    }
1551
1552
    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
1553
    /// locked data. The original guard is returned if the closure returns `None`.
1554
    ///
1555
    /// This operation cannot fail as the `RwLockWriteGuard` passed
1556
    /// in already locked the data.
1557
    ///
1558
    /// This is an associated function that needs to be
1559
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
1560
    /// the same name on the contents of the locked data.
1561
    #[inline]
1562
0
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
1563
0
    where
1564
0
        F: FnOnce(&mut T) -> Option<&mut U>,
1565
    {
1566
0
        let raw = &s.rwlock.raw;
1567
0
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
1568
0
            Some(data) => data,
1569
0
            None => return Err(s),
1570
        };
1571
0
        mem::forget(s);
1572
0
        Ok(MappedRwLockWriteGuard {
1573
0
            raw,
1574
0
            data,
1575
0
            marker: PhantomData,
1576
0
        })
1577
0
    }
1578
1579
    /// Temporarily unlocks the `RwLock` to execute the given function.
1580
    ///
1581
    /// This is safe because `&mut` guarantees that there exist no other
1582
    /// references to the data protected by the `RwLock`.
1583
    #[inline]
1584
0
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1585
0
    where
1586
0
        F: FnOnce() -> U,
1587
    {
1588
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1589
0
        unsafe {
1590
0
            s.rwlock.raw.unlock_exclusive();
1591
0
        }
1592
0
        defer!(s.rwlock.raw.lock_exclusive());
1593
0
        f()
1594
0
    }
1595
}
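The write-guard `map` works the same way as the read version but hands out mutable access to the component; a sketch with a hypothetical `Counters` type:

use parking_lot::{MappedRwLockWriteGuard, RwLock, RwLockWriteGuard};

struct Counters {
    hits: u64,
    misses: u64,
}

fn hits_mut(lock: &RwLock<Counters>) -> MappedRwLockWriteGuard<'_, u64> {
    // Exclusive access is retained; the caller simply cannot reach `misses`.
    RwLockWriteGuard::map(lock.write(), |c| &mut c.hits)
}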
1596
1597
impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1598
    /// Atomically downgrades a write lock into a read lock without allowing any
1599
    /// writers to take exclusive access of the lock in the meantime.
1600
    ///
1601
    /// Note that if there are any writers currently waiting to take the lock
1602
    /// then other readers may not be able to acquire the lock even if it was
1603
    /// downgraded.
1604
0
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
1605
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1606
0
        unsafe {
1607
0
            s.rwlock.raw.downgrade();
1608
0
        }
1609
0
        let rwlock = s.rwlock;
1610
0
        mem::forget(s);
1611
0
        RwLockReadGuard {
1612
0
            rwlock,
1613
0
            marker: PhantomData,
1614
0
        }
1615
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>>>::downgrade
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>>>::downgrade
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>>>::downgrade
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>>>::downgrade
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>>>::downgrade
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>>>::downgrade
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>>>::downgrade
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<_, _>>::downgrade
1616
}
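A sketch of the write-then-read pattern that `downgrade` enables, with no window for another writer between the two states (same `parking_lot` assumption):

use parking_lot::{RwLock, RwLockWriteGuard};

fn publish_and_count(lock: &RwLock<Vec<u32>>) -> usize {
    let mut write = lock.write();
    write.push(42);
    // Atomically convert exclusive access into shared access.
    let read = RwLockWriteGuard::downgrade(write);
    read.len()
}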
1617
1618
impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1619
    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
1620
    /// writers to take exclusive access of the lock in the meantime.
1621
    ///
1622
    /// Note that if there are any writers currently waiting to take the lock
1623
    /// then other readers may not be able to acquire the lock even if it was
1624
    /// downgraded.
1625
0
    pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
1626
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1627
0
        unsafe {
1628
0
            s.rwlock.raw.downgrade_to_upgradable();
1629
0
        }
1630
0
        let rwlock = s.rwlock;
1631
0
        mem::forget(s);
1632
0
        RwLockUpgradableReadGuard {
1633
0
            rwlock,
1634
0
            marker: PhantomData,
1635
0
        }
1636
0
    }
1637
}
1638
1639
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
1640
    /// Unlocks the `RwLock` using a fair unlock protocol.
1641
    ///
1642
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
1643
    /// the `RwLock` before another has the chance to acquire the lock, even if
1644
    /// that thread has been blocked on the `RwLock` for a long time. This is
1645
    /// the default because it allows much higher throughput as it avoids
1646
    /// forcing a context switch on every `RwLock` unlock. This can result in one
1647
    /// thread acquiring a `RwLock` many more times than other threads.
1648
    ///
1649
    /// However in some cases it can be beneficial to ensure fairness by forcing
1650
    /// the lock to pass on to a waiting thread if there is one. This is done by
1651
    /// using this method instead of dropping the `RwLockWriteGuard` normally.
1652
    #[inline]
1653
0
    pub fn unlock_fair(s: Self) {
1654
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1655
0
        unsafe {
1656
0
            s.rwlock.raw.unlock_exclusive_fair();
1657
0
        }
1658
0
        mem::forget(s);
1659
0
    }
1660
1661
    /// Temporarily unlocks the `RwLock` to execute the given function.
1662
    ///
1663
    /// The `RwLock` is unlocked using a fair unlock protocol.
1664
    ///
1665
    /// This is safe because `&mut` guarantees that there exist no other
1666
    /// references to the data protected by the `RwLock`.
1667
    #[inline]
1668
0
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1669
0
    where
1670
0
        F: FnOnce() -> U,
1671
    {
1672
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1673
0
        unsafe {
1674
0
            s.rwlock.raw.unlock_exclusive_fair();
1675
0
        }
1676
0
        defer!(s.rwlock.raw.lock_exclusive());
1677
0
        f()
1678
0
    }
1679
1680
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1681
    ///
1682
    /// This method is functionally equivalent to calling `unlock_fair` followed
1683
    /// by `write`; however, it can be much more efficient in the case where there
1684
    /// are no waiting threads.
1685
    #[inline]
1686
0
    pub fn bump(s: &mut Self) {
1687
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1688
0
        unsafe {
1689
0
            s.rwlock.raw.bump_exclusive();
1690
0
        }
1691
0
    }
1692
}
1693
1694
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
1695
    type Target = T;
1696
    #[inline]
1697
0
    fn deref(&self) -> &T {
1698
0
        unsafe { &*self.rwlock.data.get() }
1699
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<_, _> as core::ops::deref::Deref>::deref
1700
}
1701
1702
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
1703
    #[inline]
1704
0
    fn deref_mut(&mut self) -> &mut T {
1705
0
        unsafe { &mut *self.rwlock.data.get() }
1706
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, core::option::Option<ztunnel::drain::internal::DrainMode>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, ztunnel::identity::manager::CertState> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, bool> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, core::option::Option<hyper_util::client::legacy::connect::Connected>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, moka::common::frequency_sketch::FrequencySketch> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, tokio::runtime::time::ShardedWheel> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, ()> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<_, _> as core::ops::deref::DerefMut>::deref_mut
1707
}
1708
1709
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
1710
    #[inline]
1711
0
    fn drop(&mut self) {
1712
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1713
0
        unsafe {
1714
0
            self.rwlock.raw.unlock_exclusive();
1715
0
        }
1716
0
    }
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, core::option::Option<ztunnel::drain::internal::DrainMode>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::dns::metrics::DnsLabels, prometheus_client::metrics::histogram::Histogram>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::ConnectionTermination, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::xds::metrics::TypeUrl, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::OnDemandDnsLabels, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::proxy::metrics::CommonTrafficLabels, prometheus_client::metrics::counter::Counter>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<ztunnel::metrics::meta::IstioBuildLabel, prometheus_client::metrics::gauge::Gauge>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, std::collections::hash::map::HashMap<u64, alloc::sync::Arc<pingora_pool::connection::PoolNode<pingora_pool::connection::PoolConnection<ztunnel::proxy::h2::client::H2ConnectClient>>>>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, ztunnel::identity::manager::CertState> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, bool> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, alloc::collections::btree::map::BTreeMap<pingora_timeout::timer::Time, pingora_timeout::timer::Timer>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, core::option::Option<hyper_util::client::legacy::connect::Connected>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, moka::common::frequency_sketch::FrequencySketch> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Inner> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, tokio::runtime::time::ShardedWheel> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<parking_lot::raw_rwlock::RawRwLock, ()> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::RwLockWriteGuard<_, _> as core::ops::drop::Drop>::drop
1717
}
1718
1719
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
1720
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1721
0
        fmt::Debug::fmt(&**self, f)
1722
0
    }
1723
}
1724
1725
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1726
    for RwLockWriteGuard<'a, R, T>
1727
{
1728
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1729
0
        (**self).fmt(f)
1730
0
    }
1731
}
1732
1733
#[cfg(feature = "owning_ref")]
1734
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
1735
1736
/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`.
1737
/// This is similar to the `RwLockWriteGuard` struct, except instead of using a reference to unlock the `RwLock`
1738
/// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has a `'static` lifetime.
1739
#[cfg(feature = "arc_lock")]
1740
#[clippy::has_significant_drop]
1741
#[must_use = "if unused the RwLock will immediately unlock"]
1742
pub struct ArcRwLockWriteGuard<R: RawRwLock, T: ?Sized> {
1743
    rwlock: Arc<RwLock<R, T>>,
1744
    marker: PhantomData<R::GuardMarker>,
1745
}
1746
1747
#[cfg(feature = "arc_lock")]
1748
impl<R: RawRwLock, T: ?Sized> ArcRwLockWriteGuard<R, T> {
1749
    /// Returns a reference to the rwlock, contained in its `Arc`.
1750
    pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> {
1751
        &s.rwlock
1752
    }
1753
1754
    /// Temporarily unlocks the `RwLock` to execute the given function.
1755
    ///
1756
    /// This is functionally equivalent to the `unlocked` method on [`RwLockWriteGuard`].
1757
    #[inline]
1758
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1759
    where
1760
        F: FnOnce() -> U,
1761
    {
1762
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1763
        unsafe {
1764
            s.rwlock.raw.unlock_exclusive();
1765
        }
1766
        defer!(s.rwlock.raw.lock_exclusive());
1767
        f()
1768
    }
1769
}
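A usage sketch for the guard above (not part of this file): it assumes parking_lot with its "arc_lock" feature enabled, which supplies the concrete RawRwLock seen in the instantiation lists, and constructs the guard with `write_arc`.

// Sketch only: assumes parking_lot with the "arc_lock" feature enabled.
use std::sync::Arc;
use parking_lot::RwLock;

fn main() {
    let lock = Arc::new(RwLock::new(String::from("hello")));

    // `write_arc` clones the `Arc` into the guard, so the guard borrows nothing
    // and stays valid even after every other handle to the lock is gone.
    let mut guard = lock.write_arc();
    drop(lock);

    guard.push_str(", world");          // DerefMut, as implemented above
    assert_eq!(*guard, "hello, world"); // Deref

    // Dropping the guard releases the exclusive lock and the last Arc reference.
}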
1770
1771
#[cfg(feature = "arc_lock")]
1772
impl<R: RawRwLockDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> {
1773
    /// Atomically downgrades a write lock into a read lock without allowing any
1774
    /// writers to take exclusive access of the lock in the meantime.
1775
    ///
1776
    /// This is functionally equivalent to the `downgrade` method on [`RwLockWriteGuard`].
1777
    pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> {
1778
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1779
        unsafe {
1780
            s.rwlock.raw.downgrade();
1781
        }
1782
1783
        // SAFETY: prevent the arc's refcount from changing using ManuallyDrop and ptr::read
1784
        let s = ManuallyDrop::new(s);
1785
        let rwlock = unsafe { ptr::read(&s.rwlock) };
1786
1787
        ArcRwLockReadGuard {
1788
            rwlock,
1789
            marker: PhantomData,
1790
        }
1791
    }
1792
}
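As an illustration of the downgrade operation (a sketch, not from this file), the plain-guard form that the comment above calls functionally equivalent looks like this with parking_lot:

// Sketch only: plain-guard `downgrade`, assuming the parking_lot crate.
use parking_lot::{RwLock, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);

    let mut w = lock.write();
    w.push(4);

    // Atomically swap the exclusive lock for a shared one; no other writer can
    // slip in between the write phase and the read phase.
    let r = RwLockWriteGuard::downgrade(w);
    assert_eq!(r.len(), 4);

    // Other readers can now share the lock with `r`.
    assert_eq!(lock.read().len(), 4);
}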
1793
1794
#[cfg(feature = "arc_lock")]
1795
impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> {
1796
    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
1797
    /// writers to take exclusive access of the lock in the meantime.
1798
    ///
1799
    /// This is functionally identical to the `downgrade_to_upgradable` method on [`RwLockWriteGuard`].
1800
    pub fn downgrade_to_upgradable(s: Self) -> ArcRwLockUpgradableReadGuard<R, T> {
1801
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1802
        unsafe {
1803
            s.rwlock.raw.downgrade_to_upgradable();
1804
        }
1805
1806
        // SAFETY: same as above
1807
        let s = ManuallyDrop::new(s);
1808
        let rwlock = unsafe { ptr::read(&s.rwlock) };
1809
1810
        ArcRwLockUpgradableReadGuard {
1811
            rwlock,
1812
            marker: PhantomData,
1813
        }
1814
    }
1815
}
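Similarly, a sketch of `downgrade_to_upgradable` on the plain guard (the Arc method above is documented as functionally identical), assuming parking_lot:

// Sketch only: keep read access while retaining the right to upgrade again.
use parking_lot::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new(0u32);

    let mut w = lock.write();
    *w = 1;

    // Downgrade to an upgradable read lock: readers may join, writers may not.
    let u = RwLockWriteGuard::downgrade_to_upgradable(w);
    assert_eq!(*u, 1);

    // Upgrading waits only for plain readers (none here) before handing back
    // exclusive access.
    let mut w = RwLockUpgradableReadGuard::upgrade(u);
    *w += 1;
    assert_eq!(*w, 2);
}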
1816
1817
#[cfg(feature = "arc_lock")]
1818
impl<R: RawRwLockFair, T: ?Sized> ArcRwLockWriteGuard<R, T> {
1819
    /// Unlocks the `RwLock` using a fair unlock protocol.
1820
    ///
1821
    /// This is functionally equivalent to the `unlock_fair` method on [`RwLockWriteGuard`].
1822
    #[inline]
1823
    pub fn unlock_fair(s: Self) {
1824
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1825
        unsafe {
1826
            s.rwlock.raw.unlock_exclusive_fair();
1827
        }
1828
1829
        // SAFETY: prevent the Arc from leaking memory
1830
        let mut s = ManuallyDrop::new(s);
1831
        unsafe { ptr::drop_in_place(&mut s.rwlock) };
1832
    }
1833
1834
    /// Temporarily unlocks the `RwLock` to execute the given function.
1835
    ///
1836
    /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockWriteGuard`].
1837
    #[inline]
1838
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
1839
    where
1840
        F: FnOnce() -> U,
1841
    {
1842
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1843
        unsafe {
1844
            s.rwlock.raw.unlock_exclusive_fair();
1845
        }
1846
        defer!(s.rwlock.raw.lock_exclusive());
1847
        f()
1848
    }
1849
1850
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
1851
    ///
1852
    /// This method is functionally equivalent to the `bump` method on [`RwLockWriteGuard`].
1853
    #[inline]
1854
    pub fn bump(s: &mut Self) {
1855
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1856
        unsafe {
1857
            s.rwlock.raw.bump_exclusive();
1858
        }
1859
    }
1860
}
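A sketch of the fair-unlock protocol and `bump`, shown on the plain write guard (the Arc methods above are documented as equivalents), assuming parking_lot:

// Sketch only: fair unlocking hands the lock to a waiting thread instead of
// letting the current thread immediately re-acquire it.
use parking_lot::{RwLock, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new(0u32);

    let mut w = lock.write();
    *w += 1;
    RwLockWriteGuard::unlock_fair(w); // with no waiters this is a plain unlock

    let mut w = lock.write();
    *w += 1;
    // `bump` is equivalent to `unlock_fair` followed by `write`, but cheaper
    // when nobody is waiting because the lock is never actually released.
    RwLockWriteGuard::bump(&mut w);
    *w += 1;
    assert_eq!(*w, 3);
}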
1861
1862
#[cfg(feature = "arc_lock")]
1863
impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockWriteGuard<R, T> {
1864
    type Target = T;
1865
    #[inline]
1866
    fn deref(&self) -> &T {
1867
        unsafe { &*self.rwlock.data.get() }
1868
    }
1869
}
1870
1871
#[cfg(feature = "arc_lock")]
1872
impl<R: RawRwLock, T: ?Sized> DerefMut for ArcRwLockWriteGuard<R, T> {
1873
    #[inline]
1874
    fn deref_mut(&mut self) -> &mut T {
1875
        unsafe { &mut *self.rwlock.data.get() }
1876
    }
1877
}
1878
1879
#[cfg(feature = "arc_lock")]
1880
impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockWriteGuard<R, T> {
1881
    #[inline]
1882
    fn drop(&mut self) {
1883
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
1884
        unsafe {
1885
            self.rwlock.raw.unlock_exclusive();
1886
        }
1887
    }
1888
}
1889
1890
#[cfg(feature = "arc_lock")]
1891
impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockWriteGuard<R, T> {
1892
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1893
        fmt::Debug::fmt(&**self, f)
1894
    }
1895
}
1896
1897
#[cfg(feature = "arc_lock")]
1898
impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display for ArcRwLockWriteGuard<R, T> {
1899
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1900
        (**self).fmt(f)
1901
    }
1902
}
1903
1904
/// RAII structure used to release the upgradable read access of a lock when
1905
/// dropped.
1906
#[clippy::has_significant_drop]
1907
#[must_use = "if unused the RwLock will immediately unlock"]
1908
pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> {
1909
    rwlock: &'a RwLock<R, T>,
1910
    marker: PhantomData<(&'a T, R::GuardMarker)>,
1911
}
1912
1913
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
1914
    for RwLockUpgradableReadGuard<'a, R, T>
1915
{
1916
}
1917
1918
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1919
    /// Returns a reference to the original reader-writer lock object.
1920
0
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
1921
0
        s.rwlock
1922
0
    }
1923
1924
    /// Temporarily unlocks the `RwLock` to execute the given function.
1925
    ///
1926
    /// This is safe because `&mut` guarantees that there exist no other
1927
    /// references to the data protected by the `RwLock`.
1928
    #[inline]
1929
0
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
1930
0
    where
1931
0
        F: FnOnce() -> U,
1932
    {
1933
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
1934
0
        unsafe {
1935
0
            s.rwlock.raw.unlock_upgradable();
1936
0
        }
1937
0
        defer!(s.rwlock.raw.lock_upgradable());
1938
0
        f()
1939
0
    }
1940
1941
    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
1942
    /// blocking the current thread until it can be acquired.
1943
0
    pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
1944
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
1945
0
        unsafe {
1946
0
            s.rwlock.raw.upgrade();
1947
0
        }
1948
0
        let rwlock = s.rwlock;
1949
0
        mem::forget(s);
1950
0
        RwLockWriteGuard {
1951
0
            rwlock,
1952
0
            marker: PhantomData,
1953
0
        }
1954
0
    }
1955
1956
    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
1957
    ///
1958
    /// If the access could not be granted at this time, then the current guard is returned.
1959
0
    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
1960
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
1961
0
        if unsafe { s.rwlock.raw.try_upgrade() } {
1962
0
            let rwlock = s.rwlock;
1963
0
            mem::forget(s);
1964
0
            Ok(RwLockWriteGuard {
1965
0
                rwlock,
1966
0
                marker: PhantomData,
1967
0
            })
1968
        } else {
1969
0
            Err(s)
1970
        }
1971
0
    }
1972
}
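A sketch of the upgrade path on the upgradable read guard, assuming parking_lot:

// Sketch only: upgradable reads coexist with plain readers, but only one
// upgradable guard exists at a time, so `upgrade` cannot deadlock against
// another upgrader.
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn main() {
    let lock = RwLock::new(Vec::<u32>::new());

    {
        let u = lock.upgradable_read();
        // Blocks until all plain readers are gone, then yields a write guard.
        let mut w = RwLockUpgradableReadGuard::upgrade(u);
        w.push(1);
    }

    // `try_upgrade` hands the original guard back instead of blocking.
    let u = lock.upgradable_read();
    match RwLockUpgradableReadGuard::try_upgrade(u) {
        Ok(mut w) => w.push(2),
        Err(_still_upgradable) => {}
    }

    assert_eq!(lock.read().len(), 2);
}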
1973
1974
impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1975
    /// Unlocks the `RwLock` using a fair unlock protocol.
1976
    ///
1977
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
1978
    /// the `RwLock` before another has the chance to acquire the lock, even if
1979
    /// that thread has been blocked on the `RwLock` for a long time. This is
1980
    /// the default because it allows much higher throughput as it avoids
1981
    /// forcing a context switch on every `RwLock` unlock. This can result in one
1982
    /// thread acquiring a `RwLock` many more times than other threads.
1983
    ///
1984
    /// However, in some cases it can be beneficial to ensure fairness by forcing
1985
    /// the lock to pass on to a waiting thread if there is one. This is done by
1986
    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
1987
    #[inline]
1988
0
    pub fn unlock_fair(s: Self) {
1989
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
1990
0
        unsafe {
1991
0
            s.rwlock.raw.unlock_upgradable_fair();
1992
0
        }
1993
0
        mem::forget(s);
1994
0
    }
1995
1996
    /// Temporarily unlocks the `RwLock` to execute the given function.
1997
    ///
1998
    /// The `RwLock` is unlocked using a fair unlock protocol.
1999
    ///
2000
    /// This is safe because `&mut` guarantees that there exist no other
2001
    /// references to the data protected by the `RwLock`.
2002
    #[inline]
2003
0
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
2004
0
    where
2005
0
        F: FnOnce() -> U,
2006
    {
2007
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2008
0
        unsafe {
2009
0
            s.rwlock.raw.unlock_upgradable_fair();
2010
0
        }
2011
0
        defer!(s.rwlock.raw.lock_upgradable());
2012
0
        f()
2013
0
    }
2014
2015
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
2016
    ///
2017
    /// This method is functionally equivalent to calling `unlock_fair` followed
2018
    /// by `upgradable_read`; however, it can be much more efficient in the case where there
2019
    /// are no waiting threads.
2020
    #[inline]
2021
0
    pub fn bump(s: &mut Self) {
2022
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2023
0
        unsafe {
2024
0
            s.rwlock.raw.bump_upgradable();
2025
0
        }
2026
0
    }
2027
}
2028
2029
impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
2030
    /// Atomically downgrades an upgradable read lock into a shared read lock
2031
    /// without allowing any writers to take exclusive access of the lock in the
2032
    /// meantime.
2033
    ///
2034
    /// Note that if there are any writers currently waiting to take the lock
2035
    /// then other readers may not be able to acquire the lock even if it was
2036
    /// downgraded.
2037
0
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
2038
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2039
0
        unsafe {
2040
0
            s.rwlock.raw.downgrade_upgradable();
2041
0
        }
2042
0
        let rwlock = s.rwlock;
2043
0
        mem::forget(s);
2044
0
        RwLockReadGuard {
2045
0
            rwlock,
2046
0
            marker: PhantomData,
2047
0
        }
2048
0
    }
2049
2050
    /// First, atomically upgrades an upgradable read lock into an exclusive write lock,
2051
    /// blocking the current thread until it can be acquired.
2052
    ///
2053
    /// Then, calls the provided closure with an exclusive reference to the lock's data.
2054
    ///
2055
    /// Finally, atomically downgrades the lock back to an upgradable read lock.
2056
    /// The closure's return value is returned.
2057
    ///
2058
    /// This function only requires a mutable reference to the guard, unlike
2059
    /// `upgrade` which takes the guard by value.
2060
0
    pub fn with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret {
2061
0
        unsafe {
2062
0
            self.rwlock.raw.upgrade();
2063
0
        }
2064
2065
        // Safety: We just upgraded the lock, so we have mutable access to the data.
2066
        // This will restore the state the lock was in at the start of the function.
2067
0
        defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2068
2069
        // Safety: We upgraded the lock, so we have mutable access to the data.
2070
        // When this function returns, whether by drop or panic,
2071
        // the drop guard will downgrade it back to an upgradeable lock.
2072
0
        f(unsafe { &mut *self.rwlock.data.get() })
2073
0
    }
2074
2075
    /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock.
2076
    ///
2077
    /// If the access could not be granted at this time, then `None` is returned.
2078
    ///
2079
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2080
    /// and finally downgrades the lock back to an upgradable read lock.
2081
    /// The closure's return value is wrapped in `Some` and returned.
2082
    ///
2083
    /// This function only requires a mutable reference to the guard, unlike
2084
    /// `try_upgrade` which takes the guard by value.
2085
0
    pub fn try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret> {
2086
0
        if unsafe { self.rwlock.raw.try_upgrade() } {
2087
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2088
            // This will restore the state the lock was in at the start of the function.
2089
0
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2090
2091
            // Safety: We upgraded the lock, so we have mutable access to the data.
2092
            // When this function returns, whether by drop or panic,
2093
            // the drop guard will downgrade it back to an upgradeable lock.
2094
0
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2095
        } else {
2096
0
            None
2097
        }
2098
0
    }
2099
}
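A sketch of `with_upgraded` and `try_with_upgraded`, which only need `&mut` on the guard; it assumes parking_lot resolving to a lock_api version that provides these methods, as this one does:

// Sketch only: upgrade, run a closure with `&mut` access, downgrade back.
use parking_lot::RwLock;

fn main() {
    let lock = RwLock::new(0u32);
    let mut u = lock.upgradable_read();

    // The guard is still an upgradable read guard after the call returns.
    let old = u.with_upgraded(|value| {
        let old = *value;
        *value += 1;
        old
    });
    assert_eq!(old, 0);
    assert_eq!(*u, 1);

    // The non-blocking variant reports failure with `None` instead of waiting
    // for plain readers to leave.
    if u.try_with_upgraded(|value| *value += 1).is_some() {
        assert_eq!(*u, 2);
    }
}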
2100
2101
impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
2102
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2103
    /// write lock, until a timeout is reached.
2104
    ///
2105
    /// If the access could not be granted before the timeout expires, then
2106
    /// the current guard is returned.
2107
0
    pub fn try_upgrade_for(
2108
0
        s: Self,
2109
0
        timeout: R::Duration,
2110
0
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
2111
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2112
0
        if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
2113
0
            let rwlock = s.rwlock;
2114
0
            mem::forget(s);
2115
0
            Ok(RwLockWriteGuard {
2116
0
                rwlock,
2117
0
                marker: PhantomData,
2118
0
            })
2119
        } else {
2120
0
            Err(s)
2121
        }
2122
0
    }
2123
2124
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2125
    /// write lock, until a timeout is reached.
2126
    ///
2127
    /// If the access could not be granted before the timeout expires, then
2128
    /// the current guard is returned.
2129
    #[inline]
2130
0
    pub fn try_upgrade_until(
2131
0
        s: Self,
2132
0
        timeout: R::Instant,
2133
0
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
2134
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2135
0
        if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
2136
0
            let rwlock = s.rwlock;
2137
0
            mem::forget(s);
2138
0
            Ok(RwLockWriteGuard {
2139
0
                rwlock,
2140
0
                marker: PhantomData,
2141
0
            })
2142
        } else {
2143
0
            Err(s)
2144
        }
2145
0
    }
2146
}
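A sketch of the timed upgrade; parking_lot implements the timed raw traits with std::time::Duration and Instant, so `try_upgrade_for` takes an ordinary Duration:

// Sketch only: give plain readers a bounded amount of time to clear out.
use std::time::Duration;
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn main() {
    let lock = RwLock::new(String::new());

    let u = lock.upgradable_read();
    match RwLockUpgradableReadGuard::try_upgrade_for(u, Duration::from_millis(50)) {
        Ok(mut w) => w.push_str("updated"),
        // On timeout the unchanged upgradable guard comes back in `Err`.
        Err(u) => println!("readers still active, value = {:?}", *u),
    }
}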
2147
2148
impl<'a, R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a>
2149
    RwLockUpgradableReadGuard<'a, R, T>
2150
{
2151
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2152
    /// write lock, until a timeout is reached.
2153
    ///
2154
    /// If the access could not be granted before the timeout expires, then
2155
    /// `None` is returned.
2156
    ///
2157
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2158
    /// and finally downgrades the lock back to an upgradable read lock.
2159
    /// The closure's return value is wrapped in `Some` and returned.
2160
    ///
2161
    /// This function only requires a mutable reference to the guard, unlike
2162
    /// `try_upgrade_for` which takes the guard by value.
2163
0
    pub fn try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>(
2164
0
        &mut self,
2165
0
        timeout: R::Duration,
2166
0
        f: F,
2167
0
    ) -> Option<Ret> {
2168
0
        if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } {
2169
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2170
            // This will restore the state the lock was in at the start of the function.
2171
0
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2172
2173
            // Safety: We upgraded the lock, so we have mutable access to the data.
2174
            // When this function returns, whether by drop or panic,
2175
            // the drop guard will downgrade it back to an upgradeable lock.
2176
0
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2177
        } else {
2178
0
            None
2179
        }
2180
0
    }
2181
2182
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2183
    /// write lock, until a timeout is reached.
2184
    ///
2185
    /// If the access could not be granted before the timeout expires, then
2186
    /// `None` is returned.
2187
    ///
2188
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2189
    /// and finally downgrades the lock back to an upgradable read lock.
2190
    /// The closure's return value is wrapped in `Some` and returned.
2191
    ///
2192
    /// This function only requires a mutable reference to the guard, unlike
2193
    /// `try_upgrade_until` which takes the guard by value.
2194
0
    pub fn try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>(
2195
0
        &mut self,
2196
0
        timeout: R::Instant,
2197
0
        f: F,
2198
0
    ) -> Option<Ret> {
2199
0
        if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } {
2200
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2201
            // This will restore the state the lock was in at the start of the function.
2202
0
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2203
2204
            // Safety: We upgraded the lock, so we have mutable access to the data.
2205
            // When this function returns, whether by drop or panic,
2206
            // the drop guard will downgrade it back to an upgradeable lock.
2207
0
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2208
        } else {
2209
0
            None
2210
        }
2211
0
    }
2212
}
2213
2214
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
2215
    type Target = T;
2216
    #[inline]
2217
0
    fn deref(&self) -> &T {
2218
0
        unsafe { &*self.rwlock.data.get() }
2219
0
    }
2220
}
2221
2222
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
2223
    #[inline]
2224
0
    fn drop(&mut self) {
2225
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2226
0
        unsafe {
2227
0
            self.rwlock.raw.unlock_upgradable();
2228
0
        }
2229
0
    }
2230
}
2231
2232
impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
2233
    for RwLockUpgradableReadGuard<'a, R, T>
2234
{
2235
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2236
0
        fmt::Debug::fmt(&**self, f)
2237
0
    }
2238
}
2239
2240
impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
2241
    for RwLockUpgradableReadGuard<'a, R, T>
2242
{
2243
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2244
0
        (**self).fmt(f)
2245
0
    }
2246
}
2247
2248
#[cfg(feature = "owning_ref")]
2249
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
2250
    for RwLockUpgradableReadGuard<'a, R, T>
2251
{
2252
}
2253
2254
/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`.
2255
/// This is similar to the `RwLockUpgradableReadGuard` struct, except instead of using a reference to unlock the
2256
/// `RwLock` it uses an `Arc<RwLock>`. This has several advantages, most notably that it has a `'static`
2257
/// lifetime.
2258
#[cfg(feature = "arc_lock")]
2259
#[clippy::has_significant_drop]
2260
#[must_use = "if unused the RwLock will immediately unlock"]
2261
pub struct ArcRwLockUpgradableReadGuard<R: RawRwLockUpgrade, T: ?Sized> {
2262
    rwlock: Arc<RwLock<R, T>>,
2263
    marker: PhantomData<R::GuardMarker>,
2264
}
2265
2266
#[cfg(feature = "arc_lock")]
2267
impl<R: RawRwLockUpgrade, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
2268
    /// Returns a reference to the rwlock, contained in its original `Arc`.
2269
    pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> {
2270
        &s.rwlock
2271
    }
2272
2273
    /// Temporarily unlocks the `RwLock` to execute the given function.
2274
    ///
2275
    /// This is functionally identical to the `unlocked` method on [`RwLockUpgradableReadGuard`].
2276
    #[inline]
2277
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
2278
    where
2279
        F: FnOnce() -> U,
2280
    {
2281
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2282
        unsafe {
2283
            s.rwlock.raw.unlock_upgradable();
2284
        }
2285
        defer!(s.rwlock.raw.lock_upgradable());
2286
        f()
2287
    }
2288
2289
    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
2290
    /// blocking the current thread until it can be acquired.
2291
    pub fn upgrade(s: Self) -> ArcRwLockWriteGuard<R, T> {
2292
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2293
        unsafe {
2294
            s.rwlock.raw.upgrade();
2295
        }
2296
2297
        // SAFETY: avoid incrementing or decrementing the refcount using ManuallyDrop and reading the Arc out
2298
        //         of the struct
2299
        let s = ManuallyDrop::new(s);
2300
        let rwlock = unsafe { ptr::read(&s.rwlock) };
2301
2302
        ArcRwLockWriteGuard {
2303
            rwlock,
2304
            marker: PhantomData,
2305
        }
2306
    }
2307
2308
    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
2309
    ///
2310
    /// If the access could not be granted at this time, then the current guard is returned.
2311
    pub fn try_upgrade(s: Self) -> Result<ArcRwLockWriteGuard<R, T>, Self> {
2312
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2313
        if unsafe { s.rwlock.raw.try_upgrade() } {
2314
            // SAFETY: same as above
2315
            let s = ManuallyDrop::new(s);
2316
            let rwlock = unsafe { ptr::read(&s.rwlock) };
2317
2318
            Ok(ArcRwLockWriteGuard {
2319
                rwlock,
2320
                marker: PhantomData,
2321
            })
2322
        } else {
2323
            Err(s)
2324
        }
2325
    }
2326
}
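A sketch of the Arc-based upgradable guard; it assumes parking_lot with the "arc_lock" feature and uses the re-exported lock_api to name the guard type for the associated `upgrade` call:

// Sketch only: assumes parking_lot with the "arc_lock" feature enabled.
use std::sync::Arc;
use parking_lot::lock_api::ArcRwLockUpgradableReadGuard;
use parking_lot::RwLock;

fn main() {
    let lock = Arc::new(RwLock::new(Vec::<u8>::new()));

    // The guard owns its own clone of the Arc, so the original handle can go away.
    let u = lock.upgradable_read_arc();
    drop(lock);

    if u.is_empty() {
        let mut w = ArcRwLockUpgradableReadGuard::upgrade(u);
        w.push(42);
        assert_eq!(w.len(), 1);
    }
}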
2327
2328
#[cfg(feature = "arc_lock")]
2329
impl<R: RawRwLockUpgradeFair, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
2330
    /// Unlocks the `RwLock` using a fair unlock protocol.
2331
    ///
2332
    /// This is functionally identical to the `unlock_fair` method on [`RwLockUpgradableReadGuard`].
2333
    #[inline]
2334
    pub fn unlock_fair(s: Self) {
2335
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2336
        unsafe {
2337
            s.rwlock.raw.unlock_upgradable_fair();
2338
        }
2339
2340
        // SAFETY: make sure we decrement the refcount properly
2341
        let mut s = ManuallyDrop::new(s);
2342
        unsafe { ptr::drop_in_place(&mut s.rwlock) };
2343
    }
2344
2345
    /// Temporarily unlocks the `RwLock` to execute the given function.
2346
    ///
2347
    /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockUpgradableReadGuard`].
2348
    #[inline]
2349
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
2350
    where
2351
        F: FnOnce() -> U,
2352
    {
2353
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2354
        unsafe {
2355
            s.rwlock.raw.unlock_upgradable_fair();
2356
        }
2357
        defer!(s.rwlock.raw.lock_upgradable());
2358
        f()
2359
    }
2360
2361
    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
2362
    ///
2363
    /// This method is functionally equivalent to calling `bump` on [`RwLockUpgradableReadGuard`].
2364
    #[inline]
2365
    pub fn bump(s: &mut Self) {
2366
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2367
        unsafe {
2368
            s.rwlock.raw.bump_upgradable();
2369
        }
2370
    }
2371
}
2372
2373
#[cfg(feature = "arc_lock")]
2374
impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
2375
    /// Atomically downgrades an upgradable read lock into a shared read lock
2376
    /// without allowing any writers to take exclusive access of the lock in the
2377
    /// meantime.
2378
    ///
2379
    /// Note that if there are any writers currently waiting to take the lock
2380
    /// then other readers may not be able to acquire the lock even if it was
2381
    /// downgraded.
2382
    pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> {
2383
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2384
        unsafe {
2385
            s.rwlock.raw.downgrade_upgradable();
2386
        }
2387
2388
        // SAFETY: use ManuallyDrop and ptr::read to ensure the refcount is not changed
2389
        let s = ManuallyDrop::new(s);
2390
        let rwlock = unsafe { ptr::read(&s.rwlock) };
2391
2392
        ArcRwLockReadGuard {
2393
            rwlock,
2394
            marker: PhantomData,
2395
        }
2396
    }
2397
2398
    /// First, atomically upgrades an upgradable read lock into an exclusive write lock,
2399
    /// blocking the current thread until it can be acquired.
2400
    ///
2401
    /// Then, calls the provided closure with an exclusive reference to the lock's data.
2402
    ///
2403
    /// Finally, atomically downgrades the lock back to an upgradable read lock.
2404
    /// The closure's return value is returned.
2405
    ///
2406
    /// This function only requires a mutable reference to the guard, unlike
2407
    /// `upgrade` which takes the guard by value.
2408
    pub fn with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret {
2409
        unsafe {
2410
            self.rwlock.raw.upgrade();
2411
        }
2412
2413
        // Safety: We just upgraded the lock, so we have mutable access to the data.
2414
        // This will restore the state the lock was in at the start of the function.
2415
        defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2416
2417
        // Safety: We upgraded the lock, so we have mutable access to the data.
2418
        // When this function returns, whether by drop or panic,
2419
        // the drop guard will downgrade it back to an upgradeable lock.
2420
        f(unsafe { &mut *self.rwlock.data.get() })
2421
    }
2422
2423
    /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock.
2424
    ///
2425
    /// If the access could not be granted at this time, then `None` is returned.
2426
    ///
2427
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2428
    /// and finally downgrades the lock back to an upgradable read lock.
2429
    /// The closure's return value is wrapped in `Some` and returned.
2430
    ///
2431
    /// This function only requires a mutable reference to the guard, unlike
2432
    /// `try_upgrade` which takes the guard by value.
2433
    pub fn try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret> {
2434
        if unsafe { self.rwlock.raw.try_upgrade() } {
2435
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2436
            // This will restore the state the lock was in at the start of the function.
2437
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2438
2439
            // Safety: We upgraded the lock, so we have mutable access to the data.
2440
            // When this function returns, whether by drop or panic,
2441
            // the drop guard will downgrade it back to an upgradeable lock.
2442
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2443
        } else {
2444
            None
2445
        }
2446
    }
2447
}
2448
2449
#[cfg(feature = "arc_lock")]
2450
impl<R: RawRwLockUpgradeTimed, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
2451
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2452
    /// write lock, until a timeout is reached.
2453
    ///
2454
    /// If the access could not be granted before the timeout expires, then
2455
    /// the current guard is returned.
2456
    pub fn try_upgrade_for(
2457
        s: Self,
2458
        timeout: R::Duration,
2459
    ) -> Result<ArcRwLockWriteGuard<R, T>, Self> {
2460
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2461
        if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
2462
            // SAFETY: same as above
2463
            let s = ManuallyDrop::new(s);
2464
            let rwlock = unsafe { ptr::read(&s.rwlock) };
2465
2466
            Ok(ArcRwLockWriteGuard {
2467
                rwlock,
2468
                marker: PhantomData,
2469
            })
2470
        } else {
2471
            Err(s)
2472
        }
2473
    }
2474
2475
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2476
    /// write lock, until a timeout is reached.
2477
    ///
2478
    /// If the access could not be granted before the timeout expires, then
2479
    /// the current guard is returned.
2480
    #[inline]
2481
    pub fn try_upgrade_until(
2482
        s: Self,
2483
        timeout: R::Instant,
2484
    ) -> Result<ArcRwLockWriteGuard<R, T>, Self> {
2485
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2486
        if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
2487
            // SAFETY: same as above
2488
            let s = ManuallyDrop::new(s);
2489
            let rwlock = unsafe { ptr::read(&s.rwlock) };
2490
2491
            Ok(ArcRwLockWriteGuard {
2492
                rwlock,
2493
                marker: PhantomData,
2494
            })
2495
        } else {
2496
            Err(s)
2497
        }
2498
    }
2499
}
2500
2501
#[cfg(feature = "arc_lock")]
2502
impl<R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade, T: ?Sized>
2503
    ArcRwLockUpgradableReadGuard<R, T>
2504
{
2505
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2506
    /// write lock, until a timeout is reached.
2507
    ///
2508
    /// If the access could not be granted before the timeout expires, then
2509
    /// `None` is returned.
2510
    ///
2511
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2512
    /// and finally downgrades the lock back to an upgradable read lock.
2513
    /// The closure's return value is wrapped in `Some` and returned.
2514
    ///
2515
    /// This function only requires a mutable reference to the guard, unlike
2516
    /// `try_upgrade_for` which takes the guard by value.
2517
    pub fn try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>(
2518
        &mut self,
2519
        timeout: R::Duration,
2520
        f: F,
2521
    ) -> Option<Ret> {
2522
        if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } {
2523
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2524
            // This will restore the state the lock was in at the start of the function.
2525
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2526
2527
            // Safety: We upgraded the lock, so we have mutable access to the data.
2528
            // When this function returns, whether by drop or panic,
2529
            // the drop guard will downgrade it back to an upgradeable lock.
2530
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2531
        } else {
2532
            None
2533
        }
2534
    }
2535
2536
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
2537
    /// write lock, until a timeout is reached.
2538
    ///
2539
    /// If the access could not be granted before the timeout expires, then
2540
    /// `None` is returned.
2541
    ///
2542
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
2543
    /// and finally downgrades the lock back to an upgradable read lock.
2544
    /// The closure's return value is wrapped in `Some` and returned.
2545
    ///
2546
    /// This function only requires a mutable reference to the guard, unlike
2547
    /// `try_upgrade_until` which takes the guard by value.
2548
    pub fn try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>(
2549
        &mut self,
2550
        timeout: R::Instant,
2551
        f: F,
2552
    ) -> Option<Ret> {
2553
        if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } {
2554
            // Safety: We just upgraded the lock, so we have mutable access to the data.
2555
            // This will restore the state the lock was in at the start of the function.
2556
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });
2557
2558
            // Safety: We upgraded the lock, so we have mutable access to the data.
2559
            // When this function returns, whether by drop or panic,
2560
            // the drop guard will downgrade it back to an upgradeable lock.
2561
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
2562
        } else {
2563
            None
2564
        }
2565
    }
2566
}
2567
2568
#[cfg(feature = "arc_lock")]
2569
impl<R: RawRwLockUpgrade, T: ?Sized> Deref for ArcRwLockUpgradableReadGuard<R, T> {
2570
    type Target = T;
2571
    #[inline]
2572
    fn deref(&self) -> &T {
2573
        unsafe { &*self.rwlock.data.get() }
2574
    }
2575
}
2576
2577
#[cfg(feature = "arc_lock")]
2578
impl<R: RawRwLockUpgrade, T: ?Sized> Drop for ArcRwLockUpgradableReadGuard<R, T> {
2579
    #[inline]
2580
    fn drop(&mut self) {
2581
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2582
        unsafe {
2583
            self.rwlock.raw.unlock_upgradable();
2584
        }
2585
    }
2586
}
2587
2588
#[cfg(feature = "arc_lock")]
2589
impl<R: RawRwLockUpgrade, T: fmt::Debug + ?Sized> fmt::Debug
2590
    for ArcRwLockUpgradableReadGuard<R, T>
2591
{
2592
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2593
        fmt::Debug::fmt(&**self, f)
2594
    }
2595
}
2596
2597
#[cfg(feature = "arc_lock")]
2598
impl<R: RawRwLockUpgrade, T: fmt::Display + ?Sized> fmt::Display
2599
    for ArcRwLockUpgradableReadGuard<R, T>
2600
{
2601
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2602
        (**self).fmt(f)
2603
    }
2604
}
2605
2606
/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
2607
/// subfield of the protected data.
2608
///
2609
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
2610
/// former doesn't support temporarily unlocking and re-locking, since that
2611
/// could introduce soundness issues if the locked object is modified by another
2612
/// thread.
2613
#[clippy::has_significant_drop]
2614
#[must_use = "if unused the RwLock will immediately unlock"]
2615
pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
2616
    raw: &'a R,
2617
    data: *const T,
2618
    marker: PhantomData<&'a T>,
2619
}
2620
2621
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
2622
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
2623
    R::GuardMarker: Send
2624
{
2625
}
2626
2627
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
2628
    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
2629
    ///
2630
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
2631
    /// in already locked the data.
2632
    ///
2633
    /// This is an associated function that needs to be
2634
    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
2635
    /// the same name on the contents of the locked data.
2636
    #[inline]
2637
0
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
2638
0
    where
2639
0
        F: FnOnce(&T) -> &U,
2640
    {
2641
0
        let raw = s.raw;
2642
0
        let data = f(unsafe { &*s.data });
2643
0
        mem::forget(s);
2644
0
        MappedRwLockReadGuard {
2645
0
            raw,
2646
0
            data,
2647
0
            marker: PhantomData,
2648
0
        }
2649
0
    }
2650
2651
    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
2652
    /// locked data. The original guard is returned if the closure returns `None`.
2653
    ///
2654
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
2655
    /// in already locked the data.
2656
    ///
2657
    /// This is an associated function that needs to be
2658
    /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of
2659
    /// the same name on the contents of the locked data.
2660
    #[inline]
2661
0
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
2662
0
    where
2663
0
        F: FnOnce(&T) -> Option<&U>,
2664
    {
2665
0
        let raw = s.raw;
2666
0
        let data = match f(unsafe { &*s.data }) {
2667
0
            Some(data) => data,
2668
0
            None => return Err(s),
2669
        };
2670
0
        mem::forget(s);
2671
0
        Ok(MappedRwLockReadGuard {
2672
0
            raw,
2673
0
            data,
2674
0
            marker: PhantomData,
2675
0
        })
2676
0
    }
2677
}
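A sketch of producing the mapped read guard via `RwLockReadGuard::map` / `try_map`, assuming parking_lot (whose MappedRwLockReadGuard alias fixes R to its RawRwLock):

// Sketch only: narrow a read guard down to one field of the protected data.
use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};

struct Config {
    name: String,
    retries: u32,
}

fn main() {
    let lock = RwLock::new(Config { name: "server".into(), retries: 3 });

    // The mapped guard still holds the shared lock but only exposes `name`.
    let name: MappedRwLockReadGuard<'_, String> =
        RwLockReadGuard::map(lock.read(), |c| &c.name);
    assert_eq!(*name, "server");
    drop(name);

    // `try_map` gives the original guard back if the closure returns `None`.
    let retries = RwLockReadGuard::try_map(lock.read(), |c| {
        if c.retries > 0 { Some(&c.retries) } else { None }
    });
    assert!(retries.is_ok());
}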
2678
2679
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
2680
    /// Unlocks the `RwLock` using a fair unlock protocol.
2681
    ///
2682
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
2683
    /// the `RwLock` before another has the chance to acquire the lock, even if
2684
    /// that thread has been blocked on the `RwLock` for a long time. This is
2685
    /// the default because it allows much higher throughput as it avoids
2686
    /// forcing a context switch on every `RwLock` unlock. This can result in one
2687
    /// thread acquiring a `RwLock` many more times than other threads.
2688
    ///
2689
    /// However, in some cases it can be beneficial to ensure fairness by forcing
2690
    /// the lock to pass on to a waiting thread if there is one. This is done by
2691
    /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
2692
    #[inline]
2693
0
    pub fn unlock_fair(s: Self) {
2694
        // Safety: A MappedRwLockReadGuard always holds a shared lock.
2695
0
        unsafe {
2696
0
            s.raw.unlock_shared_fair();
2697
0
        }
2698
0
        mem::forget(s);
2699
0
    }
2700
}
2701
2702
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> {
2703
    type Target = T;
2704
    #[inline]
2705
0
    fn deref(&self) -> &T {
2706
0
        unsafe { &*self.data }
2707
0
    }
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::gauge::Gauge> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::counter::Counter> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Histogram> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, alloc::vec::Vec<(f64, u64)>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<_, _> as core::ops::deref::Deref>::deref
2708
}
2709
2710
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
2711
    #[inline]
2712
0
    fn drop(&mut self) {
2713
        // Safety: A MappedRwLockReadGuard always holds a shared lock.
2714
0
        unsafe {
2715
0
            self.raw.unlock_shared();
2716
0
        }
2717
0
    }
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::gauge::Gauge> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::counter::Counter> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, prometheus_client::metrics::histogram::Histogram> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<parking_lot::raw_rwlock::RawRwLock, alloc::vec::Vec<(f64, u64)>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <lock_api::rwlock::MappedRwLockReadGuard<_, _> as core::ops::drop::Drop>::drop
2718
}
2719
2720
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
2721
    for MappedRwLockReadGuard<'a, R, T>
2722
{
2723
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2724
0
        fmt::Debug::fmt(&**self, f)
2725
0
    }
2726
}
2727
2728
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
2729
    for MappedRwLockReadGuard<'a, R, T>
2730
{
2731
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2732
0
        (**self).fmt(f)
2733
0
    }
2734
}
2735
2736
#[cfg(feature = "owning_ref")]
2737
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
2738
    for MappedRwLockReadGuard<'a, R, T>
2739
{
2740
}
2741
2742
/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
2743
/// subfield of the protected data.
2744
///
2745
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
2746
/// former doesn't support temporarily unlocking and re-locking, since that
2747
/// could introduce soundness issues if the locked object is modified by another
2748
/// thread.
2749
#[clippy::has_significant_drop]
2750
#[must_use = "if unused the RwLock will immediately unlock"]
2751
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
2752
    raw: &'a R,
2753
    data: *mut T,
2754
    marker: PhantomData<&'a mut T>,
2755
}
2756
2757
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
2758
    for MappedRwLockWriteGuard<'a, R, T>
2759
{
2760
}
2761
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Send + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
2762
    R::GuardMarker: Send
2763
{
2764
}
2765
2766
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
2767
    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
2768
    ///
2769
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
2770
    /// in already locked the data.
2771
    ///
2772
    /// This is an associated function that needs to be
2773
    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
2774
    /// the same name on the contents of the locked data.
2775
    #[inline]
2776
0
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
2777
0
    where
2778
0
        F: FnOnce(&mut T) -> &mut U,
2779
    {
2780
0
        let raw = s.raw;
2781
0
        let data = f(unsafe { &mut *s.data });
2782
0
        mem::forget(s);
2783
0
        MappedRwLockWriteGuard {
2784
0
            raw,
2785
0
            data,
2786
0
            marker: PhantomData,
2787
0
        }
2788
0
    }
2789
2790
    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
2791
    /// locked data. The original guard is returned if the closure returns `None`.
2792
    ///
2793
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
2794
    /// in already locked the data.
2795
    ///
2796
    /// This is an associated function that needs to be
2797
    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
2798
    /// the same name on the contents of the locked data.
2799
    #[inline]
2800
0
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
2801
0
    where
2802
0
        F: FnOnce(&mut T) -> Option<&mut U>,
2803
    {
2804
0
        let raw = s.raw;
2805
0
        let data = match f(unsafe { &mut *s.data }) {
2806
0
            Some(data) => data,
2807
0
            None => return Err(s),
2808
        };
2809
0
        mem::forget(s);
2810
0
        Ok(MappedRwLockWriteGuard {
2811
0
            raw,
2812
0
            data,
2813
0
            marker: PhantomData,
2814
0
        })
2815
0
    }
2816
}
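And the write-side counterpart, a sketch of `RwLockWriteGuard::map` / `try_map` with parking_lot:

// Sketch only: narrow a write guard down to a sub-field of the protected data.
use parking_lot::{RwLock, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new((String::from("log: "), 0u32));

    // The mapped guard keeps the exclusive lock but only exposes the first field.
    let mut text = RwLockWriteGuard::map(lock.write(), |pair| &mut pair.0);
    text.push_str("ready");
    drop(text);

    // `try_map` returns Err(original write guard) when the closure yields None.
    let result = RwLockWriteGuard::try_map(lock.write(), |pair| {
        if pair.1 == 0 { None } else { Some(&mut pair.1) }
    });
    assert!(result.is_err());
    drop(result); // the Err guard still holds the exclusive lock until dropped

    assert_eq!(lock.read().0, "log: ready");
}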
2817
2818
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
2819
    /// Unlocks the `RwLock` using a fair unlock protocol.
2820
    ///
2821
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
2822
    /// the `RwLock` before another has the chance to acquire the lock, even if
2823
    /// that thread has been blocked on the `RwLock` for a long time. This is
2824
    /// the default because it allows much higher throughput as it avoids
2825
    /// forcing a context switch on every `RwLock` unlock. This can result in one
2826
    /// thread acquiring a `RwLock` many more times than other threads.
2827
    ///
2828
    /// However, in some cases it can be beneficial to ensure fairness by forcing
2829
    /// the lock to pass on to a waiting thread if there is one. This is done by
2830
    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
2831
    #[inline]
2832
0
    pub fn unlock_fair(s: Self) {
2833
        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
2834
0
        unsafe {
2835
0
            s.raw.unlock_exclusive_fair();
2836
0
        }
2837
0
        mem::forget(s);
2838
0
    }
2839
}
2840
2841
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
2842
    type Target = T;
2843
    #[inline]
2844
0
    fn deref(&self) -> &T {
2845
0
        unsafe { &*self.data }
2846
0
    }
2847
}
2848
2849
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
2850
    #[inline]
2851
0
    fn deref_mut(&mut self) -> &mut T {
2852
0
        unsafe { &mut *self.data }
2853
0
    }
2854
}
2855
2856
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
2857
    #[inline]
2858
0
    fn drop(&mut self) {
2859
        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
2860
0
        unsafe {
2861
0
            self.raw.unlock_exclusive();
2862
0
        }
2863
0
    }
2864
}
2865
2866
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
2867
    for MappedRwLockWriteGuard<'a, R, T>
2868
{
2869
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2870
0
        fmt::Debug::fmt(&**self, f)
2871
0
    }
2872
}
2873
2874
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
2875
    for MappedRwLockWriteGuard<'a, R, T>
2876
{
2877
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2878
0
        (**self).fmt(f)
2879
0
    }
2880
}
2881
2882
#[cfg(feature = "owning_ref")]
2883
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
2884
    for MappedRwLockWriteGuard<'a, R, T>
2885
{
2886
}