Coverage Report

Created: 2025-10-10 06:23

/rust/registry/src/index.crates.io-1949cf8c6b5b557f/spin-0.10.0/src/rwlock.rs
Line | Count | Source
1
//! A lock that provides data access to either one writer or many readers.
2
3
use crate::{
4
    atomic::{AtomicUsize, Ordering},
5
    RelaxStrategy, Spin,
6
};
7
use core::{
8
    cell::UnsafeCell,
9
    fmt,
10
    marker::PhantomData,
11
    mem,
12
    mem::ManuallyDrop,
13
    ops::{Deref, DerefMut},
14
};
15
16
/// A lock that provides data access to either one writer or many readers.
17
///
18
/// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but uses
19
/// spinning for synchronisation instead. Unlike its namesake, this lock does not
20
/// track lock poisoning.
21
///
22
/// This type of lock allows a number of readers or at most one writer at any
23
/// point in time. The write portion of this lock typically allows modification
24
/// of the underlying data (exclusive access) and the read portion of this lock
25
/// typically allows for read-only access (shared access).
26
///
27
/// The type parameter `T` represents the data that this lock protects. It is
28
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
29
/// allow concurrent access through readers. The RAII guards returned from the
30
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
31
/// to allow access to the contents of the lock.
32
///
33
/// An [`RwLockUpgradableGuard`](RwLockUpgradableGuard) can be upgraded to a
34
/// writable guard through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade)
35
/// or [`RwLockUpgradableGuard::try_upgrade`](RwLockUpgradableGuard::try_upgrade) functions.
36
/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
37
/// functions.
38
///
39
/// Based on Facebook's
40
/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
41
/// This implementation is unfair to writers - if the lock always has readers, then no writers will
42
/// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no
43
/// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken
44
/// when there are existing readers. However, if the lock is highly contended and writes are
45
/// crucial, then this implementation may be a poor choice.
46
///
47
/// # Examples
48
///
49
/// ```
50
/// use spin;
51
///
52
/// let lock = spin::RwLock::new(5);
53
///
54
/// // many reader locks can be held at once
55
/// {
56
///     let r1 = lock.read();
57
///     let r2 = lock.read();
58
///     assert_eq!(*r1, 5);
59
///     assert_eq!(*r2, 5);
60
/// } // read locks are dropped at this point
61
///
62
/// // only one write lock may be held, however
63
/// {
64
///     let mut w = lock.write();
65
///     *w += 1;
66
///     assert_eq!(*w, 6);
67
/// } // write lock is dropped here
68
/// ```
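///
/// An illustrative sketch of the upgradeable-guard behaviour described above
/// (no new readers while the upgradeable guard is held, followed by an upgrade):
///
/// ```
/// let lock = spin::RwLock::new(5);
///
/// // an upgradeable guard prevents new readers from acquiring the lock...
/// let upgradeable = lock.upgradeable_read();
/// assert!(lock.try_read().is_none());
///
/// // ...and can later be upgraded to exclusive write access
/// let mut writable = upgradeable.upgrade();
/// *writable += 1;
/// assert_eq!(*writable, 6);
/// ```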
69
pub struct RwLock<T: ?Sized, R = Spin> {
70
    phantom: PhantomData<R>,
71
    lock: AtomicUsize,
72
    data: UnsafeCell<T>,
73
}
74
75
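// Layout of the lock word:
//   bit 0         WRITER   - set while a writer holds the lock
//   bit 1         UPGRADED - set while an upgradeable reader holds the lock
//   bits 2 and up reader count, in units of READER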
const READER: usize = 1 << 2;
76
const UPGRADED: usize = 1 << 1;
77
const WRITER: usize = 1;
78
79
/// A guard that provides immutable data access.
80
///
81
/// When the guard falls out of scope it will decrement the read count,
82
/// potentially releasing the lock.
83
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
84
    lock: &'a AtomicUsize,
85
    data: *const T,
86
}
87
88
/// A guard that provides mutable data access.
89
///
90
/// When the guard falls out of scope it will release the lock.
91
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized, R = Spin> {
92
    phantom: PhantomData<R>,
93
    inner: &'a RwLock<T, R>,
94
    data: *mut T,
95
}
96
97
/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`].
98
///
99
/// No writers or other upgradeable guards can exist while this is in scope. New reader
100
/// creation is prevented (to alleviate writer starvation) but there may be existing readers
101
/// when the lock is acquired.
102
///
103
/// When the guard falls out of scope it will release the lock.
104
pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized, R = Spin> {
105
    phantom: PhantomData<R>,
106
    inner: &'a RwLock<T, R>,
107
    data: *const T,
108
}
109
110
// Same unsafe impls as `std::sync::RwLock`
111
unsafe impl<T: ?Sized + Send, R> Send for RwLock<T, R> {}
112
unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLock<T, R> {}
113
114
unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockWriteGuard<'_, T, R> {}
115
unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockWriteGuard<'_, T, R> {}
116
117
unsafe impl<T: ?Sized + Sync> Send for RwLockReadGuard<'_, T> {}
118
unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
119
120
unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockUpgradableGuard<'_, T, R> {}
121
unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockUpgradableGuard<'_, T, R> {}
122
123
impl<T, R> RwLock<T, R> {
124
    /// Creates a new spinlock wrapping the supplied data.
125
    ///
126
    /// May be used statically:
127
    ///
128
    /// ```
129
    /// use spin;
130
    ///
131
    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
132
    ///
133
    /// fn demo() {
134
    ///     let lock = RW_LOCK.read();
135
    ///     // do something with lock
136
    ///     drop(lock);
137
    /// }
138
    /// ```
139
    #[inline]
140
0
    pub const fn new(data: T) -> Self {
141
0
        RwLock {
142
0
            phantom: PhantomData,
143
0
            lock: AtomicUsize::new(0),
144
0
            data: UnsafeCell::new(data),
145
0
        }
146
0
    }
147
148
    /// Consumes this `RwLock`, returning the underlying data.
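    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// let lock = spin::RwLock::new(42);
    /// assert_eq!(lock.into_inner(), 42);
    /// ```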
149
    #[inline]
150
0
    pub fn into_inner(self) -> T {
151
        // We know statically that there are no outstanding references to
152
        // `self` so there's no need to lock.
153
0
        let RwLock { data, .. } = self;
154
0
        data.into_inner()
155
0
    }
156
    /// Returns a mutable pointer to the underlying data.
157
    ///
158
    /// This is mostly meant to be used for applications which require manual unlocking, but where
159
    /// storing both the lock and the pointer to the inner data gets inefficient.
160
    ///
161
    /// While this is safe, writing to the data is undefined behavior unless the current thread has
162
    /// acquired a write lock, and reading requires either a read or write lock.
163
    ///
164
    /// # Example
165
    /// ```
166
    /// let lock = spin::RwLock::new(42);
167
    ///
168
    /// unsafe {
169
    ///     core::mem::forget(lock.write());
170
    ///
171
    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
172
    ///     lock.as_mut_ptr().write(58);
173
    ///
174
    ///     lock.force_write_unlock();
175
    /// }
176
    ///
177
    /// assert_eq!(*lock.read(), 58);
178
    ///
179
    /// ```
180
    #[inline(always)]
181
0
    pub fn as_mut_ptr(&self) -> *mut T {
182
0
        self.data.get()
183
0
    }
184
}
185
186
impl<T: ?Sized, R: RelaxStrategy> RwLock<T, R> {
187
    /// Locks this rwlock with shared read access, blocking the current thread
188
    /// until it can be acquired.
189
    ///
190
    /// The calling thread will be blocked until there are no more writers which
191
    /// hold the lock. There may be other readers currently inside the lock when
192
    /// this method returns. This method does not provide any guarantees with
193
    /// respect to the ordering of whether contentious readers or writers will
194
    /// acquire the lock first.
195
    ///
196
    /// Returns an RAII guard which will release this thread's shared access
197
    /// once it is dropped.
198
    ///
199
    /// ```
200
    /// let mylock = spin::RwLock::new(0);
201
    /// {
202
    ///     let mut data = mylock.read();
203
    ///     // The lock is now locked and the data can be read
204
    ///     println!("{}", *data);
205
    ///     // The lock is dropped
206
    /// }
207
    /// ```
208
    #[inline]
209
0
    pub fn read(&self) -> RwLockReadGuard<T> {
210
        loop {
211
0
            match self.try_read() {
212
0
                Some(guard) => return guard,
213
0
                None => R::relax(),
214
            }
215
        }
216
0
    }
217
218
    /// Lock this rwlock with exclusive write access, blocking the current
219
    /// thread until it can be acquired.
220
    ///
221
    /// This function will not return while other writers or other readers
222
    /// currently have access to the lock.
223
    ///
224
    /// Returns an RAII guard which will drop the write access of this rwlock
225
    /// when dropped.
226
    ///
227
    /// ```
228
    /// let mylock = spin::RwLock::new(0);
229
    /// {
230
    ///     let mut data = mylock.write();
231
    ///     // The lock is now locked and the data can be written
232
    ///     *data += 1;
233
    ///     // The lock is dropped
234
    /// }
235
    /// ```
236
    #[inline]
237
0
    pub fn write(&self) -> RwLockWriteGuard<T, R> {
238
        loop {
239
0
            match self.try_write_internal(false) {
240
0
                Some(guard) => return guard,
241
0
                None => R::relax(),
242
            }
243
        }
244
0
    }
245
246
    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
247
    /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method.
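    ///
    /// A short sketch of the intended usage:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read(); // shared access
    /// assert_eq!(*upgradeable, 0);
    ///
    /// let mut writable = upgradeable.upgrade(); // exclusive access
    /// *writable += 1;
    /// assert_eq!(*writable, 1);
    /// ```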
248
    #[inline]
249
0
    pub fn upgradeable_read(&self) -> RwLockUpgradableGuard<T, R> {
250
        loop {
251
0
            match self.try_upgradeable_read() {
252
0
                Some(guard) => return guard,
253
0
                None => R::relax(),
254
            }
255
        }
256
0
    }
257
}
258
259
impl<T: ?Sized, R> RwLock<T, R> {
260
    // Acquire a read lock, returning the lock value just before the increment.
261
0
    fn acquire_reader(&self) -> usize {
262
        // An arbitrary cap that allows us to catch overflows long before they happen
263
        const MAX_READERS: usize = core::usize::MAX / READER / 2;
264
265
0
        let value = self.lock.fetch_add(READER, Ordering::Acquire);
266
267
0
        if value > MAX_READERS * READER {
268
0
            self.lock.fetch_sub(READER, Ordering::Relaxed);
269
0
            panic!("Too many lock readers, cannot safely proceed");
270
        } else {
271
0
            value
272
        }
273
0
    }
274
275
    /// Attempt to acquire this lock with shared read access.
276
    ///
277
    /// This function will never block and will return immediately if `read`
278
    /// would otherwise succeed. Returns `Some` of an RAII guard which will
279
    /// release the shared access of this thread when dropped, or `None` if the
280
    /// access could not be granted. This method does not provide any
281
    /// guarantees with respect to the ordering of whether contentious readers
282
    /// or writers will acquire the lock first.
283
    ///
284
    /// ```
285
    /// let mylock = spin::RwLock::new(0);
286
    /// {
287
    ///     match mylock.try_read() {
288
    ///         Some(data) => {
289
    ///             // The lock is now locked and the data can be read
290
    ///             println!("{}", *data);
291
    ///             // The lock is dropped
292
    ///         },
293
    ///         None => (), // no cigar
294
    ///     };
295
    /// }
296
    /// ```
297
    #[inline]
298
0
    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
299
0
        let value = self.acquire_reader();
300
301
        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
302
        // This helps reduce writer starvation.
303
0
        if value & (WRITER | UPGRADED) != 0 {
304
            // Lock is taken, undo.
305
0
            self.lock.fetch_sub(READER, Ordering::Release);
306
0
            None
307
        } else {
308
0
            Some(RwLockReadGuard {
309
0
                lock: &self.lock,
310
0
                data: unsafe { &*self.data.get() },
311
0
            })
312
        }
313
0
    }
314
315
    /// Return the number of readers that currently hold the lock (including upgradable readers).
316
    ///
317
    /// # Safety
318
    ///
319
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
320
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
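    ///
    /// A quick sketch of how the count behaves:
    ///
    /// ```
    /// let lock = spin::RwLock::new(());
    /// let r1 = lock.read();
    /// let r2 = lock.read();
    /// assert_eq!(lock.reader_count(), 2);
    /// drop((r1, r2));
    /// assert_eq!(lock.reader_count(), 0);
    /// ```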
321
0
    pub fn reader_count(&self) -> usize {
322
0
        let state = self.lock.load(Ordering::Relaxed);
323
0
        state / READER + (state & UPGRADED) / UPGRADED
324
0
    }
325
326
    /// Return the number of writers that currently hold the lock.
327
    ///
328
    /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`.
329
    ///
330
    /// # Safety
331
    ///
332
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
333
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
334
0
    pub fn writer_count(&self) -> usize {
335
0
        (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER
336
0
    }
337
338
    /// Force decrement the reader count.
339
    ///
340
    /// # Safety
341
    ///
342
    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
343
    /// live, or if called more times than `read` has been called, but can be
344
    /// useful in FFI contexts where the caller doesn't know how to deal with
345
    /// RAII. The underlying atomic operation uses `Ordering::Release`.
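    ///
    /// A minimal sketch of the manual-unlock pattern (the guard is deliberately
    /// forgotten, so the reader count must be decremented by hand):
    ///
    /// ```
    /// let lock = spin::RwLock::new(());
    ///
    /// core::mem::forget(lock.read());
    /// assert!(lock.try_write().is_none());
    ///
    /// unsafe { lock.force_read_decrement(); }
    /// assert!(lock.try_write().is_some());
    /// ```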
346
    #[inline]
347
0
    pub unsafe fn force_read_decrement(&self) {
348
0
        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
349
0
        self.lock.fetch_sub(READER, Ordering::Release);
350
0
    }
351
352
    /// Force unlock exclusive write access.
353
    ///
354
    /// # Safety
355
    ///
356
    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
357
    /// live, or if called when there are current readers, but can be useful in
358
    /// FFI contexts where the caller doesn't know how to deal with RAII. The
359
    /// underlying atomic operation uses `Ordering::Release`.
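    ///
    /// A minimal sketch, mirroring `force_read_decrement` above:
    ///
    /// ```
    /// let lock = spin::RwLock::new(());
    ///
    /// core::mem::forget(lock.write());
    /// assert!(lock.try_read().is_none());
    ///
    /// unsafe { lock.force_write_unlock(); }
    /// assert!(lock.try_read().is_some());
    /// ```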
360
    #[inline]
361
0
    pub unsafe fn force_write_unlock(&self) {
362
0
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
363
0
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
364
0
    }
365
366
    #[inline(always)]
367
0
    fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T, R>> {
368
0
        if compare_exchange(
369
0
            &self.lock,
370
0
            0,
371
0
            WRITER,
372
0
            Ordering::Acquire,
373
0
            Ordering::Relaxed,
374
0
            strong,
375
0
        )
376
0
        .is_ok()
377
        {
378
0
            Some(RwLockWriteGuard {
379
0
                phantom: PhantomData,
380
0
                inner: self,
381
0
                data: unsafe { &mut *self.data.get() },
382
0
            })
383
        } else {
384
0
            None
385
        }
386
0
    }
387
388
    /// Attempt to lock this rwlock with exclusive write access.
389
    ///
390
    /// This function does not ever block, and it will return `None` if a call
391
    /// to `write` would otherwise block. If successful, an RAII guard is
392
    /// returned.
393
    ///
394
    /// ```
395
    /// let mylock = spin::RwLock::new(0);
396
    /// {
397
    ///     match mylock.try_write() {
398
    ///         Some(mut data) => {
399
    ///             // The lock is now locked and the data can be written
400
    ///             *data += 1;
401
    ///             // The lock is implicitly dropped
402
    ///         },
403
    ///         None => (), // no cigar
404
    ///     };
405
    /// }
406
    /// ```
407
    #[inline]
408
0
    pub fn try_write(&self) -> Option<RwLockWriteGuard<T, R>> {
409
0
        self.try_write_internal(true)
410
0
    }
411
412
    /// Attempt to lock this rwlock with exclusive write access.
413
    ///
414
    /// Unlike [`RwLock::try_write`], this function is allowed to spuriously fail even when acquiring exclusive write access
415
    /// would otherwise succeed, which can result in more efficient code on some platforms.
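    ///
    /// Because of the possible spurious failures, the weak variant is typically used in a
    /// retry loop; a hedged sketch:
    ///
    /// ```
    /// let lock = spin::RwLock::new(0);
    ///
    /// let mut guard = loop {
    ///     if let Some(guard) = lock.try_write_weak() {
    ///         break guard;
    ///     }
    ///     core::hint::spin_loop();
    /// };
    /// *guard += 1;
    /// assert_eq!(*guard, 1);
    /// ```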
416
    #[inline]
417
0
    pub fn try_write_weak(&self) -> Option<RwLockWriteGuard<T, R>> {
418
0
        self.try_write_internal(false)
419
0
    }
420
421
    /// Tries to obtain an upgradeable lock guard.
422
    #[inline]
423
0
    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradableGuard<T, R>> {
424
0
        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
425
0
            Some(RwLockUpgradableGuard {
426
0
                phantom: PhantomData,
427
0
                inner: self,
428
0
                data: unsafe { &*self.data.get() },
429
0
            })
430
        } else {
431
            // We can't clear the UPGRADED bit here, as another upgradeable or write lock currently holds the lock.
432
            // When that lock is released, it will clear the bit.
433
0
            None
434
        }
435
0
    }
436
437
    /// Returns a mutable reference to the underlying data.
438
    ///
439
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
440
    /// take place -- the mutable borrow statically guarantees no locks exist.
441
    ///
442
    /// # Examples
443
    ///
444
    /// ```
445
    /// let mut lock = spin::RwLock::new(0);
446
    /// *lock.get_mut() = 10;
447
    /// assert_eq!(*lock.read(), 10);
448
    /// ```
449
0
    pub fn get_mut(&mut self) -> &mut T {
450
        // We know statically that there are no other references to `self`, so
451
        // there's no need to lock the inner lock.
452
0
        unsafe { &mut *self.data.get() }
453
0
    }
454
}
455
456
impl<T: ?Sized + fmt::Debug, R> fmt::Debug for RwLock<T, R> {
457
0
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
458
0
        match self.try_read() {
459
0
            Some(guard) => write!(f, "RwLock {{ data: ")
460
0
                .and_then(|()| (&*guard).fmt(f))
461
0
                .and_then(|()| write!(f, " }}")),
462
0
            None => write!(f, "RwLock {{ <locked> }}"),
463
        }
464
0
    }
465
}
466
467
impl<T: ?Sized + Default, R> Default for RwLock<T, R> {
468
0
    fn default() -> Self {
469
0
        Self::new(Default::default())
470
0
    }
471
}
472
473
impl<T, R> From<T> for RwLock<T, R> {
474
0
    fn from(data: T) -> Self {
475
0
        Self::new(data)
476
0
    }
477
}
478
479
impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
480
    /// Leak the lock guard, yielding a reference to the underlying data.
481
    ///
482
    /// Note that this function will permanently lock the original lock against writers; new reading locks may still be acquired.
483
    ///
484
    /// ```
485
    /// let mylock = spin::RwLock::new(0);
486
    ///
487
    /// let data: &i32 = spin::RwLockReadGuard::leak(mylock.read());
488
    ///
489
    /// assert_eq!(*data, 0);
490
    /// ```
491
    #[inline]
492
0
    pub fn leak(this: Self) -> &'rwlock T {
493
0
        let this = ManuallyDrop::new(this);
494
        // Safety: We know statically that only we are referencing data
495
0
        unsafe { &*this.data }
496
0
    }
497
}
498
499
impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'rwlock, T> {
500
0
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
501
0
        fmt::Debug::fmt(&**self, f)
502
0
    }
503
}
504
505
impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock, T> {
506
0
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
507
0
        fmt::Display::fmt(&**self, f)
508
0
    }
509
}
510
511
impl<'rwlock, T: ?Sized, R: RelaxStrategy> RwLockUpgradableGuard<'rwlock, T, R> {
512
    /// Upgrades an upgradeable lock guard to a writable lock guard.
513
    ///
514
    /// ```
515
    /// let mylock = spin::RwLock::new(0);
516
    ///
517
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
518
    /// let writable = upgradeable.upgrade();
519
    /// ```
520
    #[inline]
521
0
    pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T, R> {
522
        loop {
523
0
            self = match self.try_upgrade_internal(false) {
524
0
                Ok(guard) => return guard,
525
0
                Err(e) => e,
526
            };
527
528
0
            R::relax();
529
        }
530
0
    }
531
}
532
533
impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> {
534
    #[inline(always)]
535
0
    fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
536
0
        if compare_exchange(
537
0
            &self.inner.lock,
538
0
            UPGRADED,
539
0
            WRITER,
540
0
            Ordering::Acquire,
541
0
            Ordering::Relaxed,
542
0
            strong,
543
0
        )
544
0
        .is_ok()
545
        {
546
0
            let inner = self.inner;
547
548
            // Forget the old guard so its destructor doesn't run (before mutably aliasing data below)
549
0
            mem::forget(self);
550
551
            // Upgrade successful
552
0
            Ok(RwLockWriteGuard {
553
0
                phantom: PhantomData,
554
0
                inner,
555
0
                data: unsafe { &mut *inner.data.get() },
556
0
            })
557
        } else {
558
0
            Err(self)
559
        }
560
0
    }
561
562
    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
563
    ///
564
    /// ```
565
    /// let mylock = spin::RwLock::new(0);
566
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
567
    ///
568
    /// match upgradeable.try_upgrade() {
569
    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
570
    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
571
    /// };
572
    /// ```
573
    #[inline]
574
0
    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
575
0
        self.try_upgrade_internal(true)
576
0
    }
577
578
    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
579
    ///
580
    /// Unlike [`RwLockUpgradableGuard::try_upgrade`], this function is allowed to spuriously fail even when upgrading
581
    /// would otherwise succeed, which can result in more efficient code on some platforms.
582
    #[inline]
583
0
    pub fn try_upgrade_weak(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
584
0
        self.try_upgrade_internal(false)
585
0
    }
586
587
    #[inline]
588
    /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
589
    ///
590
    /// ```
591
    /// let mylock = spin::RwLock::new(1);
592
    ///
593
    /// let upgradeable = mylock.upgradeable_read();
594
    /// assert!(mylock.try_read().is_none());
595
    /// assert_eq!(*upgradeable, 1);
596
    ///
597
    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
598
    /// assert!(mylock.try_read().is_some());
599
    /// assert_eq!(*readable, 1);
600
    /// ```
601
0
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
602
        // Reserve the read guard for ourselves
603
0
        self.inner.acquire_reader();
604
605
0
        let inner = self.inner;
606
607
        // Dropping self removes the UPGRADED bit
608
0
        mem::drop(self);
609
610
0
        RwLockReadGuard {
611
0
            lock: &inner.lock,
612
0
            data: unsafe { &*inner.data.get() },
613
0
        }
614
0
    }
615
616
    /// Leak the lock guard, yielding a reference to the underlying data.
617
    ///
618
    /// Note that this function will permanently lock the original lock.
619
    ///
620
    /// ```
621
    /// let mylock = spin::RwLock::new(0);
622
    ///
623
    /// let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read());
624
    ///
625
    /// assert_eq!(*data, 0);
626
    /// ```
627
    #[inline]
628
0
    pub fn leak(this: Self) -> &'rwlock T {
629
0
        let this = ManuallyDrop::new(this);
630
        // Safety: We know statically that only we are referencing data
631
0
        unsafe { &*this.data }
632
0
    }
633
}
634
635
impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockUpgradableGuard<'rwlock, T, R> {
636
0
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
637
0
        fmt::Debug::fmt(&**self, f)
638
0
    }
639
}
640
641
impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockUpgradableGuard<'rwlock, T, R> {
642
0
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
643
0
        fmt::Display::fmt(&**self, f)
644
0
    }
645
}
646
647
impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> {
648
    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
649
    ///
650
    /// ```
651
    /// let mylock = spin::RwLock::new(0);
652
    ///
653
    /// let mut writable = mylock.write();
654
    /// *writable = 1;
655
    ///
656
    /// let readable = writable.downgrade(); // This is guaranteed not to spin
657
    /// # let readable_2 = mylock.try_read().unwrap();
658
    /// assert_eq!(*readable, 1);
659
    /// ```
660
    #[inline]
661
0
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
662
        // Reserve the read guard for ourselves
663
0
        self.inner.acquire_reader();
664
665
0
        let inner = self.inner;
666
667
        // Dropping self clears the WRITER (and UPGRADED) bits
668
0
        mem::drop(self);
669
670
0
        RwLockReadGuard {
671
0
            lock: &inner.lock,
672
0
            data: unsafe { &*inner.data.get() },
673
0
        }
674
0
    }
675
676
    /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin.
677
    ///
678
    /// ```
679
    /// let mylock = spin::RwLock::new(0);
680
    ///
681
    /// let mut writable = mylock.write();
682
    /// *writable = 1;
683
    ///
684
    /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin
685
    /// assert_eq!(*readable, 1);
686
    /// ```
687
    #[inline]
688
0
    pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T, R> {
689
0
        debug_assert_eq!(
690
0
            self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED),
691
            WRITER
692
        );
693
694
        // Reserve the upgradeable guard for ourselves by moving the state from WRITER to UPGRADED
695
0
        self.inner.lock.store(UPGRADED, Ordering::Release);
696
697
0
        let inner = self.inner;
698
699
        // Forget self so its destructor doesn't run and clear the UPGRADED bit we just set
700
0
        mem::forget(self);
701
702
0
        RwLockUpgradableGuard {
703
0
            phantom: PhantomData,
704
0
            inner,
705
0
            data: unsafe { &*inner.data.get() },
706
0
        }
707
0
    }
708
709
    /// Leak the lock guard, yielding a mutable reference to the underlying data.
710
    ///
711
    /// Note that this function will permanently lock the original lock.
712
    ///
713
    /// ```
714
    /// let mylock = spin::RwLock::new(0);
715
    ///
716
    /// let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write());
717
    ///
718
    /// *data = 1;
719
    /// assert_eq!(*data, 1);
720
    /// ```
721
    #[inline]
722
0
    pub fn leak(this: Self) -> &'rwlock mut T {
723
0
        let mut this = ManuallyDrop::new(this);
724
        // Safety: We know statically that only we are referencing data
725
0
        unsafe { &mut *this.data }
726
0
    }
727
}
728
729
impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockWriteGuard<'rwlock, T, R> {
730
0
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
731
0
        fmt::Debug::fmt(&**self, f)
732
0
    }
733
}
734
735
impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockWriteGuard<'rwlock, T, R> {
736
0
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
737
0
        fmt::Display::fmt(&**self, f)
738
0
    }
739
}
740
741
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
742
    type Target = T;
743
744
0
    fn deref(&self) -> &T {
745
        // Safety: We know statically that only we are referencing data
746
0
        unsafe { &*self.data }
747
0
    }
748
}
749
750
impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> {
751
    type Target = T;
752
753
0
    fn deref(&self) -> &T {
754
        // Safety: We know statically that only we are referencing data
755
0
        unsafe { &*self.data }
756
0
    }
757
}
758
759
impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> {
760
    type Target = T;
761
762
0
    fn deref(&self) -> &T {
763
        // Safety: We know statically that only we are referencing data
764
0
        unsafe { &*self.data }
765
0
    }
766
}
767
768
impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R> {
769
0
    fn deref_mut(&mut self) -> &mut T {
770
        // Safety: We know statically that only we are referencing data
771
0
        unsafe { &mut *self.data }
772
0
    }
773
}
774
775
impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
776
0
    fn drop(&mut self) {
777
0
        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
778
0
        self.lock.fetch_sub(READER, Ordering::Release);
779
0
    }
780
}
781
782
impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R> {
783
0
    fn drop(&mut self) {
784
0
        debug_assert_eq!(
785
0
            self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
786
            UPGRADED
787
        );
788
0
        self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
789
0
    }
790
}
791
792
impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> {
793
0
    fn drop(&mut self) {
794
0
        debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER);
795
796
        // Writer is responsible for clearing both WRITER and UPGRADED bits.
797
        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
798
0
        self.inner
799
0
            .lock
800
0
            .fetch_and(!(WRITER | UPGRADED), Ordering::Release);
801
0
    }
802
}
803
804
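/// Dispatches to either a strong or weak compare-exchange on `atomic`.
/// The weak variant may fail spuriously even when the comparison succeeds, but can
/// compile to cheaper code on some architectures, so callers pass a `strong` flag.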
#[inline(always)]
805
0
fn compare_exchange(
806
0
    atomic: &AtomicUsize,
807
0
    current: usize,
808
0
    new: usize,
809
0
    success: Ordering,
810
0
    failure: Ordering,
811
0
    strong: bool,
812
0
) -> Result<usize, usize> {
813
0
    if strong {
814
0
        atomic.compare_exchange(current, new, success, failure)
815
    } else {
816
0
        atomic.compare_exchange_weak(current, new, success, failure)
817
    }
818
0
}
819
820
#[cfg(feature = "lock_api")]
821
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLock for RwLock<(), R> {
822
    type GuardMarker = lock_api_crate::GuardSend;
823
824
    const INIT: Self = Self::new(());
825
826
    #[inline(always)]
827
0
    fn lock_exclusive(&self) {
828
        // Prevent guard destructor running
829
0
        core::mem::forget(self.write());
830
0
    }
831
832
    #[inline(always)]
833
0
    fn try_lock_exclusive(&self) -> bool {
834
        // Prevent guard destructor running
835
0
        self.try_write().map(|g| core::mem::forget(g)).is_some()
836
0
    }
837
838
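    // Constructing a temporary guard and dropping it reuses the guard's `Drop`
    // impl to clear the lock bits.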
    #[inline(always)]
839
0
    unsafe fn unlock_exclusive(&self) {
840
0
        drop(RwLockWriteGuard {
841
0
            inner: self,
842
0
            data: &mut (),
843
0
            phantom: PhantomData,
844
0
        });
845
0
    }
846
847
    #[inline(always)]
848
0
    fn lock_shared(&self) {
849
        // Prevent guard destructor running
850
0
        core::mem::forget(self.read());
851
0
    }
852
853
    #[inline(always)]
854
0
    fn try_lock_shared(&self) -> bool {
855
        // Prevent guard destructor running
856
0
        self.try_read().map(|g| core::mem::forget(g)).is_some()
857
0
    }
858
859
    #[inline(always)]
860
0
    unsafe fn unlock_shared(&self) {
861
0
        drop(RwLockReadGuard {
862
0
            lock: &self.lock,
863
0
            data: &(),
864
0
        });
865
0
    }
866
867
    #[inline(always)]
868
0
    fn is_locked(&self) -> bool {
869
0
        self.lock.load(Ordering::Relaxed) != 0
870
0
    }
871
}
872
873
#[cfg(feature = "lock_api")]
874
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgrade for RwLock<(), R> {
875
    #[inline(always)]
876
0
    fn lock_upgradable(&self) {
877
        // Prevent guard destructor running
878
0
        core::mem::forget(self.upgradeable_read());
879
0
    }
880
881
    #[inline(always)]
882
0
    fn try_lock_upgradable(&self) -> bool {
883
        // Prevent guard destructor running
884
0
        self.try_upgradeable_read()
885
0
            .map(|g| core::mem::forget(g))
886
0
            .is_some()
887
0
    }
888
889
    #[inline(always)]
890
0
    unsafe fn unlock_upgradable(&self) {
891
0
        drop(RwLockUpgradableGuard {
892
0
            inner: self,
893
0
            data: &(),
894
0
            phantom: PhantomData,
895
0
        });
896
0
    }
897
898
    #[inline(always)]
899
0
    unsafe fn upgrade(&self) {
900
0
        let tmp_guard = RwLockUpgradableGuard {
901
0
            inner: self,
902
0
            data: &(),
903
0
            phantom: PhantomData,
904
0
        };
905
0
        core::mem::forget(tmp_guard.upgrade());
906
0
    }
907
908
    #[inline(always)]
909
0
    unsafe fn try_upgrade(&self) -> bool {
910
0
        let tmp_guard = RwLockUpgradableGuard {
911
0
            inner: self,
912
0
            data: &(),
913
0
            phantom: PhantomData,
914
0
        };
915
0
        tmp_guard
916
0
            .try_upgrade()
917
0
            .map(|g| core::mem::forget(g))
918
0
            .is_ok()
919
0
    }
920
}
921
922
#[cfg(feature = "lock_api")]
923
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockDowngrade for RwLock<(), R> {
924
0
    unsafe fn downgrade(&self) {
925
0
        let tmp_guard = RwLockWriteGuard {
926
0
            inner: self,
927
0
            data: &mut (),
928
0
            phantom: PhantomData,
929
0
        };
930
0
        core::mem::forget(tmp_guard.downgrade());
931
0
    }
932
}
933
934
#[cfg(feature = "lock_api1")]
935
unsafe impl lock_api::RawRwLockUpgradeDowngrade for RwLock<()> {
936
    unsafe fn downgrade_upgradable(&self) {
937
        let tmp_guard = RwLockUpgradableGuard {
938
            inner: self,
939
            data: &(),
940
            phantom: PhantomData,
941
        };
942
        core::mem::forget(tmp_guard.downgrade());
943
    }
944
945
    unsafe fn downgrade_to_upgradable(&self) {
946
        let tmp_guard = RwLockWriteGuard {
947
            inner: self,
948
            data: &mut (),
949
            phantom: PhantomData,
950
        };
951
        core::mem::forget(tmp_guard.downgrade_to_upgradeable());
952
    }
953
}
954
955
#[cfg(test)]
956
mod tests {
957
    use std::prelude::v1::*;
958
959
    use std::sync::atomic::{AtomicUsize, Ordering};
960
    use std::sync::mpsc::channel;
961
    use std::sync::Arc;
962
    use std::thread;
963
964
    type RwLock<T> = super::RwLock<T>;
965
966
    #[derive(Eq, PartialEq, Debug)]
967
    struct NonCopy(i32);
968
969
    #[test]
970
    fn smoke() {
971
        let l = RwLock::new(());
972
        drop(l.read());
973
        drop(l.write());
974
        drop((l.read(), l.read()));
975
        drop(l.write());
976
    }
977
978
    // TODO: needs RNG
979
    //#[test]
980
    //fn frob() {
981
    //    static R: RwLock = RwLock::new();
982
    //    const N: usize = 10;
983
    //    const M: usize = 1000;
984
    //
985
    //    let (tx, rx) = channel::<()>();
986
    //    for _ in 0..N {
987
    //        let tx = tx.clone();
988
    //        thread::spawn(move|| {
989
    //            let mut rng = rand::thread_rng();
990
    //            for _ in 0..M {
991
    //                if rng.gen_weighted_bool(N) {
992
    //                    drop(R.write());
993
    //                } else {
994
    //                    drop(R.read());
995
    //                }
996
    //            }
997
    //            drop(tx);
998
    //        });
999
    //    }
1000
    //    drop(tx);
1001
    //    let _ = rx.recv();
1002
    //    unsafe { R.destroy(); }
1003
    //}
1004
1005
    #[test]
1006
    fn test_rw_arc() {
1007
        let arc = Arc::new(RwLock::new(0));
1008
        let arc2 = arc.clone();
1009
        let (tx, rx) = channel();
1010
1011
        let t = thread::spawn(move || {
1012
            let mut lock = arc2.write();
1013
            for _ in 0..10 {
1014
                let tmp = *lock;
1015
                *lock = -1;
1016
                thread::yield_now();
1017
                *lock = tmp + 1;
1018
            }
1019
            tx.send(()).unwrap();
1020
        });
1021
1022
        // Readers try to catch the writer in the act
1023
        let mut children = Vec::new();
1024
        for _ in 0..5 {
1025
            let arc3 = arc.clone();
1026
            children.push(thread::spawn(move || {
1027
                let lock = arc3.read();
1028
                assert!(*lock >= 0);
1029
            }));
1030
        }
1031
1032
        // Wait for children to pass their asserts
1033
        for r in children {
1034
            assert!(r.join().is_ok());
1035
        }
1036
1037
        // Wait for writer to finish
1038
        rx.recv().unwrap();
1039
        let lock = arc.read();
1040
        assert_eq!(*lock, 10);
1041
1042
        assert!(t.join().is_ok());
1043
    }
1044
1045
    #[test]
1046
    fn test_rw_access_in_unwind() {
1047
        let arc = Arc::new(RwLock::new(1));
1048
        let arc2 = arc.clone();
1049
        let _ = thread::spawn(move || -> () {
1050
            struct Unwinder {
1051
                i: Arc<RwLock<isize>>,
1052
            }
1053
            impl Drop for Unwinder {
1054
                fn drop(&mut self) {
1055
                    let mut lock = self.i.write();
1056
                    *lock += 1;
1057
                }
1058
            }
1059
            let _u = Unwinder { i: arc2 };
1060
            panic!();
1061
        })
1062
        .join();
1063
        let lock = arc.read();
1064
        assert_eq!(*lock, 2);
1065
    }
1066
1067
    #[test]
1068
    fn test_rwlock_unsized() {
1069
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
1070
        {
1071
            let b = &mut *rw.write();
1072
            b[0] = 4;
1073
            b[2] = 5;
1074
        }
1075
        let comp: &[i32] = &[4, 2, 5];
1076
        assert_eq!(&*rw.read(), comp);
1077
    }
1078
1079
    #[test]
1080
    fn test_rwlock_try_write() {
1081
        use std::mem::drop;
1082
1083
        let lock = RwLock::new(0isize);
1084
        let read_guard = lock.read();
1085
1086
        let write_result = lock.try_write();
1087
        match write_result {
1088
            None => (),
1089
            Some(_) => assert!(
1090
                false,
1091
                "try_write should not succeed while read_guard is in scope"
1092
            ),
1093
        }
1094
1095
        drop(read_guard);
1096
    }
1097
1098
    #[test]
1099
    fn test_rw_try_read() {
1100
        let m = RwLock::new(0);
1101
        ::std::mem::forget(m.write());
1102
        assert!(m.try_read().is_none());
1103
    }
1104
1105
    #[test]
1106
    fn test_into_inner() {
1107
        let m = RwLock::new(NonCopy(10));
1108
        assert_eq!(m.into_inner(), NonCopy(10));
1109
    }
1110
1111
    #[test]
1112
    fn test_into_inner_drop() {
1113
        struct Foo(Arc<AtomicUsize>);
1114
        impl Drop for Foo {
1115
            fn drop(&mut self) {
1116
                self.0.fetch_add(1, Ordering::SeqCst);
1117
            }
1118
        }
1119
        let num_drops = Arc::new(AtomicUsize::new(0));
1120
        let m = RwLock::new(Foo(num_drops.clone()));
1121
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
1122
        {
1123
            let _inner = m.into_inner();
1124
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
1125
        }
1126
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
1127
    }
1128
1129
    #[test]
1130
    fn test_force_read_decrement() {
1131
        let m = RwLock::new(());
1132
        ::std::mem::forget(m.read());
1133
        ::std::mem::forget(m.read());
1134
        ::std::mem::forget(m.read());
1135
        assert!(m.try_write().is_none());
1136
        unsafe {
1137
            m.force_read_decrement();
1138
            m.force_read_decrement();
1139
        }
1140
        assert!(m.try_write().is_none());
1141
        unsafe {
1142
            m.force_read_decrement();
1143
        }
1144
        assert!(m.try_write().is_some());
1145
    }
1146
1147
    #[test]
1148
    fn test_force_write_unlock() {
1149
        let m = RwLock::new(());
1150
        ::std::mem::forget(m.write());
1151
        assert!(m.try_read().is_none());
1152
        unsafe {
1153
            m.force_write_unlock();
1154
        }
1155
        assert!(m.try_read().is_some());
1156
    }
1157
1158
    #[test]
1159
    fn test_upgrade_downgrade() {
1160
        let m = RwLock::new(());
1161
        {
1162
            let _r = m.read();
1163
            let upg = m.try_upgradeable_read().unwrap();
1164
            assert!(m.try_read().is_none());
1165
            assert!(m.try_write().is_none());
1166
            assert!(upg.try_upgrade().is_err());
1167
        }
1168
        {
1169
            let w = m.write();
1170
            assert!(m.try_upgradeable_read().is_none());
1171
            let _r = w.downgrade();
1172
            assert!(m.try_upgradeable_read().is_some());
1173
            assert!(m.try_read().is_some());
1174
            assert!(m.try_write().is_none());
1175
        }
1176
        {
1177
            let _u = m.upgradeable_read();
1178
            assert!(m.try_upgradeable_read().is_none());
1179
        }
1180
1181
        assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
1182
    }
1183
}