Coverage Report

Created: 2025-07-14 07:05

/rust/registry/src/index.crates.io-6f17d22bba15001f/spinning_top-0.2.5/src/spinlock.rs
Source listing. Lines with an execution count of 0 (never executed in the instrumented test run): the bodies of `RawSpinlock::try_lock_weak`, the `RawMutex` methods `lock`, `try_lock`, `unlock`, and `is_locked`, and `const_spinlock`.
// This implementation is based on:
// https://github.com/Amanieu/parking_lot/tree/fa294cd677936bf365afa0497039953b10c722f5/lock_api
// and
// https://github.com/mvdnes/spin-rs/tree/7516c8037d3d15712ba4d8499ab075e97a19d778

use core::{
    hint,
    sync::atomic::{AtomicBool, Ordering},
};
use lock_api::{GuardSend, RawMutex};

/// Provides mutual exclusion based on spinning on an `AtomicBool`.
///
/// It's recommended to use this type either in combination with [`lock_api::Mutex`] or
/// through the [`Spinlock`] type.
///
/// ## Example
///
/// ```rust
/// use lock_api::RawMutex;
/// let lock = spinning_top::RawSpinlock::INIT;
/// assert_eq!(lock.try_lock(), true); // lock it
/// assert_eq!(lock.try_lock(), false); // can't be locked a second time
/// unsafe { lock.unlock(); } // unlock it
/// assert_eq!(lock.try_lock(), true); // now it can be locked again
/// ```
#[derive(Debug)]
pub struct RawSpinlock {
    /// Whether the spinlock is locked.
    locked: AtomicBool,
}

impl RawSpinlock {
    // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
    // when called in a loop.
    fn try_lock_weak(&self) -> bool {
        // The Orderings are the same as try_lock, and are still correct here.
        self.locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
}

unsafe impl RawMutex for RawSpinlock {
    const INIT: RawSpinlock = RawSpinlock {
        locked: AtomicBool::new(false),
    };

    // A spinlock guard can be sent to another thread and unlocked there
    type GuardMarker = GuardSend;

    fn lock(&self) {
        while !self.try_lock_weak() {
            // Wait until the lock looks unlocked before retrying
            // Code from https://github.com/mvdnes/spin-rs/commit/d3e60d19adbde8c8e9d3199c7c51e51ee5a20bf6
            while self.is_locked() {
                // Tell the CPU that we're inside a busy-wait loop
                hint::spin_loop();
            }
        }
    }

    fn try_lock(&self) -> bool {
        // Code taken from:
        // https://github.com/Amanieu/parking_lot/blob/fa294cd677936bf365afa0497039953b10c722f5/lock_api/src/lib.rs#L49-L53
        //
        // The reason for using a strong compare_exchange is explained here:
        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
        //
        // The second Ordering argument specifies the ordering when the compare_exchange
        // fails. Since we don't access any critical data if we fail to acquire the lock,
        // we can use a Relaxed ordering in this case.
        self.locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock(&self) {
        self.locked.store(false, Ordering::Release);
    }

    fn is_locked(&self) -> bool {
        // Relaxed is sufficient because this operation does not provide synchronization, only atomicity.
        self.locked.load(Ordering::Relaxed)
    }
}
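
// A minimal illustrative sketch (not part of the original crate): the doc comment on
// `RawSpinlock` recommends using it in combination with `lock_api::Mutex`, which is
// exactly what the `Spinlock` alias below expands to. The test name is made up.
#[cfg(test)]
#[test]
fn raw_spinlock_through_lock_api_mutex() {
    let mutex: lock_api::Mutex<RawSpinlock, u32> = lock_api::Mutex::new(21);
    {
        let mut guard = mutex.lock(); // calls `RawSpinlock::lock`, spinning if necessary
        *guard *= 2;
    } // guard dropped here -> `RawSpinlock::unlock` releases the lock
    assert_eq!(*mutex.lock(), 42);
}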

/// A mutual exclusion (Mutex) type based on busy-waiting.
///
/// Calling `lock` (or `try_lock`) on this type returns a [`SpinlockGuard`], which
/// automatically frees the lock when it goes out of scope.
///
/// ## Example
///
/// ```rust
/// use spinning_top::Spinlock;
///
/// fn main() {
///     // Wrap some data in a spinlock
///     let data = String::from("Hello");
///     let spinlock = Spinlock::new(data);
///     make_uppercase(&spinlock); // only pass a shared reference
///
///     // We have ownership of the spinlock, so we can extract the data without locking
///     // Note: this consumes the spinlock
///     let data = spinlock.into_inner();
///     assert_eq!(data.as_str(), "HELLO");
/// }
///
/// fn make_uppercase(spinlock: &Spinlock<String>) {
///     // Lock the spinlock to get a mutable reference to the data
///     let mut locked_data = spinlock.lock();
///     assert_eq!(locked_data.as_str(), "Hello");
///     locked_data.make_ascii_uppercase();
///
///     // the lock is automatically freed at the end of the scope
/// }
/// ```
///
/// ## Usage in statics
///
/// `Spinlock::new` is a `const` function. This makes the `Spinlock` type
/// usable in statics:
///
/// ```rust,ignore
/// use spinning_top::Spinlock;
///
/// static DATA: Spinlock<u32> = Spinlock::new(0);
///
/// fn main() {
///     let mut data = DATA.lock();
///     *data += 1;
///     assert_eq!(*data, 1);
/// }
/// ```
pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;
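
// A small illustrative test (not part of the original crate) for the claim above that
// the returned guard frees the lock when it goes out of scope; `is_locked` is used to
// observe the lock state from outside. The test name is made up.
#[cfg(test)]
#[test]
fn guard_scope_releases_lock() {
    let spinlock = Spinlock::new(0u32);
    assert!(!spinlock.is_locked());
    {
        let mut guard = spinlock.lock();
        *guard += 1;
        assert!(spinlock.is_locked()); // held exactly as long as the guard is alive
    } // guard dropped here -> the spinlock is unlocked again
    assert!(!spinlock.is_locked());
    assert_eq!(*spinlock.lock(), 1);
}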

/// A RAII guard that frees the spinlock when it goes out of scope.
///
/// Allows access to the locked data through the [`core::ops::Deref`] and [`core::ops::DerefMut`] operations.
///
/// ## Example
///
/// ```rust
/// use spinning_top::{Spinlock, SpinlockGuard};
///
/// let spinlock = Spinlock::new(Vec::new());
///
/// // begin a new scope
/// {
///     // lock the spinlock to create a `SpinlockGuard`
///     let mut guard: SpinlockGuard<_> = spinlock.lock();
///
///     // guard can be used like a `&mut Vec` since it implements `DerefMut`
///     guard.push(1);
///     guard.push(2);
///     assert_eq!(guard.len(), 2);
/// } // guard is dropped -> frees the spinlock again
///
/// // spinlock is unlocked again
/// assert!(spinlock.try_lock().is_some());
/// ```
pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>;

/// A RAII guard returned by `SpinlockGuard::map`.
///
/// ## Example
///
/// ```rust
/// use spinning_top::{MappedSpinlockGuard, Spinlock, SpinlockGuard};
///
/// let spinlock = Spinlock::new(Some(3));
///
/// // Begin a new scope.
/// {
///     // Lock the spinlock to create a `SpinlockGuard`.
///     let mut guard: SpinlockGuard<_> = spinlock.lock();
///
///     // Map the internal value of `guard`. `guard` is moved.
///     let mut mapped: MappedSpinlockGuard<'_, _> =
///         SpinlockGuard::map(guard, |g| g.as_mut().unwrap());
///     assert_eq!(*mapped, 3);
///
///     *mapped = 5;
///     assert_eq!(*mapped, 5);
/// } // `mapped` is dropped -> frees the spinlock again.
///
/// // The modification is reflected in the original lock.
/// assert_eq!(*spinlock.lock(), Some(5));
/// ```
pub type MappedSpinlockGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawSpinlock, T>;
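
// An illustrative sketch (not part of the original crate): `SpinlockGuard::map` can also
// narrow a guard down to a single struct field, so only that field is reachable through
// the mapped guard. The `Point` type and the test name are made up for this example.
#[cfg(test)]
#[test]
fn map_guard_to_struct_field() {
    struct Point {
        x: i32,
        y: i32,
    }

    let spinlock = Spinlock::new(Point { x: 1, y: 2 });
    let guard = spinlock.lock();
    // `map` consumes `guard`; the spinlock stays locked while `x` is alive.
    let mut x: MappedSpinlockGuard<'_, i32> = SpinlockGuard::map(guard, |p| &mut p.x);
    *x += 10;
    drop(x); // releases the spinlock
    let point = spinlock.lock();
    assert_eq!(point.x, 11);
    assert_eq!(point.y, 2);
}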

/// Create an unlocked `Spinlock` in a `const` context.
///
/// ## Example
///
/// ```rust
/// use spinning_top::{const_spinlock, Spinlock};
///
/// static SPINLOCK: Spinlock<i32> = const_spinlock(42);
/// ```
pub const fn const_spinlock<T>(val: T) -> Spinlock<T> {
    Spinlock::const_new(<RawSpinlock as lock_api::RawMutex>::INIT, val)
}
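
// A minimal illustrative sketch (not part of the original crate) tying `const_spinlock`
// to the "Usage in statics" section above: a spinlock-protected global counter. The
// static `GLOBAL_COUNTER` and the test name are made up for this example.
#[cfg(test)]
static GLOBAL_COUNTER: Spinlock<u32> = const_spinlock(0);

#[cfg(test)]
#[test]
fn static_spinlock_counter() {
    *GLOBAL_COUNTER.lock() += 1;
    assert!(*GLOBAL_COUNTER.lock() >= 1);
}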

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn create_and_lock() {
        let spinlock = Spinlock::new(42);
        let data = spinlock.try_lock();
        assert!(data.is_some());
        assert_eq!(*data.unwrap(), 42);
    }

    #[test]
    fn mutual_exclusion() {
        let spinlock = Spinlock::new(1);
        let data = spinlock.try_lock();
        assert!(data.is_some());
        assert!(spinlock.try_lock().is_none());
        assert!(spinlock.try_lock().is_none()); // still None
        core::mem::drop(data);
        assert!(spinlock.try_lock().is_some());
    }

    #[test]
    fn three_locks() {
        let spinlock1 = Spinlock::new(1);
        let spinlock2 = Spinlock::new(2);
        let spinlock3 = Spinlock::new(3);
        let data1 = spinlock1.try_lock();
        let data2 = spinlock2.try_lock();
        let data3 = spinlock3.try_lock();
        assert!(data1.is_some());
        assert!(data2.is_some());
        assert!(data3.is_some());
        assert!(spinlock1.try_lock().is_none());
        assert!(spinlock1.try_lock().is_none()); // still None
        assert!(spinlock2.try_lock().is_none());
        assert!(spinlock3.try_lock().is_none());
        core::mem::drop(data3);
        assert!(spinlock3.try_lock().is_some());
    }

    #[test]
    fn mapped_lock() {
        let spinlock = Spinlock::new([1, 2, 3]);
        let data = spinlock.lock();
        let mut mapped = SpinlockGuard::map(data, |d| &mut d[0]);
        assert_eq!(*mapped, 1);
        *mapped = 4;
        assert_eq!(*mapped, 4);
        core::mem::drop(mapped);
        assert!(!spinlock.is_locked());
        assert_eq!(*spinlock.lock(), [4, 2, 3]);
    }
}