Coverage Report

Created: 2026-02-26 06:14

Source: /rust/registry/src/index.crates.io-1949cf8c6b5b557f/spin-0.10.0/src/mutex/spin.rs

All instrumented lines in this file have an execution count of 0 (uncovered).
//! A naïve spinning mutex.
//!
//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case
//! latency is theoretically infinite.

use crate::{
    atomic::{AtomicBool, Ordering},
    RelaxStrategy, Spin,
};
use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    mem::ManuallyDrop,
    ops::{Deref, DerefMut},
};

/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::SpinMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// # let mut ts = Vec::new();
/// for _ in 0..thread_count {
///     let my_barrier = barrier.clone();
///     let my_lock = spin_mutex.clone();
/// # let t =
///     std::thread::spawn(move || {
///         let mut guard = my_lock.lock();
///         *guard += 1;
///
///         // Release the lock to prevent a deadlock
///         drop(guard);
///         my_barrier.wait();
///     });
/// # ts.push(t);
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
///
/// # for t in ts {
/// #     t.join().unwrap();
/// # }
/// ```
pub struct SpinMutex<T: ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    pub(crate) lock: AtomicBool,
    data: UnsafeCell<T>,
}

/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
    lock: &'a AtomicBool,
    data: *mut T,
}

// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send, R> Sync for SpinMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for SpinMutex<T, R> {}

unsafe impl<T: ?Sized + Sync> Sync for SpinMutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for SpinMutexGuard<'_, T> {}
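// `SpinMutex<T>` needs only `T: Send` to be `Sync`: the lock serializes all
// access, so the inner data may move between threads (via the guard's `&mut T`)
// but is never accessed by two threads at once.
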
impl<T, R> SpinMutex<T, R> {
    /// Creates a new [`SpinMutex`] wrapping the supplied data.
    ///
    /// # Example
    ///
    /// ```
    /// use spin::mutex::SpinMutex;
    ///
    /// static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(());
    ///
    /// fn demo() {
    ///     let lock = MUTEX.lock();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline(always)]
    pub const fn new(data: T) -> Self {
        SpinMutex {
            lock: AtomicBool::new(false),
            data: UnsafeCell::new(data),
            phantom: PhantomData,
        }
    }

    /// Consumes this [`SpinMutex`] and unwraps the underlying data.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    /// assert_eq!(42, lock.into_inner());
    /// ```
    #[inline(always)]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let SpinMutex { data, .. } = self;
        data.into_inner()
    }

    /// Returns a mutable pointer to the underlying data.
    ///
    /// This is mostly meant to be used for applications which require manual unlocking, but where
    /// storing both the lock and the pointer to the inner data gets inefficient.
    ///
    /// # Example
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// unsafe {
    ///     core::mem::forget(lock.lock());
    ///
    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
    ///     lock.as_mut_ptr().write(58);
    ///
    ///     lock.force_unlock();
    /// }
    ///
    /// assert_eq!(*lock.lock(), 58);
    /// ```
    #[inline(always)]
    pub fn as_mut_ptr(&self) -> *mut T {
        self.data.get()
    }
}

impl<T: ?Sized, R: RelaxStrategy> SpinMutex<T, R> {
    /// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data.
    ///
    /// The returned value may be dereferenced for data access
    /// and the lock will be dropped when the guard falls out of scope.
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(0);
    /// {
    ///     let mut data = lock.lock();
    ///     // The lock is now locked and the data can be accessed
    ///     *data += 1;
    ///     // The lock is implicitly dropped at the end of the scope
    /// }
    /// ```
    #[inline(always)]
    pub fn lock(&self) -> SpinMutexGuard<T> {
        // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
        // when called in a loop.
        loop {
            if let Some(guard) = self.try_lock_weak() {
                break guard;
            }
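
            // Wait until the lock looks unlocked before retrying the CAS above:
            // spinning on a plain load (test-and-test-and-set) keeps waiting
            // cores reading the cache line instead of contending to write it.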
            while self.is_locked() {
                R::relax();
            }
        }
    }
}

impl<T: ?Sized, R> SpinMutex<T, R> {
    /// Returns `true` if the lock is currently held.
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
    #[inline(always)]
    pub fn is_locked(&self) -> bool {
        self.lock.load(Ordering::Relaxed)
    }

    /// Force unlock this [`SpinMutex`].
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the lock is not held by the current
    /// thread. However, this can be useful in some instances for exposing the
    /// lock to FFI that doesn't know how to deal with RAII.
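    ///
    /// # Example
    ///
    /// A minimal sketch of manual unlocking, mirroring the [`SpinMutex::as_mut_ptr`] example:
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(());
    ///
    /// // Forget the guard so the lock stays held with no RAII handle left
    /// core::mem::forget(lock.lock());
    /// assert!(lock.is_locked());
    ///
    /// // SAFETY: the lock is held by us, via the forgotten guard.
    /// unsafe { lock.force_unlock(); }
    /// assert!(!lock.is_locked());
    /// ```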
    #[inline(always)]
    pub unsafe fn force_unlock(&self) {
        self.lock.store(false, Ordering::Release);
    }

    /// Try to lock this [`SpinMutex`], returning a lock guard if successful.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// let maybe_guard = lock.try_lock();
    /// assert!(maybe_guard.is_some());
    ///
    /// // `maybe_guard` is still held, so the second call fails
    /// let maybe_guard2 = lock.try_lock();
    /// assert!(maybe_guard2.is_none());
    /// ```
    #[inline(always)]
    pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
        // The reason for using a strong compare_exchange is explained here:
        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
        if self
            .lock
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            Some(SpinMutexGuard {
                lock: &self.lock,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }

    /// Try to lock this [`SpinMutex`], returning a lock guard if successful.
    ///
    /// Unlike [`SpinMutex::try_lock`], this function is allowed to spuriously fail even when the mutex is unlocked,
    /// which can result in more efficient code on some platforms.
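    ///
    /// # Example
    ///
    /// A minimal sketch of the retry pattern this enables:
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// // A spurious failure just means another iteration of the loop
    /// let guard = loop {
    ///     if let Some(guard) = lock.try_lock_weak() {
    ///         break guard;
    ///     }
    /// };
    /// assert_eq!(*guard, 42);
    /// ```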
    #[inline(always)]
    pub fn try_lock_weak(&self) -> Option<SpinMutexGuard<T>> {
        if self
            .lock
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            Some(SpinMutexGuard {
                lock: &self.lock,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
    /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
    /// such, this is a 'zero-cost' operation.
    ///
    /// # Example
    ///
    /// ```
    /// let mut lock = spin::mutex::SpinMutex::<_>::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner mutex.
        unsafe { &mut *self.data.get() }
    }
}

impl<T: ?Sized + fmt::Debug, R> fmt::Debug for SpinMutex<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_lock() {
            Some(guard) => write!(f, "Mutex {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, " }}")),
            None => write!(f, "Mutex {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default, R> Default for SpinMutex<T, R> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T, R> From<T> for SpinMutex<T, R> {
    fn from(data: T) -> Self {
        Self::new(data)
    }
}

impl<'a, T: ?Sized> SpinMutexGuard<'a, T> {
    /// Leak the lock guard, yielding a mutable reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original [`SpinMutex`].
    ///
    /// ```
    /// let mylock = spin::mutex::SpinMutex::<_>::new(0);
    ///
    /// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
    ///
    /// *data = 1;
    /// assert_eq!(*data, 1);
    /// ```
    #[inline(always)]
    pub fn leak(this: Self) -> &'a mut T {
        // Use ManuallyDrop to avoid stacked-borrow invalidation
        let mut this = ManuallyDrop::new(this);
        // We know statically that only we are referencing data
        unsafe { &mut *this.data }
    }
}

impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        // We know statically that only we are referencing data
        unsafe { &mut *self.data }
    }
}

impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
    /// Dropping the [`SpinMutexGuard`] releases the lock it was created from.
    fn drop(&mut self) {
        self.lock.store(false, Ordering::Release);
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for SpinMutex<(), R> {
    type GuardMarker = lock_api_crate::GuardSend;

    const INIT: Self = Self::new(());

    fn lock(&self) {
        // Prevent guard destructor running
        core::mem::forget(Self::lock(self));
    }

    fn try_lock(&self) -> bool {
        // Prevent guard destructor running
        Self::try_lock(self).map(core::mem::forget).is_some()
    }

    unsafe fn unlock(&self) {
        self.force_unlock();
    }

    fn is_locked(&self) -> bool {
        Self::is_locked(self)
    }
}
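
// With the `lock_api` feature enabled, the impl above lets `SpinMutex<()>` act
// as the raw lock behind `lock_api::Mutex`. A minimal sketch, assuming the
// `lock_api` crate as a direct dependency (`lock_api_crate` is this crate's
// internal alias for it):
//
//     type Spinlock<T> = lock_api::Mutex<spin::mutex::SpinMutex<()>, T>;
//
//     let lock: Spinlock<i32> = Spinlock::new(42);
//     *lock.lock() += 1;
//     assert_eq!(*lock.lock(), 43);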

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    type SpinMutex<T> = super::SpinMutex<T>;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let m = SpinMutex::<_>::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        static M: SpinMutex<()> = SpinMutex::<_>::new(());
        static mut CNT: u32 = 0;
        const J: u32 = 1000;
        const K: u32 = 3;

        fn inc() {
            for _ in 0..J {
                unsafe {
                    let _g = M.lock();
                    CNT += 1;
                }
            }
        }

        let (tx, rx) = channel();
        let mut ts = Vec::new();
        for _ in 0..K {
            let tx2 = tx.clone();
            ts.push(thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            }));
            let tx2 = tx.clone();
            ts.push(thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            }));
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(unsafe { CNT }, J * K * 2);

        for t in ts {
            t.join().unwrap();
        }
    }

    #[test]
    fn try_lock() {
        let mutex = SpinMutex::<_>::new(42);

        // First lock succeeds
        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));

        // Additional lock fails
        let b = mutex.try_lock();
        assert!(b.is_none());

        // After dropping lock, it succeeds again
        ::core::mem::drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

    #[test]
    fn test_into_inner() {
        let m = SpinMutex::<_>::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = SpinMutex::<_>::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = Arc::new(SpinMutex::<_>::new(arc));
        let (tx, rx) = channel();
        let t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
        t.join().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<SpinMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutex_force_lock() {
        let lock = SpinMutex::<_>::new(());
        ::std::mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }
}