/rust/registry/src/index.crates.io-6f17d22bba15001f/spin-0.10.0/src/mutex.rs
//! Locks that have the same behaviour as a mutex.
//!
//! The [`Mutex`] in the root of the crate can be configured using the `ticket_mutex` feature.
//! If it's enabled, [`TicketMutex`] and [`TicketMutexGuard`] will be re-exported as [`Mutex`]
//! and [`MutexGuard`]; otherwise the [`SpinMutex`] and its guard will be re-exported.
//!
//! `ticket_mutex` is disabled by default.
//!
//! [`Mutex`]: ./struct.Mutex.html
//! [`MutexGuard`]: ./struct.MutexGuard.html
//! [`TicketMutex`]: ./ticket/struct.TicketMutex.html
//! [`TicketMutexGuard`]: ./ticket/struct.TicketMutexGuard.html
//! [`SpinMutex`]: ./spin/struct.SpinMutex.html
//! [`SpinMutexGuard`]: ./spin/struct.SpinMutexGuard.html
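//!
//! A minimal usage sketch; the interface is the same whichever backing
//! implementation the feature flags select:
//!
//! ```
//! let lock = spin::Mutex::new(0);
//! *lock.lock() += 1;
//! assert_eq!(*lock.lock(), 1);
//! ```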

#[cfg(feature = "spin_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
pub mod spin;
#[cfg(feature = "spin_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
pub use self::spin::{SpinMutex, SpinMutexGuard};

#[cfg(feature = "ticket_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
pub mod ticket;
#[cfg(feature = "ticket_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
pub use self::ticket::{TicketMutex, TicketMutexGuard};

#[cfg(feature = "fair_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
pub mod fair;
#[cfg(feature = "fair_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
pub use self::fair::{FairMutex, FairMutexGuard, Starvation};

use crate::{RelaxStrategy, Spin};
use core::{
    fmt,
    ops::{Deref, DerefMut},
};

#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex")))]
compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex` or `use_ticket_mutex`. One of these is required.");

#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
type InnerMutex<T, R> = self::spin::SpinMutex<T, R>;
#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
type InnerMutexGuard<'a, T> = self::spin::SpinMutexGuard<'a, T>;

#[cfg(feature = "use_ticket_mutex")]
type InnerMutex<T, R> = self::ticket::TicketMutex<T, R>;
#[cfg(feature = "use_ticket_mutex")]
type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>;

/// A spin-based lock providing mutually exclusive access to data.
///
/// The implementation uses either a ticket mutex or a regular spin mutex depending on whether the `spin_mutex` or
/// `ticket_mutex` feature flag is enabled.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::Mutex::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::Mutex::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// # let mut ts = Vec::new();
/// for _ in 0..thread_count {
///     let my_barrier = barrier.clone();
///     let my_lock = spin_mutex.clone();
/// # let t =
///     std::thread::spawn(move || {
///         let mut guard = my_lock.lock();
///         *guard += 1;
///
///         // Release the lock to prevent a deadlock
///         drop(guard);
///         my_barrier.wait();
///     });
/// # ts.push(t);
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
///
/// # for t in ts {
/// # t.join().unwrap();
/// # }
/// ```
pub struct Mutex<T: ?Sized, R = Spin> {
    inner: InnerMutex<T, R>,
}

unsafe impl<T: ?Sized + Send, R> Sync for Mutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for Mutex<T, R> {}

/// A generic guard that protects access to some data and is backed by
/// either a ticket lock or a normal spin mutex.
///
/// For more info see [`TicketMutexGuard`] or [`SpinMutexGuard`].
///
/// [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
/// [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
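///
/// A minimal sketch of guard usage; the guard dereferences to the protected
/// data and releases the lock when dropped:
///
/// ```
/// let lock = spin::Mutex::new(42);
/// {
///     let guard = lock.lock();
///     assert_eq!(*guard, 42);
/// }
/// assert!(!lock.is_locked());
/// ```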
pub struct MutexGuard<'a, T: 'a + ?Sized> {
    inner: InnerMutexGuard<'a, T>,
}

impl<T, R> Mutex<T, R> {
    /// Creates a new [`Mutex`] wrapping the supplied data.
    ///
    /// # Example
    ///
    /// ```
    /// use spin::Mutex;
    ///
    /// static MUTEX: Mutex<()> = Mutex::new(());
    ///
    /// fn demo() {
    ///     let lock = MUTEX.lock();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline(always)]
    pub const fn new(value: T) -> Self {
        Self {
            inner: InnerMutex::new(value),
        }
    }

    /// Consumes this [`Mutex`] and unwraps the underlying data.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::Mutex::new(42);
    /// assert_eq!(42, lock.into_inner());
    /// ```
    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.inner.into_inner()
    }
}

impl<T: ?Sized, R: RelaxStrategy> Mutex<T, R> {
    /// Locks the [`Mutex`] and returns a guard that permits access to the inner data.
    ///
    /// The returned value may be dereferenced for data access
    /// and the lock will be dropped when the guard falls out of scope.
    ///
    /// ```
    /// let lock = spin::Mutex::new(0);
    /// {
    ///     let mut data = lock.lock();
    ///     // The lock is now locked and the data can be accessed
    ///     *data += 1;
    ///     // The lock is implicitly dropped at the end of the scope
    /// }
    /// ```
    #[inline(always)]
    pub fn lock(&self) -> MutexGuard<T> {
        MutexGuard {
            inner: self.inner.lock(),
        }
    }
}

impl<T: ?Sized, R> Mutex<T, R> {
    /// Returns `true` if the lock is currently held.
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
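    ///
    /// A minimal sketch of using the result as a heuristic only:
    ///
    /// ```
    /// let lock = spin::Mutex::new(42);
    /// assert!(!lock.is_locked());
    ///
    /// let guard = lock.lock();
    /// assert!(lock.is_locked());
    ///
    /// drop(guard);
    /// assert!(!lock.is_locked());
    /// ```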
    #[inline(always)]
    pub fn is_locked(&self) -> bool {
        self.inner.is_locked()
    }

    /// Force unlock this [`Mutex`].
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the lock is not held by the current
    /// thread. However, this can be useful in some instances for exposing the
    /// lock to FFI that doesn't know how to deal with RAII.
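    ///
    /// A minimal sketch: the guard is forgotten so that its destructor never
    /// runs, and the lock is then released manually.
    ///
    /// ```
    /// let lock = spin::Mutex::new(42);
    ///
    /// core::mem::forget(lock.lock());
    /// assert!(lock.is_locked());
    ///
    /// // Safety: the lock is held and no guard for it is alive.
    /// unsafe { lock.force_unlock(); }
    /// assert!(!lock.is_locked());
    /// ```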
    #[inline(always)]
    pub unsafe fn force_unlock(&self) {
        self.inner.force_unlock()
    }

    /// Try to lock this [`Mutex`], returning a lock guard if successful.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::Mutex::new(42);
    ///
    /// let maybe_guard = lock.try_lock();
    /// assert!(maybe_guard.is_some());
    ///
    /// // `maybe_guard` is still held, so the second call fails
    /// let maybe_guard2 = lock.try_lock();
    /// assert!(maybe_guard2.is_none());
    /// ```
    #[inline(always)]
    pub fn try_lock(&self) -> Option<MutexGuard<T>> {
        self.inner
            .try_lock()
            .map(|guard| MutexGuard { inner: guard })
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the [`Mutex`] mutably, and a mutable reference is guaranteed to be exclusive in Rust,
    /// no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As such,
    /// this is a 'zero-cost' operation.
    ///
    /// # Example
    ///
    /// ```
    /// let mut lock = spin::Mutex::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.inner.get_mut()
    }
}

impl<T: ?Sized + fmt::Debug, R> fmt::Debug for Mutex<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.inner, f)
    }
}

impl<T: ?Sized + Default, R> Default for Mutex<T, R> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T, R> From<T> for Mutex<T, R> {
    fn from(data: T) -> Self {
        Self::new(data)
    }
}

impl<'a, T: ?Sized> MutexGuard<'a, T> {
    /// Leak the lock guard, yielding a mutable reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original [`Mutex`].
    ///
    /// ```
    /// let mylock = spin::Mutex::new(0);
    ///
    /// let data: &mut i32 = spin::MutexGuard::leak(mylock.lock());
    ///
    /// *data = 1;
    /// assert_eq!(*data, 1);
    /// ```
    #[inline(always)]
    pub fn leak(this: Self) -> &'a mut T {
        InnerMutexGuard::leak(this.inner)
    }
}

impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        &*self.inner
    }
}

impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut *self.inner
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for Mutex<(), R> {
    type GuardMarker = lock_api_crate::GuardSend;

    const INIT: Self = Self::new(());

    fn lock(&self) {
        // Prevent guard destructor running
        core::mem::forget(Self::lock(self));
    }

    fn try_lock(&self) -> bool {
        // Prevent guard destructor running
        Self::try_lock(self).map(core::mem::forget).is_some()
    }

    unsafe fn unlock(&self) {
        self.force_unlock();
    }

    fn is_locked(&self) -> bool {
        self.inner.is_locked()
    }
}
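
// A rough usage sketch for the `lock_api` integration above. This assumes the
// `lock_api` feature of this crate is enabled and that the downstream crate
// depends on the external `lock_api` crate directly; the alias name is
// illustrative only.
//
//     type ApiMutex<T> = lock_api::Mutex<Mutex<()>, T>;
//
//     let lock: ApiMutex<u32> = ApiMutex::new(42);
//     assert_eq!(*lock.lock(), 42);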