/rust/registry/src/index.crates.io-6f17d22bba15001f/spin-0.9.8/src/rwlock.rs
Line | Count | Source |
1 | | //! A lock that provides data access to either one writer or many readers. |
2 | | |
3 | | use crate::{ |
4 | | atomic::{AtomicUsize, Ordering}, |
5 | | RelaxStrategy, Spin, |
6 | | }; |
7 | | use core::{ |
8 | | cell::UnsafeCell, |
9 | | fmt, |
10 | | marker::PhantomData, |
11 | | mem, |
12 | | mem::ManuallyDrop, |
13 | | ops::{Deref, DerefMut}, |
14 | | }; |
15 | | |
16 | | /// A lock that provides data access to either one writer or many readers. |
17 | | /// |
18 | | /// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but uses |
19 | | /// spinning for synchronisation instead. Unlike its namesake, this lock does not
20 | | /// track lock poisoning. |
21 | | /// |
22 | | /// This type of lock allows a number of readers or at most one writer at any |
23 | | /// point in time. The write portion of this lock typically allows modification |
24 | | /// of the underlying data (exclusive access) and the read portion of this lock |
25 | | /// typically allows for read-only access (shared access). |
26 | | /// |
27 | | /// The type parameter `T` represents the data that this lock protects. It is |
28 | | /// required that `T` satisfies `Send` to be shared across tasks and `Sync` to |
29 | | /// allow concurrent access through readers. The RAII guards returned from the |
30 | | /// locking methods implement `Deref` (and `DerefMut` for the `write` methods) |
31 | | /// to allow access to the contents of the lock.
32 | | /// |
33 | | /// An [`RwLockUpgradableGuard`](RwLockUpgradableGuard) can be upgraded to a |
34 | | /// writable guard through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) and
35 | | /// [`RwLockUpgradableGuard::try_upgrade`](RwLockUpgradableGuard::try_upgrade) functions.
36 | | /// Writable or upgradeable guards can be downgraded through their respective `downgrade` |
37 | | /// functions. |
38 | | /// |
39 | | /// Based on Facebook's |
40 | | /// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h). |
41 | | /// This implementation is unfair to writers - if the lock always has readers, then no writers will |
42 | | /// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no |
43 | | /// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken |
44 | | /// when there are existing readers. However, if the lock is that highly contended and writes are
45 | | /// crucial, then this implementation may be a poor choice.
46 | | /// |
47 | | /// # Examples |
48 | | /// |
49 | | /// ``` |
50 | | /// use spin; |
51 | | /// |
52 | | /// let lock = spin::RwLock::new(5); |
53 | | /// |
54 | | /// // many reader locks can be held at once |
55 | | /// { |
56 | | /// let r1 = lock.read(); |
57 | | /// let r2 = lock.read(); |
58 | | /// assert_eq!(*r1, 5); |
59 | | /// assert_eq!(*r2, 5); |
60 | | /// } // read locks are dropped at this point |
61 | | /// |
62 | | /// // only one write lock may be held, however |
63 | | /// { |
64 | | /// let mut w = lock.write(); |
65 | | /// *w += 1; |
66 | | /// assert_eq!(*w, 6); |
67 | | /// } // write lock is dropped here |
68 | | /// ``` |
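 | | ///
 | | /// An upgradeable guard fits a "read now, maybe write later" pattern. A minimal
 | | /// sketch of the behaviour described above: existing readers may coexist with the
 | | /// upgradeable guard, but no new readers (or writers) may start while it is held.
 | | ///
 | | /// ```
 | | /// let lock = spin::RwLock::new(5);
 | | ///
 | | /// let reader = lock.read();                  // an existing reader
 | | /// let upgradeable = lock.upgradeable_read(); // coexists with existing readers
 | | /// assert!(lock.try_read().is_none());        // but new readers are refused
 | | /// assert!(lock.try_write().is_none());       // and so are writers
 | | ///
 | | /// drop(reader);
 | | /// let mut writable = upgradeable.upgrade();  // upgrades once all readers are gone
 | | /// *writable += 1;
 | | /// assert_eq!(*writable, 6);
 | | /// ```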
69 | | pub struct RwLock<T: ?Sized, R = Spin> { |
70 | | phantom: PhantomData<R>, |
71 | | lock: AtomicUsize, |
72 | | data: UnsafeCell<T>, |
73 | | } |
74 | | |
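 | | // Layout of the `lock` word: bit 0 flags an active writer, bit 1 flags a held
 | | // upgradeable guard, and the remaining higher bits count readers in units of `READER`.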
75 | | const READER: usize = 1 << 2; |
76 | | const UPGRADED: usize = 1 << 1; |
77 | | const WRITER: usize = 1; |
78 | | |
79 | | /// A guard that provides immutable data access. |
80 | | /// |
81 | | /// When the guard falls out of scope it will decrement the read count, |
82 | | /// potentially releasing the lock. |
83 | | pub struct RwLockReadGuard<'a, T: 'a + ?Sized> { |
84 | | lock: &'a AtomicUsize, |
85 | | data: *const T, |
86 | | } |
87 | | |
88 | | /// A guard that provides mutable data access. |
89 | | /// |
90 | | /// When the guard falls out of scope it will release the lock. |
91 | | pub struct RwLockWriteGuard<'a, T: 'a + ?Sized, R = Spin> { |
92 | | phantom: PhantomData<R>, |
93 | | inner: &'a RwLock<T, R>, |
94 | | data: *mut T, |
95 | | } |
96 | | |
97 | | /// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]. |
98 | | /// |
99 | | /// No writers or other upgradeable guards can exist while this is in scope. New reader |
100 | | /// creation is prevented (to alleviate writer starvation) but there may be existing readers |
101 | | /// when the lock is acquired. |
102 | | /// |
103 | | /// When the guard falls out of scope it will release the lock. |
104 | | pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized, R = Spin> { |
105 | | phantom: PhantomData<R>, |
106 | | inner: &'a RwLock<T, R>, |
107 | | data: *const T, |
108 | | } |
109 | | |
110 | | // Same unsafe impls as `std::sync::RwLock` |
111 | | unsafe impl<T: ?Sized + Send, R> Send for RwLock<T, R> {} |
112 | | unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLock<T, R> {} |
113 | | |
114 | | unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockWriteGuard<'_, T, R> {} |
115 | | unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockWriteGuard<'_, T, R> {} |
116 | | |
117 | | unsafe impl<T: ?Sized + Sync> Send for RwLockReadGuard<'_, T> {} |
118 | | unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {} |
119 | | |
120 | | unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockUpgradableGuard<'_, T, R> {} |
121 | | unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockUpgradableGuard<'_, T, R> {} |
122 | | |
123 | | impl<T, R> RwLock<T, R> { |
124 | | /// Creates a new spinlock wrapping the supplied data. |
125 | | /// |
126 | | /// May be used statically: |
127 | | /// |
128 | | /// ``` |
129 | | /// use spin; |
130 | | /// |
131 | | /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(()); |
132 | | /// |
133 | | /// fn demo() { |
134 | | /// let lock = RW_LOCK.read(); |
135 | | /// // do something with lock |
136 | | /// drop(lock); |
137 | | /// } |
138 | | /// ``` |
139 | | #[inline] |
140 | 0 | pub const fn new(data: T) -> Self { |
141 | 0 | RwLock { |
142 | 0 | phantom: PhantomData, |
143 | 0 | lock: AtomicUsize::new(0), |
144 | 0 | data: UnsafeCell::new(data), |
145 | 0 | } |
146 | 0 | } |
147 | | |
148 | | /// Consumes this `RwLock`, returning the underlying data. |
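 | | ///
 | | /// A minimal example:
 | | ///
 | | /// ```
 | | /// let lock = spin::RwLock::new(42);
 | | /// assert_eq!(lock.into_inner(), 42);
 | | /// ```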
149 | | #[inline] |
150 | 0 | pub fn into_inner(self) -> T { |
151 | 0 | // We know statically that there are no outstanding references to |
152 | 0 | // `self` so there's no need to lock. |
153 | 0 | let RwLock { data, .. } = self; |
154 | 0 | data.into_inner() |
155 | 0 | } |
156 | | /// Returns a mutable pointer to the underlying data.
157 | | /// |
158 | | /// This is mostly meant to be used for applications which require manual unlocking, but where |
159 | | /// storing both the lock and the pointer to the inner data gets inefficient. |
160 | | /// |
161 | | /// While this is safe, writing to the data is undefined behavior unless the current thread has |
162 | | /// acquired a write lock, and reading requires either a read or write lock. |
163 | | /// |
164 | | /// # Example |
165 | | /// ``` |
166 | | /// let lock = spin::RwLock::new(42); |
167 | | /// |
168 | | /// unsafe { |
169 | | /// core::mem::forget(lock.write()); |
170 | | /// |
171 | | /// assert_eq!(lock.as_mut_ptr().read(), 42); |
172 | | /// lock.as_mut_ptr().write(58); |
173 | | /// |
174 | | /// lock.force_write_unlock(); |
175 | | /// } |
176 | | /// |
177 | | /// assert_eq!(*lock.read(), 58); |
178 | | /// |
179 | | /// ``` |
180 | | #[inline(always)] |
181 | 0 | pub fn as_mut_ptr(&self) -> *mut T { |
182 | 0 | self.data.get() |
183 | 0 | } |
184 | | } |
185 | | |
186 | | impl<T: ?Sized, R: RelaxStrategy> RwLock<T, R> { |
187 | | /// Locks this rwlock with shared read access, blocking the current thread |
188 | | /// until it can be acquired. |
189 | | /// |
190 | | /// The calling thread will be blocked until there are no more writers which |
191 | | /// hold the lock. There may be other readers currently inside the lock when |
192 | | /// this method returns. This method does not provide any guarantees with |
193 | | /// respect to the ordering of whether contending readers or writers will
194 | | /// acquire the lock first. |
195 | | /// |
196 | | /// Returns an RAII guard which will release this thread's shared access |
197 | | /// once it is dropped. |
198 | | /// |
199 | | /// ``` |
200 | | /// let mylock = spin::RwLock::new(0); |
201 | | /// { |
202 | | /// let mut data = mylock.read(); |
203 | | /// // The lock is now locked and the data can be read |
204 | | /// println!("{}", *data); |
205 | | /// // The lock is dropped |
206 | | /// } |
207 | | /// ``` |
208 | | #[inline] |
209 | 0 | pub fn read(&self) -> RwLockReadGuard<T> { |
210 | | loop { |
211 | 0 | match self.try_read() { |
212 | 0 | Some(guard) => return guard, |
213 | 0 | None => R::relax(), |
214 | | } |
215 | | } |
216 | 0 | } |
217 | | |
218 | | /// Lock this rwlock with exclusive write access, blocking the current |
219 | | /// thread until it can be acquired. |
220 | | /// |
221 | | /// This function will not return while other writers or other readers |
222 | | /// currently have access to the lock. |
223 | | /// |
224 | | /// Returns an RAII guard which will drop the write access of this rwlock |
225 | | /// when dropped. |
226 | | /// |
227 | | /// ``` |
228 | | /// let mylock = spin::RwLock::new(0); |
229 | | /// { |
230 | | /// let mut data = mylock.write(); |
231 | | /// // The lock is now locked and the data can be written |
232 | | /// *data += 1; |
233 | | /// // The lock is dropped |
234 | | /// } |
235 | | /// ``` |
236 | | #[inline] |
237 | 0 | pub fn write(&self) -> RwLockWriteGuard<T, R> { |
238 | | loop { |
239 | 0 | match self.try_write_internal(false) { |
240 | 0 | Some(guard) => return guard, |
241 | 0 | None => R::relax(), |
242 | | } |
243 | | } |
244 | 0 | } |
245 | | |
246 | | /// Obtain a readable lock guard that can later be upgraded to a writable lock guard. |
247 | | /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method. |
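 | | ///
 | | /// A short sketch of the usual flow:
 | | ///
 | | /// ```
 | | /// let mylock = spin::RwLock::new(0);
 | | ///
 | | /// let upgradeable = mylock.upgradeable_read();
 | | /// assert_eq!(*upgradeable, 0);
 | | ///
 | | /// let mut writable = upgradeable.upgrade();
 | | /// *writable += 1;
 | | /// assert_eq!(*writable, 1);
 | | /// ```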
248 | | #[inline] |
249 | 0 | pub fn upgradeable_read(&self) -> RwLockUpgradableGuard<T, R> { |
250 | | loop { |
251 | 0 | match self.try_upgradeable_read() { |
252 | 0 | Some(guard) => return guard, |
253 | 0 | None => R::relax(), |
254 | | } |
255 | | } |
256 | 0 | } |
257 | | } |
258 | | |
259 | | impl<T: ?Sized, R> RwLock<T, R> { |
260 | | // Acquire a read lock, returning the new lock value. |
261 | 0 | fn acquire_reader(&self) -> usize { |
262 | | // An arbitrary cap that allows us to catch overflows long before they happen |
263 | | const MAX_READERS: usize = core::usize::MAX / READER / 2; |
264 | | |
265 | 0 | let value = self.lock.fetch_add(READER, Ordering::Acquire); |
266 | 0 |
267 | 0 | if value > MAX_READERS * READER { |
268 | 0 | self.lock.fetch_sub(READER, Ordering::Relaxed); |
269 | 0 | panic!("Too many lock readers, cannot safely proceed"); |
270 | | } else { |
271 | 0 | value |
272 | 0 | } |
273 | 0 | } |
274 | | |
275 | | /// Attempt to acquire this lock with shared read access. |
276 | | /// |
277 | | /// This function will never block and will return immediately if `read` |
278 | | /// would otherwise succeed. Returns `Some` of an RAII guard which will |
279 | | /// release the shared access of this thread when dropped, or `None` if the |
280 | | /// access could not be granted. This method does not provide any |
281 | | /// guarantees with respect to the ordering of whether contending readers
282 | | /// or writers will acquire the lock first. |
283 | | /// |
284 | | /// ``` |
285 | | /// let mylock = spin::RwLock::new(0); |
286 | | /// { |
287 | | /// match mylock.try_read() { |
288 | | /// Some(data) => { |
289 | | /// // The lock is now locked and the data can be read |
290 | | /// println!("{}", *data); |
291 | | /// // The lock is dropped |
292 | | /// }, |
293 | | /// None => (), // no cigar |
294 | | /// }; |
295 | | /// } |
296 | | /// ``` |
297 | | #[inline] |
298 | 0 | pub fn try_read(&self) -> Option<RwLockReadGuard<T>> { |
299 | 0 | let value = self.acquire_reader(); |
300 | 0 |
301 | 0 | // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held. |
302 | 0 | // This helps reduce writer starvation. |
303 | 0 | if value & (WRITER | UPGRADED) != 0 { |
304 | | // Lock is taken, undo. |
305 | 0 | self.lock.fetch_sub(READER, Ordering::Release); |
306 | 0 | None |
307 | | } else { |
308 | 0 | Some(RwLockReadGuard { |
309 | 0 | lock: &self.lock, |
310 | 0 | data: unsafe { &*self.data.get() }, |
311 | 0 | }) |
312 | | } |
313 | 0 | } |
314 | | |
315 | | /// Return the number of readers that currently hold the lock (including upgradable readers). |
316 | | /// |
317 | | /// # Safety |
318 | | /// |
319 | | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' |
320 | | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. |
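 | | ///
 | | /// A best-effort snapshot, as noted above:
 | | ///
 | | /// ```
 | | /// let lock = spin::RwLock::new(());
 | | ///
 | | /// let _r1 = lock.read();
 | | /// let _r2 = lock.read();
 | | /// assert_eq!(lock.reader_count(), 2);
 | | /// ```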
321 | 0 | pub fn reader_count(&self) -> usize { |
322 | 0 | let state = self.lock.load(Ordering::Relaxed); |
323 | 0 | state / READER + (state & UPGRADED) / UPGRADED |
324 | 0 | } |
325 | | |
326 | | /// Return the number of writers that currently hold the lock. |
327 | | /// |
328 | | /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`. |
329 | | /// |
330 | | /// # Safety |
331 | | /// |
332 | | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' |
333 | | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. |
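 | | ///
 | | /// For example:
 | | ///
 | | /// ```
 | | /// let lock = spin::RwLock::new(());
 | | /// assert_eq!(lock.writer_count(), 0);
 | | ///
 | | /// let _w = lock.write();
 | | /// assert_eq!(lock.writer_count(), 1);
 | | /// ```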
334 | 0 | pub fn writer_count(&self) -> usize { |
335 | 0 | (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER |
336 | 0 | } |
337 | | |
338 | | /// Force decrement the reader count. |
339 | | /// |
340 | | /// # Safety |
341 | | /// |
342 | | /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s |
343 | | /// live, or if called more times than `read` has been called, but can be |
344 | | /// useful in FFI contexts where the caller doesn't know how to deal with |
345 | | /// RAII. The underlying atomic operation uses `Ordering::Release`. |
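 | | ///
 | | /// A sketch of the intended pattern (it mirrors the `test_force_read_decrement` test below):
 | | ///
 | | /// ```
 | | /// let lock = spin::RwLock::new(());
 | | ///
 | | /// // A read guard leaked, e.g. across an FFI boundary.
 | | /// core::mem::forget(lock.read());
 | | /// assert!(lock.try_write().is_none());
 | | ///
 | | /// // The leaked guard will never run its destructor, so release its count manually.
 | | /// unsafe { lock.force_read_decrement(); }
 | | /// assert!(lock.try_write().is_some());
 | | /// ```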
346 | | #[inline] |
347 | 0 | pub unsafe fn force_read_decrement(&self) { |
348 | 0 | debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0); |
349 | 0 | self.lock.fetch_sub(READER, Ordering::Release); |
350 | 0 | } |
351 | | |
352 | | /// Force unlock exclusive write access. |
353 | | /// |
354 | | /// # Safety |
355 | | /// |
356 | | /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s |
357 | | /// live, or if called when there are current readers, but can be useful in |
358 | | /// FFI contexts where the caller doesn't know how to deal with RAII. The |
359 | | /// underlying atomic operation uses `Ordering::Release`. |
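 | | ///
 | | /// A sketch of the intended pattern (it mirrors the `test_force_write_unlock` test below):
 | | ///
 | | /// ```
 | | /// let lock = spin::RwLock::new(());
 | | ///
 | | /// core::mem::forget(lock.write());
 | | /// assert!(lock.try_read().is_none());
 | | ///
 | | /// // The forgotten guard will never unlock, so do it manually.
 | | /// unsafe { lock.force_write_unlock(); }
 | | /// assert!(lock.try_read().is_some());
 | | /// ```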
360 | | #[inline] |
361 | 0 | pub unsafe fn force_write_unlock(&self) { |
362 | 0 | debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0); |
363 | 0 | self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release); |
364 | 0 | } |
365 | | |
366 | | #[inline(always)] |
367 | 0 | fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T, R>> { |
368 | 0 | if compare_exchange( |
369 | 0 | &self.lock, |
370 | 0 | 0, |
371 | 0 | WRITER, |
372 | 0 | Ordering::Acquire, |
373 | 0 | Ordering::Relaxed, |
374 | 0 | strong, |
375 | 0 | ) |
376 | 0 | .is_ok() |
377 | | { |
378 | 0 | Some(RwLockWriteGuard { |
379 | 0 | phantom: PhantomData, |
380 | 0 | inner: self, |
381 | 0 | data: unsafe { &mut *self.data.get() }, |
382 | 0 | }) |
383 | | } else { |
384 | 0 | None |
385 | | } |
386 | 0 | } |
387 | | |
388 | | /// Attempt to lock this rwlock with exclusive write access. |
389 | | /// |
390 | | /// This function does not ever block, and it will return `None` if a call |
391 | | /// to `write` would otherwise block. If successful, an RAII guard is |
392 | | /// returned. |
393 | | /// |
394 | | /// ``` |
395 | | /// let mylock = spin::RwLock::new(0); |
396 | | /// { |
397 | | /// match mylock.try_write() { |
398 | | /// Some(mut data) => { |
399 | | /// // The lock is now locked and the data can be written |
400 | | /// *data += 1; |
401 | | /// // The lock is implicitly dropped |
402 | | /// }, |
403 | | /// None => (), // no cigar |
404 | | /// }; |
405 | | /// } |
406 | | /// ``` |
407 | | #[inline] |
408 | 0 | pub fn try_write(&self) -> Option<RwLockWriteGuard<T, R>> { |
409 | 0 | self.try_write_internal(true) |
410 | 0 | } |
411 | | |
412 | | /// Tries to obtain an upgradeable lock guard. |
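 | | ///
 | | /// Only one upgradeable guard may exist at a time:
 | | ///
 | | /// ```
 | | /// let mylock = spin::RwLock::new(0);
 | | ///
 | | /// let upgradeable = mylock.try_upgradeable_read().unwrap();
 | | /// assert_eq!(*upgradeable, 0);
 | | /// assert!(mylock.try_upgradeable_read().is_none());
 | | /// ```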
413 | | #[inline] |
414 | 0 | pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradableGuard<T, R>> { |
415 | 0 | if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 { |
416 | 0 | Some(RwLockUpgradableGuard { |
417 | 0 | phantom: PhantomData, |
418 | 0 | inner: self, |
419 | 0 | data: unsafe { &*self.data.get() }, |
420 | 0 | }) |
421 | | } else { |
422 | | // We can't clear the UPGRADED bit just yet, as another upgradeable or write lock currently holds it.
423 | | // When that lock is released, it will clear the bit.
424 | 0 | None |
425 | | } |
426 | 0 | } |
427 | | |
428 | | /// Returns a mutable reference to the underlying data. |
429 | | /// |
430 | | /// Since this call borrows the `RwLock` mutably, no actual locking needs to |
431 | | /// take place -- the mutable borrow statically guarantees no locks exist. |
432 | | /// |
433 | | /// # Examples |
434 | | /// |
435 | | /// ``` |
436 | | /// let mut lock = spin::RwLock::new(0); |
437 | | /// *lock.get_mut() = 10; |
438 | | /// assert_eq!(*lock.read(), 10); |
439 | | /// ``` |
440 | 0 | pub fn get_mut(&mut self) -> &mut T { |
441 | 0 | // We know statically that there are no other references to `self`, so |
442 | 0 | // there's no need to lock the inner lock. |
443 | 0 | unsafe { &mut *self.data.get() } |
444 | 0 | } |
445 | | } |
446 | | |
447 | | impl<T: ?Sized + fmt::Debug, R> fmt::Debug for RwLock<T, R> { |
448 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
449 | 0 | match self.try_read() { |
450 | 0 | Some(guard) => write!(f, "RwLock {{ data: ") |
451 | 0 | .and_then(|()| (&*guard).fmt(f)) |
452 | 0 | .and_then(|()| write!(f, "}}")), |
453 | 0 | None => write!(f, "RwLock {{ <locked> }}"), |
454 | | } |
455 | 0 | } |
456 | | } |
457 | | |
458 | | impl<T: ?Sized + Default, R> Default for RwLock<T, R> { |
459 | 0 | fn default() -> Self { |
460 | 0 | Self::new(Default::default()) |
461 | 0 | } |
462 | | } |
463 | | |
464 | | impl<T, R> From<T> for RwLock<T, R> { |
465 | 0 | fn from(data: T) -> Self { |
466 | 0 | Self::new(data) |
467 | 0 | } |
468 | | } |
469 | | |
470 | | impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> { |
471 | | /// Leak the lock guard, yielding a reference to the underlying data. |
472 | | /// |
473 | | /// Note that this function permanently leaves the original lock read-locked: further reads are still possible, but it can never be write-locked again.
474 | | /// |
475 | | /// ``` |
476 | | /// let mylock = spin::RwLock::new(0); |
477 | | /// |
478 | | /// let data: &i32 = spin::RwLockReadGuard::leak(mylock.read()); |
479 | | /// |
480 | | /// assert_eq!(*data, 0); |
481 | | /// ``` |
482 | | #[inline] |
483 | 0 | pub fn leak(this: Self) -> &'rwlock T { |
484 | 0 | let this = ManuallyDrop::new(this); |
485 | 0 | // Safety: We know statically that only we are referencing data |
486 | 0 | unsafe { &*this.data } |
487 | 0 | } |
488 | | } |
489 | | |
490 | | impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'rwlock, T> { |
491 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
492 | 0 | fmt::Debug::fmt(&**self, f) |
493 | 0 | } |
494 | | } |
495 | | |
496 | | impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock, T> { |
497 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
498 | 0 | fmt::Display::fmt(&**self, f) |
499 | 0 | } |
500 | | } |
501 | | |
502 | | impl<'rwlock, T: ?Sized, R: RelaxStrategy> RwLockUpgradableGuard<'rwlock, T, R> { |
503 | | /// Upgrades an upgradeable lock guard to a writable lock guard. |
504 | | /// |
505 | | /// ``` |
506 | | /// let mylock = spin::RwLock::new(0); |
507 | | /// |
508 | | /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable |
509 | | /// let writable = upgradeable.upgrade(); |
510 | | /// ``` |
511 | | #[inline] |
512 | 0 | pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T, R> { |
513 | | loop { |
514 | 0 | self = match self.try_upgrade_internal(false) { |
515 | 0 | Ok(guard) => return guard, |
516 | 0 | Err(e) => e, |
517 | 0 | }; |
518 | 0 |
519 | 0 | R::relax(); |
520 | | } |
521 | 0 | } |
522 | | } |
523 | | |
524 | | impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> { |
525 | | #[inline(always)] |
526 | 0 | fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> { |
527 | 0 | if compare_exchange( |
528 | 0 | &self.inner.lock, |
529 | 0 | UPGRADED, |
530 | 0 | WRITER, |
531 | 0 | Ordering::Acquire, |
532 | 0 | Ordering::Relaxed, |
533 | 0 | strong, |
534 | 0 | ) |
535 | 0 | .is_ok() |
536 | | { |
537 | 0 | let inner = self.inner; |
538 | 0 |
539 | 0 | // Forget the old guard so its destructor doesn't run (before mutably aliasing data below) |
540 | 0 | mem::forget(self); |
541 | 0 |
542 | 0 | // Upgrade successful |
543 | 0 | Ok(RwLockWriteGuard { |
544 | 0 | phantom: PhantomData, |
545 | 0 | inner, |
546 | 0 | data: unsafe { &mut *inner.data.get() }, |
547 | 0 | }) |
548 | | } else { |
549 | 0 | Err(self) |
550 | | } |
551 | 0 | } |
552 | | |
553 | | /// Tries to upgrade an upgradeable lock guard to a writable lock guard. |
554 | | /// |
555 | | /// ``` |
556 | | /// let mylock = spin::RwLock::new(0); |
557 | | /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable |
558 | | /// |
559 | | /// match upgradeable.try_upgrade() { |
560 | | /// Ok(writable) => /* upgrade successful - use writable lock guard */ (), |
561 | | /// Err(upgradeable) => /* upgrade unsuccessful */ (), |
562 | | /// }; |
563 | | /// ``` |
564 | | #[inline] |
565 | 0 | pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> { |
566 | 0 | self.try_upgrade_internal(true) |
567 | 0 | } |
568 | | |
569 | | #[inline] |
570 | | /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin. |
571 | | /// |
572 | | /// ``` |
573 | | /// let mylock = spin::RwLock::new(1); |
574 | | /// |
575 | | /// let upgradeable = mylock.upgradeable_read(); |
576 | | /// assert!(mylock.try_read().is_none()); |
577 | | /// assert_eq!(*upgradeable, 1); |
578 | | /// |
579 | | /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin |
580 | | /// assert!(mylock.try_read().is_some()); |
581 | | /// assert_eq!(*readable, 1); |
582 | | /// ``` |
583 | 0 | pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> { |
584 | 0 | // Reserve the read guard for ourselves |
585 | 0 | self.inner.acquire_reader(); |
586 | 0 |
587 | 0 | let inner = self.inner; |
588 | 0 |
589 | 0 | // Dropping self removes the UPGRADED bit |
590 | 0 | mem::drop(self); |
591 | 0 |
592 | 0 | RwLockReadGuard { |
593 | 0 | lock: &inner.lock, |
594 | 0 | data: unsafe { &*inner.data.get() }, |
595 | 0 | } |
596 | 0 | } |
597 | | |
598 | | /// Leak the lock guard, yielding a reference to the underlying data. |
599 | | /// |
600 | | /// Note that this function will permanently lock the original lock. |
601 | | /// |
602 | | /// ``` |
603 | | /// let mylock = spin::RwLock::new(0); |
604 | | /// |
605 | | /// let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read()); |
606 | | /// |
607 | | /// assert_eq!(*data, 0); |
608 | | /// ``` |
609 | | #[inline] |
610 | 0 | pub fn leak(this: Self) -> &'rwlock T { |
611 | 0 | let this = ManuallyDrop::new(this); |
612 | 0 | // Safety: We know statically that only we are referencing data |
613 | 0 | unsafe { &*this.data } |
614 | 0 | } |
615 | | } |
616 | | |
617 | | impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockUpgradableGuard<'rwlock, T, R> { |
618 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
619 | 0 | fmt::Debug::fmt(&**self, f) |
620 | 0 | } |
621 | | } |
622 | | |
623 | | impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockUpgradableGuard<'rwlock, T, R> { |
624 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
625 | 0 | fmt::Display::fmt(&**self, f) |
626 | 0 | } |
627 | | } |
628 | | |
629 | | impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> { |
630 | | /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin. |
631 | | /// |
632 | | /// ``` |
633 | | /// let mylock = spin::RwLock::new(0); |
634 | | /// |
635 | | /// let mut writable = mylock.write(); |
636 | | /// *writable = 1; |
637 | | /// |
638 | | /// let readable = writable.downgrade(); // This is guaranteed not to spin |
639 | | /// # let readable_2 = mylock.try_read().unwrap(); |
640 | | /// assert_eq!(*readable, 1); |
641 | | /// ``` |
642 | | #[inline] |
643 | 0 | pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> { |
644 | 0 | // Reserve the read guard for ourselves |
645 | 0 | self.inner.acquire_reader(); |
646 | 0 |
647 | 0 | let inner = self.inner; |
648 | 0 |
649 | 0 | // Dropping self clears the WRITER (and any UPGRADED) bit, releasing exclusive access
650 | 0 | mem::drop(self); |
651 | 0 |
652 | 0 | RwLockReadGuard { |
653 | 0 | lock: &inner.lock, |
654 | 0 | data: unsafe { &*inner.data.get() }, |
655 | 0 | } |
656 | 0 | } |
657 | | |
658 | | /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin. |
659 | | /// |
660 | | /// ``` |
661 | | /// let mylock = spin::RwLock::new(0); |
662 | | /// |
663 | | /// let mut writable = mylock.write(); |
664 | | /// *writable = 1; |
665 | | /// |
666 | | /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin |
667 | | /// assert_eq!(*readable, 1); |
668 | | /// ``` |
669 | | #[inline] |
670 | 0 | pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T, R> { |
671 | 0 | debug_assert_eq!( |
672 | 0 | self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED), |
673 | | WRITER |
674 | | ); |
675 | | |
676 | | // Swap the WRITER bit for the UPGRADED bit, reserving the upgradeable guard for ourselves
677 | 0 | self.inner.lock.store(UPGRADED, Ordering::Release); |
678 | 0 |
679 | 0 | let inner = self.inner; |
680 | 0 |
681 | 0 | // Dropping self removes the UPGRADED bit |
682 | 0 | mem::forget(self); |
683 | 0 |
684 | 0 | RwLockUpgradableGuard { |
685 | 0 | phantom: PhantomData, |
686 | 0 | inner, |
687 | 0 | data: unsafe { &*inner.data.get() }, |
688 | 0 | } |
689 | 0 | } |
690 | | |
691 | | /// Leak the lock guard, yielding a mutable reference to the underlying data. |
692 | | /// |
693 | | /// Note that this function will permanently lock the original lock. |
694 | | /// |
695 | | /// ``` |
696 | | /// let mylock = spin::RwLock::new(0); |
697 | | /// |
698 | | /// let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write()); |
699 | | /// |
700 | | /// *data = 1; |
701 | | /// assert_eq!(*data, 1); |
702 | | /// ``` |
703 | | #[inline] |
704 | 0 | pub fn leak(this: Self) -> &'rwlock mut T { |
705 | 0 | let mut this = ManuallyDrop::new(this); |
706 | 0 | // Safety: We know statically that only we are referencing data |
707 | 0 | unsafe { &mut *this.data } |
708 | 0 | } |
709 | | } |
710 | | |
711 | | impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockWriteGuard<'rwlock, T, R> { |
712 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
713 | 0 | fmt::Debug::fmt(&**self, f) |
714 | 0 | } |
715 | | } |
716 | | |
717 | | impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockWriteGuard<'rwlock, T, R> { |
718 | 0 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
719 | 0 | fmt::Display::fmt(&**self, f) |
720 | 0 | } |
721 | | } |
722 | | |
723 | | impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> { |
724 | | type Target = T; |
725 | | |
726 | 0 | fn deref(&self) -> &T { |
727 | 0 | // Safety: We know statically that only we are referencing data |
728 | 0 | unsafe { &*self.data } |
729 | 0 | } |
730 | | } |
731 | | |
732 | | impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> { |
733 | | type Target = T; |
734 | | |
735 | 0 | fn deref(&self) -> &T { |
736 | 0 | // Safety: We know statically that only we are referencing data |
737 | 0 | unsafe { &*self.data } |
738 | 0 | } |
739 | | } |
740 | | |
741 | | impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> { |
742 | | type Target = T; |
743 | | |
744 | 0 | fn deref(&self) -> &T { |
745 | 0 | // Safety: We know statically that only we are referencing data |
746 | 0 | unsafe { &*self.data } |
747 | 0 | } |
748 | | } |
749 | | |
750 | | impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R> { |
751 | 0 | fn deref_mut(&mut self) -> &mut T { |
752 | 0 | // Safety: We know statically that only we are referencing data |
753 | 0 | unsafe { &mut *self.data } |
754 | 0 | } |
755 | | } |
756 | | |
757 | | impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> { |
758 | 0 | fn drop(&mut self) { |
759 | 0 | debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0); |
760 | 0 | self.lock.fetch_sub(READER, Ordering::Release); |
761 | 0 | } |
762 | | } |
763 | | |
764 | | impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R> { |
765 | 0 | fn drop(&mut self) { |
766 | 0 | debug_assert_eq!( |
767 | 0 | self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED), |
768 | | UPGRADED |
769 | | ); |
770 | 0 | self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel); |
771 | 0 | } |
772 | | } |
773 | | |
774 | | impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> { |
775 | 0 | fn drop(&mut self) { |
776 | 0 | debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER); |
777 | | |
778 | | // Writer is responsible for clearing both WRITER and UPGRADED bits. |
779 | | // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held. |
780 | 0 | self.inner |
781 | 0 | .lock |
782 | 0 | .fetch_and(!(WRITER | UPGRADED), Ordering::Release); |
783 | 0 | } |
784 | | } |
785 | | |
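 | | // Selects between the strong and weak forms of compare-exchange. The weak form may
 | | // fail spuriously but can be cheaper on some architectures, so it backs the retry
 | | // loops (`write`, `upgrade`), while the one-shot `try_*` methods use the strong form.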
786 | | #[inline(always)] |
787 | 0 | fn compare_exchange( |
788 | 0 | atomic: &AtomicUsize, |
789 | 0 | current: usize, |
790 | 0 | new: usize, |
791 | 0 | success: Ordering, |
792 | 0 | failure: Ordering, |
793 | 0 | strong: bool, |
794 | 0 | ) -> Result<usize, usize> { |
795 | 0 | if strong { |
796 | 0 | atomic.compare_exchange(current, new, success, failure) |
797 | | } else { |
798 | 0 | atomic.compare_exchange_weak(current, new, success, failure) |
799 | | } |
800 | 0 | } |
801 | | |
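 | | // With the `lock_api` feature enabled, the zero-sized `RwLock<()>` acts as a raw lock
 | | // for the `lock_api` wrapper types. A possible usage sketch, assuming the caller
 | | // depends on the `lock_api` crate directly under that name:
 | | //
 | | //     type SpinRwLock<T> = lock_api::RwLock<spin::RwLock<()>, T>;
 | | //
 | | //     let lock = SpinRwLock::new(5);
 | | //     *lock.write() += 1;
 | | //     assert_eq!(*lock.read(), 6);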
802 | | #[cfg(feature = "lock_api")] |
803 | | unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLock for RwLock<(), R> { |
804 | | type GuardMarker = lock_api_crate::GuardSend; |
805 | | |
806 | | const INIT: Self = Self::new(()); |
807 | | |
808 | | #[inline(always)] |
809 | 0 | fn lock_exclusive(&self) { |
810 | 0 | // Prevent guard destructor running |
811 | 0 | core::mem::forget(self.write()); |
812 | 0 | } |
813 | | |
814 | | #[inline(always)] |
815 | 0 | fn try_lock_exclusive(&self) -> bool { |
816 | 0 | // Prevent guard destructor running |
817 | 0 | self.try_write().map(|g| core::mem::forget(g)).is_some() |
818 | 0 | } |
819 | | |
820 | | #[inline(always)] |
821 | 0 | unsafe fn unlock_exclusive(&self) { |
822 | 0 | drop(RwLockWriteGuard { |
823 | 0 | inner: self, |
824 | 0 | data: &mut (), |
825 | 0 | phantom: PhantomData, |
826 | 0 | }); |
827 | 0 | } |
828 | | |
829 | | #[inline(always)] |
830 | 0 | fn lock_shared(&self) { |
831 | 0 | // Prevent guard destructor running |
832 | 0 | core::mem::forget(self.read()); |
833 | 0 | } |
834 | | |
835 | | #[inline(always)] |
836 | 0 | fn try_lock_shared(&self) -> bool { |
837 | 0 | // Prevent guard destructor running |
838 | 0 | self.try_read().map(|g| core::mem::forget(g)).is_some() |
839 | 0 | } |
840 | | |
841 | | #[inline(always)] |
842 | 0 | unsafe fn unlock_shared(&self) { |
843 | 0 | drop(RwLockReadGuard { |
844 | 0 | lock: &self.lock, |
845 | 0 | data: &(), |
846 | 0 | }); |
847 | 0 | } |
848 | | |
849 | | #[inline(always)] |
850 | 0 | fn is_locked(&self) -> bool { |
851 | 0 | self.lock.load(Ordering::Relaxed) != 0 |
852 | 0 | } |
853 | | } |
854 | | |
855 | | #[cfg(feature = "lock_api")] |
856 | | unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgrade for RwLock<(), R> { |
857 | | #[inline(always)] |
858 | 0 | fn lock_upgradable(&self) { |
859 | 0 | // Prevent guard destructor running |
860 | 0 | core::mem::forget(self.upgradeable_read()); |
861 | 0 | } |
862 | | |
863 | | #[inline(always)] |
864 | 0 | fn try_lock_upgradable(&self) -> bool { |
865 | 0 | // Prevent guard destructor running |
866 | 0 | self.try_upgradeable_read() |
867 | 0 | .map(|g| core::mem::forget(g)) |
868 | 0 | .is_some() |
869 | 0 | } |
870 | | |
871 | | #[inline(always)] |
872 | 0 | unsafe fn unlock_upgradable(&self) { |
873 | 0 | drop(RwLockUpgradableGuard { |
874 | 0 | inner: self, |
875 | 0 | data: &(), |
876 | 0 | phantom: PhantomData, |
877 | 0 | }); |
878 | 0 | } |
879 | | |
880 | | #[inline(always)] |
881 | 0 | unsafe fn upgrade(&self) { |
882 | 0 | let tmp_guard = RwLockUpgradableGuard { |
883 | 0 | inner: self, |
884 | 0 | data: &(), |
885 | 0 | phantom: PhantomData, |
886 | 0 | }; |
887 | 0 | core::mem::forget(tmp_guard.upgrade()); |
888 | 0 | } |
889 | | |
890 | | #[inline(always)] |
891 | 0 | unsafe fn try_upgrade(&self) -> bool { |
892 | 0 | let tmp_guard = RwLockUpgradableGuard { |
893 | 0 | inner: self, |
894 | 0 | data: &(), |
895 | 0 | phantom: PhantomData, |
896 | 0 | }; |
897 | 0 | tmp_guard |
898 | 0 | .try_upgrade() |
899 | 0 | .map(|g| core::mem::forget(g)) |
900 | 0 | .is_ok() |
901 | 0 | } |
902 | | } |
903 | | |
904 | | #[cfg(feature = "lock_api")] |
905 | | unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockDowngrade for RwLock<(), R> { |
906 | 0 | unsafe fn downgrade(&self) { |
907 | 0 | let tmp_guard = RwLockWriteGuard { |
908 | 0 | inner: self, |
909 | 0 | data: &mut (), |
910 | 0 | phantom: PhantomData, |
911 | 0 | }; |
912 | 0 | core::mem::forget(tmp_guard.downgrade()); |
913 | 0 | } |
914 | | } |
915 | | |
916 | | #[cfg(feature = "lock_api")]
917 | | unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgradeDowngrade for RwLock<(), R> {
918 | | unsafe fn downgrade_upgradable(&self) { |
919 | | let tmp_guard = RwLockUpgradableGuard { |
920 | | inner: self, |
921 | | data: &(), |
922 | | phantom: PhantomData, |
923 | | }; |
924 | | core::mem::forget(tmp_guard.downgrade()); |
925 | | } |
926 | | |
927 | | unsafe fn downgrade_to_upgradable(&self) { |
928 | | let tmp_guard = RwLockWriteGuard { |
929 | | inner: self, |
930 | | data: &mut (), |
931 | | phantom: PhantomData, |
932 | | }; |
933 | | core::mem::forget(tmp_guard.downgrade_to_upgradeable()); |
934 | | } |
935 | | } |
936 | | |
937 | | #[cfg(test)] |
938 | | mod tests { |
939 | | use std::prelude::v1::*; |
940 | | |
941 | | use std::sync::atomic::{AtomicUsize, Ordering}; |
942 | | use std::sync::mpsc::channel; |
943 | | use std::sync::Arc; |
944 | | use std::thread; |
945 | | |
946 | | type RwLock<T> = super::RwLock<T>; |
947 | | |
948 | | #[derive(Eq, PartialEq, Debug)] |
949 | | struct NonCopy(i32); |
950 | | |
951 | | #[test] |
952 | | fn smoke() { |
953 | | let l = RwLock::new(()); |
954 | | drop(l.read()); |
955 | | drop(l.write()); |
956 | | drop((l.read(), l.read())); |
957 | | drop(l.write()); |
958 | | } |
959 | | |
960 | | // TODO: needs RNG |
961 | | //#[test] |
962 | | //fn frob() { |
963 | | // static R: RwLock = RwLock::new(); |
964 | | // const N: usize = 10; |
965 | | // const M: usize = 1000; |
966 | | // |
967 | | // let (tx, rx) = channel::<()>(); |
968 | | // for _ in 0..N { |
969 | | // let tx = tx.clone(); |
970 | | // thread::spawn(move|| { |
971 | | // let mut rng = rand::thread_rng(); |
972 | | // for _ in 0..M { |
973 | | // if rng.gen_weighted_bool(N) { |
974 | | // drop(R.write()); |
975 | | // } else { |
976 | | // drop(R.read()); |
977 | | // } |
978 | | // } |
979 | | // drop(tx); |
980 | | // }); |
981 | | // } |
982 | | // drop(tx); |
983 | | // let _ = rx.recv(); |
984 | | // unsafe { R.destroy(); } |
985 | | //} |
986 | | |
987 | | #[test] |
988 | | fn test_rw_arc() { |
989 | | let arc = Arc::new(RwLock::new(0)); |
990 | | let arc2 = arc.clone(); |
991 | | let (tx, rx) = channel(); |
992 | | |
993 | | let t = thread::spawn(move || { |
994 | | let mut lock = arc2.write(); |
995 | | for _ in 0..10 { |
996 | | let tmp = *lock; |
997 | | *lock = -1; |
998 | | thread::yield_now(); |
999 | | *lock = tmp + 1; |
1000 | | } |
1001 | | tx.send(()).unwrap(); |
1002 | | }); |
1003 | | |
1004 | | // Readers try to catch the writer in the act |
1005 | | let mut children = Vec::new(); |
1006 | | for _ in 0..5 { |
1007 | | let arc3 = arc.clone(); |
1008 | | children.push(thread::spawn(move || { |
1009 | | let lock = arc3.read(); |
1010 | | assert!(*lock >= 0); |
1011 | | })); |
1012 | | } |
1013 | | |
1014 | | // Wait for children to pass their asserts |
1015 | | for r in children { |
1016 | | assert!(r.join().is_ok()); |
1017 | | } |
1018 | | |
1019 | | // Wait for writer to finish |
1020 | | rx.recv().unwrap(); |
1021 | | let lock = arc.read(); |
1022 | | assert_eq!(*lock, 10); |
1023 | | |
1024 | | assert!(t.join().is_ok()); |
1025 | | } |
1026 | | |
1027 | | #[test] |
1028 | | fn test_rw_access_in_unwind() { |
1029 | | let arc = Arc::new(RwLock::new(1)); |
1030 | | let arc2 = arc.clone(); |
1031 | | let _ = thread::spawn(move || -> () { |
1032 | | struct Unwinder { |
1033 | | i: Arc<RwLock<isize>>, |
1034 | | } |
1035 | | impl Drop for Unwinder { |
1036 | | fn drop(&mut self) { |
1037 | | let mut lock = self.i.write(); |
1038 | | *lock += 1; |
1039 | | } |
1040 | | } |
1041 | | let _u = Unwinder { i: arc2 }; |
1042 | | panic!(); |
1043 | | }) |
1044 | | .join(); |
1045 | | let lock = arc.read(); |
1046 | | assert_eq!(*lock, 2); |
1047 | | } |
1048 | | |
1049 | | #[test] |
1050 | | fn test_rwlock_unsized() { |
1051 | | let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); |
1052 | | { |
1053 | | let b = &mut *rw.write(); |
1054 | | b[0] = 4; |
1055 | | b[2] = 5; |
1056 | | } |
1057 | | let comp: &[i32] = &[4, 2, 5]; |
1058 | | assert_eq!(&*rw.read(), comp); |
1059 | | } |
1060 | | |
1061 | | #[test] |
1062 | | fn test_rwlock_try_write() { |
1063 | | use std::mem::drop; |
1064 | | |
1065 | | let lock = RwLock::new(0isize); |
1066 | | let read_guard = lock.read(); |
1067 | | |
1068 | | let write_result = lock.try_write(); |
1069 | | match write_result { |
1070 | | None => (), |
1071 | | Some(_) => assert!( |
1072 | | false, |
1073 | | "try_write should not succeed while read_guard is in scope" |
1074 | | ), |
1075 | | } |
1076 | | |
1077 | | drop(read_guard); |
1078 | | } |
1079 | | |
1080 | | #[test] |
1081 | | fn test_rw_try_read() { |
1082 | | let m = RwLock::new(0); |
1083 | | ::std::mem::forget(m.write()); |
1084 | | assert!(m.try_read().is_none()); |
1085 | | } |
1086 | | |
1087 | | #[test] |
1088 | | fn test_into_inner() { |
1089 | | let m = RwLock::new(NonCopy(10)); |
1090 | | assert_eq!(m.into_inner(), NonCopy(10)); |
1091 | | } |
1092 | | |
1093 | | #[test] |
1094 | | fn test_into_inner_drop() { |
1095 | | struct Foo(Arc<AtomicUsize>); |
1096 | | impl Drop for Foo { |
1097 | | fn drop(&mut self) { |
1098 | | self.0.fetch_add(1, Ordering::SeqCst); |
1099 | | } |
1100 | | } |
1101 | | let num_drops = Arc::new(AtomicUsize::new(0)); |
1102 | | let m = RwLock::new(Foo(num_drops.clone())); |
1103 | | assert_eq!(num_drops.load(Ordering::SeqCst), 0); |
1104 | | { |
1105 | | let _inner = m.into_inner(); |
1106 | | assert_eq!(num_drops.load(Ordering::SeqCst), 0); |
1107 | | } |
1108 | | assert_eq!(num_drops.load(Ordering::SeqCst), 1); |
1109 | | } |
1110 | | |
1111 | | #[test] |
1112 | | fn test_force_read_decrement() { |
1113 | | let m = RwLock::new(()); |
1114 | | ::std::mem::forget(m.read()); |
1115 | | ::std::mem::forget(m.read()); |
1116 | | ::std::mem::forget(m.read()); |
1117 | | assert!(m.try_write().is_none()); |
1118 | | unsafe { |
1119 | | m.force_read_decrement(); |
1120 | | m.force_read_decrement(); |
1121 | | } |
1122 | | assert!(m.try_write().is_none()); |
1123 | | unsafe { |
1124 | | m.force_read_decrement(); |
1125 | | } |
1126 | | assert!(m.try_write().is_some()); |
1127 | | } |
1128 | | |
1129 | | #[test] |
1130 | | fn test_force_write_unlock() { |
1131 | | let m = RwLock::new(()); |
1132 | | ::std::mem::forget(m.write()); |
1133 | | assert!(m.try_read().is_none()); |
1134 | | unsafe { |
1135 | | m.force_write_unlock(); |
1136 | | } |
1137 | | assert!(m.try_read().is_some()); |
1138 | | } |
1139 | | |
1140 | | #[test] |
1141 | | fn test_upgrade_downgrade() { |
1142 | | let m = RwLock::new(()); |
1143 | | { |
1144 | | let _r = m.read(); |
1145 | | let upg = m.try_upgradeable_read().unwrap(); |
1146 | | assert!(m.try_read().is_none()); |
1147 | | assert!(m.try_write().is_none()); |
1148 | | assert!(upg.try_upgrade().is_err()); |
1149 | | } |
1150 | | { |
1151 | | let w = m.write(); |
1152 | | assert!(m.try_upgradeable_read().is_none()); |
1153 | | let _r = w.downgrade(); |
1154 | | assert!(m.try_upgradeable_read().is_some()); |
1155 | | assert!(m.try_read().is_some()); |
1156 | | assert!(m.try_write().is_none()); |
1157 | | } |
1158 | | { |
1159 | | let _u = m.upgradeable_read(); |
1160 | | assert!(m.try_upgradeable_read().is_none()); |
1161 | | } |
1162 | | |
1163 | | assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok()); |
1164 | | } |
1165 | | } |