/rust/registry/src/index.crates.io-6f17d22bba15001f/parking_lot-0.12.4/src/rwlock.rs
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::raw_rwlock::RawRwLock;

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// This lock uses a task-fair locking policy which avoids both reader and
/// writer starvation. This means that readers trying to acquire the lock will
/// block even if the lock is unlocked when there are writers waiting to acquire
/// the lock. Because of this, attempts to recursively acquire a read lock
/// within a single thread may result in a deadlock.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// # Fairness
///
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same lock in succession. While this
/// improves throughput, because it doesn't force a context switch when a
/// thread tries to re-acquire a rwlock it has just released, it can starve
/// other threads waiting to acquire the lock.
///
/// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350)
/// to ensure that the lock will be fair on average without sacrificing
/// throughput. This is done by forcing a fair unlock on average every 0.5ms,
/// which will force the lock to go to the next thread waiting for the rwlock.
///
/// Additionally, any critical section longer than 1ms will always use a fair
/// unlock, which has a negligible impact on throughput considering the length
/// of the critical section.
///
/// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair`
/// or `RwLockWriteGuard::unlock_fair` when unlocking the rwlock instead of simply
/// dropping the guard.
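///
/// A minimal sketch of forcing a fair unlock (instead of dropping the guard):
///
/// ```
/// use parking_lot::{RwLock, RwLockReadGuard};
///
/// let lock = RwLock::new(0);
/// let guard = lock.read();
/// // Hand the lock directly to the next waiting thread rather than
/// // letting this thread barge back in and re-acquire it.
/// RwLockReadGuard::unlock_fair(guard);
/// ```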
///
/// # Differences from the standard library `RwLock`
///
/// - Supports atomically downgrading a write lock into a read lock (see the
///   second example below).
/// - Task-fair locking policy instead of an unspecified platform default.
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 word of space, whereas the standard library boxes the
///   `RwLock` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
/// - Supports eventual fairness so that the rwlock is fair on average.
/// - Optionally allows making the rwlock fair by calling
///   `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`.
///
/// # Examples
///
/// ```
/// use parking_lot::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
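///
/// A second sketch, showing the atomic write-to-read downgrade listed above:
///
/// ```
/// use parking_lot::{RwLock, RwLockWriteGuard};
///
/// let lock = RwLock::new(1);
/// let mut writer = lock.write();
/// *writer += 1;
/// // Atomically convert the write guard into a read guard; no other
/// // writer can sneak in between the two lock states.
/// let reader = RwLockWriteGuard::downgrade(writer);
/// assert_eq!(*reader, 2);
/// ```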
pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;

/// Creates a new instance of an `RwLock<T>` which is unlocked.
///
/// This allows creating a `RwLock<T>` in a constant context on stable Rust.
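///
/// A minimal usage sketch (the `GLOBAL` static below is illustrative):
///
/// ```
/// use parking_lot::{const_rwlock, RwLock};
///
/// // `const_rwlock` is a `const fn`, so it can initialize a `static`.
/// static GLOBAL: RwLock<i32> = const_rwlock(0);
///
/// *GLOBAL.write() += 1;
/// assert_eq!(*GLOBAL.read(), 1);
/// ```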
pub const fn const_rwlock<T>(val: T) -> RwLock<T> {
    RwLock::const_new(<RawRwLock as lock_api::RawRwLock>::INIT, val)
}

/// RAII structure used to release the shared read access of a lock when
/// dropped.
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>;

/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
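///
/// A minimal sketch of obtaining one via `RwLockReadGuard::map` (the `Point`
/// struct is illustrative):
///
/// ```
/// use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
///
/// struct Point { x: i32, y: i32 }
///
/// let lock = RwLock::new(Point { x: 1, y: 2 });
/// // Narrow the guard down to a single field of the protected data.
/// let x: MappedRwLockReadGuard<'_, i32> = RwLockReadGuard::map(lock.read(), |p| &p.x);
/// assert_eq!(*x, 1);
/// ```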
pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>;

/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
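///
/// A minimal sketch of writing through a guard obtained via
/// `RwLockWriteGuard::map`:
///
/// ```
/// use parking_lot::{RwLock, RwLockWriteGuard};
///
/// let lock = RwLock::new((1, 2));
/// {
///     // Narrow the write guard to the first element of the tuple.
///     let mut first = RwLockWriteGuard::map(lock.write(), |t| &mut t.0);
///     *first += 10;
/// }
/// assert_eq!(*lock.read(), (11, 2));
/// ```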
pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;

/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
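///
/// A minimal sketch of the read-then-upgrade flow this guard enables:
///
/// ```
/// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
///
/// let lock = RwLock::new(5);
/// let upgradable = lock.upgradable_read();
/// assert_eq!(*upgradable, 5);
/// // Atomically upgrade to exclusive access without releasing the lock.
/// let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
/// *writer += 1;
/// assert_eq!(*writer, 6);
/// ```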
pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;

#[cfg(test)]
mod tests {
    use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
    use rand::Rng;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop(l.upgradable_read());
        drop((l.read(), l.read()));
        drop((l.read(), l.upgradable_read()));
        drop(l.write());
    }

    #[test]
    fn frob() {
        const N: u32 = 10;
        const M: u32 = 1000;

        let r = Arc::new(RwLock::new(()));

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            let r = r.clone();
            thread::spawn(move || {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    if rng.gen_bool(1.0 / N as f64) {
                        drop(r.write());
                    } else {
                        drop(r.read());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        let _ = rx.recv();
    }

    #[test]
    fn test_rw_arc_no_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!()
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_ruw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            for _ in 0..10 {
                let mut lock = arc2.write();
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        let mut children = Vec::new();

        // Upgradable readers try to catch the writer in the act and also
        // try to touch the value
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.upgradable_read();
                let tmp = *lock;
                assert!(tmp >= 0);
                thread::yield_now();
                let mut lock = RwLockUpgradableReadGuard::upgrade(lock);
                assert_eq!(tmp, *lock);
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }));
        }

        // Readers try to catch the writers in the act
        for _ in 0..5 {
            let arc4 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc4.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 15);
    }

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_read() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let read_result = lock.try_read();
            assert!(
                read_result.is_none(),
                "try_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while read_guard is in scope"
            );
            assert!(lock.is_locked());
            assert!(!lock.is_locked_exclusive());

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while upgrade_guard is in scope"
            );
            assert!(lock.is_locked());
            assert!(!lock.is_locked_exclusive());

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while write_guard is in scope"
            );
            assert!(lock.is_locked());
            assert!(lock.is_locked_exclusive());

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_upgrade() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_some(),
                "try_upgradable_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_rwlockguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let rwlock = RwLock::new(());
        sync(rwlock.read());
        sync(rwlock.write());
    }

    #[test]
    fn test_rwlock_downgrade() {
        let x = Arc::new(RwLock::new(0));
        let mut handles = Vec::new();
        for _ in 0..8 {
            let x = x.clone();
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    let mut writer = x.write();
                    *writer += 1;
                    let cur_val = *writer;
                    let reader = RwLockWriteGuard::downgrade(writer);
                    assert_eq!(cur_val, *reader);
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap()
        }
        assert_eq!(*x.read(), 800);
    }

    #[test]
    fn test_rwlock_recursive() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let lock1 = arc.read();
        let t = thread::spawn(move || {
            let _lock = arc2.write();
        });

        if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
            thread::sleep(Duration::from_millis(100));
        } else {
            // FIXME: https://github.com/fortanix/rust-sgx/issues/31
            for _ in 0..100 {
                thread::yield_now();
            }
        }

        // A normal read would block here since there is a pending writer
        let lock2 = arc.read_recursive();

        // Unblock the thread and join it.
        drop(lock1);
        drop(lock2);
        t.join().unwrap();
    }

    #[test]
    fn test_rwlock_debug() {
        let x = RwLock::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
        let _lock = x.write();
        assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
    }

    #[test]
    fn test_clone() {
        let rwlock = RwLock::new(Arc::new(1));
        let a = rwlock.read_recursive();
        let b = a.clone();
        assert_eq!(Arc::strong_count(&b), 2);
    }

    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = RwLock::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.read()), *(deserialized.read()));
        assert_eq!(contents, *(deserialized.read()));
    }

    #[test]
    fn test_issue_203() {
        struct Bar(RwLock<()>);

        impl Drop for Bar {
            fn drop(&mut self) {
                let _n = self.0.write();
            }
        }

        thread_local! {
            static B: Bar = Bar(RwLock::new(()));
        }

        thread::spawn(|| {
            B.with(|_| ());

            let a = RwLock::new(());
            let _a = a.read();
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_rw_write_is_locked() {
        let lock = RwLock::new(0isize);
        {
            let _read_guard = lock.read();

            assert!(lock.is_locked());
            assert!(!lock.is_locked_exclusive());
        }

        {
            let _write_guard = lock.write();

            assert!(lock.is_locked());
            assert!(lock.is_locked_exclusive());
        }
    }

    #[test]
    #[cfg(feature = "arc_lock")]
    fn test_issue_430() {
        let lock = std::sync::Arc::new(RwLock::new(0));

        let mut rl = lock.upgradable_read_arc();

        rl.with_upgraded(|_| {
            println!("lock upgrade");
        });

        rl.with_upgraded(|_| {
            println!("lock upgrade");
        });

        drop(lock);
    }
}