/rust/registry/src/index.crates.io-6f17d22bba15001f/sharded-slab-0.1.7/src/page/stack.rs
Line | Count | Source |
1 | | use crate::cfg; |
2 | | use crate::sync::atomic::{AtomicUsize, Ordering}; |
3 | | use std::{fmt, marker::PhantomData}; |
4 | | |
5 | | pub(super) struct TransferStack<C = cfg::DefaultConfig> { |
6 | | head: AtomicUsize, |
7 | | _cfg: PhantomData<fn(C)>, |
8 | | } |
9 | | |
10 | | impl<C: cfg::Config> TransferStack<C> { |
11 | 0 | pub(super) fn new() -> Self { |
12 | 0 | Self { |
13 | 0 | head: AtomicUsize::new(super::Addr::<C>::NULL), |
14 | 0 | _cfg: PhantomData, |
15 | 0 | } |
16 | 0 | } |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack>::new |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack<_>>::new |
17 | | |
18 | 0 | pub(super) fn pop_all(&self) -> Option<usize> { |
19 | 0 | let val = self.head.swap(super::Addr::<C>::NULL, Ordering::Acquire); |
20 | 0 | test_println!("-> pop {:#x}", val); |
21 | 0 | if val == super::Addr::<C>::NULL { |
22 | 0 | None |
23 | | } else { |
24 | 0 | Some(val) |
25 | | } |
26 | 0 | } |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack>::pop_all |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack<_>>::pop_all |
27 | | |
28 | 0 | fn push(&self, new_head: usize, before: impl Fn(usize)) { |
29 | 0 | // We loop to win the race to set the new head. The `next` variable |
30 | 0 | // is the next slot on the stack which needs to be pointed to by the |
31 | 0 | // new head. |
32 | 0 | let mut next = self.head.load(Ordering::Relaxed); |
33 | | loop { |
34 | 0 | test_println!("-> next {:#x}", next); |
35 | 0 | before(next); |
36 | 0 | |
37 | 0 | match self |
38 | 0 | .head |
39 | 0 | .compare_exchange(next, new_head, Ordering::Release, Ordering::Relaxed) |
40 | | { |
41 | | // lost the race! |
42 | 0 | Err(actual) => { |
43 | 0 | test_println!("-> retry!"); |
44 | 0 | next = actual; |
45 | | } |
46 | | Ok(_) => { |
47 | 0 | test_println!("-> successful; next={:#x}", next); |
48 | 0 | return; |
49 | 0 | } |
50 | 0 | } |
51 | 0 | } |
52 | 0 | } |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack>::push::<<sharded_slab::page::stack::TransferStack as sharded_slab::page::FreeList<sharded_slab::cfg::DefaultConfig>>::push<tracing_subscriber::registry::sharded::DataInner>::{closure#0}> |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack<_>>::push::<_> |
53 | | } |
54 | | |
55 | | impl<C: cfg::Config> super::FreeList<C> for TransferStack<C> { |
56 | 0 | fn push<T>(&self, new_head: usize, slot: &super::Slot<T, C>) { |
57 | 0 | self.push(new_head, |next| slot.set_next(next)) |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack as sharded_slab::page::FreeList<sharded_slab::cfg::DefaultConfig>>::push::<tracing_subscriber::registry::sharded::DataInner>::{closure#0} |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack<_> as sharded_slab::page::FreeList<_>>::push::<_>::{closure#0} |
58 | 0 | } |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack as sharded_slab::page::FreeList<sharded_slab::cfg::DefaultConfig>>::push::<tracing_subscriber::registry::sharded::DataInner> |
 | | Unexecuted instantiation: <sharded_slab::page::stack::TransferStack<_> as sharded_slab::page::FreeList<_>>::push::<_> |
59 | | } |
60 | | |
61 | | impl<C> fmt::Debug for TransferStack<C> { |
62 | 0 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
63 | 0 | f.debug_struct("TransferStack") |
64 | 0 | .field( |
65 | 0 | "head", |
66 | 0 | &format_args!("{:#0x}", &self.head.load(Ordering::Relaxed)), |
67 | 0 | ) |
68 | 0 | .finish() |
69 | 0 | } |
70 | | } |
71 | | |
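The comment in `push` (source lines 29-31 above) describes a classic compare-exchange retry loop: read the current head, let the caller link the new slot to it, then try to install the new head, and on failure retry with the value that was actually observed. The successful exchange uses `Ordering::Release` so that it pairs with the `Ordering::Acquire` swap in `pop_all`, making everything written before the push (for example via the `before` closure) visible to the thread that drains the stack. A minimal standalone sketch of this pattern, using only `std` atomics and a plain `usize` sentinel in place of the crate's `cfg::Config`/`Addr`/`Slot` machinery (the `NULL` constant and `link` closure below are illustrative stand-ins, not part of the crate's API), could look like this:

use std::sync::atomic::{AtomicUsize, Ordering};

// Stand-in for `Addr::<C>::NULL`.
const NULL: usize = usize::MAX;

struct TransferStackSketch {
    head: AtomicUsize,
}

impl TransferStackSketch {
    fn new() -> Self {
        Self { head: AtomicUsize::new(NULL) }
    }

    /// Push `new_head`, letting `link` record the previous head (the slot's
    /// `next` index) before the new head becomes visible to poppers.
    fn push(&self, new_head: usize, link: impl Fn(usize)) {
        let mut next = self.head.load(Ordering::Relaxed);
        loop {
            // Point the new slot at the current head *before* publishing it.
            link(next);
            match self
                .head
                .compare_exchange(next, new_head, Ordering::Release, Ordering::Relaxed)
            {
                Ok(_) => return,
                // Lost the race: another thread moved `head`; retry with what we saw.
                Err(actual) => next = actual,
            }
        }
    }

    /// Take the entire stack in one step; the swap leaves it empty for pushers.
    fn pop_all(&self) -> Option<usize> {
        let head = self.head.swap(NULL, Ordering::Acquire);
        (head != NULL).then_some(head)
    }
}

Because `pop_all` drains the whole list with a single swap rather than popping one node at a time, the only loop in the type is the pusher's lost-CAS retry; poppers never spin against each other.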
72 | | #[cfg(all(loom, test))] |
73 | | mod test { |
74 | | use super::*; |
75 | | use crate::{sync::UnsafeCell, test_util}; |
76 | | use loom::thread; |
77 | | use std::sync::Arc; |
78 | | |
79 | | #[test] |
80 | | fn transfer_stack() { |
81 | | test_util::run_model("transfer_stack", || { |
82 | | let causalities = [UnsafeCell::new(999), UnsafeCell::new(999)]; |
83 | | let shared = Arc::new((causalities, TransferStack::<cfg::DefaultConfig>::new())); |
84 | | let shared1 = shared.clone(); |
85 | | let shared2 = shared.clone(); |
86 | | |
87 | | let t1 = thread::spawn(move || { |
88 | | let (causalities, stack) = &*shared1; |
89 | | stack.push(0, |prev| { |
90 | | causalities[0].with_mut(|c| unsafe { |
91 | | *c = 0; |
92 | | }); |
93 | | test_println!("prev={:#x}", prev) |
94 | | }); |
95 | | }); |
96 | | let t2 = thread::spawn(move || { |
97 | | let (causalities, stack) = &*shared2; |
98 | | stack.push(1, |prev| { |
99 | | causalities[1].with_mut(|c| unsafe { |
100 | | *c = 1; |
101 | | }); |
102 | | test_println!("prev={:#x}", prev) |
103 | | }); |
104 | | }); |
105 | | |
106 | | let (causalities, stack) = &*shared; |
107 | | let mut idx = stack.pop_all(); |
108 | | while idx == None { |
109 | | idx = stack.pop_all(); |
110 | | thread::yield_now(); |
111 | | } |
112 | | let idx = idx.unwrap(); |
113 | | causalities[idx].with(|val| unsafe { |
114 | | assert_eq!( |
115 | | *val, idx, |
116 | | "UnsafeCell write must happen-before index is pushed to the stack!" |
117 | | ); |
118 | | }); |
119 | | |
120 | | t1.join().unwrap(); |
121 | | t2.join().unwrap(); |
122 | | }); |
123 | | } |
124 | | } |
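The loom test above checks exactly the release/acquire pairing described earlier: the write through the `UnsafeCell` must happen-before the moment the index can be observed via `pop_all`, otherwise the `assert_eq!` could read the stale `999`. Outside of loom, the same reasoning can be sketched with plain `std` threads and atomics; this is an illustrative, hypothetical example (the `Slot` wrapper and the sentinel `NULL` are stand-ins), not code from the crate:

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

const NULL: usize = usize::MAX;

// Plain, non-atomic payload whose cross-thread visibility is guaranteed only
// by the release/acquire pair on `head`.
struct Slot(UnsafeCell<usize>);
unsafe impl Sync for Slot {}

fn main() {
    let shared = Arc::new((Slot(UnsafeCell::new(999)), AtomicUsize::new(NULL)));

    let pusher = {
        let shared = Arc::clone(&shared);
        thread::spawn(move || {
            let (slot, head) = &*shared;
            // Write the payload first...
            unsafe { *slot.0.get() = 42 };
            // ...then publish index 0 with a release store (standing in for the
            // successful Release compare_exchange in `TransferStack::push`).
            head.store(0, Ordering::Release);
        })
    };

    let (slot, head) = &*shared;
    // Acquire load (standing in for the Acquire swap in `pop_all`): once the
    // published index is seen, the earlier non-atomic write is visible too.
    while head.load(Ordering::Acquire) == NULL {
        thread::yield_now();
    }
    assert_eq!(unsafe { *slot.0.get() }, 42);

    pusher.join().unwrap();
}

If the release/acquire pair were weakened to `Relaxed`, that visibility guarantee would disappear; catching that kind of ordering bug is what the assertion message in the loom test is guarding against.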