/rust/registry/src/index.crates.io-6f17d22bba15001f/sharded-slab-0.1.7/src/cfg.rs
Line | Count | Source (jump to first uncovered line) |
1 | | use crate::page::{ |
2 | | slot::{Generation, RefCount}, |
3 | | Addr, |
4 | | }; |
5 | | use crate::Pack; |
6 | | use std::{fmt, marker::PhantomData}; |
/// Configuration parameters which can be overridden to tune the behavior of a slab.
pub trait Config: Sized {
    /// The maximum number of threads which can access the slab.
    ///
    /// This value (rounded to a power of two) determines the number of shards
    /// in the slab. If a thread is created, accesses the slab, and then terminates,
    /// its shard may be reused and thus does not count against the maximum
    /// number of threads once the thread has terminated.
    const MAX_THREADS: usize = DefaultConfig::MAX_THREADS;

    /// The maximum number of pages in each shard in the slab.
    ///
    /// This value, in combination with `INITIAL_PAGE_SIZE`, determines how many
    /// bits of each index are used to represent page addresses.
    const MAX_PAGES: usize = DefaultConfig::MAX_PAGES;

    /// The size of the first page in each shard.
    ///
    /// When a page in a shard has been filled with values, a new page
    /// will be allocated that is twice as large as the previous page. Thus, the
    /// second page will be twice this size, and the third will be four times
    /// this size, and so on.
    ///
    /// Note that page sizes must be powers of two. If this value is not a power
    /// of two, it will be rounded to the next power of two.
    const INITIAL_PAGE_SIZE: usize = DefaultConfig::INITIAL_PAGE_SIZE;

    /// Sets a number of high-order bits in each index which are reserved from
    /// user code.
    ///
    /// Note that these bits are taken from the generation counter; if the page
    /// address and thread IDs are configured to use a large number of bits,
    /// reserving additional bits will decrease the period of the generation
    /// counter. These should thus be used relatively sparingly, to ensure that
    /// generation counters are able to effectively prevent the ABA problem.
    ///
    /// No bits are reserved by default.
    const RESERVED_BITS: usize = 0;
}
41 | | |
42 | | pub(crate) trait CfgPrivate: Config { |
43 | | const USED_BITS: usize = Generation::<Self>::LEN + Generation::<Self>::SHIFT; |
44 | | const INITIAL_SZ: usize = next_pow2(Self::INITIAL_PAGE_SIZE); |
45 | | const MAX_SHARDS: usize = next_pow2(Self::MAX_THREADS - 1); |
46 | | const ADDR_INDEX_SHIFT: usize = Self::INITIAL_SZ.trailing_zeros() as usize + 1; |
47 | | |
48 | 0 | fn page_size(n: usize) -> usize { |
49 | 0 | Self::INITIAL_SZ * 2usize.pow(n as _) |
50 | 0 | } Unexecuted instantiation: <sharded_slab::cfg::DefaultConfig as sharded_slab::cfg::CfgPrivate>::page_size Unexecuted instantiation: <_ as sharded_slab::cfg::CfgPrivate>::page_size |
51 | | |
52 | 0 | fn debug() -> DebugConfig<Self> { |
53 | 0 | DebugConfig { _cfg: PhantomData } |
54 | 0 | } |
55 | | |
56 | 0 | fn validate() { |
57 | 0 | assert!( |
58 | 0 | Self::INITIAL_SZ.is_power_of_two(), |
59 | 0 | "invalid Config: {:#?}", |
60 | 0 | Self::debug(), |
61 | | ); |
62 | 0 | assert!( |
63 | 0 | Self::INITIAL_SZ <= Addr::<Self>::BITS, |
64 | 0 | "invalid Config: {:#?}", |
65 | 0 | Self::debug() |
66 | | ); |
67 | | |
68 | 0 | assert!( |
69 | 0 | Generation::<Self>::BITS >= 3, |
70 | 0 | "invalid Config: {:#?}\ngeneration counter should be at least 3 bits!", |
71 | 0 | Self::debug() |
72 | | ); |
73 | | |
74 | 0 | assert!( |
75 | 0 | Self::USED_BITS <= WIDTH, |
76 | 0 | "invalid Config: {:#?}\ntotal number of bits per index is too large to fit in a word!", |
77 | 0 | Self::debug() |
78 | | ); |
79 | | |
80 | 0 | assert!( |
81 | 0 | WIDTH - Self::USED_BITS >= Self::RESERVED_BITS, |
82 | 0 | "invalid Config: {:#?}\nindices are too large to fit reserved bits!", |
83 | 0 | Self::debug() |
84 | | ); |
85 | | |
86 | 0 | assert!( |
87 | 0 | RefCount::<Self>::MAX > 1, |
88 | 0 | "invalid config: {:#?}\n maximum concurrent references would be {}", |
89 | 0 | Self::debug(), |
90 | | RefCount::<Self>::MAX, |
91 | | ); |
92 | 0 | } Unexecuted instantiation: <sharded_slab::cfg::DefaultConfig as sharded_slab::cfg::CfgPrivate>::validate Unexecuted instantiation: <_ as sharded_slab::cfg::CfgPrivate>::validate |
93 | | |
94 | | #[inline(always)] |
95 | 0 | fn unpack<A: Pack<Self>>(packed: usize) -> A { |
96 | 0 | A::from_packed(packed) |
97 | 0 | } Unexecuted instantiation: <sharded_slab::cfg::DefaultConfig as sharded_slab::cfg::CfgPrivate>::unpack::<sharded_slab::page::Addr> Unexecuted instantiation: <sharded_slab::cfg::DefaultConfig as sharded_slab::cfg::CfgPrivate>::unpack::<sharded_slab::page::slot::Generation> Unexecuted instantiation: <sharded_slab::cfg::DefaultConfig as sharded_slab::cfg::CfgPrivate>::unpack::<sharded_slab::tid::Tid<sharded_slab::cfg::DefaultConfig>> Unexecuted instantiation: <_ as sharded_slab::cfg::CfgPrivate>::unpack::<_> |
98 | | |
99 | | #[inline(always)] |
100 | 0 | fn unpack_addr(packed: usize) -> Addr<Self> { |
101 | 0 | Self::unpack(packed) |
102 | 0 | } Unexecuted instantiation: <sharded_slab::cfg::DefaultConfig as sharded_slab::cfg::CfgPrivate>::unpack_addr Unexecuted instantiation: <_ as sharded_slab::cfg::CfgPrivate>::unpack_addr |
103 | | |
104 | | #[inline(always)] |
105 | 0 | fn unpack_tid(packed: usize) -> crate::Tid<Self> { |
106 | 0 | Self::unpack(packed) |
107 | 0 | } Unexecuted instantiation: <sharded_slab::cfg::DefaultConfig as sharded_slab::cfg::CfgPrivate>::unpack_tid Unexecuted instantiation: <_ as sharded_slab::cfg::CfgPrivate>::unpack_tid |
108 | | |
109 | | #[inline(always)] |
110 | 0 | fn unpack_gen(packed: usize) -> Generation<Self> { |
111 | 0 | Self::unpack(packed) |
112 | 0 | } Unexecuted instantiation: <sharded_slab::cfg::DefaultConfig as sharded_slab::cfg::CfgPrivate>::unpack_gen Unexecuted instantiation: <_ as sharded_slab::cfg::CfgPrivate>::unpack_gen |
113 | | } |
114 | | impl<C: Config> CfgPrivate for C {} |
115 | | |
/// Default slab configuration values.
#[derive(Copy, Clone)]
pub struct DefaultConfig {
    // Private zero-sized field: prevents `DefaultConfig` from being
    // constructed outside this module.
    _p: (),
}
121 | | |
/// Zero-sized helper whose `fmt::Debug` impl prints the derived parameters
/// of the configuration type `C`.
pub(crate) struct DebugConfig<C: Config> {
    // `PhantomData<fn(C)>` mentions `C` without owning a `C`, so the struct
    // stays zero-sized and places no auto-trait requirements on `C`.
    _cfg: PhantomData<fn(C)>,
}
125 | | |
126 | | pub(crate) const WIDTH: usize = std::mem::size_of::<usize>() * 8; |
127 | | |
/// Returns the smallest power of two greater than or equal to `n`
/// (`next_pow2(0)` yields 1).
///
/// Implemented with bit arithmetic so it remains usable in `const` contexts:
/// the position of `n`'s highest set bit is derived from the leading-zero
/// count, and the shift is reduced by one when `n` is already a power of two
/// (exactly one bit set).
pub(crate) const fn next_pow2(n: usize) -> usize {
    let word_bits = std::mem::size_of::<usize>() * 8;
    let already_pow2 = (n.count_ones() == 1) as usize;
    1 << (word_bits - n.leading_zeros() as usize - already_pow2)
}
133 | | |
134 | | // === impl DefaultConfig === |
135 | | |
impl Config for DefaultConfig {
    const INITIAL_PAGE_SIZE: usize = 32;

    #[cfg(target_pointer_width = "64")]
    const MAX_THREADS: usize = 4096;
    #[cfg(target_pointer_width = "32")]
    // TODO(eliza): can we find enough bits to give 32-bit platforms more threads?
    const MAX_THREADS: usize = 128;

    // Half the word width; the remaining bits are shared by the other index
    // fields (thread ID, generation, reserved bits).
    const MAX_PAGES: usize = WIDTH / 2;
}
147 | | |
148 | | impl fmt::Debug for DefaultConfig { |
149 | 0 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
150 | 0 | Self::debug().fmt(f) |
151 | 0 | } |
152 | | } |
153 | | |
154 | | impl<C: Config> fmt::Debug for DebugConfig<C> { |
155 | 0 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
156 | 0 | f.debug_struct(std::any::type_name::<C>()) |
157 | 0 | .field("initial_page_size", &C::INITIAL_SZ) |
158 | 0 | .field("max_shards", &C::MAX_SHARDS) |
159 | 0 | .field("max_pages", &C::MAX_PAGES) |
160 | 0 | .field("used_bits", &C::USED_BITS) |
161 | 0 | .field("reserved_bits", &C::RESERVED_BITS) |
162 | 0 | .field("pointer_width", &WIDTH) |
163 | 0 | .field("max_concurrent_references", &RefCount::<C>::MAX) |
164 | 0 | .finish() |
165 | 0 | } |
166 | | } |
167 | | |
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_util;
    use crate::Slab;

    #[test]
    #[cfg_attr(loom, ignore)]
    #[should_panic]
    fn validates_max_refs() {
        struct GiantGenConfig;

        // Give the generation counter as many bits as possible by minimizing
        // every other parameter; this leaves only a single bit for the slot
        // reference count, which `validate` must reject.
        impl Config for GiantGenConfig {
            const INITIAL_PAGE_SIZE: usize = 1;
            const MAX_THREADS: usize = 1;
            const MAX_PAGES: usize = 1;
        }

        let _slab = Slab::<usize>::new_with_config::<GiantGenConfig>();
    }

    // Insert enough values into a default-configured slab to span multiple
    // pages, round-tripping each one through `get`.
    #[test]
    #[cfg_attr(loom, ignore)]
    fn big() {
        let slab = Slab::new();

        for value in 0..10000 {
            println!("{:?}", value);
            let key = slab.insert(value).expect("insert");
            assert_eq!(slab.get(key).expect("get"), value);
        }
    }

    // Same round-trip exercise, but with a non-default (tiny) page size.
    #[test]
    #[cfg_attr(loom, ignore)]
    fn custom_page_sz() {
        let slab = Slab::new_with_config::<test_util::TinyConfig>();

        for value in 0..4096 {
            println!("{}", value);
            let key = slab.insert(value).expect("insert");
            assert_eq!(slab.get(key).expect("get"), value);
        }
    }
}