// /rust/registry/src/index.crates.io-6f17d22bba15001f/bitvec-1.0.1/src/store.rs

#![doc = include_str!("../doc/store.md")]

use core::{
	cell::Cell,
	fmt::Debug,
};

use funty::Integral;

use crate::{
	access::*,
	index::BitIdx,
	mem::{
		self,
		BitRegister,
	},
	order::BitOrder,
};

#[doc = include_str!("../doc/store/BitStore.md")]
pub trait BitStore: 'static + Debug {
	/// The element type used in the memory region underlying a `BitSlice`. It
	/// is *always* one of the unsigned integer fundamentals.
	type Mem: BitRegister + BitStore<Mem = Self::Mem>;
	/// A type that selects the appropriate load/store instructions when
	/// accessing the memory bus. It determines what instructions are used
	/// when moving a `Self::Mem` value between the processor and the memory
	/// system.
	///
	/// This must be *at least* able to manage aliasing.
	type Access: BitAccess<Item = Self::Mem> + BitStore<Mem = Self::Mem>;
	/// A sibling `BitStore` implementor that is known to be alias-safe. It is
	/// used when a `BitSlice` introduces multiple handles that view the same
	/// memory location, and at least one of them has write capabilities to
	/// it. It must have the same underlying memory type, and can only change
	/// access patterns or public-facing usage.
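	///
	/// ## Example (illustrative)
	///
	/// A minimal sketch of when this transition occurs, using the public
	/// API: splitting one element’s bits across two write-capable handles
	/// retypes that element from `u8` to its alias, `BitSafeU8`.
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut data = 0u8;
	/// let bits = data.view_bits_mut::<Msb0>();
	/// // Both halves can reach the same `u8`, so each half now views the
	/// // element as `<u8 as BitStore>::Alias` (`BitSafeU8`).
	/// let (left, right) = bits.split_at_mut(4);
	/// left.set(0, true);
	/// right.set(3, true);
	/// assert_eq!(data, 0b1000_0001);
	/// ```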
	type Alias: BitStore<Mem = Self::Mem>;
	/// The inverse of `::Alias`. It is used when a `BitSlice` removes the
	/// conditions that required a `T -> T::Alias` transition.
	type Unalias: BitStore<Mem = Self::Mem>;

	/// The zero constant.
	const ZERO: Self;

	/// Wraps a raw memory value as a `BitStore` type.
	fn new(value: Self::Mem) -> Self;

	/// Loads a value out of the memory system according to the `::Access`
	/// rules. This may be called when the value is aliased by a write-capable
	/// reference.
	fn load_value(&self) -> Self::Mem;

	/// Stores a value into the memory system. This is only called when there
	/// are no other handles to the value, and it may bypass `::Access`
	/// constraints.
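	///
	/// ## Example (illustrative)
	///
	/// A minimal round-trip sketch on an unaliased element, where both this
	/// method and `load_value` degrade to ordinary reads and writes:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut word = 0u16;
	/// // No other handles exist, so the store may bypass `::Access`.
	/// word.store_value(0xABCD);
	/// assert_eq!(word.load_value(), 0xABCD);
	/// ```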
	fn store_value(&mut self, value: Self::Mem);

	/// Reads a single bit out of the memory system according to the
	/// `::Access` rules. This is lifted from [`BitAccess`] so that it can be
	/// used elsewhere without additional casts.
	///
	/// ## Type Parameters
	///
	/// - `O`: The ordering of bits within `Self::Mem` governing the lookup.
	///
	/// ## Parameters
	///
	/// - `index`: The semantic index of a bit in `*self`.
	///
	/// ## Returns
	///
	/// The value of the bit in `*self` at `BitOrder::at(index)`.
	///
	/// [`BitAccess`]: crate::access::BitAccess
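	///
	/// ## Example (illustrative)
	///
	/// `BitIdx` values are normally produced inside the crate, so this
	/// sketch exercises the same lookup through the public API instead;
	/// under `Msb0` ordering, index 0 selects the most significant bit.
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let byte = 0b1000_0001u8;
	/// let bits = byte.view_bits::<Msb0>();
	/// assert!(bits[0]); // the high-order bit
	/// assert!(!bits[1]);
	/// assert!(bits[7]); // the low-order bit
	/// ```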
	#[inline]
	fn get_bit<O>(&self, index: BitIdx<Self::Mem>) -> bool
	where O: BitOrder {
		self.load_value() & index.select::<O>().into_inner()
			!= <Self::Mem as Integral>::ZERO
	}

	/// All implementors are required to have their alignment match their
	/// size.
	///
	/// Use [`mem::aligned_to_size::<Self>()`][0] to prove this.
	///
	/// [0]: crate::mem::aligned_to_size
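	///
	/// This works because the `bool` predicate casts to an array length of
	/// one (pass) or zero (fail), and `[(); 0]` cannot inhabit the `[(); 1]`
	/// type. An illustrative reduction of the pattern:
	///
	/// ```rust,compile_fail
	/// // A false predicate yields `[(); 0]`, which fails to type-check.
	/// const CHECK: [(); 1] = [(); false as usize];
	/// ```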
	const ALIGNED_TO_SIZE: [(); 1];

	/// All implementors are required to have `Self` and `Self::Alias` be
	/// equal in representation. This is true by fiat for all types except
	/// the unsigned integers.
	///
	/// Use [`mem::layout_eq::<Self, Self::Alias>()`][0] to prove this.
	///
	/// [0]: crate::mem::layout_eq
	const ALIAS_WIDTH: [(); 1];
}

/// Generates `BitStore` implementations for ordinary integers and `Cell`s.
macro_rules! store {
	($($base:ty => $safe:ty);+ $(;)?) => { $(
		impl BitStore for $base {
			type Mem = Self;
			/// The unsigned integers will only be `BitStore` type parameters
			/// for handles to unaliased memory, following the normal Rust
			/// reference rules.
			type Access = Cell<Self>;
			type Alias = $safe;
			type Unalias = Self;

			const ZERO: Self = 0;

			#[inline]
			fn new(value: Self::Mem) -> Self { value }

			#[inline]
			fn load_value(&self) -> Self::Mem {
				*self
			}

			#[inline]
			fn store_value(&mut self, value: Self::Mem) {
				*self = value;
			}

			const ALIGNED_TO_SIZE: [(); 1]
				= [(); mem::aligned_to_size::<Self>() as usize];

			const ALIAS_WIDTH: [(); 1]
				= [(); mem::layout_eq::<Self, Self::Alias>() as usize];
		}

		impl BitStore for $safe {
			type Mem = $base;
			type Access = <Self as BitSafe>::Rad;
			type Alias = Self;
			type Unalias = $base;

			const ZERO: Self = <Self as BitSafe>::ZERO;

			#[inline]
			fn new(value: Self::Mem) -> Self { <Self>::new(value) }

			#[inline]
			fn load_value(&self) -> Self::Mem {
				self.load()
			}

			#[inline]
			fn store_value(&mut self, value: Self::Mem) {
				*self = Self::new(value);
			}

			const ALIGNED_TO_SIZE: [(); 1]
				= [(); mem::aligned_to_size::<Self>() as usize];

			const ALIAS_WIDTH: [(); 1] = [()];
		}

		impl BitStore for Cell<$base> {
			type Mem = $base;
			type Access = Self;
			type Alias = Self;
			type Unalias = Self;

			const ZERO: Self = Self::new(0);

			#[inline]
			fn new(value: Self::Mem) -> Self { <Self>::new(value) }

			#[inline]
			fn load_value(&self) -> Self::Mem {
				self.get()
			}

			#[inline]
			fn store_value(&mut self, value: Self::Mem) {
				*self = Self::new(value);
			}

			const ALIGNED_TO_SIZE: [(); 1]
				= [(); mem::aligned_to_size::<Self>() as usize];

			const ALIAS_WIDTH: [(); 1] = [()];
		}
	)+ };
}

store! {
	u8 => BitSafeU8;
	u16 => BitSafeU16;
	u32 => BitSafeU32;
}

#[cfg(target_pointer_width = "64")]
store!(u64 => BitSafeU64);

store!(usize => BitSafeUsize);
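
// An illustrative compile-time sketch (these helper fns are not part of the
// original crate): they only type-check if the macro above wired the
// documented `Alias`/`Unalias` round-trip, `u8 -> BitSafeU8 -> u8`.
fn _alias_of_u8(value: <u8 as BitStore>::Alias) -> BitSafeU8 {
	value
}

fn _unalias_of_safe_u8(value: <BitSafeU8 as BitStore>::Unalias) -> u8 {
	value
}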

/// Generates `BitStore` implementations for atomic types.
macro_rules! atomic {
	($($size:tt, $base:ty => $atom:ident);+ $(;)?) => { $(
		radium::if_atomic!(if atomic($size) {
			use core::sync::atomic::$atom;

			impl BitStore for $atom {
				type Mem = $base;
				type Access = Self;
				type Alias = Self;
				type Unalias = Self;

				const ZERO: Self = <Self>::new(0);

				#[inline]
				fn new(value: Self::Mem) -> Self { <Self>::new(value) }

				#[inline]
				fn load_value(&self) -> Self::Mem {
					self.load(core::sync::atomic::Ordering::Relaxed)
				}

				#[inline]
				fn store_value(&mut self, value: Self::Mem) {
					*self = Self::new(value);
				}

				const ALIGNED_TO_SIZE: [(); 1]
					= [(); mem::aligned_to_size::<Self>() as usize];

				const ALIAS_WIDTH: [(); 1] = [()];
			}
		});
	)+ };
}

atomic! {
	8, u8 => AtomicU8;
	16, u16 => AtomicU16;
	32, u32 => AtomicU32;
}

#[cfg(target_pointer_width = "64")]
atomic!(64, u64 => AtomicU64);

atomic!(size, usize => AtomicUsize);
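
// An illustrative sketch (this helper fn is not part of the original crate):
// an atomic element is its own alias, so aliasing a `BitSlice` of atomics
// never changes the element type.
radium::if_atomic!(if atomic(size) {
	fn _atomic_self_alias(value: <AtomicUsize as BitStore>::Alias) -> AtomicUsize {
		value
	}
});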

#[cfg(test)]
mod tests {
	use static_assertions::*;

	use super::*;
	use crate::prelude::*;

	#[test]
	fn load_store() {
		let mut word = 0usize;

		word.store_value(39);
		assert_eq!(word.load_value(), 39);

		let mut safe = BitSafeUsize::new(word);
		safe.store_value(57);
		assert_eq!(safe.load_value(), 57);

		let mut cell = Cell::new(0usize);
		cell.store_value(39);
		assert_eq!(cell.load_value(), 39);

		radium::if_atomic!(if atomic(size) {
			let mut atom = AtomicUsize::new(0);
			atom.store_value(57);
			assert_eq!(atom.load_value(), 57);
		});
	}

	/// Unaliased `BitSlice`s are universally threadsafe, because they satisfy
	/// Rust’s unsynchronized mutation rules.
	#[test]
	fn unaliased_send_sync() {
		assert_impl_all!(BitSlice<u8, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<u16, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<u32, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<usize, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_impl_all!(BitSlice<u64, LocalBits>: Send, Sync);
	}

	#[test]
	fn cell_unsend_unsync() {
		assert_not_impl_any!(BitSlice<Cell<u8>, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<Cell<u16>, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<Cell<u32>, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<Cell<usize>, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_not_impl_any!(BitSlice<Cell<u64>, LocalBits>: Send, Sync);
	}

	/// In non-atomic builds, aliased `BitSlice`s become universally
	/// thread-unsafe. An `&mut BitSlice` is an `&Cell`, and `&Cell` cannot be
	/// sent across threads.
	///
	/// This test cannot be meaningfully expressed in atomic builds, because
	/// the atomicity of a `BitSafeUN` type is target-specific, and expressed
	/// in `radium` rather than in `bitvec`.
	#[test]
	#[cfg(not(feature = "atomic"))]
	fn aliased_non_atomic_unsend_unsync() {
		assert_not_impl_any!(BitSlice<BitSafeU8, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<BitSafeU16, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<BitSafeU32, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<BitSafeUsize, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_not_impl_any!(BitSlice<BitSafeU64, LocalBits>: Send, Sync);
	}

	#[test]
	#[cfg(feature = "atomic")]
	fn aliased_atomic_send_sync() {
		assert_impl_all!(BitSlice<AtomicU8, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<AtomicU16, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<AtomicU32, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<AtomicUsize, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_impl_all!(BitSlice<AtomicU64, LocalBits>: Send, Sync);
	}
}