/rust/registry/src/index.crates.io-1949cf8c6b5b557f/bitvec-1.0.1/src/ptr/single.rs
Line | Count | Source |
1 | | #![doc = include_str!("../../doc/ptr/single.md")] |
2 | | |
3 | | use core::{ |
4 | | any, |
5 | | cmp, |
6 | | convert::TryFrom, |
7 | | fmt::{ |
8 | | self, |
9 | | Debug, |
10 | | Display, |
11 | | Formatter, |
12 | | Pointer, |
13 | | }, |
14 | | hash::{ |
15 | | Hash, |
16 | | Hasher, |
17 | | }, |
18 | | marker::PhantomData, |
19 | | ptr, |
20 | | }; |
21 | | |
22 | | use tap::{ |
23 | | Pipe, |
24 | | TryConv, |
25 | | }; |
26 | | use wyz::{ |
27 | | comu::{ |
28 | | Address, |
29 | | Const, |
30 | | Frozen, |
31 | | Mut, |
32 | | Mutability, |
33 | | NullPtrError, |
34 | | }, |
35 | | fmt::FmtForward, |
36 | | }; |
37 | | |
38 | | use super::{ |
39 | | check_alignment, |
40 | | AddressExt, |
41 | | BitPtrRange, |
42 | | BitRef, |
43 | | BitSpan, |
44 | | BitSpanError, |
45 | | MisalignError, |
46 | | }; |
47 | | use crate::{ |
48 | | access::BitAccess, |
49 | | devel as dvl, |
50 | | index::BitIdx, |
51 | | mem, |
52 | | order::{ |
53 | | BitOrder, |
54 | | Lsb0, |
55 | | }, |
56 | | store::BitStore, |
57 | | }; |
58 | | |
59 | | #[repr(C, packed)] |
60 | | #[doc = include_str!("../../doc/ptr/BitPtr.md")] |
61 | | pub struct BitPtr<M = Const, T = usize, O = Lsb0> |
62 | | where |
63 | | M: Mutability, |
64 | | T: BitStore, |
65 | | O: BitOrder, |
66 | | { |
67 | | /// Memory addresses must be well-aligned and non-null. |
68 | | /// |
69 | | /// This is not actually a requirement of `BitPtr`, but it is a requirement |
70 | | /// of `BitSpan`, and it is extended across the entire crate for |
71 | | /// consistency. |
72 | | ptr: Address<M, T>, |
73 | | /// The index of the referent bit within `*addr`. |
74 | | bit: BitIdx<T::Mem>, |
75 | | /// The ordering used to select the bit at `head` in `*addr`. |
76 | | _or: PhantomData<O>, |
77 | | } |
78 | | |
79 | | impl<M, T, O> BitPtr<M, T, O> |
80 | | where |
81 | | M: Mutability, |
82 | | T: BitStore, |
83 | | O: BitOrder, |
84 | | { |
85 | | /// The canonical dangling pointer. This selects the starting bit of the |
86 | | /// canonical dangling pointer for `T`. |
87 | | pub const DANGLING: Self = Self { |
88 | | ptr: Address::DANGLING, |
89 | | bit: BitIdx::MIN, |
90 | | _or: PhantomData, |
91 | | }; |
92 | | |
93 | | /// Loads the address field, sidestepping any alignment problems. |
94 | | /// |
95 | | /// This is the only safe way to access `(&self).ptr`. Do not perform field |
96 | | /// access on `.ptr` through a reference except through this method. |
97 | | #[inline] |
98 | 873k | fn get_addr(&self) -> Address<M, T> { |
99 | 873k | unsafe { ptr::addr_of!(self.ptr).read_unaligned() } |
100 | 873k | }
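The `#[repr(C, packed)]` attribute on `BitPtr` is why `get_addr` goes through `addr_of!` + `read_unaligned`: taking an ordinary reference to a possibly misaligned field is undefined behavior. A minimal standalone sketch of the same pattern (the `Packed` struct is illustrative, not part of `bitvec`):

```rust
use core::ptr;

#[repr(C, packed)]
struct Packed {
    a: u8,
    b: u32, // no padding before `b`, so its address may be misaligned
}

fn read_b(p: &Packed) -> u32 {
    // `addr_of!` takes the field's address without materializing a
    // reference, and `read_unaligned` tolerates 1-byte alignment.
    unsafe { ptr::addr_of!(p.b).read_unaligned() }
}

fn main() {
    let p = Packed { a: 1, b: 0xDEAD_BEEF };
    assert_eq!(read_b(&p), 0xDEAD_BEEF);
}
```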
101 | | |
102 | | /// Tries to construct a `BitPtr` from a memory location and a bit index. |
103 | | /// |
104 | | /// ## Parameters |
105 | | /// |
106 | | /// - `ptr`: The address of a memory element. `Address` wraps raw pointers |
107 | | /// or references, and enforces that they are not null. `BitPtr` |
108 | | /// additionally requires that the address be well-aligned to its type; |
109 | | /// misaligned addresses cause this to return an error. |
110 | | /// - `bit`: The index of the selected bit within `*ptr`. |
111 | | /// |
112 | | /// ## Returns |
113 | | /// |
114 | | /// This returns an error if `ptr` is not aligned to `T`; otherwise, it |
115 | | /// returns a new bit-pointer structure to the given element and bit. |
116 | | /// |
117 | | /// You should typically prefer to use constructors that take directly from |
118 | | /// a memory reference or pointer, such as the `TryFrom<*T>` |
119 | | /// implementations, the `From<&/mut T>` implementations, or the |
120 | | /// [`::from_ref()`], [`::from_mut()`], [`::from_slice()`], or |
121 | | /// [`::from_slice_mut()`] functions. |
122 | | /// |
123 | | /// [`::from_mut()`]: Self::from_mut |
124 | | /// [`::from_ref()`]: Self::from_ref |
125 | | /// [`::from_slice()`]: Self::from_slice |
126 | | /// [`::from_slice_mut()`]: Self::from_slice_mut |
127 | | #[inline] |
128 | 0 | pub fn new( |
129 | 0 | ptr: Address<M, T>, |
130 | 0 | bit: BitIdx<T::Mem>, |
131 | 0 | ) -> Result<Self, MisalignError<T>> { |
132 | | Ok(Self { |
133 | 0 | ptr: check_alignment(ptr)?, |
134 | 0 | bit, |
135 | 0 | ..Self::DANGLING |
136 | | }) |
137 | 0 | } |
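A usage sketch of the checked constructor, built only from the public API listed here (`from_ref`, `raw_parts`, `new`, `read`, `add`); the bit values are arbitrary:

```rust
use bitvec::prelude::*;

let data = 2u8;
// `from_ref` is the ergonomic entry point; `new` accepts the raw
// (address, bit-index) pair and re-checks the alignment.
let (addr, head) = BitPtr::<_, _, Lsb0>::from_ref(&data).raw_parts();
let ptr = BitPtr::<_, _, Lsb0>::new(addr, head).unwrap();
unsafe {
    assert!(!ptr.read());       // bit 0 of 2u8 is 0 under Lsb0
    assert!(ptr.add(1).read()); // bit 1 is 1
}
```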
138 | | |
139 | | /// Constructs a `BitPtr` from an address and head index, without checking |
140 | | /// the address for validity. |
141 | | /// |
142 | | /// ## Parameters |
143 | | /// |
144 | | /// - `ptr`: The memory address to use in the bit-pointer. See the Safety |
145 | | ///   section. |
146 | | /// - `bit`: The index of the bit in `*ptr` that this bit-pointer selects. |
147 | | /// |
148 | | /// ## Returns |
149 | | /// |
150 | | /// A new bit-pointer composed of the parameters. No validity checking is |
151 | | /// performed. |
152 | | /// |
153 | | /// ## Safety |
154 | | /// |
155 | | /// The `Address` type imposes a non-null requirement. `BitPtr` additionally |
156 | | /// requires that `ptr` is well-aligned for `T`, and presumes that the |
157 | | /// caller has ensured this with [`bv_ptr::check_alignment`][0]. If this is |
158 | | /// not the case, then the program is incorrect, and subsequent behavior is |
159 | | /// not specified. |
160 | | /// |
161 | | /// [0]: crate::ptr::check_alignment |
162 | | #[inline] |
163 | 1.19M | pub unsafe fn new_unchecked( |
164 | 1.19M | ptr: Address<M, T>, |
165 | 1.19M | bit: BitIdx<T::Mem>, |
166 | 1.19M | ) -> Self { |
167 | 1.19M | if cfg!(debug_assertions) { |
168 | 0 | Self::new(ptr, bit).unwrap() |
169 | | } |
170 | | else { |
171 | 1.19M | Self { |
172 | 1.19M | ptr, |
173 | 1.19M | bit, |
174 | 1.19M | ..Self::DANGLING |
175 | 1.19M | } |
176 | | } |
177 | 1.19M | }
178 | | |
179 | | /// Gets the address of the base storage element. |
180 | | #[inline] |
181 | 0 | pub fn address(self) -> Address<M, T> { |
182 | 0 | self.get_addr() |
183 | 0 | } |
184 | | |
185 | | /// Gets the `BitIdx` that selects the bit within the memory element. |
186 | | #[inline] |
187 | 0 | pub fn bit(self) -> BitIdx<T::Mem> { |
188 | 0 | self.bit |
189 | 0 | } |
190 | | |
191 | | /// Decomposes a bit-pointer into its element address and bit index. |
192 | | /// |
193 | | /// ## Parameters |
194 | | /// |
195 | | /// - `self` |
196 | | /// |
197 | | /// ## Returns |
198 | | /// |
199 | | /// - `.0`: The memory address in which the referent bit is located. |
200 | | /// - `.1`: The index of the referent bit in `*.0` according to the `O` type |
201 | | /// parameter. |
202 | | #[inline] |
203 | 0 | pub fn raw_parts(self) -> (Address<M, T>, BitIdx<T::Mem>) { |
204 | 0 | (self.address(), self.bit()) |
205 | 0 | } |
206 | | |
207 | | /// Converts a bit-pointer into a span descriptor by attaching a length |
208 | | /// counter (in bits). |
209 | | /// |
210 | | /// ## Parameters |
211 | | /// |
212 | | /// - `self`: The base address of the produced span. |
213 | | /// - `bits`: The length, in bits, of the span. |
214 | | /// |
215 | | /// ## Returns |
216 | | /// |
217 | | /// A span descriptor beginning at `self` and ending (exclusive) at `self + |
218 | | /// bits`. This fails if it is unable to encode the requested span into a |
219 | | /// descriptor. |
220 | 0 | pub(crate) fn span( |
221 | 0 | self, |
222 | 0 | bits: usize, |
223 | 0 | ) -> Result<BitSpan<M, T, O>, BitSpanError<T>> { |
224 | 0 | BitSpan::new(self.ptr, self.bit, bits) |
225 | 0 | } |
226 | | |
227 | | /// Converts a bit-pointer into a span descriptor, without performing |
228 | | /// encoding validity checks. |
229 | | /// |
230 | | /// ## Parameters |
231 | | /// |
232 | | /// - `self`: The base address of the produced span. |
233 | | /// - `bits`: The length, in bits, of the span. |
234 | | /// |
235 | | /// ## Returns |
236 | | /// |
237 | | /// An encoded span descriptor of `self` and `bits`. Note that no validity |
238 | | /// checks are performed! |
239 | | /// |
240 | | /// ## Safety |
241 | | /// |
242 | | /// The caller must ensure that the rules of `BitSpan::new` are not |
243 | | /// violated. Typically this method should only be used on parameters that |
244 | | /// have already passed through `BitSpan::new` and are known to be good. |
245 | 873k | pub(crate) unsafe fn span_unchecked(self, bits: usize) -> BitSpan<M, T, O> { |
246 | 873k | BitSpan::new_unchecked(self.get_addr(), self.bit, bits) |
247 | 873k | }
248 | | |
249 | | /// Produces a bit-pointer range beginning at `self` (inclusive) and ending |
250 | | /// at `self + count` (exclusive). |
251 | | /// |
252 | | /// ## Safety |
253 | | /// |
254 | | /// `self + count` must be within the same provenance region as `self`. The |
255 | | /// first bit past the end of an allocation is included in provenance |
256 | | /// regions, though it is not dereferenceable and will not be dereferenced. |
257 | | /// |
258 | | /// It is unsound to *even construct* a pointer that departs the provenance |
259 | | /// region, even if that pointer is never dereferenced! |
260 | 0 | pub(crate) unsafe fn range(self, count: usize) -> BitPtrRange<M, T, O> { |
261 | 0 | (self .. self.add(count)).into() |
262 | 0 | } |
263 | | |
264 | | /// Removes write permissions from a bit-pointer. |
265 | | #[inline] |
266 | 34.7k | pub fn to_const(self) -> BitPtr<Const, T, O> { |
267 | | let Self { |
268 | 34.7k | ptr: addr, |
269 | 34.7k | bit: head, |
270 | | .. |
271 | 34.7k | } = self; |
272 | 34.7k | BitPtr { |
273 | 34.7k | ptr: addr.immut(), |
274 | 34.7k | bit: head, |
275 | 34.7k | ..BitPtr::DANGLING |
276 | 34.7k | } |
277 | 34.7k | }
278 | | |
279 | | /// Adds write permissions to a bit-pointer. |
280 | | /// |
281 | | /// ## Safety |
282 | | /// |
283 | | /// This pointer must have been derived from a `*mut` pointer. |
284 | | #[inline] |
285 | 0 | pub unsafe fn to_mut(self) -> BitPtr<Mut, T, O> { |
286 | | let Self { |
287 | 0 | ptr: addr, |
288 | 0 | bit: head, |
289 | | .. |
290 | 0 | } = self; |
291 | 0 | BitPtr { |
292 | 0 | ptr: addr.assert_mut(), |
293 | 0 | bit: head, |
294 | 0 | ..BitPtr::DANGLING |
295 | 0 | } |
296 | 0 | } |
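A short sketch of the permission round-trip these two methods describe; restoring `Mut` is sound only because the pointer originated in `from_mut`:

```rust
use bitvec::prelude::*;

let mut data = 0u8;
let rw = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
let ro = rw.to_const();           // dropping `Mut` is always safe
let rw2 = unsafe { ro.to_mut() }; // sound: `ro` was derived from `from_mut`
unsafe { rw2.write(true) };
assert_eq!(data, 1);
```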
297 | | |
298 | | /// Freezes a bit-pointer, forbidding direct mutation. |
299 | | /// |
300 | | /// This is used as a necessary prerequisite to all mutation of memory. |
301 | | /// `BitPtr` uses an implementation scoped to `Frozen<_>` to perform |
302 | | /// alias-aware writes; see below. |
303 | 36.5k | pub(crate) fn freeze(self) -> BitPtr<Frozen<M>, T, O> { |
304 | | let Self { |
305 | 36.5k | ptr: addr, |
306 | 36.5k | bit: head, |
307 | | .. |
308 | 36.5k | } = self; |
309 | 36.5k | BitPtr { |
310 | 36.5k | ptr: addr.freeze(), |
311 | 36.5k | bit: head, |
312 | 36.5k | ..BitPtr::DANGLING |
313 | 36.5k | } |
314 | 36.5k | }
315 | | } |
316 | | |
317 | | impl<T, O> BitPtr<Const, T, O> |
318 | | where |
319 | | T: BitStore, |
320 | | O: BitOrder, |
321 | | { |
322 | | /// Constructs a `BitPtr` to the zeroth bit in a single element. |
323 | | #[inline] |
324 | 0 | pub fn from_ref(elem: &T) -> Self { |
325 | 0 | unsafe { Self::new_unchecked(elem.into(), BitIdx::MIN) } |
326 | 0 | } |
327 | | |
328 | | /// Constructs a `BitPtr` to the zeroth bit in the zeroth element of a |
329 | | /// slice. |
330 | | /// |
331 | | /// This method is distinct from `Self::from_ref(&elem[0])`, because it |
332 | | /// ensures that the returned bit-pointer has provenance over the entire |
333 | | /// slice. Indexing within a slice narrows the provenance range, and makes |
334 | | /// departure from the subslice, *even within the original slice*, illegal. |
335 | | #[inline] |
336 | 118k | pub fn from_slice(slice: &[T]) -> Self { |
337 | | unsafe { |
338 | 118k | Self::new_unchecked(slice.as_ptr().into_address(), BitIdx::MIN) |
339 | | } |
340 | 118k | }
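A sketch of the provenance distinction drawn above, assuming the `Lsb0` ordering used in the crate's other examples:

```rust
use bitvec::prelude::*;

let data = [0u8, !0u8];
let ptr = BitPtr::<_, _, Lsb0>::from_slice(&data);
// Provenance covers the whole slice, so stepping into `data[1]` stays
// in bounds; a pointer built from `from_ref(&data[0])` would be
// confined to the first element, and this `.add(8)` would be illegal.
assert!(unsafe { ptr.add(8).read() });
```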
341 | | |
342 | | /// Gets a raw pointer to the memory element containing the selected bit. |
343 | | #[inline] |
344 | | #[cfg(not(tarpaulin_include))] |
345 | 0 | pub fn pointer(&self) -> *const T { |
346 | 0 | self.get_addr().to_const() |
347 | 0 | } |
348 | | } |
349 | | |
350 | | impl<T, O> BitPtr<Mut, T, O> |
351 | | where |
352 | | T: BitStore, |
353 | | O: BitOrder, |
354 | | { |
355 | | /// Constructs a mutable `BitPtr` to the zeroth bit in a single element. |
356 | | #[inline] |
357 | 0 | pub fn from_mut(elem: &mut T) -> Self { |
358 | 0 | unsafe { Self::new_unchecked(elem.into(), BitIdx::MIN) } |
359 | 0 | } |
360 | | |
361 | | /// Constructs a `BitPtr` to the zeroth bit in the zeroth element of a |
362 | | /// mutable slice. |
363 | | /// |
364 | | /// This method is distinct from `Self::from_mut(&mut elem[0])`, because it |
365 | | /// ensures that the returned bit-pointer has provenance over the entire |
366 | | /// slice. Indexing within a slice narrows the provenance range, and makes |
367 | | /// departure from the subslice, *even within the original slice*, illegal. |
368 | | #[inline] |
369 | 0 | pub fn from_mut_slice(slice: &mut [T]) -> Self { |
370 | | unsafe { |
371 | 0 | Self::new_unchecked(slice.as_mut_ptr().into_address(), BitIdx::MIN) |
372 | | } |
373 | 0 | } |
374 | | |
375 | | /// Constructs a mutable `BitPtr` to the zeroth bit in the zeroth element of |
376 | | /// a slice. |
377 | | /// |
378 | | /// This method is distinct from `Self::from_mut(&mut elem[0])`, because it |
379 | | /// ensures that the returned bit-pointer has provenance over the entire |
380 | | /// slice. Indexing within a slice narrows the provenance range, and makes |
381 | | /// departure from the subslice, *even within the original slice*, illegal. |
382 | | #[inline] |
383 | 140k | pub fn from_slice_mut(slice: &mut [T]) -> Self { |
384 | | unsafe { |
385 | 140k | Self::new_unchecked(slice.as_mut_ptr().into_address(), BitIdx::MIN) |
386 | | } |
387 | 140k | }
388 | | |
389 | | /// Gets a raw pointer to the memory location containing the selected bit. |
390 | | #[inline] |
391 | | #[cfg(not(tarpaulin_include))] |
392 | 0 | pub fn pointer(&self) -> *mut T { |
393 | 0 | self.get_addr().to_mut() |
394 | 0 | } |
395 | | } |
396 | | |
397 | | /// Port of the `*bool` inherent API. |
398 | | impl<M, T, O> BitPtr<M, T, O> |
399 | | where |
400 | | M: Mutability, |
401 | | T: BitStore, |
402 | | O: BitOrder, |
403 | | { |
404 | | /// Tests if a bit-pointer is the null value. |
405 | | /// |
406 | | /// This is always false, as a `BitPtr` is a `NonNull` internally. Use |
407 | | /// `Option<BitPtr>` to express the potential for a null pointer. |
408 | | /// |
409 | | /// ## Original |
410 | | /// |
411 | | /// [`pointer::is_null`](https://doc.rust-lang.org/std/primitive.pointer.html#method.is_null) |
412 | | #[inline] |
413 | | #[deprecated = "`BitPtr` is never null"] |
414 | 0 | pub fn is_null(self) -> bool { |
415 | 0 | false |
416 | 0 | } |
417 | | |
418 | | /// Casts to a `BitPtr` with a different storage parameter. |
419 | | /// |
420 | | /// This is not free! In order to maintain value integrity, it encodes its |
421 | | /// value as a `BitSpan` descriptor, casts that, then decodes the result |
422 | | /// into a `BitPtr` of the target type. If `T` and `U` have different |
423 | | /// `::Mem` associated types, then this may change the selected bit in |
424 | | /// memory. This is an unavoidable cost of the addressing and encoding |
425 | | /// schemes. |
426 | | /// |
427 | | /// ## Original |
428 | | /// |
429 | | /// [`pointer::cast`](https://doc.rust-lang.org/std/primitive.pointer.html#method.cast) |
430 | | #[inline] |
431 | 0 | pub fn cast<U>(self) -> BitPtr<M, U, O> |
432 | 0 | where U: BitStore { |
433 | 0 | let (addr, head, _) = |
434 | 0 | unsafe { self.span_unchecked(1) }.cast::<U>().raw_parts(); |
435 | 0 | unsafe { BitPtr::new_unchecked(addr, head) } |
436 | 0 | } |
437 | | |
438 | | /// Decomposes a bit-pointer into its address and head-index components. |
439 | | /// |
440 | | /// ## Original |
441 | | /// |
442 | | /// [`pointer::to_raw_parts`](https://doc.rust-lang.org/std/primitive.pointer.html#method.to_raw_parts) |
443 | | /// |
444 | | /// ## API Differences |
445 | | /// |
446 | | /// The original method is unstable as of 1.54.0; however, because `BitPtr` |
447 | | /// already has a similar API, the name is optimistically stabilized here. |
448 | | /// Prefer [`.raw_parts()`] until the original inherent stabilizes. |
449 | | /// |
450 | | /// [`.raw_parts()`]: Self::raw_parts |
451 | | #[inline] |
452 | | #[cfg(not(tarpaulin_include))] |
453 | 0 | pub fn to_raw_parts(self) -> (Address<M, T>, BitIdx<T::Mem>) { |
454 | 0 | self.raw_parts() |
455 | 0 | } |
456 | | |
457 | | /// Produces a proxy reference to the referent bit. |
458 | | /// |
459 | | /// Because `BitPtr` guarantees that it is non-null and well-aligned, this |
460 | | /// never returns `None`. However, this is still unsafe to call on any |
461 | | /// bit-pointers created from conjured values rather than known references. |
462 | | /// |
463 | | /// ## Original |
464 | | /// |
465 | | /// [`pointer::as_ref`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_ref) |
466 | | /// |
467 | | /// ## API Differences |
468 | | /// |
469 | | /// This produces a proxy type rather than a true reference. The proxy |
470 | | /// implements `Deref<Target = bool>`, and can be converted to `&bool` with |
471 | | /// a reborrow `&*`. |
472 | | /// |
473 | | /// ## Safety |
474 | | /// |
475 | | /// Since `BitPtr` does not permit null or misaligned pointers, this method |
476 | | /// will always dereference the pointer in order to create the proxy. As |
477 | | /// such, you must ensure the following conditions are met: |
478 | | /// |
479 | | /// - the pointer must be dereferenceable as defined in the standard library |
480 | | /// documentation |
481 | | /// - the pointer must point to an initialized instance of `T` |
482 | | /// - you must ensure that no other pointer will race to modify the referent |
483 | | /// location while this call is reading from memory to produce the proxy |
484 | | /// |
485 | | /// ## Examples |
486 | | /// |
487 | | /// ```rust |
488 | | /// use bitvec::prelude::*; |
489 | | /// |
490 | | /// let data = 1u8; |
491 | | /// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data); |
492 | | /// let val = unsafe { ptr.as_ref() }.unwrap(); |
493 | | /// assert!(*val); |
494 | | /// ``` |
495 | | #[inline] |
496 | 34.7k | pub unsafe fn as_ref<'a>(self) -> Option<BitRef<'a, Const, T, O>> { |
497 | 34.7k | Some(BitRef::from_bitptr(self.to_const())) |
498 | 34.7k | }
499 | | |
500 | | /// Creates a new bit-pointer at a specified offset from the original. |
501 | | /// |
502 | | /// `count` is in units of bits. |
503 | | /// |
504 | | /// ## Original |
505 | | /// |
506 | | /// [`pointer::offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.offset) |
507 | | /// |
508 | | /// ## Safety |
509 | | /// |
510 | | /// `BitPtr` is implemented with Rust raw pointers internally, and is |
511 | | /// subject to all of Rust’s rules about provenance and permission tracking. |
512 | | /// You must abide by the safety rules established in the original method, |
513 | | /// to which this internally delegates. |
514 | | /// |
515 | | /// Additionally, `bitvec` imposes its own rules: while Rust cannot observe |
516 | | /// provenance beyond an element or byte level, `bitvec` demands that |
517 | | /// `&mut BitSlice` have exclusive view over all bits it observes. You must |
518 | | /// not produce a bit-pointer that departs a `BitSlice` region and intrudes |
519 | | /// on any `&mut BitSlice`’s handle, and you must not produce a |
520 | | /// write-capable bit-pointer that intrudes on a `&BitSlice` handle that |
521 | | /// expects its contents to be immutable. |
522 | | /// |
523 | | /// Note that it is illegal to *construct* a bit-pointer that invalidates |
524 | | /// any of these rules. If you wish to defer safety-checking to the point of |
525 | | /// dereferencing, and allow the temporary construction *but not* |
526 | | /// *dereference* of illegal `BitPtr`s, use [`.wrapping_offset()`] instead. |
527 | | /// |
528 | | /// ## Examples |
529 | | /// |
530 | | /// ```rust |
531 | | /// use bitvec::prelude::*; |
532 | | /// |
533 | | /// let data = 5u8; |
534 | | /// let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data); |
535 | | /// unsafe { |
536 | | /// assert!(ptr.read()); |
537 | | /// assert!(!ptr.offset(1).read()); |
538 | | /// assert!(ptr.offset(2).read()); |
539 | | /// } |
540 | | /// ``` |
541 | | /// |
542 | | /// [`.wrapping_offset()`]: Self::wrapping_offset |
543 | | #[inline] |
544 | | #[must_use = "returns a new bit-pointer rather than modifying its argument"] |
545 | 469k | pub unsafe fn offset(self, count: isize) -> Self { |
546 | 469k | let (elts, head) = self.bit.offset(count); |
547 | 469k | Self::new_unchecked(self.ptr.offset(elts), head) |
548 | 469k | }
549 | | |
550 | | /// Creates a new bit-pointer at a specified offset from the original. |
551 | | /// |
552 | | /// `count` is in units of bits. |
553 | | /// |
554 | | /// ## Original |
555 | | /// |
556 | | /// [`pointer::wrapping_offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_offset) |
557 | | /// |
558 | | /// ## API Differences |
559 | | /// |
560 | | /// `bitvec` makes it explicitly illegal to wrap a pointer around the high |
561 | | /// end of the address space, because it is incapable of representing a null |
562 | | /// pointer. |
563 | | /// |
564 | | /// However, `<*T>::wrapping_offset` has additional properties as a result |
565 | | /// of its tolerance for wrapping the address space: it tolerates departing |
566 | | /// a provenance region, and is not unsafe to use to *create* a bit-pointer |
567 | | /// that is outside the bounds of its original provenance. |
568 | | /// |
569 | | /// ## Safety |
570 | | /// |
571 | | /// This function is safe to use because the bit-pointers it creates defer |
572 | | /// their provenance checks until the point of dereference. As such, you |
573 | | /// can safely use this to perform arbitrary pointer arithmetic that Rust |
574 | | /// considers illegal in ordinary arithmetic, as long as you do not |
575 | | /// dereference the bit-pointer until it has been brought in bounds of the |
576 | | /// originating provenance region. |
577 | | /// |
578 | | /// This means that, to the Rust rule engine, |
579 | | /// `let z = x.wrapping_add(y as usize).wrapping_sub(x as usize);` is not |
580 | | /// equivalent to `y`, but `z` is safe to construct, and |
581 | | /// `z.wrapping_add(x as usize).wrapping_sub(y as usize)` produces a |
582 | | /// bit-pointer that *is* equivalent to `x`. |
583 | | /// |
584 | | /// See the documentation of the original method for more details about |
585 | | /// provenance regions, and the distinctions that the optimizer makes about |
586 | | /// them. |
587 | | /// |
588 | | /// ## Examples |
589 | | /// |
590 | | /// ```rust |
591 | | /// use bitvec::prelude::*; |
592 | | /// |
593 | | /// let data = 0u32; |
594 | | /// let mut ptr = BitPtr::<_, _, Lsb0>::from_ref(&data); |
595 | | /// let end = ptr.wrapping_offset(32); |
596 | | /// while ptr < end { |
597 | | /// # #[cfg(feature = "std")] { |
598 | | /// println!("{}", unsafe { ptr.read() }); |
599 | | /// # } |
600 | | /// ptr = ptr.wrapping_offset(3); |
601 | | /// } |
602 | | /// ``` |
603 | | #[inline] |
604 | | #[must_use = "returns a new bit-pointer rather than modifying its argument"] |
605 | 0 | pub fn wrapping_offset(self, count: isize) -> Self { |
606 | 0 | let (elts, head) = self.bit.offset(count); |
607 | 0 | unsafe { Self::new_unchecked(self.ptr.wrapping_offset(elts), head) } |
608 | 0 | }
609 | | |
610 | | /// Calculates the distance (in bits) between two bit-pointers. |
611 | | /// |
612 | | /// This method is the inverse of [`.offset()`]. |
613 | | /// |
614 | | /// ## Original |
615 | | /// |
616 | | /// [`pointer::offset_from`](https://doc.rust-lang.org/std/primitive.pointer.html#method.offset_from) |
617 | | /// |
618 | | /// ## API Differences |
619 | | /// |
620 | | /// The base pointer may have a different `BitStore` type parameter, as long |
621 | | /// as they share an underlying memory type. This is necessary in order to |
622 | | /// accommodate aliasing markers introduced between when an origin pointer |
623 | | /// was taken and when `self` compared against it. |
624 | | /// |
625 | | /// ## Safety |
626 | | /// |
627 | | /// Both `self` and `origin` **must** be drawn from the same provenance |
628 | | /// region. This means that they must be created from the same Rust |
629 | | /// allocation, whether with `let` or the allocator API, and must be in the |
630 | | /// (inclusive) range `base ..= base + len`. The first bit past the end of |
631 | | /// a region can be addressed, just not dereferenced. |
632 | | /// |
633 | | /// See the original `<*T>::offset_from` for more details on region safety. |
634 | | /// |
635 | | /// ## Examples |
636 | | /// |
637 | | /// ```rust |
638 | | /// use bitvec::prelude::*; |
639 | | /// |
640 | | /// let data = 0u32; |
641 | | /// let base = BitPtr::<_, _, Lsb0>::from_ref(&data); |
642 | | /// let low = unsafe { base.add(10) }; |
643 | | /// let high = unsafe { low.add(15) }; |
644 | | /// unsafe { |
645 | | /// assert_eq!(high.offset_from(low), 15); |
646 | | /// assert_eq!(low.offset_from(high), -15); |
647 | | /// assert_eq!(low.offset(15), high); |
648 | | /// assert_eq!(high.offset(-15), low); |
649 | | /// } |
650 | | /// ``` |
651 | | /// |
652 | | /// While this method is safe to *construct* bit-pointers that depart a |
653 | | /// provenance region, it remains illegal to *dereference* those pointers! |
654 | | /// |
655 | | /// This usage is incorrect, and a program that contains it is not |
656 | | /// well-formed. |
657 | | /// |
658 | | /// ```rust,no_run |
659 | | /// use bitvec::prelude::*; |
660 | | /// |
661 | | /// let a = 0u8; |
662 | | /// let b = !0u8; |
663 | | /// |
664 | | /// let a_ptr = BitPtr::<_, _, Lsb0>::from_ref(&a); |
665 | | /// let b_ptr = BitPtr::<_, _, Lsb0>::from_ref(&b); |
666 | | /// let diff = (b_ptr.pointer() as isize) |
667 | | /// .wrapping_sub(a_ptr.pointer() as isize) |
668 | | /// // Remember: raw pointers are byte-stepped, |
669 | | /// // but bit-pointers are bit-stepped. |
670 | | /// .wrapping_mul(8); |
671 | | /// // This pointer to `b` has `a`’s provenance: |
672 | | /// let b_ptr_2 = a_ptr.wrapping_offset(diff); |
673 | | /// |
674 | | /// // They are *arithmetically* equal: |
675 | | /// assert_eq!(b_ptr, b_ptr_2); |
676 | | /// // But it is still undefined behavior to cross provenances! |
677 | | /// assert_eq!(0, unsafe { b_ptr_2.offset_from(b_ptr) }); |
678 | | /// ``` |
679 | | /// |
680 | | /// [`.offset()`]: Self::offset |
681 | | #[inline] |
682 | 0 | pub unsafe fn offset_from<U>(self, origin: BitPtr<M, U, O>) -> isize |
683 | 0 | where U: BitStore<Mem = T::Mem> { |
684 | 0 | self.get_addr() |
685 | 0 | .cast::<T::Mem>() |
686 | 0 | .offset_from(origin.get_addr().cast::<T::Mem>()) |
687 | 0 | .wrapping_mul(mem::bits_of::<T::Mem>() as isize) |
688 | 0 | .wrapping_add(self.bit.into_inner() as isize) |
689 | 0 | .wrapping_sub(origin.bit.into_inner() as isize) |
690 | 0 | } |
691 | | |
692 | | /// Adjusts a bit-pointer upwards in memory. This is equivalent to |
693 | | /// `.offset(count as isize)`. |
694 | | /// |
695 | | /// `count` is in units of bits. |
696 | | /// |
697 | | /// ## Original |
698 | | /// |
699 | | /// [`pointer::add`](https://doc.rust-lang.org/std/primitive.pointer.html#method.add) |
700 | | /// |
701 | | /// ## Safety |
702 | | /// |
703 | | /// See [`.offset()`](Self::offset). |
704 | | #[inline] |
705 | | #[must_use = "returns a new bit-pointer rather than modifying its argument"] |
706 | 469k | pub unsafe fn add(self, count: usize) -> Self { |
707 | 469k | self.offset(count as isize) |
708 | 469k | }
709 | | |
710 | | /// Adjusts a bit-pointer downwards in memory. This is equivalent to |
711 | | /// `.offset((count as isize).wrapping_neg())`. |
712 | | /// |
713 | | /// `count` is in units of bits. |
714 | | /// |
715 | | /// ## Original |
716 | | /// |
717 | | /// [`pointer::sub`](https://doc.rust-lang.org/std/primitive.pointer.html#method.sub) |
718 | | /// |
719 | | /// ## Safety |
720 | | /// |
721 | | /// See [`.offset()`](Self::offset). |
722 | | #[inline] |
723 | | #[must_use = "returns a new bit-pointer rather than modifying its argument"] |
724 | 0 | pub unsafe fn sub(self, count: usize) -> Self { |
725 | 0 | self.offset((count as isize).wrapping_neg()) |
726 | 0 | } |
727 | | |
728 | | /// Adjusts a bit-pointer upwards in memory, using wrapping semantics. This |
729 | | /// is equivalent to `.wrapping_offset(count as isize)`. |
730 | | /// |
731 | | /// `count` is in units of bits. |
732 | | /// |
733 | | /// ## Original |
734 | | /// |
735 | | /// [`pointer::wrapping_add`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add) |
736 | | /// |
737 | | /// ## Safety |
738 | | /// |
739 | | /// See [`.wrapping_offset()`](Self::wrapping_offset). |
740 | | #[inline] |
741 | | #[must_use = "returns a new bit-pointer rather than modifying its argument"] |
742 | 0 | pub fn wrapping_add(self, count: usize) -> Self { |
743 | 0 | self.wrapping_offset(count as isize) |
744 | 0 | }
745 | | |
746 | | /// Adjusts a bit-pointer downwards in memory, using wrapping semantics. |
747 | | /// This is equivalent to |
748 | | /// `.wrapping_offset((count as isize).wrapping_neg())`. |
749 | | /// |
750 | | /// `count` is in units of bits. |
751 | | /// |
752 | | /// ## Original |
753 | | /// |
754 | | /// [`pointer::wrapping_sub`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_sub) |
755 | | /// |
756 | | /// ## Safety |
757 | | /// |
758 | | /// See [`.wrapping_offset()`](Self::wrapping_offset). |
759 | | #[inline] |
760 | | #[must_use = "returns a new bit-pointer rather than modifying its argument"] |
761 | 0 | pub fn wrapping_sub(self, count: usize) -> Self { |
762 | 0 | self.wrapping_offset((count as isize).wrapping_neg()) |
763 | 0 | } |
764 | | |
765 | | /// Reads the bit from `*self`. |
766 | | /// |
767 | | /// ## Original |
768 | | /// |
769 | | /// [`pointer::read`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read) |
770 | | /// |
771 | | /// ## Safety |
772 | | /// |
773 | | /// See [`ptr::read`](crate::ptr::read). |
774 | | #[inline] |
775 | 34.7k | pub unsafe fn read(self) -> bool { |
776 | 34.7k | (*self.ptr.to_const()).load_value().get_bit::<O>(self.bit) |
777 | 34.7k | }
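A minimal sketch of `read` against a known bit pattern:

```rust
use bitvec::prelude::*;

let data = 0b0000_0100u8;
let ptr = BitPtr::<_, _, Lsb0>::from_ref(&data);
unsafe {
    assert!(!ptr.read());       // bit 0 is clear
    assert!(ptr.add(2).read()); // bit 2 is set
}
```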
778 | | |
779 | | /// Reads the bit from `*self` using a volatile load. |
780 | | /// |
781 | | /// Prefer using a crate such as [`voladdress`][0] to manage volatile I/O |
782 | | /// and use `bitvec` only on the local objects it provides. Individual I/O |
783 | | /// operations for individual bits are likely not the behavior you want. |
784 | | /// |
785 | | /// ## Original |
786 | | /// |
787 | | /// [`pointer::read_volatile`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_volatile) |
788 | | /// |
789 | | /// ## Safety |
790 | | /// |
791 | | /// See [`ptr::read_volatile`](crate::ptr::read_volatile). |
792 | | /// |
793 | | /// [0]: https://docs.rs/voladdress/latest/voladdress |
794 | | #[inline] |
795 | 0 | pub unsafe fn read_volatile(self) -> bool { |
796 | 0 | self.ptr.to_const().read_volatile().get_bit::<O>(self.bit) |
797 | 0 | } |
798 | | |
799 | | /// Reads the bit from `*self` using an unaligned memory access. |
800 | | /// |
801 | | /// `BitPtr` forbids unaligned addresses. If you have such an address, you |
802 | | /// must perform your memory accesses on the raw element, and only use |
803 | | /// `bitvec` on a well-aligned stack temporary. This method should never be |
804 | | /// necessary. |
805 | | /// |
806 | | /// ## Original |
807 | | /// |
808 | | /// [`pointer::read_unaligned`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_unaligned) |
809 | | /// |
810 | | /// ## Safety |
811 | | /// |
812 | | /// See [`ptr::read_unaligned`](crate::ptr::read_unaligned). |
813 | | #[inline] |
814 | | #[deprecated = "`BitPtr` does not have unaligned addresses"] |
815 | 0 | pub unsafe fn read_unaligned(self) -> bool { |
816 | 0 | self.ptr.to_const().read_unaligned().get_bit::<O>(self.bit) |
817 | 0 | } |
818 | | |
819 | | /// Copies `count` bits from `self` to `dest`. The source and destination |
820 | | /// may overlap. |
821 | | /// |
822 | | /// Note that overlap is only defined when `O` and `O2` are the same type. |
823 | | /// If they differ, then `bitvec` does not define overlap, and assumes that |
824 | | /// they are wholly discrete in memory. |
825 | | /// |
826 | | /// ## Original |
827 | | /// |
828 | | /// [`pointer::copy_to`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to) |
829 | | /// |
830 | | /// ## Safety |
831 | | /// |
832 | | /// See [`ptr::copy`](crate::ptr::copy). |
833 | | #[inline] |
834 | | #[cfg(not(tarpaulin_include))] |
835 | 0 | pub unsafe fn copy_to<T2, O2>(self, dest: BitPtr<Mut, T2, O2>, count: usize) |
836 | 0 | where |
837 | 0 | T2: BitStore, |
838 | 0 | O2: BitOrder, |
839 | | { |
840 | 0 | super::copy(self.to_const(), dest, count); |
841 | 0 | } |
842 | | |
843 | | /// Copies `count` bits from `self` to `dest`. The source and destination |
844 | | /// may *not* overlap. |
845 | | /// |
846 | | /// ## Original |
847 | | /// |
848 | | /// [`pointer::copy_to_nonoverlapping`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to_nonoverlapping) |
849 | | /// |
850 | | /// ## Safety |
851 | | /// |
852 | | /// See [`ptr::copy_nonoverlapping`](crate::ptr::copy_nonoverlapping). |
853 | | #[inline] |
854 | | #[cfg(not(tarpaulin_include))] |
855 | 0 | pub unsafe fn copy_to_nonoverlapping<T2, O2>( |
856 | 0 | self, |
857 | 0 | dest: BitPtr<Mut, T2, O2>, |
858 | 0 | count: usize, |
859 | 0 | ) where |
860 | 0 | T2: BitStore, |
861 | 0 | O2: BitOrder, |
862 | | { |
863 | 0 | super::copy_nonoverlapping(self.to_const(), dest, count); |
864 | 0 | } |
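A sketch of `copy_to` between two single-byte regions; because these regions are disjoint, `copy_to_nonoverlapping` would behave identically here:

```rust
use bitvec::prelude::*;

let src = 0b1010u8;
let mut dst = 0u8;
let src_ptr = BitPtr::<_, _, Lsb0>::from_ref(&src);
let dst_ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut dst);
// Copy the low four bits of `src` into the low four bits of `dst`.
unsafe { src_ptr.copy_to(dst_ptr, 4) };
assert_eq!(dst, 0b1010);
```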
865 | | |
866 | | /// Computes the offset (in bits) that needs to be applied to the |
867 | | /// bit-pointer in order to make it aligned to the given *byte* alignment. |
868 | | /// |
869 | | /// “Alignment” here means that the bit-pointer selects the starting bit of |
870 | | /// a memory location whose address satisfies the requested alignment. |
871 | | /// |
872 | | /// `align` is measured in **bytes**. If you wish to align your bit-pointer |
873 | | /// to a specific fraction (½, ¼, or ⅛ of one byte), please file an issue |
874 | | /// and I will work on adding this functionality. |
875 | | /// |
876 | | /// ## Original |
877 | | /// |
878 | | /// [`pointer::align_offset`](https://doc.rust-lang.org/std/primitive.pointer.html#method.align_offset) |
879 | | /// |
880 | | /// ## Notes |
881 | | /// |
882 | | /// If the base-element address of the bit-pointer is already aligned to |
883 | | /// `align`, then this will return the bit-offset required to select the |
884 | | /// first bit of the successor element. |
885 | | /// |
886 | | /// If it is not possible to align the bit-pointer, then the implementation |
887 | | /// returns `usize::MAX`. |
888 | | /// |
889 | | /// The return value is measured in bits, not `T` elements or bytes. The |
890 | | /// only thing you can do with it is pass it into [`.add()`] or |
891 | | /// [`.wrapping_add()`]. |
892 | | /// |
893 | | /// Note from the standard library: It is permissible for the implementation |
894 | | /// to *always* return `usize::MAX`. Only your algorithm’s performance can |
895 | | /// depend on getting a usable offset here; it must be correct independently |
896 | | /// of this function providing a useful value. |
897 | | /// |
898 | | /// ## Safety |
899 | | /// |
900 | | /// There are no guarantees whatsoëver that offsetting the bit-pointer will |
901 | | /// not overflow or go beyond the allocation that the bit-pointer selects. |
902 | | /// It is up to the caller to ensure that the returned offset is correct in |
903 | | /// all terms other than alignment. |
904 | | /// |
905 | | /// ## Panics |
906 | | /// |
907 | | /// This method panics if `align` is not a power of two. |
908 | | /// |
909 | | /// ## Examples |
910 | | /// |
911 | | /// ```rust |
912 | | /// use bitvec::prelude::*; |
913 | | /// |
914 | | /// let data = [0u8; 3]; |
915 | | /// let ptr = BitPtr::<_, _, Lsb0>::from_slice(&data); |
916 | | /// let ptr = unsafe { ptr.add(2) }; |
917 | | /// let count = ptr.align_offset(2); |
918 | | /// assert!(count >= 6); |
919 | | /// ``` |
920 | | /// |
921 | | /// [`.add()`]: Self::add |
922 | | /// [`.wrapping_add()`]: Self::wrapping_add |
923 | | #[inline] |
924 | 0 | pub fn align_offset(self, align: usize) -> usize { |
925 | 0 | let width = mem::bits_of::<T::Mem>(); |
926 | 0 | match ( |
927 | 0 | self.ptr.to_const().align_offset(align), |
928 | 0 | self.bit.into_inner() as usize, |
929 | 0 | ) { |
930 | 0 | (0, 0) => 0, |
931 | 0 | (0, head) => align * mem::bits_of::<u8>() - head, |
932 | 0 | (usize::MAX, _) => usize::MAX, |
933 | 0 | (elts, head) => elts.wrapping_mul(width).wrapping_sub(head), |
934 | | } |
935 | 0 | } |
936 | | } |
937 | | |
938 | | /// Port of the `*mut bool` inherent API. |
939 | | impl<T, O> BitPtr<Mut, T, O> |
940 | | where |
941 | | T: BitStore, |
942 | | O: BitOrder, |
943 | | { |
944 | | /// Produces a proxy reference to the referent bit. |
945 | | /// |
946 | | /// Because `BitPtr` guarantees that it is non-null and well-aligned, this |
947 | | /// never returns `None`. However, this is still unsafe to call on any |
948 | | /// bit-pointers created from conjured values rather than known references. |
949 | | /// |
950 | | /// ## Original |
951 | | /// |
952 | | /// [`pointer::as_mut`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_mut) |
953 | | /// |
954 | | /// ## API Differences |
955 | | /// |
956 | | /// This produces a proxy type rather than a true reference. The proxy |
957 | | /// implements `DerefMut<Target = bool>`, and can be converted to |
958 | | /// `&mut bool` with a reborrow `&mut *`. |
959 | | /// |
960 | | /// Writes to the proxy are not reflected in the proxied location until the |
961 | | /// proxy is destroyed, either through `Drop` or its [`.commit()`] method. |
962 | | /// |
963 | | /// ## Safety |
964 | | /// |
965 | | /// Since `BitPtr` does not permit null or misaligned pointers, this method |
966 | | /// will always dereference the pointer in order to create the proxy. As |
967 | | /// such, you must ensure the following conditions are met: |
968 | | /// |
969 | | /// - the pointer must be dereferenceable as defined in the standard library |
970 | | /// documentation |
971 | | /// - the pointer must point to an initialized instance of `T` |
972 | | /// - you must ensure that no other pointer will race to modify the referent |
973 | | /// location while this call is reading from memory to produce the proxy |
974 | | /// - you must ensure that no other `bitvec` handle targets the referent bit |
975 | | /// |
976 | | /// ## Examples |
977 | | /// |
978 | | /// ```rust |
979 | | /// use bitvec::prelude::*; |
980 | | /// |
981 | | /// let mut data = 0u8; |
982 | | /// let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data); |
983 | | /// let mut val = unsafe { ptr.as_mut() }.unwrap(); |
984 | | /// assert!(!*val); |
985 | | /// *val = true; |
986 | | /// assert!(*val); |
987 | | /// ``` |
988 | | /// |
989 | | /// [`.commit()`]: crate::ptr::BitRef::commit |
990 | | #[inline] |
991 | 0 | pub unsafe fn as_mut<'a>(self) -> Option<BitRef<'a, Mut, T, O>> { |
992 | 0 | Some(BitRef::from_bitptr(self)) |
993 | 0 | } |
994 | | |
995 | | /// Copies `count` bits from the region starting at `src` to the region |
996 | | /// starting at `self`. |
997 | | /// |
998 | | /// The regions are free to overlap; the implementation will detect overlap |
999 | | /// and correctly avoid it. |
1000 | | /// |
1001 | | /// Note: this has the *opposite* argument order from [`ptr::copy`]: `self` |
1002 | | /// is the destination, not the source. |
1003 | | /// |
1004 | | /// ## Original |
1005 | | /// |
1006 | | /// [`pointer::copy_from`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_from) |
1007 | | /// |
1008 | | /// ## Safety |
1009 | | /// |
1010 | | /// See [`ptr::copy`]. |
1011 | | /// |
1012 | | /// [`ptr::copy`]: crate::ptr::copy |
1013 | | #[inline] |
1014 | | #[cfg(not(tarpaulin_include))] |
1015 | 0 | pub unsafe fn copy_from<T2, O2>( |
1016 | 0 | self, |
1017 | 0 | src: BitPtr<Const, T2, O2>, |
1018 | 0 | count: usize, |
1019 | 0 | ) where |
1020 | 0 | T2: BitStore, |
1021 | 0 | O2: BitOrder, |
1022 | | { |
1023 | 0 | src.copy_to(self, count); |
1024 | 0 | } |
1025 | | |
1026 | | /// Copies `count` bits from the region starting at `src` to the region |
1027 | | /// starting at `self`. |
1028 | | /// |
1029 | | /// Unlike [`.copy_from()`], the two regions may *not* overlap; this method |
1030 | | /// does not attempt to detect overlap and thus may have a slight |
1031 | | /// performance boost over the overlap-handling `.copy_from()`. |
1032 | | /// |
1033 | | /// Note: this has the *opposite* argument order from |
1034 | | /// [`ptr::copy_nonoverlapping`]: `self` is the destination, not the source. |
1035 | | /// |
1036 | | /// ## Original |
1037 | | /// |
1038 | | /// [`pointer::copy_from_nonoverlapping`](https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_from_nonoverlapping) |
1039 | | /// |
1040 | | /// ## Safety |
1041 | | /// |
1042 | | /// See [`ptr::copy_nonoverlapping`]. |
1043 | | /// |
1044 | | /// [`.copy_from()`]: Self::copy_from |
1045 | | #[inline] |
1046 | | #[cfg(not(tarpaulin_include))] |
1047 | 0 | pub unsafe fn copy_from_nonoverlapping<T2, O2>( |
1048 | 0 | self, |
1049 | 0 | src: BitPtr<Const, T2, O2>, |
1050 | 0 | count: usize, |
1051 | 0 | ) where |
1052 | 0 | T2: BitStore, |
1053 | 0 | O2: BitOrder, |
1054 | | { |
1055 | 0 | src.copy_to_nonoverlapping(self, count); |
1056 | 0 | } |
1057 | | |
1058 | | /// Runs the destructor of the referent value. |
1059 | | /// |
1060 | | /// `bool` has no destructor; this function does nothing. |
1061 | | /// |
1062 | | /// ## Original |
1063 | | /// |
1064 | | /// [`pointer::drop_in_place`](https://doc.rust-lang.org/std/primitive.pointer.html#method.drop_in_place) |
1065 | | /// |
1066 | | /// ## Safety |
1067 | | /// |
1068 | | /// See [`ptr::drop_in_place`]. |
1069 | | /// |
1070 | | /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place |
1071 | | #[inline] |
1072 | | #[deprecated = "this has no effect, and should not be called"] |
1073 | 0 | pub fn drop_in_place(self) {} |
1074 | | |
1075 | | /// Writes a new bit into the given location. |
1076 | | /// |
1077 | | /// ## Original |
1078 | | /// |
1079 | | /// [`pointer::write`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write) |
1080 | | /// |
1081 | | /// ## Safety |
1082 | | /// |
1083 | | /// See [`ptr::write`]. |
1084 | | /// |
1085 | | /// [`ptr::write`]: crate::ptr::write |
1086 | | #[inline] |
1087 | 0 | pub unsafe fn write(self, value: bool) { |
1088 | 0 | self.replace(value); |
1089 | 0 | }
1090 | | |
1091 | | /// Writes a new bit using volatile I/O operations. |
1092 | | /// |
1093 | | /// Because processors do not generally have single-bit read or write |
1094 | | /// instructions, this must perform a volatile read of the entire memory |
1095 | | /// location, perform the write locally, then perform another volatile write |
1096 | | /// to the entire location. These three steps are guaranteed to be |
1097 | | /// sequential with respect to each other, but are not guaranteed to be |
1098 | | /// atomic. |
1099 | | /// |
1100 | | /// Volatile operations are intended to act on I/O memory, and are *only* |
1101 | | /// guaranteed not to be elided or reördered by the compiler across other |
1102 | | /// I/O operations. |
1103 | | /// |
1104 | | /// You should not use `bitvec` to act on volatile memory. You should use a |
1105 | | /// crate specialized for volatile I/O work, such as [`voladdress`], and use it |
1106 | | /// to explicitly manage the I/O and ask it to perform `bitvec` work only on |
1107 | | /// the local snapshot of a volatile location. |
1108 | | /// |
1109 | | /// ## Original |
1110 | | /// |
1111 | | /// [`pointer::write_volatile`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write_volatile) |
1112 | | /// |
1113 | | /// ## Safety |
1114 | | /// |
1115 | | /// See [`ptr::write_volatile`]. |
1116 | | /// |
1117 | | /// [`ptr::write_volatile`]: crate::ptr::write_volatile |
1118 | | /// [`voladdress`]: https://docs.rs/voladdress/latest/voladdress |
1119 | | #[inline] |
1120 | | #[allow(clippy::needless_borrow)] // Clippy is wrong. |
1121 | 0 | pub unsafe fn write_volatile(self, value: bool) { |
1122 | 0 | let ptr = self.ptr.to_mut(); |
1123 | 0 | let mut tmp = ptr.read_volatile(); |
1124 | 0 | Self::new_unchecked((&mut tmp).into(), self.bit).write(value); |
1125 | 0 | ptr.write_volatile(tmp); |
1126 | 0 | } |
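A sketch of `write_volatile` on ordinary memory, where the read-modify-write round trip described above amounts to a slower `write` (real volatile I/O belongs in a dedicated crate, per the note above):

```rust
use bitvec::prelude::*;

let mut data = 0u8;
let ptr = BitPtr::<_, _, Lsb0>::from_mut(&mut data);
// Volatile read of the byte, local bit write, volatile write-back.
unsafe { ptr.write_volatile(true) };
assert_eq!(data, 1);
```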
1127 | | |
1128 | | /// Writes a bit into memory, tolerating unaligned addresses. |
1129 | | /// |
1130 | | /// `BitPtr` does not have unaligned addresses. `BitPtr` itself is capable |
1131 | | /// of operating on misaligned addresses, but elects to disallow use of them |
1132 | | /// in keeping with the rest of `bitvec`’s requirements. |
1133 | | /// |
1134 | | /// ## Original |
1135 | | /// |
1136 | | /// [`pointer::write_unaligned`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write_unaligned) |
1137 | | /// |
1138 | | /// ## Safety |
1139 | | /// |
1140 | | /// See [`ptr::write_unaligned`]. |
1141 | | /// |
1142 | | /// [`ptr::write_unaligned`]: crate::ptr::write_unaligned |
1143 | | #[inline] |
1144 | | #[allow(clippy::needless_borrow)] // Clippy is wrong. |
1145 | | #[deprecated = "`BitPtr` does not have unaligned addresses"] |
1146 | 0 | pub unsafe fn write_unaligned(self, value: bool) { |
1147 | 0 | let ptr = self.ptr.to_mut(); |
1148 | 0 | let mut tmp = ptr.read_unaligned(); |
1149 | 0 | Self::new_unchecked((&mut tmp).into(), self.bit).write(value); |
1150 | 0 | ptr.write_unaligned(tmp); |
1151 | 0 | } |
1152 | | |
1153 | | /// Replaces the bit at `*self` with a new value, returning the previous |
1154 | | /// value. |
1155 | | /// |
1156 | | /// ## Original |
1157 | | /// |
1158 | | /// [`pointer::replace`](https://doc.rust-lang.org/std/primitive.pointer.html#method.replace) |
1159 | | /// |
1160 | | /// ## Safety |
1161 | | /// |
1162 | | /// See [`ptr::replace`]. |
1163 | | /// |
1164 | | /// [`ptr::replace`]: crate::ptr::replace |
1165 | | #[inline] |
1166 | 36.5k | pub unsafe fn replace(self, value: bool) -> bool { |
1167 | 36.5k | self.freeze().frozen_write_bit(value) |
1168 | 36.5k | }
1169 | | |
1170 | | /// Swaps the bits at two mutable locations. |
1171 | | /// |
1172 | | /// ## Original |
1173 | | /// |
1174 | | /// [`pointer::swap`](https://doc.rust-lang.org/std/primitive.pointer.html#method.swap) |
1175 | | /// |
1176 | | /// ## Safety |
1177 | | /// |
1178 | | /// See [`ptr::swap`]. |
1179 | | /// |
1180 | | /// [`ptr::swap`]: crate::ptr::swap |
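 | | ///
 | | /// ## Examples
 | | ///
 | | /// An illustrative sketch: the two pointers may differ in both storage
 | | /// type and bit-ordering.
 | | ///
 | | /// ```rust
 | | /// use bitvec::order::{Lsb0, Msb0};
 | | /// use bitvec::ptr::BitPtr;
 | | ///
 | | /// let (mut a, mut b) = (0u8, !0u8);
 | | /// let pa: BitPtr<_, _, Msb0> = BitPtr::from_mut(&mut a);
 | | /// let pb: BitPtr<_, _, Lsb0> = BitPtr::from_mut(&mut b);
 | | /// unsafe { pa.swap(pb); }
 | | /// assert_eq!(a, 0b1000_0000); // took `b`’s set bit
 | | /// assert_eq!(b, 0b1111_1110); // took `a`’s clear bit
 | | /// ```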
1181 | | #[inline] |
1182 | 0 | pub unsafe fn swap<T2, O2>(self, with: BitPtr<Mut, T2, O2>) |
1183 | 0 | where |
1184 | 0 | T2: BitStore, |
1185 | 0 | O2: BitOrder, |
1186 | | { |
1187 | 0 | self.write(with.replace(self.read())); |
1188 | 0 | } |
1189 | | } |
1190 | | |
1191 | | impl<M, T, O> BitPtr<Frozen<M>, T, O> |
1192 | | where |
1193 | | M: Mutability, |
1194 | | T: BitStore, |
1195 | | O: BitOrder, |
1196 | | { |
1197 | | /// Writes through a bit-pointer that has had its mutability permission |
1198 | | /// removed. |
1199 | | /// |
1200 | | /// This is used to allow `BitPtr<Const, _, AliasSafe<T>>` pointers, which |
1201 | | /// are not `Mut` but may still modify memory, to do so. |
1202 | 36.5k | pub(crate) unsafe fn frozen_write_bit(self, value: bool) -> bool { |
1203 | 36.5k | (*self.ptr.cast::<T::Access>().to_const()) |
1204 | 36.5k | .write_bit::<O>(self.bit, value) |
1205 | 36.5k | } <bitvec::ptr::single::BitPtr<wyz::comu::Frozen<wyz::comu::Mut>, u8, bitvec::order::Msb0>>::frozen_write_bit Line | Count | Source | 1202 | 36.5k | pub(crate) unsafe fn frozen_write_bit(self, value: bool) -> bool { | 1203 | 36.5k | (*self.ptr.cast::<T::Access>().to_const()) | 1204 | 36.5k | .write_bit::<O>(self.bit, value) | 1205 | 36.5k | } |
Unexecuted instantiation: <bitvec::ptr::single::BitPtr<wyz::comu::Frozen<_>, _, _>>::frozen_write_bit |
1206 | | } |
1207 | | |
1208 | | #[cfg(not(tarpaulin_include))] |
1209 | | impl<M, T, O> Clone for BitPtr<M, T, O> |
1210 | | where |
1211 | | M: Mutability, |
1212 | | T: BitStore, |
1213 | | O: BitOrder, |
1214 | | { |
1215 | | #[inline] |
1216 | 0 | fn clone(&self) -> Self { |
1217 | 0 | Self { |
1218 | 0 | ptr: self.get_addr(), |
1219 | 0 | ..*self |
1220 | 0 | } |
1221 | 0 | } |
1222 | | } |
1223 | | |
1224 | | impl<M, T, O> Eq for BitPtr<M, T, O> |
1225 | | where |
1226 | | M: Mutability, |
1227 | | T: BitStore, |
1228 | | O: BitOrder, |
1229 | | { |
1230 | | } |
1231 | | |
1232 | | impl<M, T, O> Ord for BitPtr<M, T, O> |
1233 | | where |
1234 | | M: Mutability, |
1235 | | T: BitStore, |
1236 | | O: BitOrder, |
1237 | | { |
1238 | | #[inline] |
1239 | 0 | fn cmp(&self, other: &Self) -> cmp::Ordering { |
1240 | 0 | self.partial_cmp(other).expect( |
1241 | 0 | "BitPtr has a total ordering when type parameters are identical", |
1242 | | ) |
1243 | 0 | } |
1244 | | } |
1245 | | |
1246 | | impl<M1, M2, T1, T2, O> PartialEq<BitPtr<M2, T2, O>> for BitPtr<M1, T1, O> |
1247 | | where |
1248 | | M1: Mutability, |
1249 | | M2: Mutability, |
1250 | | T1: BitStore, |
1251 | | T2: BitStore, |
1252 | | O: BitOrder, |
1253 | | { |
1254 | | #[inline] |
1255 | 0 | fn eq(&self, other: &BitPtr<M2, T2, O>) -> bool { |
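 | | // Bit-pointers over different underlying memory types can never be equal.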
1256 | 0 | if !dvl::match_store::<T1::Mem, T2::Mem>() { |
1257 | 0 | return false; |
1258 | 0 | } |
1259 | 0 | self.get_addr().to_const() as usize |
1260 | 0 | == other.get_addr().to_const() as usize |
1261 | 0 | && self.bit.into_inner() == other.bit.into_inner() |
1262 | 0 | } Unexecuted instantiation: <bitvec::ptr::single::BitPtr<wyz::comu::Mut, u8, bitvec::order::Msb0> as core::cmp::PartialEq>::eq
Unexecuted instantiation: <bitvec::ptr::single::BitPtr<wyz::comu::Const, u8, bitvec::order::Msb0> as core::cmp::PartialEq>::eq
Unexecuted instantiation: <bitvec::ptr::single::BitPtr<_, _, _> as core::cmp::PartialEq<bitvec::ptr::single::BitPtr<_, _, _>>>::eq
1263 | | } |
1264 | | |
1265 | | impl<M1, M2, T1, T2, O> PartialOrd<BitPtr<M2, T2, O>> for BitPtr<M1, T1, O> |
1266 | | where |
1267 | | M1: Mutability, |
1268 | | M2: Mutability, |
1269 | | T1: BitStore, |
1270 | | T2: BitStore, |
1271 | | O: BitOrder, |
1272 | | { |
1273 | | #[inline] |
1274 | 0 | fn partial_cmp(&self, other: &BitPtr<M2, T2, O>) -> Option<cmp::Ordering> { |
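 | | // Bit-pointers over different underlying memory types have no ordering.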
1275 | 0 | if !dvl::match_store::<T1::Mem, T2::Mem>() { |
1276 | 0 | return None; |
1277 | 0 | } |
1278 | 0 | match (self.get_addr().to_const() as usize) |
1279 | 0 | .cmp(&(other.get_addr().to_const() as usize)) |
1280 | | { |
1281 | | cmp::Ordering::Equal => { |
1282 | 0 | self.bit.into_inner().partial_cmp(&other.bit.into_inner()) |
1283 | | }, |
1284 | 0 | ord => Some(ord), |
1285 | | } |
1286 | 0 | } |
1287 | | } |
1288 | | |
1289 | | #[cfg(not(tarpaulin_include))] |
1290 | | impl<T, O> From<&T> for BitPtr<Const, T, O> |
1291 | | where |
1292 | | T: BitStore, |
1293 | | O: BitOrder, |
1294 | | { |
1295 | | #[inline] |
1296 | 0 | fn from(elem: &T) -> Self { |
1297 | 0 | Self::from_ref(elem) |
1298 | 0 | } |
1299 | | } |
1300 | | |
1301 | | #[cfg(not(tarpaulin_include))] |
1302 | | impl<T, O> From<&mut T> for BitPtr<Mut, T, O> |
1303 | | where |
1304 | | T: BitStore, |
1305 | | O: BitOrder, |
1306 | | { |
1307 | | #[inline] |
1308 | 0 | fn from(elem: &mut T) -> Self { |
1309 | 0 | Self::from_mut(elem) |
1310 | 0 | } |
1311 | | } |
1312 | | |
1313 | | impl<T, O> TryFrom<*const T> for BitPtr<Const, T, O> |
1314 | | where |
1315 | | T: BitStore, |
1316 | | O: BitOrder, |
1317 | | { |
1318 | | type Error = BitPtrError<T>; |
1319 | | |
1320 | | #[inline] |
1321 | 0 | fn try_from(elem: *const T) -> Result<Self, Self::Error> { |
1322 | 0 | elem.try_conv::<Address<Const, T>>()? |
1323 | 0 | .pipe(|ptr| Self::new(ptr, BitIdx::MIN))? |
1324 | 0 | .pipe(Ok) |
1325 | 0 | } |
1326 | | } |
1327 | | |
1328 | | impl<T, O> TryFrom<*mut T> for BitPtr<Mut, T, O> |
1329 | | where |
1330 | | T: BitStore, |
1331 | | O: BitOrder, |
1332 | | { |
1333 | | type Error = BitPtrError<T>; |
1334 | | |
1335 | | #[inline] |
1336 | 0 | fn try_from(elem: *mut T) -> Result<Self, Self::Error> { |
1337 | 0 | elem.try_conv::<Address<Mut, T>>()? |
1338 | 0 | .pipe(|ptr| Self::new(ptr, BitIdx::MIN))? |
1339 | 0 | .pipe(Ok) |
1340 | 0 | } |
1341 | | } |
1342 | | |
1343 | | impl<M, T, O> Debug for BitPtr<M, T, O> |
1344 | | where |
1345 | | M: Mutability, |
1346 | | T: BitStore, |
1347 | | O: BitOrder, |
1348 | | { |
1349 | | #[inline] |
1350 | 0 | fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { |
1351 | 0 | write!( |
1352 | 0 | fmt, |
1353 | 0 | "{} Bit<{}, {}>", |
1354 | | M::RENDER, |
1355 | 0 | any::type_name::<T>(), |
1356 | 0 | any::type_name::<O>(), |
1357 | 0 | )?; |
1358 | 0 | Pointer::fmt(self, fmt) |
1359 | 0 | } |
1360 | | } |
1361 | | |
1362 | | impl<M, T, O> Pointer for BitPtr<M, T, O> |
1363 | | where |
1364 | | M: Mutability, |
1365 | | T: BitStore, |
1366 | | O: BitOrder, |
1367 | | { |
1368 | | #[inline] |
1369 | 0 | fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { |
1370 | 0 | fmt.debug_tuple("") |
1371 | 0 | .field(&self.get_addr().fmt_pointer()) |
1372 | 0 | .field(&self.bit.fmt_binary()) |
1373 | 0 | .finish() |
1374 | 0 | } |
1375 | | } |
1376 | | |
1377 | | #[cfg(not(tarpaulin_include))] |
1378 | | impl<M, T, O> Hash for BitPtr<M, T, O> |
1379 | | where |
1380 | | M: Mutability, |
1381 | | T: BitStore, |
1382 | | O: BitOrder, |
1383 | | { |
1384 | | #[inline] |
1385 | 0 | fn hash<H>(&self, state: &mut H) |
1386 | 0 | where H: Hasher { |
1387 | 0 | self.get_addr().hash(state); |
1388 | 0 | self.bit.hash(state); |
1389 | 0 | } |
1390 | | } |
1391 | | |
1392 | | impl<M, T, O> Copy for BitPtr<M, T, O> |
1393 | | where |
1394 | | M: Mutability, |
1395 | | T: BitStore, |
1396 | | O: BitOrder, |
1397 | | { |
1398 | | } |
1399 | | |
1400 | | /// Errors produced by invalid bit-pointer components. |
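 | | ///
 | | /// ## Examples
 | | ///
 | | /// An illustrative sketch of both failure cases, using the `TryFrom`
 | | /// raw-pointer conversions:
 | | ///
 | | /// ```rust
 | | /// use core::convert::TryFrom;
 | | /// use bitvec::order::Lsb0;
 | | /// use bitvec::ptr::{BitPtr, BitPtrError};
 | | ///
 | | /// let null = core::ptr::null::<u16>();
 | | /// assert!(matches!(
 | | ///   BitPtr::<_, _, Lsb0>::try_from(null),
 | | ///   Err(BitPtrError::Null(_)),
 | | /// ));
 | | ///
 | | /// let elem = 0u16;
 | | /// // Offsetting by one byte misaligns a `u16` address.
 | | /// let skewed = (&elem as *const u16).cast::<u8>().wrapping_add(1).cast::<u16>();
 | | /// assert!(matches!(
 | | ///   BitPtr::<_, _, Lsb0>::try_from(skewed),
 | | ///   Err(BitPtrError::Misaligned(_)),
 | | /// ));
 | | /// ```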
1401 | | #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] |
1402 | | pub enum BitPtrError<T> |
1403 | | where T: BitStore |
1404 | | { |
1405 | | /// Attempted to construct a bit-pointer with the null element address. |
1406 | | Null(NullPtrError), |
1407 | | /// Attempted to construct a bit-pointer with an address not aligned for the |
1408 | | /// element type. |
1409 | | Misaligned(MisalignError<T>), |
1410 | | } |
1411 | | |
1412 | | #[cfg(not(tarpaulin_include))] |
1413 | | impl<T> From<MisalignError<T>> for BitPtrError<T> |
1414 | | where T: BitStore |
1415 | | { |
1416 | | #[inline] |
1417 | 0 | fn from(err: MisalignError<T>) -> Self { |
1418 | 0 | Self::Misaligned(err) |
1419 | 0 | } |
1420 | | } |
1421 | | |
1422 | | #[cfg(not(tarpaulin_include))] |
1423 | | impl<T> From<NullPtrError> for BitPtrError<T> |
1424 | | where T: BitStore |
1425 | | { |
1426 | | #[inline] |
1427 | 0 | fn from(err: NullPtrError) -> Self { |
1428 | 0 | Self::Null(err) |
1429 | 0 | } |
1430 | | } |
1431 | | |
1432 | | #[cfg(not(tarpaulin_include))] |
1433 | | impl<T> Display for BitPtrError<T> |
1434 | | where T: BitStore |
1435 | | { |
1436 | | #[inline] |
1437 | 0 | fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { |
1438 | 0 | match self { |
1439 | 0 | Self::Null(err) => Display::fmt(err, fmt), |
1440 | 0 | Self::Misaligned(err) => Display::fmt(err, fmt), |
1441 | | } |
1442 | 0 | } |
1443 | | } |
1444 | | |
1445 | | #[cfg(feature = "std")] |
1446 | | impl<T> std::error::Error for BitPtrError<T> where T: BitStore {} |