// /rust/registry/src/index.crates.io-6f17d22bba15001f/bitvec-1.0.1/src/order.rs
#![doc = include_str!("../doc/order.md")]

use crate::{
	index::{
		BitEnd,
		BitIdx,
		BitMask,
		BitPos,
		BitSel,
	},
	mem::{
		bits_of,
		BitRegister,
	},
};

#[doc = include_str!("../doc/order/BitOrder.md")]
pub unsafe trait BitOrder: 'static {
	/// Translates a semantic bit index into a real bit position.
	///
	/// This function is the basis of the trait, and must adhere to a number of
	/// requirements in order for an implementation to be correct.
	///
	/// ## Type Parameters
	///
	/// - `R`: The memory element type that the index and position govern.
	///
	/// ## Parameters
	///
	/// - `index`: A semantic bit-index within some `R` element.
	///
	/// ## Returns
	///
	/// The real position of the indexed bit within an `R` element. See the
	/// `BitPos` documentation for what these positions are considered to mean.
	///
	/// ## Requirements
	///
	/// This function must satisfy the following requirements for all possible
	/// input and output values, for all possible `R` type parameters:
	///
	/// - Totality: The implementation must accept every input in
	///   [`BitIdx::<R>::range_all()`] and produce some `BitPos` value for
	///   each.
	/// - Bijection: There must be an exact one-to-one correspondence between
	///   input and output values. No input index may choose its output from a
	///   set of more than one position, and no output position may be produced
	///   by more than one input index.
	/// - Purity: The translation from index to position must be consistent for
	///   the lifetime of *at least* all data structures in the program. This
	///   function *may* refer to global state, but that state **must** be
	///   immutable while any `bitvec` data structures exist, and must not be
	///   used to violate the totality or bijection requirements.
	/// - Validity: The produced `BitPos` value must be within the valid range
	///   of its type. This is enforced by [`BitPos::new`], but not by the
	///   unsafe constructor [`BitPos::new_unchecked`].
	///
	/// [`BitIdx::<R>::range_all()`]: crate::index::BitIdx::range_all
	/// [`BitPos::new`]: crate::index::BitPos::new
	/// [`BitPos::new_unchecked`]: crate::index::BitPos::new_unchecked
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister;

	/// Produces a single-bit selection mask from a bit-index.
	///
	/// This is an optional function: it is implemented as, and must always be
	/// exactly identical to, `BitOrder::at(index).select()`. If your ordering
	/// has a faster implementation, you may provide it, but it must be exactly
	/// numerically equivalent.
	#[inline]
	fn select<R>(index: BitIdx<R>) -> BitSel<R>
	where R: BitRegister {
		Self::at::<R>(index).select()
	}

	/// Produces a multi-bit selection mask from a range of bit-indices.
	///
	/// This is an optional function: it is implemented as, and must always be
	/// exactly identical to,
	/// `BitIdx::range(from, upto).map(BitOrder::select).sum()`. If your
	/// ordering has a faster implementation, you may provide it, but it must
	/// be exactly numerically equivalent.
	///
	/// ## Parameters
	///
	/// - `from`: The inclusive starting value of the indices being selected.
	///   Defaults to [`BitIdx::MIN`].
	/// - `upto`: The exclusive ending value of the indices being selected.
	///   Defaults to [`BitEnd::MAX`].
	///
	/// ## Returns
	///
	/// A selection mask with all bit-positions corresponding to `from .. upto`
	/// selected.
	///
	/// [`BitEnd::MAX`]: crate::index::BitEnd::MAX
	/// [`BitIdx::MIN`]: crate::index::BitIdx::MIN
	#[inline]
	fn mask<R>(
		from: impl Into<Option<BitIdx<R>>>,
		upto: impl Into<Option<BitEnd<R>>>,
	) -> BitMask<R>
	where
		R: BitRegister,
	{
		let (from, upto) = match (from.into(), upto.into()) {
			(None, None) => return BitMask::ALL,
			(Some(from), None) => (from, BitEnd::MAX),
			(None, Some(upto)) => (BitIdx::MIN, upto),
			(Some(from), Some(upto)) => (from, upto),
		};
		from.range(upto).map(Self::select::<R>).sum()
	}
}
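
// An illustrative check, not part of the upstream crate: the optional
// `select` method must agree exactly with its documented default expression
// `BitOrder::at(index).select()`. This sketch exercises that equivalence for
// the two provided orderings (defined below) across every `u8` index.
#[cfg(test)]
mod select_equivalence_sketch {
	use super::*;

	#[test]
	fn select_matches_at_then_select() {
		for idx in BitIdx::<u8>::range_all() {
			// Both orderings override `select`; each override must be
			// numerically identical to the default implementation.
			assert_eq!(Lsb0::select::<u8>(idx), Lsb0::at::<u8>(idx).select());
			assert_eq!(Msb0::select::<u8>(idx), Msb0::at::<u8>(idx).select());
		}
	}
}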

#[doc = include_str!("../doc/order/Lsb0.md")]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Lsb0;

#[doc = include_str!("../doc/order/Msb0.md")]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Msb0;

unsafe impl BitOrder for Lsb0 {
	#[inline]
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister {
		unsafe { BitPos::new_unchecked(index.into_inner()) }
	}

	#[inline]
	fn select<R>(index: BitIdx<R>) -> BitSel<R>
	where R: BitRegister {
		unsafe { BitSel::new_unchecked(R::ONE << index.into_inner()) }
	}

	#[inline]
	fn mask<R>(
		from: impl Into<Option<BitIdx<R>>>,
		upto: impl Into<Option<BitEnd<R>>>,
	) -> BitMask<R>
	where
		R: BitRegister,
	{
		let from = from.into().unwrap_or(BitIdx::MIN).into_inner();
		let upto = upto.into().unwrap_or(BitEnd::MAX).into_inner();
		debug_assert!(
			from <= upto,
			"ranges must run from low index ({}) to high ({})",
			from,
			upto,
		);
		let ct = upto - from;
		if ct == bits_of::<R>() as u8 {
			return BitMask::ALL;
		}
		/* This expression does the following work:
		 * 1. Set all bits in the mask to `1`.
		 * 2. Shift left by the number of bits in the mask. The mask bits are
		 *    now at LSedge and `0`.
		 * 3. Invert the mask. The mask bits are now at LSedge and `1`; all
		 *    else are `0`.
		 * 4. Shift left by the `from` distance from LSedge. The mask bits now
		 *    begin at `from` left of LSedge and extend to `upto` left of
		 *    LSedge.
		 */
		BitMask::new(!(R::ALL << ct) << from)
	}
}
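
// A worked example, not part of the upstream crate: for `Lsb0` over `u8`,
// `mask(2, 5)` selects semantic indices `2 .. 5`, which are the literal bit
// positions 2, 3, and 4. Following the comment above: `ct = 3`,
// `!(0xFF << 3)` is `0b0000_0111`, and shifting left by `from = 2` yields
// `0b0001_1100`.
#[cfg(test)]
mod lsb0_mask_sketch {
	use super::*;

	#[test]
	fn mask_two_to_five() {
		let from = BitIdx::<u8>::new(2).unwrap();
		let upto = BitEnd::<u8>::new(5).unwrap();
		assert_eq!(Lsb0::mask(from, upto), BitMask::new(0b0001_1100u8));
	}
}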

unsafe impl BitOrder for Msb0 {
	#[inline]
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister {
		unsafe { BitPos::new_unchecked(R::MASK - index.into_inner()) }
	}

	#[inline]
	fn select<R>(index: BitIdx<R>) -> BitSel<R>
	where R: BitRegister {
		/* Shift the MSbit down by the index count. This computes the same
		 * value as `1 << (R::MASK - index)`, but the latter must perform a
		 * runtime subtraction before its shift, while this shifts a
		 * precomputed constant.
		 */
		let msbit: R = R::ONE << R::MASK;
		unsafe { BitSel::new_unchecked(msbit >> index.into_inner()) }
	}

	#[inline]
	fn mask<R>(
		from: impl Into<Option<BitIdx<R>>>,
		upto: impl Into<Option<BitEnd<R>>>,
	) -> BitMask<R>
	where
		R: BitRegister,
	{
		let from = from.into().unwrap_or(BitIdx::MIN).into_inner();
		let upto = upto.into().unwrap_or(BitEnd::MAX).into_inner();
		debug_assert!(
			from <= upto,
			"ranges must run from low index ({}) to high ({})",
			from,
			upto,
		);
		let ct = upto - from;
		if ct == bits_of::<R>() as u8 {
			return BitMask::ALL;
		}
		/* This expression does the following work:
		 * 1. Set all bits in the mask to `1`.
		 * 2. Shift right by the number of bits in the mask. The mask bits are
		 *    now at MSedge and `0`.
		 * 3. Invert the mask. The mask bits are now at MSedge and `1`; all
		 *    else are `0`.
		 * 4. Shift right by the `from` distance from MSedge. The mask bits
		 *    now begin at `from` right of MSedge and extend to `upto` right
		 *    of MSedge.
		 */
		BitMask::new(!(R::ALL >> ct) >> from)
	}
}
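
// A worked example, not part of the upstream crate: `Msb0` counts indices
// from the most significant bit, so over `u8` index 1 selects bit position 6
// (`0b0100_0000`), and `mask(2, 5)` covers positions 5, 4, and 3: `ct = 3`,
// `!(0xFF >> 3)` is `0b1110_0000`, and shifting right by `from = 2` yields
// `0b0011_1000`.
#[cfg(test)]
mod msb0_sketch {
	use super::*;

	#[test]
	fn select_and_mask() {
		let idx = BitIdx::<u8>::new(1).unwrap();
		assert_eq!(Msb0::select::<u8>(idx).into_inner(), 0b0100_0000u8);

		let from = BitIdx::<u8>::new(2).unwrap();
		let upto = BitEnd::<u8>::new(5).unwrap();
		assert_eq!(Msb0::mask(from, upto), BitMask::new(0b0011_1000u8));
	}
}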

#[cfg(target_endian = "little")]
#[doc = include_str!("../doc/order/LocalBits.md")]
pub use self::Lsb0 as LocalBits;
#[cfg(target_endian = "big")]
#[doc = include_str!("../doc/order/LocalBits.md")]
pub use self::Msb0 as LocalBits;

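// A small illustrative check, not part of the upstream crate: `LocalBits`
// is a re-export of whichever ordering matches the target's byte-endianness,
// so on this target it translates indices exactly as its aliased type does.
#[cfg(all(test, target_endian = "little"))]
mod localbits_sketch {
	use super::*;

	#[test]
	fn localbits_is_lsb0() {
		for idx in BitIdx::<u8>::range_all() {
			assert_eq!(
				LocalBits::at::<u8>(idx).into_inner(),
				Lsb0::at::<u8>(idx).into_inner(),
			);
		}
	}
}
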
#[cfg(not(any(target_endian = "big", target_endian = "little")))]
compile_error!(
	"This architecture is not supported! Please consider filing an issue"
);

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/order/verify.md")]
pub fn verify<O>(verbose: bool)
where O: BitOrder {
	verify_for_type::<u8, O>(verbose);
	verify_for_type::<u16, O>(verbose);
	verify_for_type::<u32, O>(verbose);
	verify_for_type::<usize, O>(verbose);

	#[cfg(target_pointer_width = "64")]
	verify_for_type::<u64, O>(verbose);
}
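
// A usage sketch, not part of the upstream crate: a crate defining its own
// `BitOrder` implementation would typically call `verify` from a test so
// that all register types are checked in one pass. The provided orderings
// stand in for a custom implementation here.
#[cfg(test)]
mod verify_usage_sketch {
	use super::*;

	#[test]
	fn verify_provided_orderings() {
		// `false` suppresses the per-index diagnostic output.
		verify::<Lsb0>(false);
		verify::<Msb0>(false);
	}
}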

/// Verification does not access memory, so running it under Miri is both
/// useless and slow.
#[cfg(miri)]
pub fn verify_for_type<R, O>(_: bool)
where
	R: BitRegister,
	O: BitOrder,
{
}

#[cfg(not(miri))]
#[doc = include_str!("../doc/order/verify_for_type.md")]
pub fn verify_for_type<R, O>(verbose: bool)
where
	R: BitRegister,
	O: BitOrder,
{
	use core::any::type_name;
	let mut accum = BitMask::<R>::ZERO;

	let ord_name = type_name::<O>();
	let reg_name = type_name::<R>();

	for n in 0 .. bits_of::<R>() as u8 {
		// Wrap the counter as an index.
		let idx = unsafe { BitIdx::<R>::new_unchecked(n) };

		// Compute the bit position for the index.
		let pos = O::at::<R>(idx);
		if verbose {
			#[cfg(feature = "std")]
			println!(
				"`<{} as BitOrder>::at::<{}>({})` produces {}",
				ord_name,
				reg_name,
				n,
				pos.into_inner(),
			);
		}

		// If the computed position exceeds the valid range, fail.
		assert!(
			pos.into_inner() < bits_of::<R>() as u8,
			"Error when verifying the implementation of `BitOrder` for `{}`: \
			 Index {} produces a bit position ({}) that exceeds the type \
			 width {}",
			ord_name,
			n,
			pos.into_inner(),
			bits_of::<R>(),
		);

		// Check `O`’s implementation of `select`.
		let sel = O::select::<R>(idx);
		if verbose {
			#[cfg(feature = "std")]
			println!(
				"`<{} as BitOrder>::select::<{}>({})` produces {:b}",
				ord_name, reg_name, n, sel,
			);
		}

		// If the selector bit is not one-hot, fail.
		assert_eq!(
			sel.into_inner().count_ones(),
			1,
			"Error when verifying the implementation of `BitOrder` for `{}`: \
			 Index {} produces a bit selector ({:b}) that is not a one-hot \
			 mask",
			ord_name,
			n,
			sel,
		);

		// Check that the selection computed from the index matches the
		// selection computed from the position.
		let shl = pos.select();
		// If `O::select(idx)` does not produce `1 << pos`, fail.
		assert_eq!(
			sel,
			shl,
			"Error when verifying the implementation of `BitOrder` for `{}`: \
			 Index {} produces a bit selector ({:b}) that is not equal to `1 \
			 << {}` ({:b})",
			ord_name,
			n,
			sel,
			pos.into_inner(),
			shl,
		);

		// Check that the produced selector bit has not already been added to
		// the accumulator.
		assert!(
			!accum.test(sel),
			"Error when verifying the implementation of `BitOrder` for `{}`: \
			 Index {} produces a bit position ({}) that has already been \
			 produced by a prior index",
			ord_name,
			n,
			pos.into_inner(),
		);
		accum.insert(sel);
		if verbose {
			#[cfg(feature = "std")]
			println!(
				"`<{} as BitOrder>::at::<{}>({})` accumulates {:b}",
				ord_name, reg_name, n, accum,
			);
		}
	}

	// Check that all indices produced all positions.
	assert_eq!(
		accum,
		BitMask::ALL,
		"Error when verifying the implementation of `BitOrder` for `{}`: The \
		 bit positions marked with a `0` here were never produced from an \
		 index, despite all possible indices being passed in for translation: \
		 {:b}",
		ord_name,
		accum,
	);

	// Check that `O::mask` is correct for all range combinations.
	for from in BitIdx::<R>::range_all() {
		for upto in BitEnd::<R>::range_from(from) {
			let mask = O::mask(from, upto);
			let check = from
				.range(upto)
				.map(O::at)
				.map(BitPos::select)
				.sum::<BitMask<R>>();
			assert_eq!(
				mask,
				check,
				"Error when verifying the implementation of `BitOrder` for \
				 `{o}`: `{o}::mask::<{m}>({f}, {u})` produced {bad:b}, but \
				 expected {good:b}",
				o = ord_name,
				m = reg_name,
				f = from,
				u = upto,
				bad = mask,
				good = check,
			);
		}
	}
}

/// An ordering that does not provide a contiguous index map or `BitField`
/// acceleration.
#[cfg(test)]
pub struct HiLo;

#[cfg(test)]
unsafe impl BitOrder for HiLo {
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister {
		BitPos::new(index.into_inner() ^ 4).unwrap()
	}
}
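
// An illustrative check, not part of the upstream crate: `HiLo` XORs each
// index with 4, which within a `u8` swaps the low and high halves of the
// index space. The map is its own inverse, so it is a bijection even though
// it is not monotonic, and it therefore still satisfies the `BitOrder`
// contract.
#[cfg(test)]
mod hilo_sketch {
	use super::*;

	#[test]
	fn xor_swaps_halves() {
		// Index 0 lands in the high half, index 7 in the low half.
		assert_eq!(HiLo::at::<u8>(BitIdx::new(0).unwrap()).into_inner(), 4);
		assert_eq!(HiLo::at::<u8>(BitIdx::new(7).unwrap()).into_inner(), 3);
	}
}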

#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn default_impl() {
		assert_eq!(Lsb0::mask(None, None), BitMask::<u8>::ALL);
		assert_eq!(Msb0::mask(None, None), BitMask::<u8>::ALL);
		assert_eq!(HiLo::mask(None, None), BitMask::<u8>::ALL);

		assert_eq!(
			HiLo::mask(None, BitEnd::<u8>::new(3).unwrap()),
			BitMask::new(0b0111_0000),
		);
		assert_eq!(
			HiLo::mask(BitIdx::<u8>::new(3).unwrap(), None),
			BitMask::new(0b1000_1111),
		);
	}

	// Split these out into individual test functions so that they can run in
	// parallel.

	mod lsb0 {
		use super::*;

		#[test]
		fn verify_u8() {
			verify_for_type::<u8, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u16() {
			verify_for_type::<u16, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u32() {
			verify_for_type::<u32, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
		fn verify_u64() {
			verify_for_type::<u64, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_usize() {
			verify_for_type::<usize, Lsb0>(cfg!(feature = "verbose"));
		}
	}

	mod msb0 {
		use super::*;

		#[test]
		fn verify_u8() {
			verify_for_type::<u8, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u16() {
			verify_for_type::<u16, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u32() {
			verify_for_type::<u32, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
		fn verify_u64() {
			verify_for_type::<u64, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_usize() {
			verify_for_type::<usize, Msb0>(cfg!(feature = "verbose"));
		}
	}

	mod hilo {
		use super::*;

		#[test]
		fn verify_u8() {
			verify_for_type::<u8, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u16() {
			verify_for_type::<u16, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u32() {
			verify_for_type::<u32, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
		fn verify_u64() {
			verify_for_type::<u64, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_usize() {
			verify_for_type::<usize, HiLo>(cfg!(feature = "verbose"));
		}
	}
}