/rust/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.14/src/util/mod.rs
Line | Count | Source |
1 | | // Copyright 2023 The Fuchsia Authors |
2 | | // |
3 | | // Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0 |
4 | | // <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT |
5 | | // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option. |
6 | | // This file may not be copied, modified, or distributed except according to |
7 | | // those terms. |
8 | | |
9 | | #[macro_use] |
10 | | mod macros; |
11 | | |
12 | | #[doc(hidden)] |
13 | | pub mod macro_util; |
14 | | |
15 | | use core::{ |
16 | | cell::UnsafeCell, |
17 | | marker::PhantomData, |
18 | | mem::{self, ManuallyDrop, MaybeUninit}, |
19 | | num::{NonZeroUsize, Wrapping}, |
20 | | ptr::NonNull, |
21 | | }; |
22 | | |
23 | | use crate::{ |
24 | | error::AlignmentError, |
25 | | pointer::invariant::{self, Invariants}, |
26 | | Unalign, |
27 | | }; |
28 | | |
/// A type which has the same layout as the type it wraps.
///
/// # Safety
///
/// `T: TransparentWrapper` implies that `T` has the same size as [`T::Inner`].
/// Further, `T: TransparentWrapper<I>` implies that:
/// - If `T::UnsafeCellVariance = Covariant`, then `T` has `UnsafeCell`s
///   covering the same byte ranges as `T::Inner`.
/// - If a `T` pointer satisfies the alignment invariant `I::Alignment`, then
///   that same pointer, cast to `T::Inner`, satisfies the alignment invariant
///   `<T::AlignmentVariance as AlignmentVariance<I::Alignment>>::Applied`.
/// - If a `T` pointer satisfies the validity invariant `I::Validity`, then that
///   same pointer, cast to `T::Inner`, satisfies the validity invariant
///   `<T::ValidityVariance as ValidityVariance<I::Validity>>::Applied`.
///
/// [`T::Inner`]: TransparentWrapper::Inner
/// [`UnsafeCell`]: core::cell::UnsafeCell
/// [`T::AlignmentVariance`]: TransparentWrapper::AlignmentVariance
/// [`T::ValidityVariance`]: TransparentWrapper::ValidityVariance
#[doc(hidden)]
pub unsafe trait TransparentWrapper<I: Invariants> {
    /// The type wrapped by `Self`; per the impl's safety contract, `Self` has
    /// the same size as `Inner`.
    type Inner: ?Sized;

    /// Whether `Self` has `UnsafeCell`s covering the same byte ranges as
    /// `Inner` ([`Covariant`]) or makes no such claim ([`Invariant`]).
    type UnsafeCellVariance;
    /// Maps the wrapper's alignment invariant to the alignment invariant
    /// which holds for `Inner` after a cast.
    type AlignmentVariance: AlignmentVariance<I::Alignment>;
    /// Maps the wrapper's validity invariant to the validity invariant which
    /// holds for `Inner` after a cast.
    type ValidityVariance: ValidityVariance<I::Validity>;

    /// Casts a wrapper pointer to an inner pointer.
    ///
    /// # Safety
    ///
    /// The resulting pointer has the same address and provenance as `ptr`, and
    /// addresses the same number of bytes.
    fn cast_into_inner(ptr: *mut Self) -> *mut Self::Inner;

    /// Casts an inner pointer to a wrapper pointer.
    ///
    /// # Safety
    ///
    /// The resulting pointer has the same address and provenance as `ptr`, and
    /// addresses the same number of bytes.
    fn cast_from_inner(ptr: *mut Self::Inner) -> *mut Self;
}
72 | | |
/// Maps an alignment invariant `I` of a wrapper type to the alignment
/// invariant which is guaranteed to hold for the wrapped inner type.
#[allow(unreachable_pub)]
#[doc(hidden)]
pub trait AlignmentVariance<I: invariant::Alignment> {
    /// The alignment invariant which holds for the inner type.
    type Applied: invariant::Alignment;
}
78 | | |
/// Maps a validity invariant `I` of a wrapper type to the validity invariant
/// which is guaranteed to hold for the wrapped inner type.
#[allow(unreachable_pub)]
#[doc(hidden)]
pub trait ValidityVariance<I: invariant::Validity> {
    /// The validity invariant which holds for the inner type.
    type Applied: invariant::Validity;
}
84 | | |
/// A variance marker: the wrapper's invariant carries over unchanged
/// ("covariantly") to the inner type.
#[doc(hidden)]
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum Covariant {}

// The wrapper's alignment invariant `I` applies as-is to the inner type.
impl<I: invariant::Alignment> AlignmentVariance<I> for Covariant {
    type Applied = I;
}

// The wrapper's validity invariant `I` applies as-is to the inner type.
impl<I: invariant::Validity> ValidityVariance<I> for Covariant {
    type Applied = I;
}
96 | | |
/// A variance marker: the wrapper's invariant does NOT carry over to the
/// inner type; only the trivial `invariant::Any` may be assumed after a cast.
#[doc(hidden)]
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum Invariant {}

// Regardless of the wrapper's alignment invariant, nothing may be assumed
// about the inner type's alignment.
impl<I: invariant::Alignment> AlignmentVariance<I> for Invariant {
    type Applied = invariant::Any;
}

// Regardless of the wrapper's validity invariant, nothing may be assumed
// about the inner type's validity.
impl<I: invariant::Validity> ValidityVariance<I> for Invariant {
    type Applied = invariant::Any;
}
108 | | |
109 | | // SAFETY: |
110 | | // - Per [1], `MaybeUninit<T>` has the same size as `T`. |
111 | | // - See inline comments for other safety justifications. |
112 | | // |
113 | | // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1: |
114 | | // |
115 | | // `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as |
116 | | // `T` |
117 | | unsafe impl<T, I: Invariants> TransparentWrapper<I> for MaybeUninit<T> { |
118 | | type Inner = T; |
119 | | |
120 | | // SAFETY: `MaybeUninit<T>` has `UnsafeCell`s covering the same byte ranges |
121 | | // as `Inner = T`. This is not explicitly documented, but it can be |
122 | | // inferred. Per [1] in the preceding safety comment, `MaybeUninit<T>` has |
123 | | // the same size as `T`. Further, note the signature of |
124 | | // `MaybeUninit::assume_init_ref` [2]: |
125 | | // |
126 | | // pub unsafe fn assume_init_ref(&self) -> &T |
127 | | // |
128 | | // If the argument `&MaybeUninit<T>` and the returned `&T` had `UnsafeCell`s |
129 | | // at different offsets, this would be unsound. Its existence is proof that |
130 | | // this is not the case. |
131 | | // |
132 | | // [2] https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#method.assume_init_ref |
133 | | type UnsafeCellVariance = Covariant; |
134 | | // SAFETY: Per [1], `MaybeUninit<T>` has the same layout as `T`, and thus |
135 | | // has the same alignment as `T`. |
136 | | // |
137 | | // [1] Per https://doc.rust-lang.org/std/mem/union.MaybeUninit.html#layout-1: |
138 | | // |
139 | | // `MaybeUninit<T>` is guaranteed to have the same size, alignment, and |
140 | | // ABI as `T`. |
141 | | type AlignmentVariance = Covariant; |
142 | | // SAFETY: `MaybeUninit` has no validity invariants. Thus, a valid |
143 | | // `MaybeUninit<T>` is not necessarily a valid `T`. |
144 | | type ValidityVariance = Invariant; |
145 | | |
146 | | #[inline(always)] |
147 | 0 | fn cast_into_inner(ptr: *mut MaybeUninit<T>) -> *mut T { |
148 | | // SAFETY: Per [1] (from comment above), `MaybeUninit<T>` has the same |
149 | | // layout as `T`. Thus, this cast preserves size. |
150 | | // |
151 | | // This cast trivially preserves provenance. |
152 | 0 | ptr.cast::<T>() |
153 | 0 | } |
154 | | |
155 | | #[inline(always)] |
156 | 0 | fn cast_from_inner(ptr: *mut T) -> *mut MaybeUninit<T> { |
157 | | // SAFETY: Per [1] (from comment above), `MaybeUninit<T>` has the same |
158 | | // layout as `T`. Thus, this cast preserves size. |
159 | | // |
160 | | // This cast trivially preserves provenance. |
161 | 0 | ptr.cast::<MaybeUninit<T>>() |
162 | 0 | } |
163 | | } |
164 | | |
165 | | // SAFETY: |
166 | | // - Per [1], `ManuallyDrop<T>` has the same size as `T`. |
167 | | // - See inline comments for other safety justifications. |
168 | | // |
169 | | // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html: |
170 | | // |
171 | | // `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as |
172 | | // `T` |
173 | | unsafe impl<T: ?Sized, I: Invariants> TransparentWrapper<I> for ManuallyDrop<T> { |
174 | | type Inner = T; |
175 | | |
176 | | // SAFETY: Per [1], `ManuallyDrop<T>` has `UnsafeCell`s covering the same |
177 | | // byte ranges as `Inner = T`. |
178 | | // |
179 | | // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html: |
180 | | // |
181 | | // `ManuallyDrop<T>` is guaranteed to have the same layout and bit |
182 | | // validity as `T`, and is subject to the same layout optimizations as |
183 | | // `T`. As a consequence, it has no effect on the assumptions that the |
184 | | // compiler makes about its contents. |
185 | | type UnsafeCellVariance = Covariant; |
186 | | // SAFETY: Per [1], `ManuallyDrop<T>` has the same layout as `T`, and thus |
187 | | // has the same alignment as `T`. |
188 | | // |
189 | | // [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html: |
190 | | // |
191 | | // `ManuallyDrop<T>` is guaranteed to have the same layout and bit |
192 | | // validity as `T` |
193 | | type AlignmentVariance = Covariant; |
194 | | |
195 | | // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same bit |
196 | | // validity as `T`. |
197 | | type ValidityVariance = Covariant; |
198 | | |
199 | | #[inline(always)] |
200 | 0 | fn cast_into_inner(ptr: *mut ManuallyDrop<T>) -> *mut T { |
201 | | // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same |
202 | | // layout as `T`. Thus, this cast preserves size even if `T` is unsized. |
203 | | // |
204 | | // This cast trivially preserves provenance. |
205 | | #[allow(clippy::as_conversions)] |
206 | 0 | return ptr as *mut T; |
207 | 0 | } |
208 | | |
209 | | #[inline(always)] |
210 | 0 | fn cast_from_inner(ptr: *mut T) -> *mut ManuallyDrop<T> { |
211 | | // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same |
212 | | // layout as `T`. Thus, this cast preserves size even if `T` is unsized. |
213 | | // |
214 | | // This cast trivially preserves provenance. |
215 | | #[allow(clippy::as_conversions)] |
216 | 0 | return ptr as *mut ManuallyDrop<T>; |
217 | 0 | } |
218 | | } |
219 | | |
220 | | // SAFETY: |
221 | | // - Per [1], `Wrapping<T>` has the same size as `T`. |
222 | | // - See inline comments for other safety justifications. |
223 | | // |
224 | | // [1] Per https://doc.rust-lang.org/1.81.0/std/num/struct.Wrapping.html#layout-1: |
225 | | // |
226 | | // `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`. |
227 | | unsafe impl<T, I: Invariants> TransparentWrapper<I> for Wrapping<T> { |
228 | | type Inner = T; |
229 | | |
230 | | // SAFETY: Per [1], `Wrapping<T>` has the same layout as `T`. Since its |
231 | | // single field (of type `T`) is public, it would be a breaking change to |
232 | | // add or remove fields. Thus, we know that `Wrapping<T>` contains a `T` (as |
233 | | // opposed to just having the same size and alignment as `T`) with no pre- |
234 | | // or post-padding. Thus, `Wrapping<T>` must have `UnsafeCell`s covering the |
235 | | // same byte ranges as `Inner = T`. |
236 | | // |
237 | | // [1] Per https://doc.rust-lang.org/1.81.0/std/num/struct.Wrapping.html#layout-1: |
238 | | // |
239 | | // `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`. |
240 | | type UnsafeCellVariance = Covariant; |
241 | | // SAFETY: Per [1], `Wrapping<T>` has the same layout as `T`, and thus has |
242 | | // the same alignment as `T`. |
243 | | // |
244 | | // [1] Per https://doc.rust-lang.org/core/num/struct.Wrapping.html#layout-1: |
245 | | // |
246 | | // `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`. |
247 | | type AlignmentVariance = Covariant; |
248 | | |
249 | | // SAFETY: `Wrapping<T>` has only one field, which is `pub` [2]. We are also |
250 | | // guaranteed per [1] (from the comment above) that `Wrapping<T>` has the |
251 | | // same layout as `T`. The only way for both of these to be true |
252 | | // simultaneously is for `Wrapping<T>` to have the same bit validity as `T`. |
253 | | // In particular, in order to change the bit validity, one of the following |
254 | | // would need to happen: |
255 | | // - `Wrapping` could change its `repr`, but this would violate the layout |
256 | | // guarantee. |
257 | | // - `Wrapping` could add or change its fields, but this would be a |
258 | | // stability-breaking change. |
259 | | // |
260 | | // [2] https://doc.rust-lang.org/core/num/struct.Wrapping.html |
261 | | type ValidityVariance = Covariant; |
262 | | |
263 | | #[inline(always)] |
264 | 0 | fn cast_into_inner(ptr: *mut Wrapping<T>) -> *mut T { |
265 | | // SAFETY: Per [1] (from comment above), `Wrapping<T>` has the same |
266 | | // layout as `T`. Thus, this cast preserves size. |
267 | | // |
268 | | // This cast trivially preserves provenance. |
269 | 0 | ptr.cast::<T>() |
270 | 0 | } |
271 | | |
272 | | #[inline(always)] |
273 | 0 | fn cast_from_inner(ptr: *mut T) -> *mut Wrapping<T> { |
274 | | // SAFETY: Per [1] (from comment above), `Wrapping<T>` has the same |
275 | | // layout as `T`. Thus, this cast preserves size. |
276 | | // |
277 | | // This cast trivially preserves provenance. |
278 | 0 | ptr.cast::<Wrapping<T>>() |
279 | 0 | } |
280 | | } |
281 | | |
// SAFETY:
// - Per [1], `UnsafeCell<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
//   `UnsafeCell<T>` has the same in-memory representation as its inner type
//   `T`.
unsafe impl<T: ?Sized, I: Invariants> TransparentWrapper<I> for UnsafeCell<T> {
    type Inner = T;

    // SAFETY: Since we set this to `Invariant`, we make no safety claims.
    type UnsafeCellVariance = Invariant;

    // SAFETY: Per [1] (from comment on impl), `UnsafeCell<T>` has the same
    // representation as `T`, and thus has the same alignment as `T`.
    type AlignmentVariance = Covariant;

    // SAFETY: Per [1], `UnsafeCell<T>` has the same bit validity as `T`.
    // Technically the term "representation" doesn't guarantee this, but the
    // subsequent sentence in the documentation makes it clear that this is the
    // intention.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
    //
    //   `UnsafeCell<T>` has the same in-memory representation as its inner type
    //   `T`. A consequence of this guarantee is that it is possible to convert
    //   between `T` and `UnsafeCell<T>`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut UnsafeCell<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `UnsafeCell<T>` has the same
        // representation as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        #[allow(clippy::as_conversions)]
        return ptr as *mut T;
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut UnsafeCell<T> {
        // SAFETY: Per [1] (from comment above), `UnsafeCell<T>` has the same
        // representation as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        #[allow(clippy::as_conversions)]
        return ptr as *mut UnsafeCell<T>;
    }
}
332 | | |
333 | | // SAFETY: `Unalign<T>` promises to have the same size as `T`. |
334 | | // |
335 | | // See inline comments for other safety justifications. |
336 | | unsafe impl<T, I: Invariants> TransparentWrapper<I> for Unalign<T> { |
337 | | type Inner = T; |
338 | | |
339 | | // SAFETY: `Unalign<T>` promises to have `UnsafeCell`s covering the same |
340 | | // byte ranges as `Inner = T`. |
341 | | type UnsafeCellVariance = Covariant; |
342 | | |
343 | | // SAFETY: Since `Unalign<T>` promises to have alignment 1 regardless of |
344 | | // `T`'s alignment. Thus, an aligned pointer to `Unalign<T>` is not |
345 | | // necessarily an aligned pointer to `T`. |
346 | | type AlignmentVariance = Invariant; |
347 | | |
348 | | // SAFETY: `Unalign<T>` promises to have the same validity as `T`. |
349 | | type ValidityVariance = Covariant; |
350 | | |
351 | | #[inline(always)] |
352 | 0 | fn cast_into_inner(ptr: *mut Unalign<T>) -> *mut T { |
353 | | // SAFETY: Per the safety comment on the impl block, `Unalign<T>` has |
354 | | // the size as `T`. Thus, this cast preserves size. |
355 | | // |
356 | | // This cast trivially preserves provenance. |
357 | 0 | ptr.cast::<T>() |
358 | 0 | } |
359 | | |
360 | | #[inline(always)] |
361 | 0 | fn cast_from_inner(ptr: *mut T) -> *mut Unalign<T> { |
362 | | // SAFETY: Per the safety comment on the impl block, `Unalign<T>` has |
363 | | // the size as `T`. Thus, this cast preserves size. |
364 | | // |
365 | | // This cast trivially preserves provenance. |
366 | 0 | ptr.cast::<Unalign<T>>() |
367 | 0 | } |
368 | | } |
369 | | |
/// Implements `TransparentWrapper` for an atomic type.
///
/// # Safety
///
/// The caller promises that `$atomic` is an atomic type whose native equivalent
/// is `$native`.
#[cfg(all(
    zerocopy_target_has_atomics_1_60_0,
    any(
        target_has_atomic = "8",
        target_has_atomic = "16",
        target_has_atomic = "32",
        target_has_atomic = "64",
        target_has_atomic = "ptr"
    )
))]
macro_rules! unsafe_impl_transparent_wrapper_for_atomic {
    // Base case: no more `$atomic [$native]` pairs to process.
    ($(#[$attr:meta])* $(,)?) => {};
    // Emits an impl for the first `$atomic [$native]` pair, then recurses on
    // the remaining pairs.
    ($(#[$attr:meta])* $atomic:ty [$native:ty], $($atomics:ty [$natives:ty]),* $(,)?) => {
        $(#[$attr])*
        // SAFETY: See safety comment in next match arm.
        unsafe impl<I: crate::invariant::Invariants> crate::util::TransparentWrapper<I> for $atomic {
            unsafe_impl_transparent_wrapper_for_atomic!(@inner $atomic [$native]);
        }
        unsafe_impl_transparent_wrapper_for_atomic!($(#[$attr])* $($atomics [$natives],)*);
    };
    // Variant for an atomic type which is generic over a type parameter
    // `$tyvar` (e.g. `AtomicPtr<T>`).
    ($(#[$attr:meta])* $tyvar:ident => $atomic:ty [$native:ty]) => {
        // We implement for `$atomic` and set `Inner = $native`. The caller has
        // promised that `$atomic` and `$native` are an atomic type and its
        // native counterpart, respectively. Per [1], `$atomic` and `$native`
        // have the same size.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        $(#[$attr])*
        unsafe impl<$tyvar, I: crate::invariant::Invariants> crate::util::TransparentWrapper<I> for $atomic {
            unsafe_impl_transparent_wrapper_for_atomic!(@inner $atomic [$native]);
        }
    };
    // Internal arm: emits the associated items shared by every impl above.
    (@inner $atomic:ty [$native:ty]) => {
        type Inner = UnsafeCell<$native>;

        // SAFETY: It is "obvious" that each atomic type contains a single
        // `UnsafeCell` that covers all bytes of the type, but we can also prove
        // it:
        // - Since `$atomic` provides an API which permits loading and storing
        //   values of type `$native` via a `&self` (shared) reference, *some*
        //   interior mutation must be happening, and interior mutation can only
        //   happen via `UnsafeCell`. Further, there must be enough bytes in
        //   `$atomic` covered by an `UnsafeCell` to hold every possible value
        //   of `$native`.
        // - Per [1], `$atomic` has the same size as `$native`. This on its own
        //   isn't enough: it would still be possible for `$atomic` to store
        //   `$native` using a compact representation (for `$native` types for
        //   which some bit patterns are illegal). However, this is ruled out by
        //   the fact that `$atomic` has the same bit validity as `$native` [1].
        //   Thus, we can conclude that every byte of `$atomic` must be covered
        //   by an `UnsafeCell`.
        //
        // Thus, every byte of `$atomic` is covered by an `UnsafeCell`, and we
        // set `type Inner = UnsafeCell<$native>`. Thus, `Self` and
        // `Self::Inner` have `UnsafeCell`s covering the same byte ranges.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        type UnsafeCellVariance = crate::util::Covariant;

        // SAFETY: No safety justification is required for an invariant
        // variance.
        type AlignmentVariance = crate::util::Invariant;

        // SAFETY: Per [1], all atomic types have the same bit validity as their
        // native counterparts. The caller has promised that `$atomic` and
        // `$native` are an atomic type and its native counterpart,
        // respectively.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        type ValidityVariance = crate::util::Covariant;

        #[inline(always)]
        fn cast_into_inner(ptr: *mut $atomic) -> *mut UnsafeCell<$native> {
            // SAFETY: Per [1] (from comment on impl block), `$atomic` has the
            // same size as `$native`. Thus, this cast preserves size.
            //
            // This cast trivially preserves provenance.
            ptr.cast::<UnsafeCell<$native>>()
        }

        #[inline(always)]
        fn cast_from_inner(ptr: *mut UnsafeCell<$native>) -> *mut $atomic {
            // SAFETY: Per [1] (from comment on impl block), `$atomic` has the
            // same size as `$native`. Thus, this cast preserves size.
            //
            // This cast trivially preserves provenance.
            ptr.cast::<$atomic>()
        }
    };
}
475 | | |
/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the
/// wrapped `T` is.
///
/// This is a zero-sized marker: it stores no `T`, so unconditionally
/// implementing `Send`/`Sync` cannot expose any `T` value across threads.
pub(crate) struct SendSyncPhantomData<T: ?Sized>(PhantomData<T>);

// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Send for SendSyncPhantomData<T> {}
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Sync for SendSyncPhantomData<T> {}
486 | | |
487 | | impl<T: ?Sized> Default for SendSyncPhantomData<T> { |
488 | 0 | fn default() -> SendSyncPhantomData<T> { |
489 | 0 | SendSyncPhantomData(PhantomData) |
490 | 0 | } Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<usb_util::types::ConfigDescriptor>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<usb_util::types::DescriptorHeader>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<usb_util::types::DeviceDescriptor>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<usb_util::types::EndpointDescriptor>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<usb_util::types::InterfaceDescriptor>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<usb_util::types::ConfigDescriptor> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<usb_util::types::DescriptorHeader> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<usb_util::types::DeviceDescriptor> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<usb_util::types::EndpointDescriptor> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<usb_util::types::InterfaceDescriptor> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<base::sys::linux::acpi_event::AcpiGenlEvent>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<base::sys::linux::netlink::GenlMsgHdr>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<base::sys::linux::netlink::NlAttr>> as 
core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<base::sys::linux::netlink::NlMsgHdr>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<base::sys::linux::acpi_event::AcpiGenlEvent> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<base::sys::linux::netlink::GenlMsgHdr> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<base::sys::linux::netlink::NlAttr> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<base::sys::linux::netlink::NlMsgHdr> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<_> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vfio_sys::vfio::vfio_iova_range>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vfio_sys::vfio::vfio_info_cap_header>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vfio_sys::vfio::vfio_iommu_type1_info_cap_iova_range_header>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserU64>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserInflight>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserGpuMapMsg>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserVringAddr>> as 
core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserVringState>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserShmemMapMsg>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserShmemUnmapMsg>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserExternalMapMsg>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserSingleMemoryRegion>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::DeviceStateTransferParameters>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<devices::virtio::fs::read_dir::LinuxDirent64>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vfio_sys::vfio::vfio_iova_range> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vfio_sys::vfio::vfio_info_cap_header> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vfio_sys::vfio::vfio_iommu_type1_info_cap_iova_range_header> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserU64> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserConfig> as core::default::Default>::default Unexecuted instantiation: 
<zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserMemory> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserInflight> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserGpuMapMsg> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserVringAddr> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserVringState> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserShmemMapMsg> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserShmemUnmapMsg> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserExternalMapMsg> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostUserSingleMemoryRegion> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::DeviceStateTransferParameters> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::fs::read_dir::LinuxDirent64> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::ModeSense6> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::ReportLuns> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::ModeSelect6> as core::default::Default>::default Unexecuted instantiation: 
<zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::WriteSame10> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::WriteSame16> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::TestUnitReady> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::ReadCapacity10> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::ReadCapacity16> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::SynchronizeCache10> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::ReportSupportedTMFs> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::Read6> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::Unmap> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::Read10> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::Inquiry> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<devices::virtio::scsi::commands::Write10> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<[vmm_vhost::message::VhostUserMemoryRegion]> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<fuse::sys::SecctxHeader>> as 
core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<fuse::sys::Secctx>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<fuse::sys::SecctxHeader> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<fuse::sys::Secctx> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostSharedMemoryRegion>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<vmm_vhost::message::VhostSharedMemoryRegion> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::CrossDomainInitLegacy>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainInit>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainHeader>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainReadWrite>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainSendReceive>> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainGetImageRequirements>> as core::default::Default>::default Unexecuted instantiation: 
<zerocopy::util::SendSyncPhantomData<rutabaga_gfx::cross_domain::CrossDomainInitLegacy> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainInit> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainHeader> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainReadWrite> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainSendReceive> as core::default::Default>::default Unexecuted instantiation: <zerocopy::util::SendSyncPhantomData<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainGetImageRequirements> as core::default::Default>::default |
491 | | } |
492 | | |
493 | | impl<T: ?Sized> PartialEq for SendSyncPhantomData<T> { |
494 | 0 | fn eq(&self, other: &Self) -> bool { |
495 | 0 | self.0.eq(&other.0) |
496 | 0 | } |
497 | | } |
498 | | |
499 | | impl<T: ?Sized> Eq for SendSyncPhantomData<T> {} |
500 | | |
/// A type whose value has a memory address.
pub(crate) trait AsAddress {
    /// Returns the address of `self` as a `usize`, discarding provenance.
    fn addr(self) -> usize;
}
504 | | |
505 | | impl<T: ?Sized> AsAddress for &T { |
506 | | #[inline(always)] |
507 | 4.77M | fn addr(self) -> usize { |
508 | 4.77M | let ptr: *const T = self; |
509 | 4.77M | AsAddress::addr(ptr) |
510 | 4.77M | } <&[u8] as zerocopy::util::AsAddress>::addr Line | Count | Source | 507 | 4.77M | fn addr(self) -> usize { | 508 | 4.77M | let ptr: *const T = self; | 509 | 4.77M | AsAddress::addr(ptr) | 510 | 4.77M | } |
Unexecuted instantiation: <&[u8] as zerocopy::util::AsAddress>::addr Unexecuted instantiation: <&_ as zerocopy::util::AsAddress>::addr Unexecuted instantiation: <&[u8] as zerocopy::util::AsAddress>::addr <&[u8] as zerocopy::util::AsAddress>::addr Line | Count | Source | 507 | 1.32k | fn addr(self) -> usize { | 508 | 1.32k | let ptr: *const T = self; | 509 | 1.32k | AsAddress::addr(ptr) | 510 | 1.32k | } |
Unexecuted instantiation: <&[u8] as zerocopy::util::AsAddress>::addr Unexecuted instantiation: <&[u8] as zerocopy::util::AsAddress>::addr |
511 | | } |
512 | | |
513 | | impl<T: ?Sized> AsAddress for &mut T { |
514 | | #[inline(always)] |
515 | 0 | fn addr(self) -> usize { |
516 | 0 | let ptr: *const T = self; |
517 | 0 | AsAddress::addr(ptr) |
518 | 0 | } |
519 | | } |
520 | | |
521 | | impl<T: ?Sized> AsAddress for NonNull<T> { |
522 | | #[inline(always)] |
523 | 0 | fn addr(self) -> usize { |
524 | 0 | AsAddress::addr(self.as_ptr()) |
525 | 0 | } |
526 | | } |
527 | | |
528 | | impl<T: ?Sized> AsAddress for *const T { |
529 | | #[inline(always)] |
530 | 4.77M | fn addr(self) -> usize { |
531 | | // TODO(#181), TODO(https://github.com/rust-lang/rust/issues/95228): Use |
532 | | // `.addr()` instead of `as usize` once it's stable, and get rid of this |
533 | | // `allow`. Currently, `as usize` is the only way to accomplish this. |
534 | | #[allow(clippy::as_conversions)] |
535 | | #[cfg_attr( |
536 | | __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, |
537 | | allow(lossy_provenance_casts) |
538 | | )] |
539 | 4.77M | return self.cast::<()>() as usize; |
540 | 4.77M | } <*const [u8] as zerocopy::util::AsAddress>::addr Line | Count | Source | 530 | 4.77M | fn addr(self) -> usize { | 531 | | // TODO(#181), TODO(https://github.com/rust-lang/rust/issues/95228): Use | 532 | | // `.addr()` instead of `as usize` once it's stable, and get rid of this | 533 | | // `allow`. Currently, `as usize` is the only way to accomplish this. | 534 | | #[allow(clippy::as_conversions)] | 535 | | #[cfg_attr( | 536 | | __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, | 537 | | allow(lossy_provenance_casts) | 538 | | )] | 539 | 4.77M | return self.cast::<()>() as usize; | 540 | 4.77M | } |
Unexecuted instantiation: <*const [u8] as zerocopy::util::AsAddress>::addr Unexecuted instantiation: <*const _ as zerocopy::util::AsAddress>::addr Unexecuted instantiation: <*const [u8] as zerocopy::util::AsAddress>::addr <*const [u8] as zerocopy::util::AsAddress>::addr Line | Count | Source | 530 | 1.32k | fn addr(self) -> usize { | 531 | | // TODO(#181), TODO(https://github.com/rust-lang/rust/issues/95228): Use | 532 | | // `.addr()` instead of `as usize` once it's stable, and get rid of this | 533 | | // `allow`. Currently, `as usize` is the only way to accomplish this. | 534 | | #[allow(clippy::as_conversions)] | 535 | | #[cfg_attr( | 536 | | __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, | 537 | | allow(lossy_provenance_casts) | 538 | | )] | 539 | 1.32k | return self.cast::<()>() as usize; | 540 | 1.32k | } |
Unexecuted instantiation: <*const [u8] as zerocopy::util::AsAddress>::addr Unexecuted instantiation: <*const [u8] as zerocopy::util::AsAddress>::addr |
541 | | } |
542 | | |
543 | | impl<T: ?Sized> AsAddress for *mut T { |
544 | | #[inline(always)] |
545 | 0 | fn addr(self) -> usize { |
546 | 0 | let ptr: *const T = self; |
547 | 0 | AsAddress::addr(ptr) |
548 | 0 | } Unexecuted instantiation: <*mut [u8] as zerocopy::util::AsAddress>::addr Unexecuted instantiation: <*mut _ as zerocopy::util::AsAddress>::addr Unexecuted instantiation: <*mut [u8] as zerocopy::util::AsAddress>::addr |
549 | | } |
550 | | |
551 | | /// Validates that `t` is aligned to `align_of::<U>()`. |
552 | | #[inline(always)] |
553 | 4.77M | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { |
554 | | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in |
555 | | // turn guarantees that this mod operation will not panic. |
556 | | #[allow(clippy::arithmetic_side_effects)] |
557 | 4.77M | let remainder = t.addr() % mem::align_of::<U>(); |
558 | 4.77M | if remainder == 0 { |
559 | 4.77M | Ok(()) |
560 | | } else { |
561 | | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. |
562 | | // That's only possible if `align_of::<U>() > 1`. |
563 | 0 | Err(unsafe { AlignmentError::new_unchecked(()) }) |
564 | | } |
565 | 4.77M | } zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<usb_util::types::ConfigDescriptor>> Line | Count | Source | 553 | 22.7k | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { | 554 | | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in | 555 | | // turn guarantees that this mod operation will not panic. | 556 | | #[allow(clippy::arithmetic_side_effects)] | 557 | 22.7k | let remainder = t.addr() % mem::align_of::<U>(); | 558 | 22.7k | if remainder == 0 { | 559 | 22.7k | Ok(()) | 560 | | } else { | 561 | | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. | 562 | | // That's only possible if `align_of::<U>() > 1`. | 563 | 0 | Err(unsafe { AlignmentError::new_unchecked(()) }) | 564 | | } | 565 | 22.7k | } |
zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<usb_util::types::DescriptorHeader>> Line | Count | Source | 553 | 3.50M | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { | 554 | | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in | 555 | | // turn guarantees that this mod operation will not panic. | 556 | | #[allow(clippy::arithmetic_side_effects)] | 557 | 3.50M | let remainder = t.addr() % mem::align_of::<U>(); | 558 | 3.50M | if remainder == 0 { | 559 | 3.50M | Ok(()) | 560 | | } else { | 561 | | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. | 562 | | // That's only possible if `align_of::<U>() > 1`. | 563 | 0 | Err(unsafe { AlignmentError::new_unchecked(()) }) | 564 | | } | 565 | 3.50M | } |
zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<usb_util::types::DeviceDescriptor>> Line | Count | Source | 553 | 1.24k | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { | 554 | | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in | 555 | | // turn guarantees that this mod operation will not panic. | 556 | | #[allow(clippy::arithmetic_side_effects)] | 557 | 1.24k | let remainder = t.addr() % mem::align_of::<U>(); | 558 | 1.24k | if remainder == 0 { | 559 | 1.24k | Ok(()) | 560 | | } else { | 561 | | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. | 562 | | // That's only possible if `align_of::<U>() > 1`. | 563 | 0 | Err(unsafe { AlignmentError::new_unchecked(()) }) | 564 | | } | 565 | 1.24k | } |
zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<usb_util::types::EndpointDescriptor>> Line | Count | Source | 553 | 906k | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { | 554 | | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in | 555 | | // turn guarantees that this mod operation will not panic. | 556 | | #[allow(clippy::arithmetic_side_effects)] | 557 | 906k | let remainder = t.addr() % mem::align_of::<U>(); | 558 | 906k | if remainder == 0 { | 559 | 906k | Ok(()) | 560 | | } else { | 561 | | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. | 562 | | // That's only possible if `align_of::<U>() > 1`. | 563 | 0 | Err(unsafe { AlignmentError::new_unchecked(()) }) | 564 | | } | 565 | 906k | } |
zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<usb_util::types::InterfaceDescriptor>> Line | Count | Source | 553 | 338k | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { | 554 | | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in | 555 | | // turn guarantees that this mod operation will not panic. | 556 | | #[allow(clippy::arithmetic_side_effects)] | 557 | 338k | let remainder = t.addr() % mem::align_of::<U>(); | 558 | 338k | if remainder == 0 { | 559 | 338k | Ok(()) | 560 | | } else { | 561 | | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. | 562 | | // That's only possible if `align_of::<U>() > 1`. | 563 | 0 | Err(unsafe { AlignmentError::new_unchecked(()) }) | 564 | | } | 565 | 338k | } |
Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<base::sys::linux::acpi_event::AcpiGenlEvent>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<base::sys::linux::netlink::GenlMsgHdr>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<base::sys::linux::netlink::NlAttr>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<base::sys::linux::netlink::NlMsgHdr>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<_, _> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vfio_sys::vfio::vfio_iova_range>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vfio_sys::vfio::vfio_info_cap_header>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vfio_sys::vfio::vfio_iommu_type1_info_cap_iova_range_header>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserU64>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserInflight>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserGpuMapMsg>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserVringAddr>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserVringState>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserShmemMapMsg>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], 
zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserShmemUnmapMsg>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserExternalMapMsg>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserSingleMemoryRegion>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::DeviceStateTransferParameters>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<devices::virtio::fs::read_dir::LinuxDirent64>> zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<fuse::sys::SecctxHeader>> Line | Count | Source | 553 | 593 | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { | 554 | | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in | 555 | | // turn guarantees that this mod operation will not panic. | 556 | | #[allow(clippy::arithmetic_side_effects)] | 557 | 593 | let remainder = t.addr() % mem::align_of::<U>(); | 558 | 593 | if remainder == 0 { | 559 | 593 | Ok(()) | 560 | | } else { | 561 | | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. | 562 | | // That's only possible if `align_of::<U>() > 1`. | 563 | 0 | Err(unsafe { AlignmentError::new_unchecked(()) }) | 564 | | } | 565 | 593 | } |
zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<fuse::sys::Secctx>> Line | Count | Source | 553 | 732 | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { | 554 | | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in | 555 | | // turn guarantees that this mod operation will not panic. | 556 | | #[allow(clippy::arithmetic_side_effects)] | 557 | 732 | let remainder = t.addr() % mem::align_of::<U>(); | 558 | 732 | if remainder == 0 { | 559 | 732 | Ok(()) | 560 | | } else { | 561 | | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. | 562 | | // That's only possible if `align_of::<U>() > 1`. | 563 | 0 | Err(unsafe { AlignmentError::new_unchecked(()) }) | 564 | | } | 565 | 732 | } |
Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<vmm_vhost::message::VhostSharedMemoryRegion>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::CrossDomainInitLegacy>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainInit>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainHeader>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainReadWrite>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainSendReceive>> Unexecuted instantiation: zerocopy::util::validate_aligned_to::<&[u8], zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainGetImageRequirements>> |
566 | | |
/// Returns the number of padding bytes needed to raise `len` to the next
/// multiple of `align`.
///
/// This function assumes that `align` is a power of two; there are no
/// guarantees on the answer it gives if this is not the case.
pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
    // `align.get() >= 1`, so this subtraction cannot underflow.
    #[allow(clippy::arithmetic_side_effects)]
    let mask = align.get() - 1;

    // Abstractly we want `(align - (len % align)) % align`. Because `align`
    // is a power of two, `len % align == len & mask`, and in two's complement
    // the negation `len.wrapping_neg()` equals `!(len - 1)`, whose low bits
    // are exactly `align - (len & mask)` reduced mod `align`. So a single
    // negate-and-mask computes the padding, including the `len % align == 0`
    // case (where the low bits of `-len` are all zero, giving 0 padding).
    //
    // The equivalence with the naive formula can be verified exhaustively for
    // small bit widths, or with an SMT solver over bitvectors (checking that
    // the two definitions agree for every `len` whenever `align` is a power
    // of two).
    len.wrapping_neg() & mask
}
624 | | |
/// Rounds `n` down to the largest value `m` such that `m <= n` and `m % align
/// == 0`.
///
/// # Panics
///
/// May panic if `align` is not a power of two. Even if it doesn't panic in
/// this case, it will produce nonsense results.
#[inline(always)]
pub(crate) const fn round_down_to_next_multiple_of_alignment(
    n: usize,
    align: NonZeroUsize,
) -> usize {
    let a = align.get();
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
    debug_assert!(a.is_power_of_two());

    // `a >= 1`, so `a - 1` cannot underflow. Clearing the low `log2(a)` bits
    // of `n` rounds it down to a multiple of `a`.
    #[allow(clippy::arithmetic_side_effects)]
    let rounded = n & !(a - 1);
    rounded
}
646 | | |
/// Returns the larger of `a` and `b` (returning `a` on ties).
pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    // `Ord::max` is not usable in a `const fn`, so compare manually.
    if b.get() > a.get() {
        b
    } else {
        a
    }
}
654 | | |
/// Returns the smaller of `a` and `b` (returning `a` on ties).
pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    // `Ord::min` is not usable in a `const fn`, so compare manually.
    if b.get() < a.get() {
        b
    } else {
        a
    }
}
662 | | |
/// Copies `src` into the prefix of `dst`.
///
/// # Safety
///
/// The caller guarantees that `src.len() <= dst.len()`.
#[inline(always)]
pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) {
    let count = src.len();
    debug_assert!(count <= dst.len());
    // SAFETY: This call satisfies the contract of `copy_nonoverlapping`:
    // - `src.as_ptr()` is valid for reads of `count` bytes, since it points
    //   to a live slice of exactly that length;
    // - `dst.as_mut_ptr()` is valid for writes of `count` bytes, because the
    //   caller has promised that `src.len() <= dst.len()`;
    // - `u8` pointers are trivially properly aligned;
    // - the two regions cannot overlap, since `dst` is derived from an
    //   exclusive reference.
    unsafe { core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), count) }
}
685 | | |
/// Unsafely transmutes the given `src` into a type `Dst`.
///
/// # Safety
///
/// The value `src` must be a valid instance of `Dst`.
#[inline(always)]
pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst {
    // Compile-time guard: transmuting between differently-sized types is
    // never sound.
    static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>());

    // A `#[repr(C)]` union whose two fields both live at offset 0; writing
    // one field and reading the other performs the transmute. A union is used
    // (rather than `mem::transmute`) so this works in a `const fn` with
    // generic type parameters.
    #[repr(C)]
    union Transmute<Src, Dst> {
        src: ManuallyDrop<Src>,
        dst: ManuallyDrop<Dst>,
    }

    // SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst`
    // fields both start at the same offset and the types of those fields are
    // transparent wrappers around `Src` and `Dst` [1]. Consequently,
    // initializing `Transmute` with `src` and then reading out `dst` is
    // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src`
    // to `Dst` is valid because — by contract on the caller — `src` is a valid
    // instance of `Dst`.
    //
    // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html:
    //
    //     `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //     validity as `T`, and is subject to the same layout optimizations as
    //     `T`.
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields:
    //
    //     Effectively, writing to and then reading from a union with the C
    //     representation is analogous to a transmute from the type used for
    //     writing to the type used for reading.
    unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) }
}
Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<base::sys::linux::netlink::GenlMsgHdr>, base::sys::linux::netlink::GenlMsgHdr> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<base::sys::linux::netlink::NlAttr>, base::sys::linux::netlink::NlAttr> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<base::sys::linux::netlink::NlMsgHdr>, base::sys::linux::netlink::NlMsgHdr> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<base::sys::linux::acpi_event::AcpiGenlEvent>, base::sys::linux::acpi_event::AcpiGenlEvent> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<_, _> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserU64>, vmm_vhost::message::VhostUserU64> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserInflight>, vmm_vhost::message::VhostUserInflight> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserGpuMapMsg>, vmm_vhost::message::VhostUserGpuMapMsg> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserVringAddr>, vmm_vhost::message::VhostUserVringAddr> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserVringState>, vmm_vhost::message::VhostUserVringState> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserShmemMapMsg>, vmm_vhost::message::VhostUserShmemMapMsg> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserShmemUnmapMsg>, vmm_vhost::message::VhostUserShmemUnmapMsg> Unexecuted instantiation: 
zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserExternalMapMsg>, vmm_vhost::message::VhostUserExternalMapMsg> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostUserSingleMemoryRegion>, vmm_vhost::message::VhostUserSingleMemoryRegion> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::DeviceStateTransferParameters>, vmm_vhost::message::DeviceStateTransferParameters> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<devices::virtio::fs::read_dir::LinuxDirent64>, devices::virtio::fs::read_dir::LinuxDirent64> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vfio_sys::vfio::vfio_iova_range>, vfio_sys::vfio::vfio_iova_range> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vfio_sys::vfio::vfio_info_cap_header>, vfio_sys::vfio::vfio_info_cap_header> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vfio_sys::vfio::vfio_iommu_type1_info_cap_iova_range_header>, vfio_sys::vfio::vfio_iommu_type1_info_cap_iova_range_header> zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<fuse::sys::SecctxHeader>, fuse::sys::SecctxHeader> Line | Count | Source | 692 | 593 | pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst { | 693 | 593 | static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>()); | 694 | | | 695 | | #[repr(C)] | 696 | | union Transmute<Src, Dst> { | 697 | | src: ManuallyDrop<Src>, | 698 | | dst: ManuallyDrop<Dst>, | 699 | | } | 700 | | | 701 | | // SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst` | 702 | | // fields both start at the same offset and the types of those fields are | 703 | | // transparent wrappers around `Src` and `Dst` [1]. 
Consequently, | 704 | | // initializng `Transmute` with with `src` and then reading out `dst` is | 705 | | // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src` | 706 | | // to `Dst` is valid because — by contract on the caller — `src` is a valid | 707 | | // instance of `Dst`. | 708 | | // | 709 | | // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html: | 710 | | // | 711 | | // `ManuallyDrop<T>` is guaranteed to have the same layout and bit | 712 | | // validity as `T`, and is subject to the same layout optimizations as | 713 | | // `T`. | 714 | | // | 715 | | // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields: | 716 | | // | 717 | | // Effectively, writing to and then reading from a union with the C | 718 | | // representation is analogous to a transmute from the type used for | 719 | | // writing to the type used for reading. | 720 | 593 | unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) } | 721 | 593 | } |
zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<fuse::sys::Secctx>, fuse::sys::Secctx> Line | Count | Source | 692 | 732 | pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst { | 693 | 732 | static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>()); | 694 | | | 695 | | #[repr(C)] | 696 | | union Transmute<Src, Dst> { | 697 | | src: ManuallyDrop<Src>, | 698 | | dst: ManuallyDrop<Dst>, | 699 | | } | 700 | | | 701 | | // SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst` | 702 | | // fields both start at the same offset and the types of those fields are | 703 | | // transparent wrappers around `Src` and `Dst` [1]. Consequently, | 704 | | // initializng `Transmute` with with `src` and then reading out `dst` is | 705 | | // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src` | 706 | | // to `Dst` is valid because — by contract on the caller — `src` is a valid | 707 | | // instance of `Dst`. | 708 | | // | 709 | | // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html: | 710 | | // | 711 | | // `ManuallyDrop<T>` is guaranteed to have the same layout and bit | 712 | | // validity as `T`, and is subject to the same layout optimizations as | 713 | | // `T`. | 714 | | // | 715 | | // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields: | 716 | | // | 717 | | // Effectively, writing to and then reading from a union with the C | 718 | | // representation is analogous to a transmute from the type used for | 719 | | // writing to the type used for reading. | 720 | 732 | unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) } | 721 | 732 | } |
Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<vmm_vhost::message::VhostSharedMemoryRegion>, vmm_vhost::message::VhostSharedMemoryRegion> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainInit>, rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainInit> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainHeader>, rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainHeader> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainReadWrite>, rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainReadWrite> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainSendReceive>, rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainSendReceive> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainGetImageRequirements>, rutabaga_gfx::cross_domain::cross_domain_protocol::CrossDomainGetImageRequirements> Unexecuted instantiation: zerocopy::util::transmute_unchecked::<zerocopy::wrappers::Unalign<rutabaga_gfx::cross_domain::CrossDomainInitLegacy>, rutabaga_gfx::cross_domain::CrossDomainInitLegacy> |
722 | | |
/// Uses `allocate` to create a `Box<T>`.
///
/// # Errors
///
/// Returns an error on allocation failure. Allocation failure is guaranteed
/// never to cause a panic or an abort.
///
/// # Safety
///
/// `allocate` must be either `alloc::alloc::alloc` or
/// `alloc::alloc::alloc_zeroed`. The referent of the box returned by `new_box`
/// has the same bit-validity as the referent of the pointer returned by the
/// given `allocate` and sufficient size to store `T` with `meta`.
#[must_use = "has no side effects (other than allocation)"]
#[cfg(feature = "alloc")]
#[inline]
pub(crate) unsafe fn new_box<T>(
    meta: T::PointerMetadata,
    allocate: unsafe fn(core::alloc::Layout) -> *mut u8,
) -> Result<alloc::boxed::Box<T>, crate::error::AllocError>
where
    T: ?Sized + crate::KnownLayout,
{
    use crate::error::AllocError;
    use crate::PointerMetadata;
    use core::alloc::Layout;

    // Total size in bytes of a `T` carrying metadata `meta`. A `None` return
    // indicates the size computation failed (e.g. overflowed), which we
    // surface to the caller as an allocation error.
    let size = match meta.size_for_metadata(T::LAYOUT) {
        Some(size) => size,
        None => return Err(AllocError),
    };

    let align = T::LAYOUT.align.get();
    // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a bug in
    // which sufficiently-large allocations (those which, when rounded up to the
    // alignment, overflow `isize`) are not rejected, which can cause undefined
    // behavior. See #64 for details.
    //
    // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
    #[allow(clippy::as_conversions)]
    let max_alloc = (isize::MAX as usize).saturating_sub(align);
    if size > max_alloc {
        return Err(AllocError);
    }

    // TODO(https://github.com/rust-lang/rust/issues/55724): Use
    // `Layout::repeat` once it's stabilized.
    let layout = Layout::from_size_align(size, align).or(Err(AllocError))?;

    let ptr = if layout.size() != 0 {
        // SAFETY: By contract on the caller, `allocate` is either
        // `alloc::alloc::alloc` or `alloc::alloc::alloc_zeroed`. The above
        // check ensures their shared safety precondition: that the supplied
        // layout is not zero-sized type [1].
        //
        // [1] Per https://doc.rust-lang.org/stable/std/alloc/trait.GlobalAlloc.html#tymethod.alloc:
        //
        //   This function is unsafe because undefined behavior can result if
        //   the caller does not ensure that layout has non-zero size.
        let ptr = unsafe { allocate(layout) };
        // A null return from the allocator signals allocation failure, which
        // we convert to `AllocError` rather than panicking or aborting.
        match NonNull::new(ptr) {
            Some(ptr) => ptr,
            None => return Err(AllocError),
        }
    } else {
        // NOTE(review): this re-binding shadows the identical `align` computed
        // above; it is redundant but harmless.
        let align = T::LAYOUT.align.get();
        // We use `transmute` instead of an `as` cast since Miri (with strict
        // provenance enabled) notices and complains that an `as` cast creates a
        // pointer with no provenance. Miri isn't smart enough to realize that
        // we're only executing this branch when we're constructing a zero-sized
        // `Box`, which doesn't require provenance.
        //
        // SAFETY: any initialized bit sequence is a bit-valid `*mut u8`. All
        // bits of a `usize` are initialized.
        #[allow(clippy::useless_transmute)]
        let dangling = unsafe { mem::transmute::<usize, *mut u8>(align) };
        // SAFETY: `dangling` is constructed from `T::LAYOUT.align`, which is a
        // `NonZeroUsize`, which is guaranteed to be non-zero.
        //
        // `Box<[T]>` does not allocate when `T` is zero-sized or when `len` is
        // zero, but it does require a non-null dangling pointer for its
        // allocation.
        //
        // TODO(https://github.com/rust-lang/rust/issues/95228): Use
        // `std::ptr::without_provenance` once it's stable. That may optimize
        // better. As written, Rust may assume that this consumes "exposed"
        // provenance, and thus Rust may have to assume that this may consume
        // provenance from any pointer whose provenance has been exposed.
        unsafe { NonNull::new_unchecked(dangling) }
    };

    // Attach the pointer metadata (e.g. a slice length) to produce a raw
    // wide (or thin) pointer to `T`.
    let ptr = T::raw_from_ptr_len(ptr, meta);

    // TODO(#429): Add a "SAFETY" comment and remove this `allow`. Make sure to
    // include a justification that `ptr.as_ptr()` is validly-aligned in the ZST
    // case (in which we manually construct a dangling pointer) and to justify
    // why `Box` is safe to drop (it's because `allocate` uses the system
    // allocator).
    #[allow(clippy::undocumented_unsafe_blocks)]
    Ok(unsafe { alloc::boxed::Box::from_raw(ptr.as_ptr()) })
}
824 | | |
/// Since we support multiple versions of Rust, there are often features which
/// have been stabilized in the most recent stable release which do not yet
/// exist (stably) on our MSRV. This module provides polyfills for those
/// features so that we can write more "modern" code, and just remove the
/// polyfill once our MSRV supports the corresponding feature. Without this,
/// we'd have to write worse/more verbose code and leave TODO comments sprinkled
/// throughout the codebase to update to the new pattern once it's stabilized.
///
/// Each trait is imported as `_` at the crate root; each polyfill should "just
/// work" at usage sites.
pub(crate) mod polyfills {
    use core::ptr::{self, NonNull};

    // A polyfill for `NonNull::slice_from_raw_parts` that we can use before our
    // MSRV is 1.70, when that function was stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent
    // method rather than to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is 1.70, remove this.
    #[allow(unused)]
    pub(crate) trait NonNullExt<T> {
        /// Creates a non-null raw slice from a thin data pointer and a length.
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
    }

    impl<T> NonNullExt<T> for NonNull<T> {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr(coverage_nightly, coverage(off))]
        #[inline(always)]
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
            let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
            // SAFETY: `ptr` is converted from `data`, which is non-null.
            unsafe { NonNull::new_unchecked(ptr) }
        }
    }

    // A polyfill for `Self::unchecked_sub` that we can use until methods like
    // `usize::unchecked_sub` is stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `x.unchecked_sub()` resolves to the inherent method
    // rather than to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is high enough, remove this.
    #[allow(unused)]
    pub(crate) trait NumExt {
        /// Subtract without checking for underflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the subtraction will not underflow.
        unsafe fn unchecked_sub(self, rhs: Self) -> Self;
    }

    impl NumExt for usize {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr(coverage_nightly, coverage(off))]
        #[inline(always)]
        unsafe fn unchecked_sub(self, rhs: usize) -> usize {
            match self.checked_sub(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the subtraction will not
                    // underflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }
    }
}
900 | | |
#[cfg(test)]
pub(crate) mod testutil {
    use crate::*;

    /// A `T` which is aligned to at least `align_of::<A>()`.
    #[derive(Default)]
    pub(crate) struct Align<T, A> {
        pub(crate) t: T,
        // Zero-length array of `A`: contributes `A`'s alignment to the outer
        // struct without storing any `A` values.
        _a: [A; 0],
    }

    impl<T: Default, A> Align<T, A> {
        /// Resets the wrapped value to `T::default()`.
        pub(crate) fn set_default(&mut self) {
            self.t = T::default();
        }
    }

    impl<T, A> Align<T, A> {
        /// Constructs an `Align` wrapping `t`.
        pub(crate) const fn new(t: T) -> Align<T, A> {
            Align { t, _a: [] }
        }
    }

    /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
    ///
    /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
    /// for this type to work properly.
    #[repr(C)]
    pub(crate) struct ForceUnalign<T: Unaligned, A> {
        // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
        // placed at the minimum offset that guarantees its alignment. If
        // `align_of::<T>() < align_of::<A>()`, then that offset will be
        // guaranteed *not* to satisfy `align_of::<A>()`.
        //
        // Note that we need `T: Unaligned` in order to guarantee that there is
        // no padding between `_u` and `t`.
        _u: u8,
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Unaligned, A> ForceUnalign<T, A> {
        /// Constructs a `ForceUnalign` wrapping `t`.
        pub(crate) fn new(t: T) -> ForceUnalign<T, A> {
            ForceUnalign { _u: 0, t, _a: [] }
        }
    }
    // A `u64` with alignment 8.
    //
    // Though `u64` has alignment 8 on some platforms, it's not guaranteed. By
    // contrast, `AU64` is guaranteed to have alignment 8 on all platforms.
    #[derive(
        KnownLayout,
        Immutable,
        FromBytes,
        IntoBytes,
        Eq,
        PartialEq,
        Ord,
        PartialOrd,
        Default,
        Debug,
        Copy,
        Clone,
    )]
    #[repr(C, align(8))]
    pub(crate) struct AU64(pub(crate) u64);

    impl AU64 {
        // Converts this `AU64` to bytes using this platform's endianness.
        pub(crate) fn to_bytes(self) -> [u8; 8] {
            crate::transmute!(self)
        }
    }

    impl Display for AU64 {
        #[cfg_attr(coverage_nightly, coverage(off))]
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            Display::fmt(&self.0, f)
        }
    }

    #[derive(Immutable, FromBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone)]
    #[repr(C)]
    pub(crate) struct Nested<T, U: ?Sized> {
        _t: T,
        _u: U,
    }
}
989 | | |
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_down_to_next_multiple_of_alignment() {
        // Reference implementation: truncating integer division followed by
        // multiplication yields the largest multiple of `align` that is <= `n`.
        fn alt_impl(n: usize, align: NonZeroUsize) -> usize {
            let mul = n / align.get();
            mul * align.get()
        }

        // Exhaustively compare against the reference for small power-of-two
        // alignments and all `n` in `0..256`.
        for align in [1, 2, 4, 8, 16] {
            for n in 0..256 {
                let align = NonZeroUsize::new(align).unwrap();
                let want = alt_impl(n, align);
                let got = round_down_to_next_multiple_of_alignment(n, align);
                assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
            }
        }
    }

    // Expects a panic for `align = 3` — presumably because 3 is not a power of
    // two; confirm against the assertion inside
    // `round_down_to_next_multiple_of_alignment`.
    #[rustversion::since(1.57.0)]
    #[test]
    #[should_panic]
    fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() {
        round_down_to_next_multiple_of_alignment(0, NonZeroUsize::new(3).unwrap());
    }
}
1018 | | |
#[cfg(kani)]
mod proofs {
    use super::*;

    #[kani::proof]
    fn prove_round_down_to_next_multiple_of_alignment() {
        // Model: truncating division then multiplication rounds `n` down to
        // the nearest multiple of `align`.
        fn model_impl(n: usize, align: NonZeroUsize) -> usize {
            assert!(align.get().is_power_of_two());
            let mul = n / align.get();
            mul * align.get()
        }

        let align: NonZeroUsize = kani::any();
        kani::assume(align.get().is_power_of_two());
        let n: usize = kani::any();

        let expected = model_impl(n, align);
        let actual = round_down_to_next_multiple_of_alignment(n, align);
        assert_eq!(expected, actual, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
    }

    // Restricted to nightly since we use the unstable `usize::next_multiple_of`
    // in our model implementation.
    #[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)]
    #[kani::proof]
    fn prove_padding_needed_for() {
        // Model: the padding is the distance from `len` up to the next
        // multiple of `align`.
        fn model_impl(len: usize, align: NonZeroUsize) -> usize {
            let padded = len.next_multiple_of(align.get());
            let padding = padded - len;
            padding
        }

        let align: NonZeroUsize = kani::any();
        kani::assume(align.get().is_power_of_two());
        let len: usize = kani::any();
        // Constrain `len` to valid Rust lengths, since our model implementation
        // isn't robust to overflow.
        kani::assume(len <= isize::MAX as usize);
        kani::assume(align.get() < 1 << 29);

        let expected = model_impl(len, align);
        let actual = padding_needed_for(len, align);
        assert_eq!(expected, actual, "padding_needed_for({}, {})", len, align);

        // Sanity checks on the padded length: it is a multiple of `align`, and
        // padding never decreases the number of whole `align`-sized chunks.
        let padded_len = actual + len;
        assert_eq!(padded_len % align, 0);
        assert!(padded_len / align >= len / align);
    }
}