/rust/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.33/src/util/mod.rs
Line | Count | Source |
1 | | // Copyright 2023 The Fuchsia Authors |
2 | | // |
3 | | // Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0 |
4 | | // <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT |
5 | | // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option. |
6 | | // This file may not be copied, modified, or distributed except according to |
7 | | // those terms. |
8 | | |
9 | | #[macro_use] |
10 | | pub(crate) mod macros; |
11 | | |
12 | | #[doc(hidden)] |
13 | | pub mod macro_util; |
14 | | |
15 | | use core::{ |
16 | | marker::PhantomData, |
17 | | mem::{self, ManuallyDrop}, |
18 | | num::NonZeroUsize, |
19 | | ptr::NonNull, |
20 | | }; |
21 | | |
22 | | use super::*; |
23 | | |
/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the
/// wrapped `T` is.
///
/// Useful as a zero-sized marker field in types which must be `Send` and
/// `Sync` irrespective of `T`; soundness is argued at the `unsafe impl` sites.
pub(crate) struct SendSyncPhantomData<T: ?Sized>(PhantomData<T>);
27 | | |
// `SendSyncPhantomData` is a zero-sized marker: it stores no `T` and exposes
// no operations on `T`, so sending or sharing it across threads cannot
// introduce a data race.
//
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Send for SendSyncPhantomData<T> {}
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Sync for SendSyncPhantomData<T> {}
34 | | |
35 | | impl<T: ?Sized> Default for SendSyncPhantomData<T> { |
36 | 0 | fn default() -> SendSyncPhantomData<T> { |
37 | 0 | SendSyncPhantomData(PhantomData) |
38 | 0 | } |
39 | | } |
40 | | |
41 | | impl<T: ?Sized> PartialEq for SendSyncPhantomData<T> { |
42 | 0 | fn eq(&self, _other: &Self) -> bool { |
43 | 0 | true |
44 | 0 | } |
45 | | } |
46 | | |
// `eq` unconditionally returns `true`, which is trivially reflexive, so `Eq`
// is sound.
impl<T: ?Sized> Eq for SendSyncPhantomData<T> {}
48 | | |
49 | | impl<T: ?Sized> Clone for SendSyncPhantomData<T> { |
50 | 0 | fn clone(&self) -> Self { |
51 | 0 | SendSyncPhantomData(PhantomData) |
52 | 0 | } |
53 | | } |
54 | | |
// Only compiled under Miri; the symbol is resolved by the Miri interpreter
// itself rather than by any Rust source.
#[cfg(miri)]
extern "Rust" {
    /// Miri-provided intrinsic that marks the pointer `ptr` as aligned to
    /// `align`.
    ///
    /// This intrinsic is used to inform Miri's symbolic alignment checker that
    /// a pointer is aligned, even if Miri cannot statically deduce that fact.
    /// This is often required when performing raw pointer arithmetic or casts
    /// where the alignment is guaranteed by runtime checks or invariants that
    /// Miri is not aware of.
    pub(crate) fn miri_promise_symbolic_alignment(ptr: *const (), align: usize);
}
67 | | |
/// A pointer-like value whose memory address can be extracted.
///
/// Implemented for references, raw pointers, and `NonNull`. For wide pointers,
/// only the address portion is returned — the metadata is discarded (see the
/// `*const T` impl, which casts to `*const ()` first).
pub(crate) trait AsAddress {
    /// Returns the address of `self` as a `usize`.
    fn addr(self) -> usize;
}
71 | | |
72 | | impl<T: ?Sized> AsAddress for &T { |
73 | | #[inline(always)] |
74 | 0 | fn addr(self) -> usize { |
75 | 0 | let ptr: *const T = self; |
76 | 0 | AsAddress::addr(ptr) |
77 | 0 | } |
78 | | } |
79 | | |
80 | | impl<T: ?Sized> AsAddress for &mut T { |
81 | | #[inline(always)] |
82 | 0 | fn addr(self) -> usize { |
83 | 0 | let ptr: *const T = self; |
84 | 0 | AsAddress::addr(ptr) |
85 | 0 | } |
86 | | } |
87 | | |
88 | | impl<T: ?Sized> AsAddress for NonNull<T> { |
89 | | #[inline(always)] |
90 | 0 | fn addr(self) -> usize { |
91 | 0 | AsAddress::addr(self.as_ptr()) |
92 | 0 | } |
93 | | } |
94 | | |
impl<T: ?Sized> AsAddress for *const T {
    #[inline(always)]
    fn addr(self) -> usize {
        // FIXME(#181), FIXME(https://github.com/rust-lang/rust/issues/95228):
        // Use `.addr()` instead of `as usize` once it's stable, and get rid of
        // this `allow`. Currently, `as usize` is the only way to accomplish
        // this.
        #[allow(clippy::as_conversions)]
        #[cfg_attr(
            __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
            allow(lossy_provenance_casts)
        )]
        // The cast to `*const ()` discards any wide-pointer metadata so that
        // only the address component is converted to `usize`.
        return self.cast::<()>() as usize;
    }
}
110 | | |
111 | | impl<T: ?Sized> AsAddress for *mut T { |
112 | | #[inline(always)] |
113 | 0 | fn addr(self) -> usize { |
114 | 0 | let ptr: *const T = self; |
115 | 0 | AsAddress::addr(ptr) |
116 | 0 | } |
117 | | } |
118 | | |
/// Validates that `t` is aligned to `align_of::<U>()`.
///
/// Returns `Ok(())` if the address of `t` is a multiple of `U`'s alignment,
/// and an [`AlignmentError`] otherwise.
#[inline(always)]
pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> {
    // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in
    // turn guarantees that this mod operation will not panic.
    #[allow(clippy::arithmetic_side_effects)]
    let remainder = t.addr() % mem::align_of::<U>();
    if remainder == 0 {
        Ok(())
    } else {
        // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`.
        // That's only possible if `align_of::<U>() > 1`.
        Err(unsafe { AlignmentError::new_unchecked(()) })
    }
}
134 | | |
/// Returns the bytes needed to pad `len` to the next multiple of `align`.
///
/// This function assumes that align is a power of two; there are no guarantees
/// on the answer it gives if this is not the case.
#[cfg_attr(
    kani,
    kani::requires(len <= isize::MAX as usize),
    kani::requires(align.is_power_of_two()),
    kani::ensures(|&p| (len + p) % align.get() == 0),
    // Ensures that we add the minimum required padding.
    kani::ensures(|&p| p < align.get()),
)]
pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
    #[cfg(kani)]
    #[kani::proof_for_contract(padding_needed_for)]
    fn proof() {
        padding_needed_for(kani::any(), kani::any());
    }

    // Conceptually, the answer is `(align - (len % align)) % align`. Because
    // `align` is assumed to be a power of two, `x % align == x & (align - 1)`,
    // so the low-order bit mask below plays the role of `% align`.
    //
    // `align` is non-zero, so this subtraction cannot underflow.
    #[allow(clippy::arithmetic_side_effects)]
    let low_bits = align.get() - 1;

    // For any `x`, `(x & low_bits) + (!x & low_bits) == low_bits`. Taking
    // `x = len - 1` (computed with wrapping arithmetic, which is harmless
    // here), we get:
    //
    //   !(len - 1) & low_bits == low_bits - ((len - 1) & low_bits)
    //                         == (align - 1) - ((len - 1) % align)
    //
    // Case analysis:
    // - If `len % align == 0`, then `(len - 1) % align == align - 1`, so the
    //   result is `0`, as required.
    // - Otherwise, `(len - 1) % align == (len % align) - 1`, so the result is
    //   `align - (len % align)` — exactly the distance to the next multiple
    //   of `align`.
    //
    // The equivalence of this bit trick to the naive modulo-based
    // implementation has also been machine-checked with an SMT solver over
    // 32- and 64-bit bitvectors.
    !(len.wrapping_sub(1)) & low_bits
}
208 | | |
/// Rounds `n` down to the largest value `m` such that `m <= n` and `m % align
/// == 0`.
///
/// # Panics
///
/// May panic if `align` is not a power of two. Even if it doesn't panic in this
/// case, it will produce nonsense results.
#[inline(always)]
#[cfg_attr(
    kani,
    kani::requires(align.is_power_of_two()),
    kani::ensures(|&m| m <= n && m % align.get() == 0),
    // Guarantees that `m` is the *largest* value such that `m % align == 0`.
    kani::ensures(|&m| {
        // If this `checked_add` fails, then the next multiple would wrap
        // around, which trivially satisfies the "largest value" requirement.
        m.checked_add(align.get()).map(|next_mul| next_mul > n).unwrap_or(true)
    })
)]
pub(crate) const fn round_down_to_next_multiple_of_alignment(
    n: usize,
    align: NonZeroUsize,
) -> usize {
    #[cfg(kani)]
    #[kani::proof_for_contract(round_down_to_next_multiple_of_alignment)]
    fn proof() {
        round_down_to_next_multiple_of_alignment(kani::any(), kani::any());
    }

    let alignment = align.get();
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    debug_assert!(alignment.is_power_of_two());

    // `alignment` is a power of two, so `alignment - 1` is a mask covering
    // exactly its low-order bits; clearing those bits from `n` rounds it down
    // to the nearest multiple of `alignment`. The subtraction cannot underflow
    // because `align.get() >= 1`.
    #[allow(clippy::arithmetic_side_effects)]
    let low_bits = alignment - 1;
    n & !low_bits
}
247 | | |
/// Returns the larger of `a` and `b` (usable in `const` contexts).
///
/// When the two are equal, `a` is returned.
pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if b.get() > a.get() {
        b
    } else {
        a
    }
}
255 | | |
/// Returns the smaller of `a` and `b` (usable in `const` contexts).
///
/// When the two are equal, `a` is returned.
pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if b.get() < a.get() {
        b
    } else {
        a
    }
}
263 | | |
/// Copies `src` into the prefix of `dst`.
///
/// Exactly `src.len()` bytes are written to the beginning of `dst`; any
/// trailing bytes of `dst` are left unmodified.
///
/// # Safety
///
/// The caller guarantees that `src.len() <= dst.len()`.
#[inline(always)]
pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) {
    debug_assert!(src.len() <= dst.len());
    // SAFETY: This invocation satisfies the safety contract of
    // copy_nonoverlapping [1]:
    // - `src.as_ptr()` is trivially valid for reads of `src.len()` bytes
    // - `dst.as_ptr()` is valid for writes of `src.len()` bytes, because the
    //   caller has promised that `src.len() <= dst.len()`
    // - `src` and `dst` are, trivially, properly aligned
    // - the region of memory beginning at `src` with a size of `src.len()`
    //   bytes does not overlap with the region of memory beginning at `dst`
    //   with the same size, because `dst` is derived from an exclusive
    //   reference.
    unsafe {
        core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    };
}
286 | | |
/// Unsafely transmutes the given `src` into a type `Dst`.
///
/// `Src` and `Dst` are statically asserted to have equal sizes.
///
/// # Safety
///
/// The value `src` must be a valid instance of `Dst`.
#[inline(always)]
pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst {
    static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>());

    // Writing the `src` field and then reading the `dst` field of this
    // `repr(C)` union performs the transmute; see the SAFETY comment below.
    #[repr(C)]
    union Transmute<Src, Dst> {
        src: ManuallyDrop<Src>,
        dst: ManuallyDrop<Dst>,
    }

    // SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst`
    // fields both start at the same offset and the types of those fields are
    // transparent wrappers around `Src` and `Dst` [1]. Consequently,
    // initializing `Transmute` with with `src` and then reading out `dst` is
    // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src`
    // to `Dst` is valid because — by contract on the caller — `src` is a valid
    // instance of `Dst`.
    //
    // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html:
    //
    //   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //   validity as `T`, and is subject to the same layout optimizations as
    //   `T`.
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields:
    //
    //   Effectively, writing to and then reading from a union with the C
    //   representation is analogous to a transmute from the type used for
    //   writing to the type used for reading.
    unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) }
}
323 | | |
/// Uses `allocate` to create a `Box<T>`.
///
/// # Errors
///
/// Returns an error on allocation failure. Allocation failure is guaranteed
/// never to cause a panic or an abort.
///
/// # Safety
///
/// `allocate` must be either `alloc::alloc::alloc` or
/// `alloc::alloc::alloc_zeroed`. The referent of the box returned by `new_box`
/// has the same bit-validity as the referent of the pointer returned by the
/// given `allocate` and sufficient size to store `T` with `meta`.
#[must_use = "has no side effects (other than allocation)"]
#[cfg(feature = "alloc")]
#[inline]
pub(crate) unsafe fn new_box<T>(
    meta: T::PointerMetadata,
    allocate: unsafe fn(core::alloc::Layout) -> *mut u8,
) -> Result<alloc::boxed::Box<T>, AllocError>
where
    T: ?Sized + crate::KnownLayout,
{
    // `size_for_metadata` returns `None` when no size can be computed for
    // `meta`; treat that the same as an allocation failure.
    let size = match T::size_for_metadata(meta) {
        Some(size) => size,
        None => return Err(AllocError),
    };

    let align = T::LAYOUT.align.get();
    // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a bug in
    // which sufficiently-large allocations (those which, when rounded up to the
    // alignment, overflow `isize`) are not rejected, which can cause undefined
    // behavior. See #64 for details.
    //
    // FIXME(#67): Once our MSRV is > 1.64.0, remove this assertion.
    #[allow(clippy::as_conversions)]
    let max_alloc = (isize::MAX as usize).saturating_sub(align);
    if size > max_alloc {
        return Err(AllocError);
    }

    // FIXME(https://github.com/rust-lang/rust/issues/55724): Use
    // `Layout::repeat` once it's stabilized.
    let layout = Layout::from_size_align(size, align).or(Err(AllocError))?;

    let ptr = if layout.size() != 0 {
        // SAFETY: By contract on the caller, `allocate` is either
        // `alloc::alloc::alloc` or `alloc::alloc::alloc_zeroed`. The above
        // check ensures their shared safety precondition: that the supplied
        // layout is not zero-sized type [1].
        //
        // [1] Per https://doc.rust-lang.org/1.81.0/std/alloc/trait.GlobalAlloc.html#tymethod.alloc:
        //
        //   This function is unsafe because undefined behavior can result if
        //   the caller does not ensure that layout has non-zero size.
        let ptr = unsafe { allocate(layout) };
        // A null return from the allocator signals allocation failure.
        match NonNull::new(ptr) {
            Some(ptr) => ptr,
            None => return Err(AllocError),
        }
    } else {
        let align = T::LAYOUT.align.get();

        // We use `transmute` instead of an `as` cast since Miri (with strict
        // provenance enabled) notices and complains that an `as` cast creates a
        // pointer with no provenance. Miri isn't smart enough to realize that
        // we're only executing this branch when we're constructing a zero-sized
        // `Box`, which doesn't require provenance.
        //
        // SAFETY: any initialized bit sequence is a bit-valid `*mut u8`. All
        // bits of a `usize` are initialized.
        //
        // `#[allow(unknown_lints)]` is for `integer_to_ptr_transmutes`
        #[allow(unknown_lints)]
        #[allow(clippy::useless_transmute, integer_to_ptr_transmutes)]
        let dangling = unsafe { mem::transmute::<usize, *mut u8>(align) };
        // SAFETY: `dangling` is constructed from `T::LAYOUT.align`, which is a
        // `NonZeroUsize`, which is guaranteed to be non-zero.
        //
        // `Box<[T]>` does not allocate when `T` is zero-sized or when `len` is
        // zero, but it does require a non-null dangling pointer for its
        // allocation.
        //
        // FIXME(https://github.com/rust-lang/rust/issues/95228): Use
        // `std::ptr::without_provenance` once it's stable. That may optimize
        // better. As written, Rust may assume that this consumes "exposed"
        // provenance, and thus Rust may have to assume that this may consume
        // provenance from any pointer whose provenance has been exposed.
        unsafe { NonNull::new_unchecked(dangling) }
    };

    // Attach `meta` to the raw allocation pointer, producing a (possibly wide)
    // raw pointer to `T`.
    let ptr = T::raw_from_ptr_len(ptr, meta);

    // FIXME(#429): Add a "SAFETY" comment and remove this `allow`. Make sure to
    // include a justification that `ptr.as_ptr()` is validly-aligned in the ZST
    // case (in which we manually construct a dangling pointer) and to justify
    // why `Box` is safe to drop (it's because `allocate` uses the system
    // allocator).
    #[allow(clippy::undocumented_unsafe_blocks)]
    Ok(unsafe { alloc::boxed::Box::from_raw(ptr.as_ptr()) })
}
425 | | |
/// Defines [`MetadataOf`], a witness that a pointer-metadata value describes
/// an instance no larger than `isize::MAX` bytes.
mod len_of {
    use super::*;

    /// A witness type for metadata of a valid instance of `&T`.
    pub(crate) struct MetadataOf<T: ?Sized + KnownLayout> {
        /// # Safety
        ///
        /// The size of an instance of `&T` with the given metadata is not
        /// larger than `isize::MAX`.
        meta: T::PointerMetadata,
        // Makes this type generic over `T` without storing a `T`.
        _p: PhantomData<T>,
    }

    // `Copy`/`Clone` are implemented manually so that they apply regardless of
    // whether `T` itself is `Copy`/`Clone`.
    impl<T: ?Sized + KnownLayout> Copy for MetadataOf<T> {}
    impl<T: ?Sized + KnownLayout> Clone for MetadataOf<T> {
        fn clone(&self) -> Self {
            *self
        }
    }

    impl<T: ?Sized> MetadataOf<T>
    where
        T: KnownLayout,
    {
        /// Returns `None` if `meta` is greater than `t`'s metadata.
        #[inline(always)]
        pub(crate) fn new_in_bounds(t: &T, meta: usize) -> Option<Self>
        where
            T: KnownLayout<PointerMetadata = usize>,
        {
            if meta <= Ptr::from_ref(t).len() {
                // SAFETY: We have checked that `meta` is not greater than `t`'s
                // metadata, which, by invariant on `&T`, addresses no more than
                // `isize::MAX` bytes [1][2].
                //
                // [1] Per https://doc.rust-lang.org/1.85.0/std/primitive.reference.html#safety:
                //
                //   For all types, `T: ?Sized`, and for all `t: &T` or `t:
                //   &mut T`, when such values cross an API boundary, the
                //   following invariants must generally be upheld:
                //
                //   * `t` is non-null
                //   * `t` is aligned to `align_of_val(t)`
                //   * if `size_of_val(t) > 0`, then `t` is dereferenceable for
                //     `size_of_val(t)` many bytes
                //
                //   If `t` points at address `a`, being "dereferenceable" for
                //   N bytes means that the memory range `[a, a + N)` is all
                //   contained within a single allocated object.
                //
                // [2] Per https://doc.rust-lang.org/1.85.0/std/ptr/index.html#allocated-object:
                //
                //   For any allocated object with `base` address, `size`, and
                //   a set of `addresses`, the following are guaranteed:
                //   - For all addresses `a` in `addresses`, `a` is in the
                //     range `base .. (base + size)` (note that this requires
                //     `a < base + size`, not `a <= base + size`)
                //   - `base` is not equal to [`null()`] (i.e., the address
                //     with the numerical value 0)
                //   - `base + size <= usize::MAX`
                //   - `size <= isize::MAX`
                Some(unsafe { Self::new_unchecked(meta) })
            } else {
                None
            }
        }

        /// # Safety
        ///
        /// The size of an instance of `&T` with the given metadata is not
        /// larger than `isize::MAX`.
        pub(crate) unsafe fn new_unchecked(meta: T::PointerMetadata) -> Self {
            // SAFETY: The caller has promised that the size of an instance of
            // `&T` with the given metadata is not larger than `isize::MAX`.
            Self { meta, _p: PhantomData }
        }

        /// Returns the wrapped metadata value.
        pub(crate) fn get(&self) -> T::PointerMetadata
        where
            T::PointerMetadata: Copy,
        {
            self.meta
        }

        /// Returns the padding needed to round the unpadded size of a `T`
        /// with metadata `self` up to `T`'s alignment.
        #[inline]
        pub(crate) fn padding_needed_for(&self) -> usize
        where
            T: KnownLayout<PointerMetadata = usize>,
        {
            let trailing_slice_layout = crate::trailing_slice_layout::<T>();

            // FIXME(#67): Remove this allow. See NumExt for more details.
            #[allow(
                unstable_name_collisions,
                clippy::incompatible_msrv,
                clippy::multiple_unsafe_ops_per_block
            )]
            // SAFETY: By invariant on `self`, a `&T` with metadata `self.meta`
            // describes an object of size `<= isize::MAX`. This computes the
            // size of such a `&T` without any trailing padding, and so neither
            // the multiplication nor the addition will overflow.
            let unpadded_size = unsafe {
                let trailing_size = self.meta.unchecked_mul(trailing_slice_layout.elem_size);
                trailing_size.unchecked_add(trailing_slice_layout.offset)
            };

            util::padding_needed_for(unpadded_size, T::LAYOUT.align)
        }

        /// Wraps `DstLayout::validate_cast_and_convert_metadata`, returning
        /// the resulting element count and split index as `MetadataOf`
        /// witnesses (justified by the lemmas below).
        #[inline(always)]
        pub(crate) fn validate_cast_and_convert_metadata(
            addr: usize,
            bytes_len: MetadataOf<[u8]>,
            cast_type: CastType,
            meta: Option<T::PointerMetadata>,
        ) -> Result<(MetadataOf<T>, MetadataOf<[u8]>), MetadataCastError> {
            let layout = match meta {
                None => T::LAYOUT,
                // This can return `None` if the metadata describes an object
                // which can't fit in an `isize`.
                Some(meta) => {
                    let size = match T::size_for_metadata(meta) {
                        Some(size) => size,
                        None => return Err(MetadataCastError::Size),
                    };
                    DstLayout {
                        align: T::LAYOUT.align,
                        size_info: crate::SizeInfo::Sized { size },
                        statically_shallow_unpadded: false,
                    }
                }
            };
            // Lemma 0: By contract on `validate_cast_and_convert_metadata`, if
            // the result is `Ok(..)`, then a `&T` with `elems` trailing slice
            // elements is no larger in size than `bytes_len.get()`.
            let (elems, split_at) =
                layout.validate_cast_and_convert_metadata(addr, bytes_len.get(), cast_type)?;
            let elems = T::PointerMetadata::from_elem_count(elems);

            // For a slice DST type, if `meta` is `Some(elems)`, then we
            // synthesize `layout` to describe a sized type whose size is equal
            // to the size of the instance that we are asked to cast. For sized
            // types, `validate_cast_and_convert_metadata` returns `elems == 0`.
            // Thus, in this case, we need to use the `elems` passed by the
            // caller, not the one returned by
            // `validate_cast_and_convert_metadata`.
            //
            // Lemma 1: A `&T` with `elems` trailing slice elements is no larger
            // in size than `bytes_len.get()`. Proof:
            // - If `meta` is `None`, then `elems` satisfies this condition by
            //   Lemma 0.
            // - If `meta` is `Some(meta)`, then `layout` describes an object
            //   whose size is equal to the size of an `&T` with `meta`
            //   metadata. By Lemma 0, that size is not larger than
            //   `bytes_len.get()`.
            //
            // Lemma 2: A `&T` with `elems` trailing slice elements is no larger
            // than `isize::MAX` bytes. Proof: By Lemma 1, a `&T` with metadata
            // `elems` is not larger in size than `bytes_len.get()`. By
            // invariant on `MetadataOf<[u8]>`, a `&[u8]` with metadata
            // `bytes_len` is not larger than `isize::MAX`. Because
            // `size_of::<u8>()` is `1`, a `&[u8]` with metadata `bytes_len` has
            // size `bytes_len.get()` bytes. Therefore, a `&T` with metadata
            // `elems` has size not larger than `isize::MAX`.
            let elems = meta.unwrap_or(elems);

            // SAFETY: See Lemma 2.
            let elems = unsafe { MetadataOf::new_unchecked(elems) };

            // SAFETY: Let `size` be the size of a `&T` with metadata `elems`.
            // By post-condition on `validate_cast_and_convert_metadata`, one of
            // the following conditions holds:
            // - `split_at == size`, in which case, by Lemma 2, `split_at <=
            //   isize::MAX`. Since `size_of::<u8>() == 1`, a `[u8]` with
            //   `split_at` elems has size not larger than `isize::MAX`.
            // - `split_at == bytes_len - size`. Since `bytes_len:
            //   MetadataOf<u8>`, and since `size` is non-negative, `split_at`
            //   addresses no more bytes than `bytes_len` does. Since
            //   `bytes_len: MetadataOf<u8>`, `bytes_len` describes a `[u8]`
            //   which has no more than `isize::MAX` bytes, and thus so does
            //   `split_at`.
            let split_at = unsafe { MetadataOf::<[u8]>::new_unchecked(split_at) };
            Ok((elems, split_at))
        }
    }
}
612 | | |
613 | | pub(crate) use len_of::MetadataOf; |
614 | | |
/// Since we support multiple versions of Rust, there are often features which
/// have been stabilized in the most recent stable release which do not yet
/// exist (stably) on our MSRV. This module provides polyfills for those
/// features so that we can write more "modern" code, and just remove the
/// polyfill once our MSRV supports the corresponding feature. Without this,
/// we'd have to write worse/more verbose code and leave FIXME comments
/// sprinkled throughout the codebase to update to the new pattern once it's
/// stabilized.
///
/// Each trait is imported as `_` at the crate root; each polyfill should "just
/// work" at usage sites.
pub(crate) mod polyfills {
    use core::ptr::{self, NonNull};

    // A polyfill for `NonNull::slice_from_raw_parts` that we can use before our
    // MSRV is 1.70, when that function was stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent
    // method rather than to this trait, and so this trait is considered unused.
    //
    // FIXME(#67): Once our MSRV is 1.70, remove this.
    #[allow(unused)]
    pub(crate) trait NonNullExt<T> {
        /// Creates a non-null raw slice pointer from a thin data pointer and a
        /// length.
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
    }

    impl<T> NonNullExt<T> for NonNull<T> {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
            let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
            // SAFETY: `ptr` is converted from `data`, which is non-null.
            unsafe { NonNull::new_unchecked(ptr) }
        }
    }

    // A polyfill for `usize::unchecked_add`/`unchecked_sub`/`unchecked_mul`
    // that we can use until those inherent methods are stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, calls like `x.unchecked_sub(y)` resolve to the
    // inherent methods rather than to this trait, and so this trait is
    // considered unused.
    //
    // FIXME(#67): Once our MSRV is high enough, remove this.
    #[allow(unused)]
    pub(crate) trait NumExt {
        /// Add without checking for overflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the addition will not overflow.
        unsafe fn unchecked_add(self, rhs: Self) -> Self;

        /// Subtract without checking for underflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the subtraction will not underflow.
        unsafe fn unchecked_sub(self, rhs: Self) -> Self;

        /// Multiply without checking for overflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the multiplication will not overflow.
        unsafe fn unchecked_mul(self, rhs: Self) -> Self;
    }

    // NOTE on coverage: these will never be tested in nightly since they're
    // polyfills for a feature which has been stabilized on our nightly
    // toolchain.
    //
    // Each implementation uses the checked operation and marks the failure
    // branch unreachable, which optimizes identically to the unchecked
    // intrinsic while remaining expressible on our MSRV.
    impl NumExt for usize {
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_add(self, rhs: usize) -> usize {
            match self.checked_add(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the addition will not
                    // overflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }

        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_sub(self, rhs: usize) -> usize {
            match self.checked_sub(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the subtraction will not
                    // underflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }

        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_mul(self, rhs: usize) -> usize {
            match self.checked_mul(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the multiplication will
                    // not overflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }
    }
}
743 | | |
#[cfg(test)]
pub(crate) mod testutil {
    use crate::*;

    /// A `T` which is aligned to at least `align_of::<A>()`.
    #[derive(Default)]
    pub(crate) struct Align<T, A> {
        pub(crate) t: T,
        // A zero-length array of `A` contributes `A`'s alignment requirement
        // to the containing struct without storing any `A` values.
        _a: [A; 0],
    }

    impl<T: Default, A> Align<T, A> {
        /// Resets `self.t` to `T::default()` in place.
        pub(crate) fn set_default(&mut self) {
            self.t = T::default();
        }
    }

    impl<T, A> Align<T, A> {
        /// Constructs a new `Align` wrapping `t`.
        pub(crate) const fn new(t: T) -> Align<T, A> {
            Align { t, _a: [] }
        }
    }

    /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
    ///
    /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
    /// for this type to work properly.
    #[repr(C)]
    pub(crate) struct ForceUnalign<T: Unaligned, A> {
        // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
        // placed at the minimum offset that guarantees its alignment. If
        // `align_of::<T>() < align_of::<A>()`, then that offset will be
        // guaranteed *not* to satisfy `align_of::<A>()`.
        //
        // Note that we need `T: Unaligned` in order to guarantee that there is
        // no padding between `_u` and `t`.
        _u: u8,
        pub(crate) t: T,
        // Zero-length array: raises the outer struct's alignment to `A`'s
        // without storing any `A` values (same trick as in `Align`).
        _a: [A; 0],
    }

    impl<T: Unaligned, A> ForceUnalign<T, A> {
        /// Constructs a new `ForceUnalign` wrapping `t`.
        pub(crate) fn new(t: T) -> ForceUnalign<T, A> {
            ForceUnalign { _u: 0, t, _a: [] }
        }
    }

    /// A `u64` with alignment 8.
    ///
    /// Though `u64` has alignment 8 on some platforms, it's not guaranteed. By
    /// contrast, `AU64` is guaranteed to have alignment 8 on all platforms.
    #[derive(
        KnownLayout,
        Immutable,
        FromBytes,
        IntoBytes,
        Eq,
        PartialEq,
        Ord,
        PartialOrd,
        Default,
        Debug,
        Copy,
        Clone,
    )]
    #[repr(C, align(8))]
    pub(crate) struct AU64(pub(crate) u64);

    impl AU64 {
        /// Converts this `AU64` to bytes using this platform's endianness.
        pub(crate) fn to_bytes(self) -> [u8; 8] {
            crate::transmute!(self)
        }
    }

    impl Display for AU64 {
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            // Delegate to the inner `u64`'s `Display` impl.
            Display::fmt(&self.0, f)
        }
    }
}
828 | | |
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_down_to_next_multiple_of_alignment() {
        // Reference implementation: truncating division followed by
        // multiplication yields the largest multiple of `align` that is
        // `<= n`.
        fn alt_impl(n: usize, align: NonZeroUsize) -> usize {
            (n / align.get()) * align.get()
        }

        // Alignments must be powers of two: 1, 2, 4, 8, 16.
        for pow in 0..5u32 {
            let align = NonZeroUsize::new(1usize << pow).unwrap();
            for n in 0..256 {
                let want = alt_impl(n, align);
                let got = round_down_to_next_multiple_of_alignment(n, align);
                assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
            }
        }
    }

    #[rustversion::since(1.57.0)]
    #[test]
    #[should_panic]
    fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() {
        // `3` is not a power of two, so this call must panic.
        round_down_to_next_multiple_of_alignment(0, NonZeroUsize::new(3).unwrap());
    }
    #[test]
    fn test_send_sync_phantom_data() {
        // `default`, `clone`, and `eq` should all agree: every
        // `SendSyncPhantomData<u8>` compares equal to every other.
        let first = SendSyncPhantomData::<u8>::default();
        let second = first.clone();
        assert!(first == second);
        assert!(first == SendSyncPhantomData::<u8>::default());
    }

    #[test]
    #[allow(clippy::as_conversions)]
    fn test_as_address() {
        // Exercise every `AsAddress` impl (`&T`, `&mut T`, `*const T`,
        // `*mut T`, `NonNull<T>`) and confirm each reports the same address
        // as a raw pointer cast.
        let byte = 0u8;
        let shared = &byte;
        let mut byte_mut = 0u8;
        let exclusive = &mut byte_mut;
        let const_ptr = shared as *const u8;
        let mut_ptr = exclusive as *mut u8;
        let non_null = NonNull::new(const_ptr as *mut u8).unwrap();

        assert_eq!(AsAddress::addr(shared), const_ptr as usize);
        assert_eq!(AsAddress::addr(exclusive), mut_ptr as usize);
        assert_eq!(AsAddress::addr(const_ptr), const_ptr as usize);
        assert_eq!(AsAddress::addr(mut_ptr), mut_ptr as usize);
        assert_eq!(AsAddress::addr(non_null), const_ptr as usize);
    }
}