/rust/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.7.35/src/lib.rs
// Copyright 2018 The Fuchsia Authors
//
// Licensed under the 2-Clause BSD License <LICENSE-BSD or
// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.

// After updating the following doc comment, make sure to run the following
// command to update `README.md` based on its contents:
//
//   ./generate-readme.sh > README.md

//! *<span style="font-size: 100%; color:grey;">Want to help improve zerocopy?
//! Fill out our [user survey][user-survey]!</span>*
//!
//! ***<span style="font-size: 140%">Fast, safe, <span
//! style="color:red;">compile error</span>. Pick two.</span>***
//!
//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
//! so you don't have to.
//!
//! # Overview
//!
//! Zerocopy provides four core marker traits, each of which can be derived
//! (e.g., `#[derive(FromZeroes)]`):
//! - [`FromZeroes`] indicates that a sequence of zero bytes represents a valid
//!   instance of a type
//! - [`FromBytes`] indicates that a type may safely be converted from an
//!   arbitrary byte sequence
//! - [`AsBytes`] indicates that a type may safely be converted *to* a byte
//!   sequence
//! - [`Unaligned`] indicates that a type's alignment requirement is 1
//!
//! Types which implement a subset of these traits can then be converted to/from
//! byte sequences with little to no runtime overhead.
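//!
//! For example (a minimal sketch; `PacketHeader` is a hypothetical type),
//! deriving these traits allows a header to be parsed directly from a byte
//! slice:
//!
//! ```
//! # use zerocopy_derive::{AsBytes, FromBytes, FromZeroes};
//! use zerocopy::FromBytes;
//!
//! #[derive(FromZeroes, FromBytes, AsBytes)]
//! #[repr(C)]
//! struct PacketHeader {
//!     src_port: [u8; 2],
//!     dst_port: [u8; 2],
//! }
//!
//! // Reads a `PacketHeader` from the bytes of the slice without any
//! // field-by-field copying.
//! let bytes = [0u8, 80, 1, 187];
//! let header = PacketHeader::read_from(&bytes[..]).unwrap();
//! assert_eq!(header.src_port, [0, 80]);
//! assert_eq!(header.dst_port, [1, 187]);
//! ```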
//!
//! Zerocopy also provides byte-order aware integer types that support these
//! conversions; see the [`byteorder`] module. These types are especially useful
//! for network parsing.
//!
//! [user-survey]: https://docs.google.com/forms/d/e/1FAIpQLSdzBNTN9tzwsmtyZxRFNL02K36IWCdHWW2ZBckyQS2xiO3i8Q/viewform?usp=published_options
//!
//! # Cargo Features
//!
//! - **`alloc`**
//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
//!   the `alloc` crate is added as a dependency, and some allocation-related
//!   functionality is added.
//!
//! - **`byteorder`** (enabled by default)
//!   Adds the [`byteorder`] module and a dependency on the `byteorder` crate.
//!   The `byteorder` module provides byte order-aware equivalents of the
//!   multi-byte primitive numerical types. Unlike their primitive equivalents,
//!   the types in this module have no alignment requirement and support byte
//!   order conversions. This can be useful in handling file formats, network
//!   packet layouts, etc., which don't provide alignment guarantees and which
//!   may use a byte order different from that of the execution platform.
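//!
//!   For example (a minimal sketch, assuming the `network_endian` alias
//!   module): a big-endian `U16` can be read and written at any offset, with
//!   byte order conversions handled on access:
//!
//!   ```
//!   # #[cfg(feature = "byteorder")] {
//!   use zerocopy::byteorder::network_endian::U16;
//!
//!   // `U16` stores its bytes in network (big-endian) order and has
//!   // alignment 1.
//!   let port = U16::new(443);
//!   assert_eq!(port.get(), 443);
//!   # }
//!   ```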
//!
//! - **`derive`**
//!   Provides derives for the core marker traits via the `zerocopy-derive`
//!   crate. These derives are re-exported from `zerocopy`, so it is not
//!   necessary to depend on `zerocopy-derive` directly.
//!
//!   However, you may experience better compile times if you instead directly
//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
//!   since doing so will allow Rust to compile these crates in parallel. To do
//!   so, do *not* enable the `derive` feature, and list both dependencies in
//!   your `Cargo.toml` with the same leading non-zero version number; e.g.:
//!
//!   ```toml
//!   [dependencies]
//!   zerocopy = "0.X"
//!   zerocopy-derive = "0.X"
//!   ```
//!
//! - **`simd`**
//!   When the `simd` feature is enabled, `FromZeroes`, `FromBytes`, and
//!   `AsBytes` impls are emitted for all stable SIMD types which exist on the
//!   target platform. Note that the layout of SIMD types is not yet stabilized,
//!   so these impls may be removed in the future if layout changes make them
//!   invalid. For more information, see the Unsafe Code Guidelines Reference
//!   page on the [layout of packed SIMD vectors][simd-layout].
//!
//! - **`simd-nightly`**
//!   Enables the `simd` feature and adds support for SIMD types which are only
//!   available on nightly. Since these types are unstable, support for any type
//!   may be removed at any point in the future.
//!
//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
//!
//! # Security Ethos
//!
//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current memory
//! model, and *any future memory model*. We ensure this by:
//! - **...not 'guessing' about Rust's semantics.**
//!   We annotate `unsafe` code with a precise rationale for its soundness that
//!   cites a relevant section of Rust's official documentation. When Rust's
//!   documented semantics are unclear, we work with the Rust Operational
//!   Semantics Team to clarify Rust's documentation.
//! - **...rigorously testing our implementation.**
//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
//!   array of supported target platforms of varying endianness and pointer
//!   width, and across both current and experimental memory models of Rust.
//! - **...formally proving the correctness of our implementation.**
//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
//!   correctness.
//!
//! For more information, see our full [soundness policy].
//!
//! [Miri]: https://github.com/rust-lang/miri
//! [Kani]: https://github.com/model-checking/kani
//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
//!
//! # Relationship to Project Safe Transmute
//!
//! [Project Safe Transmute] is an official initiative of the Rust Project to
//! develop language-level support for safer transmutation. The Project consults
//! with crates like zerocopy to identify aspects of safer transmutation that
//! would benefit from compiler support, and has developed an [experimental,
//! compiler-supported analysis][mcp-transmutability] which determines whether,
//! for a given type, any value of that type may be soundly transmuted into
//! another type. Once this functionality is sufficiently mature, zerocopy
//! intends to replace its internal transmutability analysis (implemented by our
//! custom derives) with the compiler-supported one. This change will likely be
//! an implementation detail that is invisible to zerocopy's users.
//!
//! Project Safe Transmute will not replace the need for most of zerocopy's
//! higher-level abstractions. The experimental compiler analysis is a tool for
//! checking the soundness of `unsafe` code, not a tool to avoid writing
//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
//! will still be required in order to provide higher-level abstractions on top
//! of the building block provided by Project Safe Transmute.
//!
//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
//!
//! # MSRV
//!
//! See our [MSRV policy].
//!
//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
//!
//! # Changelog
//!
//! Zerocopy uses [GitHub Releases].
//!
//! [GitHub Releases]: https://github.com/google/zerocopy/releases

// Sometimes we want to use lints which were added after our MSRV.
// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
// this attribute, any unknown lint would cause a CI failure when testing with
// our MSRV.
//
// TODO(#1201): Remove `unexpected_cfgs`
#![allow(unknown_lints, non_local_definitions, unexpected_cfgs)]
#![deny(renamed_and_removed_lints)]
#![deny(
    anonymous_parameters,
    deprecated_in_future,
    late_bound_lifetime_arguments,
    missing_copy_implementations,
    missing_debug_implementations,
    missing_docs,
    path_statements,
    patterns_in_fns_without_body,
    rust_2018_idioms,
    trivial_numeric_casts,
    unreachable_pub,
    unsafe_op_in_unsafe_fn,
    unused_extern_crates,
    // We intentionally choose not to deny `unused_qualifications`. When items
    // are added to the prelude (e.g., `core::mem::size_of`), this has the
    // consequence of making some uses trigger this lint on the latest toolchain
    // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
    // does not work on older toolchains.
    //
    // We tested a more complicated fix in #1413, but ultimately decided that,
    // since this lint is just a minor style lint, the complexity isn't worth it
    // - it's fine to occasionally have unused qualifications slip through,
    // especially since these do not affect our user-facing API in any way.
    variant_size_differences
)]
#![cfg_attr(
    __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
    deny(fuzzy_provenance_casts, lossy_provenance_casts)
)]
#![deny(
    clippy::all,
    clippy::alloc_instead_of_core,
    clippy::arithmetic_side_effects,
    clippy::as_underscore,
    clippy::assertions_on_result_states,
    clippy::as_conversions,
    clippy::correctness,
    clippy::dbg_macro,
    clippy::decimal_literal_representation,
    clippy::get_unwrap,
    clippy::indexing_slicing,
    clippy::missing_inline_in_public_items,
    clippy::missing_safety_doc,
    clippy::obfuscated_if_else,
    clippy::perf,
    clippy::print_stdout,
    clippy::std_instead_of_core,
    clippy::style,
    clippy::suspicious,
    clippy::todo,
    clippy::undocumented_unsafe_blocks,
    clippy::unimplemented,
    clippy::unnested_or_patterns,
    clippy::unwrap_used,
    clippy::use_debug
)]
#![deny(
    rustdoc::bare_urls,
    rustdoc::broken_intra_doc_links,
    rustdoc::invalid_codeblock_attributes,
    rustdoc::invalid_html_tags,
    rustdoc::invalid_rust_codeblocks,
    rustdoc::missing_crate_level_docs,
    rustdoc::private_intra_doc_links
)]
// In test code, it makes sense to weight more heavily towards concise, readable
// code over correct or debuggable code.
#![cfg_attr(any(test, kani), allow(
    // In tests, you get line numbers and have access to source code, so panic
    // messages are less important. You also often unwrap a lot, which would
    // make expect'ing instead very verbose.
    clippy::unwrap_used,
    // In tests, there's no harm to "panic risks" - the worst that can happen is
    // that your test will fail, and you'll fix it. By contrast, panic risks in
    // production code introduce the possibility of code panicking unexpectedly "in
    // the field".
    clippy::arithmetic_side_effects,
    clippy::indexing_slicing,
))]
#![cfg_attr(not(test), no_std)]
#![cfg_attr(
    all(feature = "simd-nightly", any(target_arch = "x86", target_arch = "x86_64")),
    feature(stdarch_x86_avx512)
)]
#![cfg_attr(
    all(feature = "simd-nightly", target_arch = "arm"),
    feature(stdarch_arm_dsp, stdarch_arm_neon_intrinsics)
)]
#![cfg_attr(
    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
    feature(stdarch_powerpc)
)]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(
    __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
    feature(layout_for_ptr, strict_provenance)
)]

// This is a hack to allow zerocopy-derive derives to work in this crate. They
// assume that zerocopy is linked as an extern crate, so they access items from
// it as `zerocopy::Xxx`. This makes that still work.
#[cfg(any(feature = "derive", test))]
extern crate self as zerocopy;

#[macro_use]
mod macros;

#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub mod byteorder;
#[doc(hidden)]
pub mod macro_util;
mod post_monomorphization_compile_fail_tests;
mod util;
// TODO(#252): If we make this pub, come up with a better name.
mod wrappers;

#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub use crate::byteorder::*;
pub use crate::wrappers::*;

#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::Unaligned;

// `pub use` separately here so that we can mark it `#[doc(hidden)]`.
//
// TODO(#29): Remove this or add a doc comment.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
#[doc(hidden)]
pub use zerocopy_derive::KnownLayout;

use core::{
    cell::{self, RefMut},
    cmp::Ordering,
    fmt::{self, Debug, Display, Formatter},
    hash::Hasher,
    marker::PhantomData,
    mem::{self, ManuallyDrop, MaybeUninit},
    num::{
        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
    },
    ops::{Deref, DerefMut},
    ptr::{self, NonNull},
    slice,
};

#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::{boxed::Box, vec::Vec};

#[cfg(any(feature = "alloc", kani))]
use core::alloc::Layout;

// Used by `TryFromBytes::is_bit_valid`.
#[doc(hidden)]
pub use crate::util::ptr::Ptr;

// For each polyfill, as soon as the corresponding feature is stable, the
// polyfill import will be unused because method/function resolution will prefer
// the inherent method/function over a trait method/function. Thus, we suppress
// the `unused_imports` warning.
//
// See the documentation on `util::polyfills` for more information.
#[allow(unused_imports)]
use crate::util::polyfills::NonNullExt as _;

#[rustversion::nightly]
#[cfg(all(test, not(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)))]
const _: () = {
    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS\""]
    const _WARNING: () = ();
    #[warn(deprecated)]
    _WARNING
};

/// The target pointer width, counted in bits.
const POINTER_WIDTH_BITS: usize = mem::size_of::<usize>() * 8;

/// The layout of a type which might be dynamically-sized.
///
/// `DstLayout` describes the layout of sized types, slice types, and "slice
/// DSTs" - i.e., those that are known by the type system to have a trailing
/// slice (as distinguished from `dyn Trait` types - such types *might* have a
/// trailing slice type, but the type system isn't aware of it).
///
/// # Safety
///
/// Unlike [`core::alloc::Layout`], `DstLayout` is only used to describe full
/// Rust types - i.e., those that satisfy the layout requirements outlined by
/// [the reference]. Callers may assume that an instance of `DstLayout`
/// satisfies any conditions imposed on Rust types by the reference.
///
/// If `layout: DstLayout` describes a type, `T`, then it is guaranteed that:
/// - `layout.align` is equal to `T`'s alignment
/// - If `layout.size_info` is `SizeInfo::Sized { size }`, then `T: Sized` and
///   `size_of::<T>() == size`
/// - If `layout.size_info` is `SizeInfo::SliceDst(slice_layout)`, then
///   - `T` is a slice DST
///   - The `size` of an instance of `T` with `elems` trailing slice elements
///     is equal to `slice_layout.offset + slice_layout.elem_size * elems`
///     rounded up to the nearest multiple of `layout.align`. Any bytes in the
///     range `[slice_layout.offset + slice_layout.elem_size * elems, size)`
///     are padding and must not be assumed to be initialized.
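///
/// For example (a hypothetical worked case): if `layout.align` is 4 and
/// `layout.size_info` is `SizeInfo::SliceDst(slice_layout)` with
/// `slice_layout.offset == 4` and `slice_layout.elem_size == 2`, then an
/// instance with `elems == 3` trailing slice elements occupies `4 + 2 * 3 ==
/// 10` bytes, which is rounded up to 12 bytes to satisfy the 4-byte alignment;
/// the bytes in the range `[10, 12)` are padding.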
///
/// [the reference]: https://doc.rust-lang.org/reference/type-layout.html
#[doc(hidden)]
#[allow(missing_debug_implementations, missing_copy_implementations)]
#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
pub struct DstLayout {
    align: NonZeroUsize,
    size_info: SizeInfo,
}

#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
enum SizeInfo<E = usize> {
    Sized { _size: usize },
    SliceDst(TrailingSliceLayout<E>),
}

#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
struct TrailingSliceLayout<E = usize> {
    // The offset of the first byte of the trailing slice field. Note that this
    // is NOT the same as the minimum size of the type. For example, consider
    // the following type:
    //
    //   struct Foo {
    //       a: u16,
    //       b: u8,
    //       c: [u8],
    //   }
    //
    // In `Foo`, `c` is at byte offset 3. When `c.len() == 0`, `c` is followed
    // by a padding byte.
    _offset: usize,
    // The size of the element type of the trailing slice field.
    _elem_size: E,
}

impl SizeInfo {
    /// Attempts to create a `SizeInfo` from `Self` in which `elem_size` is a
    /// `NonZeroUsize`. If `elem_size` is 0, returns `None`.
    #[allow(unused)]
    const fn try_to_nonzero_elem_size(&self) -> Option<SizeInfo<NonZeroUsize>> {
        Some(match *self {
            SizeInfo::Sized { _size } => SizeInfo::Sized { _size },
            SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
                if let Some(_elem_size) = NonZeroUsize::new(_elem_size) {
                    SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
                } else {
                    return None;
                }
            }
        })
    }
}

#[doc(hidden)]
#[derive(Copy, Clone)]
#[cfg_attr(test, derive(Debug))]
#[allow(missing_debug_implementations)]
pub enum _CastType {
    _Prefix,
    _Suffix,
}

impl DstLayout {
    /// The minimum possible alignment of a type.
    const MIN_ALIGN: NonZeroUsize = match NonZeroUsize::new(1) {
        Some(min_align) => min_align,
        None => unreachable!(),
    };

    /// The theoretical maximum possible alignment of a type.
    ///
    /// For compatibility with future Rust versions, this is defined as the
    /// maximum power of two that fits into a `usize`. See also
    /// [`DstLayout::CURRENT_MAX_ALIGN`].
    const THEORETICAL_MAX_ALIGN: NonZeroUsize =
        match NonZeroUsize::new(1 << (POINTER_WIDTH_BITS - 1)) {
            Some(max_align) => max_align,
            None => unreachable!(),
        };

    /// The current, documented max alignment of a type \[1\].
    ///
    /// \[1\] Per <https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers>:
    ///
    ///   The alignment value must be a power of two from 1 up to
    ///   2<sup>29</sup>.
    #[cfg(not(kani))]
    const CURRENT_MAX_ALIGN: NonZeroUsize = match NonZeroUsize::new(1 << 28) {
        Some(max_align) => max_align,
        None => unreachable!(),
    };

    /// Constructs a `DstLayout` for a zero-sized type with the given
    /// `repr_align` alignment, or an alignment of 1 if `repr_align` is `None`.
    /// If `repr_align` is provided, it must be a power of two.
    ///
    /// # Panics
    ///
    /// This function panics if the supplied `repr_align` is not a power of two.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that the contract of this function is satisfied.
    #[doc(hidden)]
    #[inline]
    pub const fn new_zst(repr_align: Option<NonZeroUsize>) -> DstLayout {
        let align = match repr_align {
            Some(align) => align,
            None => Self::MIN_ALIGN,
        };

        assert!(align.is_power_of_two());

        DstLayout { align, size_info: SizeInfo::Sized { _size: 0 } }
    }

    /// Constructs a `DstLayout` which describes `T`.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that `DstLayout` is the correct layout for `T`.
    #[doc(hidden)]
    #[inline]
    pub const fn for_type<T>() -> DstLayout {
        // SAFETY: `align` is correct by construction. `T: Sized`, and so it is
        // sound to initialize `size_info` to `SizeInfo::Sized { size }`; the
        // `size` field is also correct by construction.
        DstLayout {
            align: match NonZeroUsize::new(mem::align_of::<T>()) {
                Some(align) => align,
                None => unreachable!(),
            },
            size_info: SizeInfo::Sized { _size: mem::size_of::<T>() },
        }
    }

    /// Constructs a `DstLayout` which describes `[T]`.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that `DstLayout` is the correct layout for `[T]`.
    const fn for_slice<T>() -> DstLayout {
        // SAFETY: The alignment of a slice is equal to the alignment of its
        // element type, and so `align` is initialized correctly.
        //
        // Since this is just a slice type, there is no offset between the
        // beginning of the type and the beginning of the slice, so it is
        // correct to set `offset: 0`. The `elem_size` is correct by
        // construction. Since `[T]` is a (degenerate case of a) slice DST, it
        // is correct to initialize `size_info` to `SizeInfo::SliceDst`.
        DstLayout {
            align: match NonZeroUsize::new(mem::align_of::<T>()) {
                Some(align) => align,
                None => unreachable!(),
            },
            size_info: SizeInfo::SliceDst(TrailingSliceLayout {
                _offset: 0,
                _elem_size: mem::size_of::<T>(),
            }),
        }
    }

    /// Like `Layout::extend`, this creates a layout that describes a record
    /// whose layout consists of `self` followed by `field`, including the
    /// necessary inter-field padding, but without any trailing padding.
    ///
    /// In order to match the layout of a `#[repr(C)]` struct, this method
    /// should be invoked for each field in declaration order. To add trailing
    /// padding, call `DstLayout::pad_to_align` after extending the layout for
    /// all fields. If `self` corresponds to a type marked with
    /// `repr(packed(N))`, then `repr_packed` should be set to `Some(N)`,
    /// otherwise `None`.
    ///
    /// This method cannot be used to match the layout of a record with the
    /// default representation, as that representation is mostly unspecified.
    ///
    /// # Safety
    ///
    /// If a (potentially hypothetical) valid `repr(C)` Rust type begins with
    /// fields whose layout are `self`, and those fields are immediately
    /// followed by a field whose layout is `field`, then unsafe code may rely
    /// on `self.extend(field, repr_packed)` producing a layout that correctly
    /// encompasses those two components.
    ///
    /// We make no guarantees about the behavior of this method if these
    /// fragments cannot appear in a valid Rust type (e.g., the concatenation
    /// of the layouts would lead to a size larger than `isize::MAX`).
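    ///
    /// For example (a minimal sketch; `Foo` is a hypothetical type): the
    /// layout of `#[repr(C)] struct Foo { a: u8, b: u32 }` can be matched by
    /// extending a zero-sized layout once per field and then padding to
    /// alignment:
    ///
    /// ```
    /// # use zerocopy::DstLayout;
    /// let layout = DstLayout::new_zst(None)
    ///     .extend(DstLayout::for_type::<u8>(), None)
    ///     .extend(DstLayout::for_type::<u32>(), None)
    ///     .pad_to_align();
    /// // `layout` now describes a type with alignment 4 and size 8: one byte
    /// // for `a`, three bytes of inter-field padding, and four bytes for `b`.
    /// ```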
    #[doc(hidden)]
    #[inline]
    pub const fn extend(self, field: DstLayout, repr_packed: Option<NonZeroUsize>) -> Self {
        use util::{core_layout::padding_needed_for, max, min};

        // If `repr_packed` is `None`, there are no alignment constraints, and
        // the value can be defaulted to `THEORETICAL_MAX_ALIGN`.
        let max_align = match repr_packed {
            Some(max_align) => max_align,
            None => Self::THEORETICAL_MAX_ALIGN,
        };

        assert!(max_align.is_power_of_two());

        // We use Kani to prove that this method is robust to future increases
        // in Rust's maximum allowed alignment. However, if such a change ever
        // actually occurs, we'd like to be notified via assertion failures.
        #[cfg(not(kani))]
        {
            debug_assert!(self.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            debug_assert!(field.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            if let Some(repr_packed) = repr_packed {
                debug_assert!(repr_packed.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            }
        }

        // The field's alignment is clamped by `repr_packed` (i.e., the
        // `repr(packed(N))` attribute, if any) [1].
        //
        // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
        //
        //   The alignments of each field, for the purpose of positioning
        //   fields, is the smaller of the specified alignment and the alignment
        //   of the field's type.
        let field_align = min(field.align, max_align);

        // The struct's alignment is the maximum of its previous alignment and
        // `field_align`.
        let align = max(self.align, field_align);

        let size_info = match self.size_info {
            // If the layout is already a DST, we panic; DSTs cannot be extended
            // with additional fields.
            SizeInfo::SliceDst(..) => panic!("Cannot extend a DST with additional fields."),

            SizeInfo::Sized { _size: preceding_size } => {
                // Compute the minimum amount of inter-field padding needed to
                // satisfy the field's alignment, and offset of the trailing
                // field. [1]
                //
                // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
                //
                //   Inter-field padding is guaranteed to be the minimum
                //   required in order to satisfy each field's (possibly
                //   altered) alignment.
                let padding = padding_needed_for(preceding_size, field_align);

                // This will not panic (and is proven to not panic, with Kani)
                // if the layout components can correspond to a leading layout
                // fragment of a valid Rust type, but may panic otherwise (e.g.,
                // combining or aligning the components would create a size
                // exceeding `isize::MAX`).
                let offset = match preceding_size.checked_add(padding) {
                    Some(offset) => offset,
                    None => panic!("Adding padding to `self`'s size overflows `usize`."),
                };

                match field.size_info {
                    SizeInfo::Sized { _size: field_size } => {
                        // If the trailing field is sized, the resulting layout
                        // will be sized. Its size will be the sum of the
                        // preceding layout, the size of the new field, and the
                        // size of inter-field padding between the two.
                        //
                        // This will not panic (and is proven with Kani to not
                        // panic) if the layout components can correspond to a
                        // leading layout fragment of a valid Rust type, but may
                        // panic otherwise (e.g., combining or aligning the
                        // components would create a size exceeding
                        // `usize::MAX`).
                        let size = match offset.checked_add(field_size) {
                            Some(size) => size,
                            None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
                        };
                        SizeInfo::Sized { _size: size }
                    }
                    SizeInfo::SliceDst(TrailingSliceLayout {
                        _offset: trailing_offset,
                        _elem_size,
                    }) => {
                        // If the trailing field is dynamically sized, so too
                        // will the resulting layout be. The offset of the
                        // trailing slice component is the sum of the offset of
                        // the trailing field and the trailing slice offset
                        // within that field.
                        //
                        // This will not panic (and is proven with Kani to not
                        // panic) if the layout components can correspond to a
                        // leading layout fragment of a valid Rust type, but may
                        // panic otherwise (e.g., combining or aligning the
                        // components would create a size exceeding
                        // `usize::MAX`).
                        let offset = match offset.checked_add(trailing_offset) {
                            Some(offset) => offset,
                            None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
                        };
                        SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size })
                    }
                }
            }
        };

        DstLayout { align, size_info }
    }

    /// Like `Layout::pad_to_align`, this routine rounds the size of this
    /// layout up to the nearest multiple of this layout's alignment. This
    /// method leaves DST layouts unchanged, since the trailing padding of DSTs
    /// is computed at runtime.
    ///
    /// In order to match the layout of a `#[repr(C)]` struct, this method
    /// should be invoked after the invocations of [`DstLayout::extend`]. Note
    /// that any `repr(packed(N))` attribute is already accounted for: `extend`
    /// clamps the alignment stored in `self`, so no separate argument is
    /// needed here.
    ///
    /// This method cannot be used to match the layout of a record with the
    /// default representation, as that representation is mostly unspecified.
    ///
    /// # Safety
    ///
    /// If a (potentially hypothetical) valid `repr(C)` type begins with fields
    /// whose layout are `self` followed only by zero or more bytes of trailing
    /// padding (not included in `self`), then unsafe code may rely on
    /// `self.pad_to_align()` producing a layout that correctly encapsulates
    /// the layout of that type.
    ///
    /// We make no guarantees about the behavior of this method if `self`
    /// cannot appear in a valid Rust type (e.g., because the addition of
    /// trailing padding would lead to a size larger than `isize::MAX`).
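    ///
    /// For example (a minimal sketch): extending a zero-sized layout with a
    /// `u16` and then a `u8` yields an unpadded size of 3, which this method
    /// rounds up to 4 to satisfy the 2-byte alignment:
    ///
    /// ```
    /// # use zerocopy::DstLayout;
    /// let layout = DstLayout::new_zst(None)
    ///     .extend(DstLayout::for_type::<u16>(), None)
    ///     .extend(DstLayout::for_type::<u8>(), None)
    ///     .pad_to_align();
    /// // `layout` now has size 4: two bytes for the `u16`, one byte for the
    /// // `u8`, and one trailing padding byte.
    /// ```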
    #[doc(hidden)]
    #[inline]
    pub const fn pad_to_align(self) -> Self {
        use util::core_layout::padding_needed_for;

        let size_info = match self.size_info {
            // For sized layouts, we add the minimum amount of trailing padding
            // needed to satisfy alignment.
            SizeInfo::Sized { _size: unpadded_size } => {
                let padding = padding_needed_for(unpadded_size, self.align);
                let size = match unpadded_size.checked_add(padding) {
                    Some(size) => size,
                    None => panic!("Adding padding caused size to overflow `usize`."),
                };
                SizeInfo::Sized { _size: size }
            }
            // For DST layouts, trailing padding depends on the length of the
            // trailing DST and is computed at runtime. This does not alter the
            // offset or element size of the layout, so we leave `size_info`
            // unchanged.
            size_info @ SizeInfo::SliceDst(_) => size_info,
        };

        DstLayout { align: self.align, size_info }
    }

    /// Validates that a cast is sound from a layout perspective.
    ///
    /// Validates that the size and alignment requirements of a type with the
    /// layout described in `self` would not be violated by performing a
    /// `cast_type` cast from a pointer with address `addr` which refers to a
    /// memory region of size `bytes_len`.
    ///
    /// If the cast is valid, `validate_cast_and_convert_metadata` returns
    /// `(elems, split_at)`. If `self` describes a dynamically-sized type, then
    /// `elems` is the maximum number of trailing slice elements for which a
    /// cast would be valid (for sized types, `elems` is meaningless and should
    /// be ignored). `split_at` is the index at which to split the memory
    /// region in order for the prefix (suffix) to contain the result of the
    /// cast, and in order for the remaining suffix (prefix) to contain the
    /// leftover bytes.
    ///
    /// There are three conditions under which a cast can fail:
    /// - The smallest possible value for the type is larger than the provided
    ///   memory region
    /// - A prefix cast is requested, and `addr` does not satisfy `self`'s
    ///   alignment requirement
    /// - A suffix cast is requested, and `addr + bytes_len` does not satisfy
    ///   `self`'s alignment requirement (as a consequence, since the size of
    ///   every instance of the type is a multiple of its alignment, no size
    ///   for the type would result in a properly-aligned starting address)
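    ///
    /// For example (a hypothetical worked case): given a slice-DST layout with
    /// alignment 4, offset 4, and element size 2, a prefix cast from a
    /// 4-aligned, 13-byte region can use at most the largest aligned prefix,
    /// 12 bytes, leaving `12 - 4 == 8` bytes for the trailing slice. The cast
    /// therefore succeeds with `elems == 4` and `split_at == 12`, and the
    /// final byte is left over.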
    ///
    /// # Safety
    ///
    /// The caller may assume that this implementation is correct, and may rely
    /// on that assumption for the soundness of their code. In particular, the
    /// caller may assume that, if `validate_cast_and_convert_metadata` returns
    /// `Some((elems, split_at))`, then:
    /// - A pointer to the type (for dynamically sized types, this includes
    ///   `elems` as its pointer metadata) describes an object of size `size <=
    ///   bytes_len`
    /// - If this is a prefix cast:
    ///   - `addr` satisfies `self`'s alignment
    ///   - `size == split_at`
    /// - If this is a suffix cast:
    ///   - `split_at == bytes_len - size`
    ///   - `addr + split_at` satisfies `self`'s alignment
    ///
    /// Note that this method does *not* ensure that a pointer constructed from
    /// its return values will be a valid pointer. In particular, this method
    /// does not reason about `isize` overflow, which is a requirement of many
    /// Rust pointer APIs, and may at some point be determined to be a validity
    /// invariant of pointer types themselves. This should never be a problem so
    /// long as the arguments to this method are derived from a known-valid
    /// pointer (e.g., one derived from a safe Rust reference), but it is
    /// nonetheless the caller's responsibility to justify that pointer
    /// arithmetic will not overflow based on a safety argument *other than* the
    /// mere fact that this method returned successfully.
    ///
    /// # Panics
    ///
    /// `validate_cast_and_convert_metadata` will panic if `self` describes a
    /// DST whose trailing slice element is zero-sized.
    ///
    /// If `addr + bytes_len` overflows `usize`,
    /// `validate_cast_and_convert_metadata` may panic, or it may return
    /// incorrect results. No guarantees are made about when
    /// `validate_cast_and_convert_metadata` will panic. The caller should not
    /// rely on `validate_cast_and_convert_metadata` panicking in any particular
    /// condition, even if `debug_assertions` are enabled.
    #[allow(unused)]
    const fn validate_cast_and_convert_metadata(
        &self,
        addr: usize,
        bytes_len: usize,
        cast_type: _CastType,
    ) -> Option<(usize, usize)> {
        // `debug_assert!`, but with `#[allow(clippy::arithmetic_side_effects)]`.
        macro_rules! __debug_assert {
            ($e:expr $(, $msg:expr)?) => {
                debug_assert!({
                    #[allow(clippy::arithmetic_side_effects)]
                    let e = $e;
                    e
                } $(, $msg)?);
            };
        }

        // Note that, in practice, `self` is always a compile-time constant. We
        // do this check earlier than needed to ensure that we always panic as a
        // result of bugs in the program (such as calling this function on an
        // invalid type) instead of allowing this panic to be hidden if the cast
        // would have failed anyway for runtime reasons (such as a too-small
        // memory region).
        //
        // TODO(#67): Once our MSRV is 1.65, use let-else:
        // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
        let size_info = match self.size_info.try_to_nonzero_elem_size() {
            Some(size_info) => size_info,
            None => panic!("attempted to cast to slice type with zero-sized element"),
        };

        // Precondition
        __debug_assert!(addr.checked_add(bytes_len).is_some(), "`addr` + `bytes_len` > usize::MAX");

        // Alignment checks go in their own block to avoid introducing variables
        // into the top-level scope.
        {
            // We check alignment for `addr` (for prefix casts) or `addr +
            // bytes_len` (for suffix casts). For a prefix cast, the correctness
            // of this check is trivial - `addr` is the address the object will
            // live at.
            //
            // For a suffix cast, we know that all valid sizes for the type are
            // a multiple of the alignment (and by safety precondition, we know
            // `DstLayout` may only describe valid Rust types). Thus, a
            // validly-sized instance which lives at a validly-aligned address
            // must also end at a validly-aligned address. Thus, if the end
            // address for a suffix cast (`addr + bytes_len`) is not aligned,
            // then no valid start address will be aligned either.
            let offset = match cast_type {
                _CastType::_Prefix => 0,
                _CastType::_Suffix => bytes_len,
            };

            // Addition is guaranteed not to overflow because `offset <=
            // bytes_len`, and `addr + bytes_len <= usize::MAX` is a
            // precondition of this method. Modulus is guaranteed not to divide
            // by 0 because `align` is non-zero.
            #[allow(clippy::arithmetic_side_effects)]
            if (addr + offset) % self.align.get() != 0 {
                return None;
            }
        }

        let (elems, self_bytes) = match size_info {
            SizeInfo::Sized { _size: size } => {
                if size > bytes_len {
                    return None;
                }
                (0, size)
            }
            SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size: elem_size }) => {
                // Calculate the maximum number of bytes that could be consumed
                // - any number of bytes larger than this will either not be a
                // multiple of the alignment, or will be larger than
                // `bytes_len`.
                let max_total_bytes =
                    util::round_down_to_next_multiple_of_alignment(bytes_len, self.align);
                // Calculate the maximum number of bytes that could be consumed
                // by the trailing slice.
                //
                // TODO(#67): Once our MSRV is 1.65, use let-else:
                // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
                let max_slice_and_padding_bytes = match max_total_bytes.checked_sub(offset) {
                    Some(max) => max,
                    // `bytes_len` too small even for 0 trailing slice elements.
                    None => return None,
                };

                // Calculate the number of elements that fit in
                // `max_slice_and_padding_bytes`; any remaining bytes will be
                // considered padding.
                //
                // Guaranteed not to divide by zero: `elem_size` is non-zero.
                #[allow(clippy::arithmetic_side_effects)]
                let elems = max_slice_and_padding_bytes / elem_size.get();
                // Guaranteed not to overflow on multiplication: `usize::MAX >=
                // max_slice_and_padding_bytes >= (max_slice_and_padding_bytes /
                // elem_size) * elem_size`.
                //
                // Guaranteed not to overflow on addition:
                // - max_slice_and_padding_bytes == max_total_bytes - offset
                // - elems * elem_size <= max_slice_and_padding_bytes == max_total_bytes - offset
                // - elems * elem_size + offset <= max_total_bytes <= usize::MAX
                #[allow(clippy::arithmetic_side_effects)]
                let without_padding = offset + elems * elem_size.get();
                // `self_bytes` is equal to the offset bytes plus the bytes
                // consumed by the trailing slice plus any padding bytes
                // required to satisfy the alignment. Note that we have computed
                // the maximum number of trailing slice elements that could fit
                // in `self_bytes`, so any padding is guaranteed to be less than
                // the size of an extra element.
                //
                // Guaranteed not to overflow:
                // - By previous comment: without_padding == elems * elem_size +
                //   offset <= max_total_bytes
                // - By construction, `max_total_bytes` is a multiple of
                //   `self.align`.
                // - At most, adding padding needed to round `without_padding`
                //   up to the next multiple of the alignment will bring
                //   `self_bytes` up to `max_total_bytes`.
                #[allow(clippy::arithmetic_side_effects)]
                let self_bytes = without_padding
                    + util::core_layout::padding_needed_for(without_padding, self.align);
                (elems, self_bytes)
            }
        };

        __debug_assert!(self_bytes <= bytes_len);

        let split_at = match cast_type {
            _CastType::_Prefix => self_bytes,
            // Guaranteed not to underflow:
            // - In the `Sized` branch, only returns `size` if `size <=
            //   bytes_len`.
            // - In the `SliceDst` branch, calculates `self_bytes <=
            //   max_total_bytes`, which is upper-bounded by `bytes_len`.
            #[allow(clippy::arithmetic_side_effects)]
            _CastType::_Suffix => bytes_len - self_bytes,
        };

        Some((elems, split_at))
    }
}

/// A trait which carries information about a type's layout that is used by the
/// internals of this crate.
///
/// This trait is not meant for consumption by code outside of this crate. While
/// the normal semver stability guarantees apply with respect to which types
/// implement this trait and which trait implementations are implied by this
/// trait, no semver stability guarantees are made regarding its internals; they
/// may change at any time, and code which makes use of them may break.
///
/// # Safety
///
/// This trait does not convey any safety guarantees to code outside this crate.
#[doc(hidden)] // TODO: Remove this once KnownLayout is used by other APIs
pub unsafe trait KnownLayout {
    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
    // it likely won't be in the future, but there's no reason not to be
    // forwards-compatible with object safety.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    #[doc(hidden)]
    const LAYOUT: DstLayout;

    /// SAFETY: The returned pointer has the same address and provenance as
    /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
    /// elements in its trailing slice. If `Self` is sized, `elems` is ignored.
    #[doc(hidden)]
    fn raw_from_ptr_len(bytes: NonNull<u8>, elems: usize) -> NonNull<Self>;
}

// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T: KnownLayout> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }
    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // TODO(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }
}

#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
#[rustfmt::skip]
impl_known_layout!(
    T => Option<T>,
    T: ?Sized => PhantomData<T>,
    T => Wrapping<T>,
    T => MaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
);
impl_known_layout!(const N: usize, T => [T; N]);

safety_comment! {
    /// SAFETY:
    /// `str` and `ManuallyDrop<[T]>` [1] have the same representations as
    /// `[u8]` and `[T]` respectively. `str` has different bit validity than
    /// `[u8]`, but that doesn't affect the soundness of this impl.
    ///
    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    ///
    ///   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    ///   validity as `T`
    ///
    /// TODO(#429):
    /// - Add quotes from docs.
    /// - Once [1] (added in
    ///   https://github.com/rust-lang/rust/pull/115522) is available on stable,
    ///   quote the stable docs instead of the nightly docs.
    unsafe_impl_known_layout!(#[repr([u8])] str);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
}

/// Analyzes whether a type is [`FromZeroes`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `FromZeroes` and implements `FromZeroes` if it is
/// sound to do so. This derive can be applied to structs, enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::FromZeroes;
/// #[derive(FromZeroes)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// [safety conditions]: trait@FromZeroes#safety
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `FromZeroes` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `FromZeroes` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `FromZeroes` for that type:
///
/// - If the type is a struct, all of its fields must be `FromZeroes`.
/// - If the type is an enum, it must be C-like (meaning that all variants have
///   no fields) and it must have a variant with a discriminant of `0`. See [the
///   reference] for a description of how discriminant values are chosen.
/// - The type must not contain any [`UnsafeCell`]s (this is required in order
///   for it to be sound to construct a `&[u8]` and a `&T` to the same region of
///   memory). The type may contain references or pointers to `UnsafeCell`s so
///   long as those values can themselves be initialized from zeroes
///   (`FromZeroes` is not currently implemented for, e.g.,
///   `Option<&UnsafeCell<_>>`, but it could be one day).
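///
/// For example (a minimal sketch; `Status` is a hypothetical type), the
/// following enum satisfies the conditions above: it is C-like, and the
/// variant `Ok` has a discriminant of `0`:
///
/// ```
/// # use zerocopy_derive::FromZeroes;
/// #[derive(FromZeroes)]
/// #[repr(u8)]
/// enum Status {
///     Ok = 0,
///     Error = 1,
/// }
/// ```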
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `FromZeroes`, and must *not* rely on the
/// implementation details of this derive.
///
/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
/// [`UnsafeCell`]: core::cell::UnsafeCell
///
/// ## Why isn't an explicit representation required for structs?
///
/// Neither this derive, nor the [safety conditions] of `FromZeroes`, requires
/// that structs are marked with `#[repr(C)]`.
///
/// Per the [Rust reference][reference],
///
/// > The representation of a type can change the padding between fields, but
/// > does not change the layout of the fields themselves.
///
/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
///
/// Since the layout of structs only consists of padding bytes and field bytes,
/// a struct is soundly `FromZeroes` if:
/// 1. its padding is soundly `FromZeroes`, and
/// 2. its fields are soundly `FromZeroes`.
///
/// The first condition is always satisfied: padding bytes do not have any
/// validity constraints. A [discussion] of this question in the Unsafe Code
/// Guidelines Working Group concluded that it would be virtually unimaginable
/// for future versions of rustc to add validity constraints to padding bytes.
///
/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
///
/// Whether a struct is soundly `FromZeroes` therefore solely depends on whether
/// its fields are `FromZeroes`.
// TODO(#146): Document why we don't require an enum to have an explicit `repr`
// attribute.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromZeroes;

/// Types whose validity can be checked at runtime, allowing them to be
/// conditionally converted from byte slices.
///
/// WARNING: Do not implement this trait yourself! Instead, use
/// `#[derive(TryFromBytes)]`.
///
/// `TryFromBytes` types can safely be deserialized from an untrusted sequence
/// of bytes by performing a runtime check that the byte sequence contains a
/// valid instance of `Self`.
///
/// `TryFromBytes` is ignorant of byte order. For byte order-aware types, see
/// the [`byteorder`] module.
///
/// # What is a "valid instance"?
///
/// In Rust, each type has *bit validity*, which refers to the set of bit
/// patterns which may appear in an instance of that type. It is impossible for
/// safe Rust code to produce values which violate bit validity (i.e., values
/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
/// invalid value, this is considered [undefined behavior].
///
/// Rust's bit validity rules are currently being decided, which means that some
/// types have three classes of bit patterns: those which are definitely valid,
/// and whose validity is documented in the language; those which may or may not
/// be considered valid at some point in the future; and those which are
/// definitely invalid.
///
/// Zerocopy takes a conservative approach, and only considers a bit pattern to
/// be valid if its validity is a documented guarantee provided by the
/// language.
///
/// For most use cases, Rust's current guarantees align with programmers'
/// intuitions about what ought to be valid. As a result, zerocopy's
/// conservatism should not affect most users. One notable exception is unions,
/// whose bit validity is very much up in the air; zerocopy does not permit
/// implementing `TryFromBytes` for any union type.
///
/// If you are negatively affected by lack of support for a particular type,
/// we encourage you to let us know by [filing an issue][github-repo].
///
/// # Safety
///
/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
/// or representation of `T`. It merely provides the ability to perform a
/// validity check at runtime via methods like [`try_from_ref`].
///
/// Currently, it is not possible to stably implement `TryFromBytes` other than
/// by using `#[derive(TryFromBytes)]`. While there are `#[doc(hidden)]` items
/// on this trait that provide well-defined safety invariants, no stability
/// guarantees are made with respect to these items. In particular, future
/// releases of zerocopy may make backwards-breaking changes to these items,
/// including changes that only affect soundness, which may cause code which
/// uses those items to silently become unsound.
///
/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
/// [github-repo]: https://github.com/google/zerocopy
/// [`try_from_ref`]: TryFromBytes::try_from_ref
// TODO(#5): Update `try_from_ref` doc link once it exists
#[doc(hidden)]
pub unsafe trait TryFromBytes {
    /// Does a given memory range contain a valid instance of `Self`?
    ///
    /// # Safety
    ///
    /// ## Preconditions
    ///
    /// The memory referenced by `candidate` may only be accessed via reads for
    /// the duration of this method call. This prohibits writes through mutable
    /// references and through [`UnsafeCell`]s. There may exist immutable
    /// references to the same memory which contain `UnsafeCell`s so long as:
    /// - Those `UnsafeCell`s exist at the same byte ranges as `UnsafeCell`s in
    ///   `Self`. This is a bidirectional property: `Self` may not contain
    ///   `UnsafeCell`s where other references to the same memory do not, and
    ///   vice-versa.
    /// - Those `UnsafeCell`s are never used to perform mutation for the
    ///   duration of this method call.
    ///
    /// The memory referenced by `candidate` may not be referenced by any
    /// mutable references even if these references are not used to perform
    /// mutation.
    ///
1201 | | /// `candidate` is not required to refer to a valid `Self`. However, it must |
1202 | | /// satisfy the requirement that uninitialized bytes may only be present |
1203 | | /// where it is possible for them to be present in `Self`. This is a dynamic |
1204 | | /// property: if, at a particular byte offset, a valid enum discriminant is |
1205 | | /// set, the subsequent bytes may only have uninitialized bytes as |
1206 | | /// specified by the corresponding enum. |
1207 | | /// |
1208 | | /// Formally, given `len = size_of_val_raw(candidate)`, at every byte |
1209 | | /// offset, `b`, in the range `[0, len)`: |
1210 | | /// - If, in all instances `s: Self` of length `len`, the byte at offset `b` |
1211 | | /// in `s` is initialized, then the byte at offset `b` within `*candidate` |
1212 | | /// must be initialized. |
1213 | | /// - Let `c` be the contents of the byte range `[0, b)` in `*candidate`. |
1214 | | /// Let `S` be the subset of valid instances of `Self` of length `len` |
1215 | | /// which contain `c` in the offset range `[0, b)`. If, for all instances |
1216 | | /// of `s: Self` in `S`, the byte at offset `b` in `s` is initialized, |
1217 | | /// then the byte at offset `b` in `*candidate` must be initialized. |
1218 | | /// |
1219 | | /// Pragmatically, this means that if `*candidate` is guaranteed to |
1220 | | /// contain an enum type at a particular offset, and the enum discriminant |
1221 | | /// stored in `*candidate` corresponds to a valid variant of that enum |
1222 | | /// type, then it is guaranteed that the appropriate bytes of `*candidate` |
1223 | | /// are initialized as defined by that variant's bit validity (although |
1224 | | /// note that the variant may contain another enum type, in which case the |
1225 | | /// same rules apply depending on the state of its discriminant, and so on |
1226 | | /// recursively). |
1227 | | /// |
1228 | | /// ## Postconditions |
1229 | | /// |
1230 | | /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true, |
1231 | | /// `*candidate` contains a valid `Self`. |
1232 | | /// |
1233 | | /// # Panics |
1234 | | /// |
1235 | | /// `is_bit_valid` may panic. Callers are responsible for ensuring that any |
1236 | | /// `unsafe` code remains sound even in the face of `is_bit_valid` |
1237 | | /// panicking. (We support user-defined validation routines; so long as |
1238 | | /// these routines are not required to be `unsafe`, there is no way to |
1239 | | /// ensure that these do not generate panics.) |
1240 | | /// |
1241 | | /// [`UnsafeCell`]: core::cell::UnsafeCell |
1242 | | #[doc(hidden)] |
1243 | | unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool; |
1244 | | |
1245 | | /// Attempts to interpret a byte slice as a `Self`. |
1246 | | /// |
1247 | | /// `try_from_ref` validates that `bytes` contains a valid `Self`, and that |
1248 | | /// it satisfies `Self`'s alignment requirement. If it does, then `bytes` is |
1249 | | /// reinterpreted as a `Self`. |
1250 | | /// |
1251 | | /// Note that Rust's bit validity rules are still being decided. As such, |
1252 | | /// there exist types whose bit validity is ambiguous. See the |
1253 | | /// `TryFromBytes` docs for a discussion of how these cases are handled. |
1254 | | // TODO(#251): In a future in which we distinguish between `FromBytes` and |
1255 | | // `RefFromBytes`, this requires `where Self: RefFromBytes` to disallow |
1256 | | // interior mutability. |
1257 | | #[inline] |
1258 | | #[doc(hidden)] // TODO(#5): Finalize name before removing this attribute. |
1259 | | fn try_from_ref(bytes: &[u8]) -> Option<&Self> |
1260 | | where |
1261 | | Self: KnownLayout, |
1262 | | { |
1263 | | let maybe_self = Ptr::from(bytes).try_cast_into_no_leftover::<Self>()?; |
1264 | | |
1265 | | // SAFETY: |
1266 | | // - Since `bytes` is an immutable reference, we know that no mutable |
1267 | | // references exist to this memory region. |
1268 | | // - Since `[u8]` contains no `UnsafeCell`s, we know there are no |
1269 | | // `&UnsafeCell` references to this memory region. |
1270 | | // - Since we don't permit implementing `TryFromBytes` for types which |
1271 | | // contain `UnsafeCell`s, there are no `UnsafeCell`s in `Self`, and so |
1272 | | // the requirement that all references contain `UnsafeCell`s at the |
1273 | | // same offsets is trivially satisfied. |
1274 | | // - All bytes of `bytes` are initialized. |
1275 | | // |
1276 | | // This call may panic. If that happens, it doesn't cause any soundness |
1277 | | // issues, as we have not generated any invalid state which we need to |
1278 | | // fix before returning. |
1279 | | if unsafe { !Self::is_bit_valid(maybe_self) } { |
1280 | | return None; |
1281 | | } |
1282 | | |
1283 | | // SAFETY: |
1284 | | // - Preconditions for `as_ref`: |
1285 | | // - `is_bit_valid` guarantees that `*maybe_self` contains a valid |
1286 | | // `Self`. Since `&[u8]` does not permit interior mutation, this |
1287 | | // cannot be invalidated after this method returns. |
1288 | | // - Since the argument and return types are immutable references, |
1289 | | // Rust will prevent the caller from producing any mutable |
1290 | | // references to the same memory region. |
1291 | | // - Since `Self` is not allowed to contain any `UnsafeCell`s and the |
1292 | | // same is true of `[u8]`, interior mutation is not possible. Thus, |
1293 | | // no mutation is possible. For the same reason, there is no |
1294 | | // mismatch between the two types in terms of which byte ranges are |
1295 | | // referenced as `UnsafeCell`s. |
1296 | | // - Since interior mutation isn't possible within `Self`, there's no |
1297 | | // way for the returned reference to be used to modify the byte range, |
1298 | | // and thus there's no way for the returned reference to be used to |
1299 | | // write an invalid `[u8]` which would be observable via the original |
1300 | | // `&[u8]`. |
1301 | | Some(unsafe { maybe_self.as_ref() }) |
1302 | | } |
1303 | | } |
1304 | | |
1305 | | /// Types for which a sequence of bytes all set to zero represents a valid |
1306 | | /// instance of the type. |
1307 | | /// |
1308 | | /// Any memory region of the appropriate length which is guaranteed to contain |
1309 | | /// only zero bytes can be viewed as any `FromZeroes` type with no runtime |
1310 | | /// overhead. This is useful whenever memory is known to be in a zeroed state, |
1311 | | /// such as memory returned from some allocation routines. |
1312 | | /// |
1313 | | /// # Implementation |
1314 | | /// |
1315 | | /// **Do not implement this trait yourself!** Instead, use |
1316 | | /// [`#[derive(FromZeroes)]`][derive] (requires the `derive` Cargo feature); |
1317 | | /// e.g.: |
1318 | | /// |
1319 | | /// ``` |
1320 | | /// # use zerocopy_derive::FromZeroes; |
1321 | | /// #[derive(FromZeroes)] |
1322 | | /// struct MyStruct { |
1323 | | /// # /* |
1324 | | /// ... |
1325 | | /// # */ |
1326 | | /// } |
1327 | | /// |
1328 | | /// #[derive(FromZeroes)] |
1329 | | /// #[repr(u8)] |
1330 | | /// enum MyEnum { |
1331 | | /// # Variant0, |
1332 | | /// # /* |
1333 | | /// ... |
1334 | | /// # */ |
1335 | | /// } |
1336 | | /// |
1337 | | /// #[derive(FromZeroes)] |
1338 | | /// union MyUnion { |
1339 | | /// # variant: u8, |
1340 | | /// # /* |
1341 | | /// ... |
1342 | | /// # */ |
1343 | | /// } |
1344 | | /// ``` |
1345 | | /// |
1346 | | /// This derive performs a sophisticated, compile-time safety analysis to |
1347 | | /// determine whether a type is `FromZeroes`. |
1348 | | /// |
1349 | | /// # Safety |
1350 | | /// |
1351 | | /// *This section describes what is required in order for `T: FromZeroes`, and |
1352 | | /// what unsafe code may assume of such types. If you don't plan on implementing |
1353 | | /// `FromZeroes` manually, and you don't plan on writing unsafe code that |
1354 | | /// operates on `FromZeroes` types, then you don't need to read this section.* |
1355 | | /// |
1356 | | /// If `T: FromZeroes`, then unsafe code may assume that: |
1357 | | /// - It is sound to treat any initialized sequence of zero bytes of length |
1358 | | /// `size_of::<T>()` as a `T`. |
1359 | | /// - Given `b: &[u8]` where `b.len() == size_of::<T>()`, `b` is aligned to |
1360 | | /// `align_of::<T>()`, and `b` contains only zero bytes, it is sound to |
1361 | | /// construct a `t: &T` at the same address as `b`, and it is sound for both |
1362 | | /// `b` and `t` to be live at the same time. |
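 | | /// |
 | | /// As a minimal sketch of the first guarantee: because `u32: FromZeroes`, |
 | | /// the all-zeroes bit pattern is a valid `u32`, so the following `unsafe` |
 | | /// code is sound (illustrative only): |
 | | /// |
 | | /// ``` |
 | | /// let zeros = [0u8; 4]; |
 | | /// // Sound *because* `u32: FromZeroes` guarantees that four zero bytes |
 | | /// // constitute a valid `u32`. |
 | | /// let x: u32 = unsafe { core::mem::transmute(zeros) }; |
 | | /// assert_eq!(x, 0); |
 | | /// ``` |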
1363 | | /// |
1364 | | /// If a type is marked as `FromZeroes` which violates this contract, it may |
1365 | | /// cause undefined behavior. |
1366 | | /// |
1367 | | /// `#[derive(FromZeroes)]` only permits [types which satisfy these |
1368 | | /// requirements][derive-analysis]. |
1369 | | /// |
1370 | | #[cfg_attr( |
1371 | | feature = "derive", |
1372 | | doc = "[derive]: zerocopy_derive::FromZeroes", |
1373 | | doc = "[derive-analysis]: zerocopy_derive::FromZeroes#analysis" |
1374 | | )] |
1375 | | #[cfg_attr( |
1376 | | not(feature = "derive"), |
1377 | | doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html"), |
1378 | | doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html#analysis"), |
1379 | | )] |
1380 | | pub unsafe trait FromZeroes { |
1381 | | // The `Self: Sized` bound makes it so that `FromZeroes` is still object |
1382 | | // safe. |
1383 | | #[doc(hidden)] |
1384 | | fn only_derive_is_allowed_to_implement_this_trait() |
1385 | | where |
1386 | | Self: Sized; |
1387 | | |
1388 | | /// Overwrites `self` with zeroes. |
1389 | | /// |
1390 | | /// Sets every byte in `self` to 0. While this is similar to doing `*self = |
1391 | | /// Self::new_zeroed()`, it differs in that `zero` does not semantically |
1392 | | /// drop the current value and replace it with a new one - it simply |
1393 | | /// modifies the bytes of the existing value. |
1394 | | /// |
1395 | | /// # Examples |
1396 | | /// |
1397 | | /// ``` |
1398 | | /// # use zerocopy::FromZeroes; |
1399 | | /// # use zerocopy_derive::*; |
1400 | | /// # |
1401 | | /// #[derive(FromZeroes)] |
1402 | | /// #[repr(C)] |
1403 | | /// struct PacketHeader { |
1404 | | /// src_port: [u8; 2], |
1405 | | /// dst_port: [u8; 2], |
1406 | | /// length: [u8; 2], |
1407 | | /// checksum: [u8; 2], |
1408 | | /// } |
1409 | | /// |
1410 | | /// let mut header = PacketHeader { |
1411 | | /// src_port: 100u16.to_be_bytes(), |
1412 | | /// dst_port: 200u16.to_be_bytes(), |
1413 | | /// length: 300u16.to_be_bytes(), |
1414 | | /// checksum: 400u16.to_be_bytes(), |
1415 | | /// }; |
1416 | | /// |
1417 | | /// header.zero(); |
1418 | | /// |
1419 | | /// assert_eq!(header.src_port, [0, 0]); |
1420 | | /// assert_eq!(header.dst_port, [0, 0]); |
1421 | | /// assert_eq!(header.length, [0, 0]); |
1422 | | /// assert_eq!(header.checksum, [0, 0]); |
1423 | | /// ``` |
1424 | | #[inline(always)] |
1425 | | fn zero(&mut self) { |
1426 | | let slf: *mut Self = self; |
1427 | | let len = mem::size_of_val(self); |
1428 | | // SAFETY: |
1429 | | // - `self` is guaranteed by the type system to be valid for writes of |
1430 | | // size `size_of_val(self)`. |
1431 | | // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned |
1432 | | // as required by `u8`. |
1433 | | // - Since `Self: FromZeroes`, the all-zeroes instance is a valid |
1434 | | // instance of `Self.` |
1435 | | // |
1436 | | // TODO(#429): Add references to docs and quotes. |
1437 | | unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) }; |
1438 | | } |
1439 | | |
1440 | | /// Creates an instance of `Self` from zeroed bytes. |
1441 | | /// |
1442 | | /// # Examples |
1443 | | /// |
1444 | | /// ``` |
1445 | | /// # use zerocopy::FromZeroes; |
1446 | | /// # use zerocopy_derive::*; |
1447 | | /// # |
1448 | | /// #[derive(FromZeroes)] |
1449 | | /// #[repr(C)] |
1450 | | /// struct PacketHeader { |
1451 | | /// src_port: [u8; 2], |
1452 | | /// dst_port: [u8; 2], |
1453 | | /// length: [u8; 2], |
1454 | | /// checksum: [u8; 2], |
1455 | | /// } |
1456 | | /// |
1457 | | /// let header: PacketHeader = FromZeroes::new_zeroed(); |
1458 | | /// |
1459 | | /// assert_eq!(header.src_port, [0, 0]); |
1460 | | /// assert_eq!(header.dst_port, [0, 0]); |
1461 | | /// assert_eq!(header.length, [0, 0]); |
1462 | | /// assert_eq!(header.checksum, [0, 0]); |
1463 | | /// ``` |
1464 | | #[inline(always)] |
1465 | | fn new_zeroed() -> Self |
1466 | | where |
1467 | | Self: Sized, |
1468 | | { |
1469 | | // SAFETY: `FromZeroes` says that the all-zeroes bit pattern is legal. |
1470 | | unsafe { mem::zeroed() } |
1471 | | } |
1472 | | |
1473 | | /// Creates a `Box<Self>` from zeroed bytes. |
1474 | | /// |
1475 | | /// This function is useful for allocating large values on the heap and |
1476 | | /// zero-initializing them, without ever creating a temporary instance of |
1477 | | /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()` |
1478 | | /// will allocate `[u8; 1048576]` directly on the heap; it does not require |
1479 | | /// storing `[u8; 1048576]` in a temporary variable on the stack. |
1480 | | /// |
1481 | | /// On systems that use a heap implementation that supports allocating from |
1482 | | /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may |
1483 | | /// have performance benefits. |
1484 | | /// |
1485 | | /// Note that `Box<Self>` can be converted to `Arc<Self>` and other |
1486 | | /// container types without reallocation. |
1487 | | /// |
1488 | | /// # Panics |
1489 | | /// |
1490 | | /// Panics if allocation of `size_of::<Self>()` bytes fails. |
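 | | /// |
 | | /// # Examples |
 | | /// |
 | | /// A minimal sketch (requires the `alloc` Cargo feature); any sized |
 | | /// `FromZeroes` type works the same way: |
 | | /// |
 | | /// ``` |
 | | /// use zerocopy::FromZeroes; |
 | | /// |
 | | /// // Allocate a zeroed 1 MiB buffer directly on the heap. |
 | | /// let buf: Box<[u8; 1048576]> = <[u8; 1048576]>::new_box_zeroed(); |
 | | /// |
 | | /// assert_eq!(buf[0], 0); |
 | | /// assert_eq!(buf[1048575], 0); |
 | | /// ``` |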
1491 | | #[cfg(feature = "alloc")] |
1492 | | #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] |
1493 | | #[inline] |
1494 | | fn new_box_zeroed() -> Box<Self> |
1495 | | where |
1496 | | Self: Sized, |
1497 | | { |
1498 | | // If `T` is a ZST, then return a proper boxed instance of it. There is |
1499 | | // no allocation, but `Box` does require a correct dangling pointer. |
1500 | | let layout = Layout::new::<Self>(); |
1501 | | if layout.size() == 0 { |
1502 | | return Box::new(Self::new_zeroed()); |
1503 | | } |
1504 | | |
1505 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
1506 | | #[allow(clippy::undocumented_unsafe_blocks)] |
1507 | | let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() }; |
1508 | | if ptr.is_null() { |
1509 | | alloc::alloc::handle_alloc_error(layout); |
1510 | | } |
1511 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
1512 | | #[allow(clippy::undocumented_unsafe_blocks)] |
1513 | | unsafe { |
1514 | | Box::from_raw(ptr) |
1515 | | } |
1516 | | } |
1517 | | |
1518 | | /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes. |
1519 | | /// |
1520 | | /// This function is useful for allocating large values of `[Self]` on the |
1521 | | /// heap and zero-initializing them, without ever creating a temporary |
1522 | | /// instance of `[Self; _]` on the stack. For example, |
1523 | | /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on |
1524 | | /// the heap; it does not require storing the slice on the stack. |
1525 | | /// |
1526 | | /// On systems that use a heap implementation that supports allocating from |
1527 | | /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance |
1528 | | /// benefits. |
1529 | | /// |
1530 | | /// If `Self` is a zero-sized type, then this function will return a |
1531 | | /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any |
1532 | | /// actual information, but its `len()` property will report the correct |
1533 | | /// value. |
1534 | | /// |
1535 | | /// # Panics |
1536 | | /// |
1537 | | /// * Panics if `size_of::<Self>() * len` overflows. |
1538 | | /// * Panics if allocation of `size_of::<Self>() * len` bytes fails. |
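 | | /// |
 | | /// # Examples |
 | | /// |
 | | /// A minimal sketch (requires the `alloc` Cargo feature): |
 | | /// |
 | | /// ``` |
 | | /// use zerocopy::FromZeroes; |
 | | /// |
 | | /// // Allocate a zeroed boxed slice of four `u16`s directly on the heap. |
 | | /// let v: Box<[u16]> = u16::new_box_slice_zeroed(4); |
 | | /// |
 | | /// assert_eq!(&v[..], &[0u16, 0, 0, 0][..]); |
 | | /// ``` |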
1539 | | #[cfg(feature = "alloc")] |
1540 | | #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] |
1541 | | #[inline] |
1542 | | fn new_box_slice_zeroed(len: usize) -> Box<[Self]> |
1543 | | where |
1544 | | Self: Sized, |
1545 | | { |
1546 | | let size = mem::size_of::<Self>() |
1547 | | .checked_mul(len) |
1548 | | .expect("mem::size_of::<Self>() * len overflows `usize`"); |
1549 | | let align = mem::align_of::<Self>(); |
1550 | | // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a |
1551 | | // bug in which sufficiently-large allocations (those which, when |
1552 | | // rounded up to the alignment, overflow `isize`) are not rejected, |
1553 | | // which can cause undefined behavior. See #64 for details. |
1554 | | // |
1555 | | // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion. |
1556 | | #[allow(clippy::as_conversions)] |
1557 | | let max_alloc = (isize::MAX as usize).saturating_sub(align); |
1558 | | assert!(size <= max_alloc); |
1559 | | // TODO(https://github.com/rust-lang/rust/issues/55724): Use |
1560 | | // `Layout::repeat` once it's stabilized. |
1561 | | let layout = |
1562 | | Layout::from_size_align(size, align).expect("total allocation size overflows `isize`"); |
1563 | | |
1564 | | let ptr = if layout.size() != 0 { |
1565 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
1566 | | #[allow(clippy::undocumented_unsafe_blocks)] |
1567 | | let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() }; |
1568 | | if ptr.is_null() { |
1569 | | alloc::alloc::handle_alloc_error(layout); |
1570 | | } |
1571 | | ptr |
1572 | | } else { |
1573 | | // `Box<[T]>` does not allocate when `T` is zero-sized or when `len` |
1574 | | // is zero, but it does require a non-null dangling pointer for its |
1575 | | // allocation. |
1576 | | NonNull::<Self>::dangling().as_ptr() |
1577 | | }; |
1578 | | |
1579 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
1580 | | #[allow(clippy::undocumented_unsafe_blocks)] |
1581 | | unsafe { |
1582 | | Box::from_raw(slice::from_raw_parts_mut(ptr, len)) |
1583 | | } |
1584 | | } |
1585 | | |
1586 | | /// Creates a `Vec<Self>` from zeroed bytes. |
1587 | | /// |
1588 | | /// This function is useful for allocating large `Vec`s and |
1589 | | /// zero-initializing them, without ever creating a temporary instance of |
1590 | | /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For |
1591 | | /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the |
1592 | | /// heap; it does not require storing intermediate values on the stack. |
1593 | | /// |
1594 | | /// On systems that use a heap implementation that supports allocating from |
1595 | | /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits. |
1596 | | /// |
1597 | | /// If `Self` is a zero-sized type, then this function will return a |
1598 | | /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any |
1599 | | /// actual information, but its `len()` property will report the correct |
1600 | | /// value. |
1601 | | /// |
1602 | | /// # Panics |
1603 | | /// |
1604 | | /// * Panics if `size_of::<Self>() * len` overflows. |
1605 | | /// * Panics if allocation of `size_of::<Self>() * len` bytes fails. |
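 | | /// |
 | | /// # Examples |
 | | /// |
 | | /// A minimal sketch (requires the `alloc` Cargo feature): |
 | | /// |
 | | /// ``` |
 | | /// use zerocopy::FromZeroes; |
 | | /// |
 | | /// let v: Vec<u32> = u32::new_vec_zeroed(3); |
 | | /// |
 | | /// assert_eq!(v, vec![0, 0, 0]); |
 | | /// ``` |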
1606 | | #[cfg(feature = "alloc")] |
1607 | | #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] |
1608 | | #[inline(always)] |
1609 | | fn new_vec_zeroed(len: usize) -> Vec<Self> |
1610 | | where |
1611 | | Self: Sized, |
1612 | | { |
1613 | | Self::new_box_slice_zeroed(len).into() |
1614 | | } |
1615 | | } |
1616 | | |
1617 | | /// Analyzes whether a type is [`FromBytes`]. |
1618 | | /// |
1619 | | /// This derive analyzes, at compile time, whether the annotated type satisfies |
1620 | | /// the [safety conditions] of `FromBytes` and implements `FromBytes` if it is |
1621 | | /// sound to do so. This derive can be applied to structs, enums, and unions; |
1622 | | /// e.g.: |
1623 | | /// |
1624 | | /// ``` |
1625 | | /// # use zerocopy_derive::{FromBytes, FromZeroes}; |
1626 | | /// #[derive(FromZeroes, FromBytes)] |
1627 | | /// struct MyStruct { |
1628 | | /// # /* |
1629 | | /// ... |
1630 | | /// # */ |
1631 | | /// } |
1632 | | /// |
1633 | | /// #[derive(FromZeroes, FromBytes)] |
1634 | | /// #[repr(u8)] |
1635 | | /// enum MyEnum { |
1636 | | /// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E, |
1637 | | /// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D, |
1638 | | /// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C, |
1639 | | /// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B, |
1640 | | /// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A, |
1641 | | /// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59, |
1642 | | /// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68, |
1643 | | /// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77, |
1644 | | /// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86, |
1645 | | /// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95, |
1646 | | /// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4, |
1647 | | /// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3, |
1648 | | /// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2, |
1649 | | /// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1, |
1650 | | /// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0, |
1651 | | /// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF, |
1652 | | /// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE, |
1653 | | /// # VFF, |
1654 | | /// # /* |
1655 | | /// ... |
1656 | | /// # */ |
1657 | | /// } |
1658 | | /// |
1659 | | /// #[derive(FromZeroes, FromBytes)] |
1660 | | /// union MyUnion { |
1661 | | /// # variant: u8, |
1662 | | /// # /* |
1663 | | /// ... |
1664 | | /// # */ |
1665 | | /// } |
1666 | | /// ``` |
1667 | | /// |
1668 | | /// [safety conditions]: trait@FromBytes#safety |
1669 | | /// |
1670 | | /// # Analysis |
1671 | | /// |
1672 | | /// *This section describes, roughly, the analysis performed by this derive to |
1673 | | /// determine whether it is sound to implement `FromBytes` for a given type. |
1674 | | /// Unless you are modifying the implementation of this derive, or attempting to |
1675 | | /// manually implement `FromBytes` for a type yourself, you don't need to read |
1676 | | /// this section.* |
1677 | | /// |
1678 | | /// If a type has the following properties, then this derive can implement |
1679 | | /// `FromBytes` for that type: |
1680 | | /// |
1681 | | /// - If the type is a struct, all of its fields must be `FromBytes`. |
1682 | | /// - If the type is an enum: |
1683 | | /// - It must be a C-like enum (meaning that all variants have no fields). |
1684 | | /// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`, |
1685 | | /// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`). |
1686 | | /// - The maximum number of discriminants must be used (so that every possible |
1687 | | /// bit pattern is a valid one). Be very careful when using the `C`, |
1688 | | /// `usize`, or `isize` representations, as their size is |
1689 | | /// platform-dependent. |
1690 | | /// - The type must not contain any [`UnsafeCell`]s (this is required in order |
1691 | | /// for it to be sound to construct a `&[u8]` and a `&T` to the same region of |
1692 | | /// memory). The type may contain references or pointers to `UnsafeCell`s so |
1693 | | /// long as those values can themselves be initialized from zeroes |
1694 | | /// (`FromBytes` is not currently implemented for, e.g., `Option<*const |
1695 | | /// UnsafeCell<_>>`, but it could be one day). |
1696 | | /// |
1697 | | /// [`UnsafeCell`]: core::cell::UnsafeCell |
1698 | | /// |
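 | | /// For instance, the following sketch (with a hypothetical enum name) fails |
 | | /// to compile, because the `#[repr(u8)]` enum does not use all 256 possible |
 | | /// discriminants: |
 | | /// |
 | | /// ```compile_fail |
 | | /// # use zerocopy_derive::{FromBytes, FromZeroes}; |
 | | /// #[derive(FromZeroes, FromBytes)] |
 | | /// #[repr(u8)] |
 | | /// enum Incomplete { |
 | | ///     A = 0, // covers only one of the 256 possible `u8` bit patterns |
 | | /// } |
 | | /// ``` |
 | | /// |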
1699 | | /// This analysis is subject to change. Unsafe code may *only* rely on the |
1700 | | /// documented [safety conditions] of `FromBytes`, and must *not* rely on the |
1701 | | /// implementation details of this derive. |
1702 | | /// |
1703 | | /// ## Why isn't an explicit representation required for structs? |
1704 | | /// |
1705 | | /// Neither this derive, nor the [safety conditions] of `FromBytes`, requires |
1706 | | /// that structs are marked with `#[repr(C)]`. |
1707 | | /// |
1708 | | /// Per the [Rust reference][reference], |
1709 | | /// |
1710 | | /// > The representation of a type can change the padding between fields, but |
1711 | | /// > does not change the layout of the fields themselves. |
1712 | | /// |
1713 | | /// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations |
1714 | | /// |
1715 | | /// Since the layout of structs only consists of padding bytes and field bytes, |
1716 | | /// a struct is soundly `FromBytes` if: |
1717 | | /// 1. its padding is soundly `FromBytes`, and |
1718 | | /// 2. its fields are soundly `FromBytes`. |
1719 | | /// |
1720 | | /// The answer to the first question is always yes: padding bytes do not have |
1721 | | /// any validity constraints. A [discussion] of this question in the Unsafe Code |
1722 | | /// Guidelines Working Group concluded that it would be virtually unimaginable |
1723 | | /// for future versions of rustc to add validity constraints to padding bytes. |
1724 | | /// |
1725 | | /// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174 |
1726 | | /// |
1727 | | /// Whether a struct is soundly `FromBytes` therefore solely depends on whether |
1728 | | /// its fields are `FromBytes`. |
1729 | | // TODO(#146): Document why we don't require an enum to have an explicit `repr` |
1730 | | // attribute. |
1731 | | #[cfg(any(feature = "derive", test))] |
1732 | | #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] |
1733 | | pub use zerocopy_derive::FromBytes; |
1734 | | |
1735 | | /// Types for which any bit pattern is valid. |
1736 | | /// |
1737 | | /// Any memory region of the appropriate length which contains initialized bytes |
1738 | | /// can be viewed as any `FromBytes` type with no runtime overhead. This is |
1739 | | /// useful for efficiently parsing bytes as structured data. |
1740 | | /// |
1741 | | /// # Implementation |
1742 | | /// |
1743 | | /// **Do not implement this trait yourself!** Instead, use |
1744 | | /// [`#[derive(FromBytes)]`][derive] (requires the `derive` Cargo feature); |
1745 | | /// e.g.: |
1746 | | /// |
1747 | | /// ``` |
1748 | | /// # use zerocopy_derive::{FromBytes, FromZeroes}; |
1749 | | /// #[derive(FromZeroes, FromBytes)] |
1750 | | /// struct MyStruct { |
1751 | | /// # /* |
1752 | | /// ... |
1753 | | /// # */ |
1754 | | /// } |
1755 | | /// |
1756 | | /// #[derive(FromZeroes, FromBytes)] |
1757 | | /// #[repr(u8)] |
1758 | | /// enum MyEnum { |
1759 | | /// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E, |
1760 | | /// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D, |
1761 | | /// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C, |
1762 | | /// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B, |
1763 | | /// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A, |
1764 | | /// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59, |
1765 | | /// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68, |
1766 | | /// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77, |
1767 | | /// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86, |
1768 | | /// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95, |
1769 | | /// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4, |
1770 | | /// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3, |
1771 | | /// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2, |
1772 | | /// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1, |
1773 | | /// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0, |
1774 | | /// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF, |
1775 | | /// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE, |
1776 | | /// # VFF, |
1777 | | /// # /* |
1778 | | /// ... |
1779 | | /// # */ |
1780 | | /// } |
1781 | | /// |
1782 | | /// #[derive(FromZeroes, FromBytes)] |
1783 | | /// union MyUnion { |
1784 | | /// # variant: u8, |
1785 | | /// # /* |
1786 | | /// ... |
1787 | | /// # */ |
1788 | | /// } |
1789 | | /// ``` |
1790 | | /// |
1791 | | /// This derive performs a sophisticated, compile-time safety analysis to |
1792 | | /// determine whether a type is `FromBytes`. |
1793 | | /// |
1794 | | /// # Safety |
1795 | | /// |
1796 | | /// *This section describes what is required in order for `T: FromBytes`, and |
1797 | | /// what unsafe code may assume of such types. If you don't plan on implementing |
1798 | | /// `FromBytes` manually, and you don't plan on writing unsafe code that |
1799 | | /// operates on `FromBytes` types, then you don't need to read this section.* |
1800 | | /// |
1801 | | /// If `T: FromBytes`, then unsafe code may assume that: |
1802 | | /// - It is sound to treat any initialized sequence of bytes of length |
1803 | | /// `size_of::<T>()` as a `T`. |
1804 | | /// - Given `b: &[u8]` where `b.len() == size_of::<T>()` and `b` is aligned to |
1805 | | ///   `align_of::<T>()`, it is sound to construct a `t: &T` at the same address |
1806 | | ///   as `b`, and it is sound for both `b` and `t` to be live at the same time. |
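 | | /// |
 | | /// As a minimal sketch of the first guarantee: because `u32: FromBytes`, |
 | | /// *any* four initialized bytes form a valid `u32`, so the following |
 | | /// `unsafe` code is sound (illustrative only): |
 | | /// |
 | | /// ``` |
 | | /// let bytes = [0x12u8, 0x34, 0x56, 0x78]; |
 | | /// // Sound *because* `u32: FromBytes` guarantees that any initialized |
 | | /// // four-byte sequence is a valid `u32`. |
 | | /// let x: u32 = unsafe { core::mem::transmute(bytes) }; |
 | | /// assert_eq!(x, u32::from_ne_bytes(bytes)); |
 | | /// ``` |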
1807 | | /// |
1808 | | /// If a type is marked as `FromBytes` which violates this contract, it may |
1809 | | /// cause undefined behavior. |
1810 | | /// |
1811 | | /// `#[derive(FromBytes)]` only permits [types which satisfy these |
1812 | | /// requirements][derive-analysis]. |
1813 | | /// |
1814 | | #[cfg_attr( |
1815 | | feature = "derive", |
1816 | | doc = "[derive]: zerocopy_derive::FromBytes", |
1817 | | doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis" |
1818 | | )] |
1819 | | #[cfg_attr( |
1820 | | not(feature = "derive"), |
1821 | | doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"), |
1822 | | doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"), |
1823 | | )] |
1824 | | pub unsafe trait FromBytes: FromZeroes { |
1825 | | // The `Self: Sized` bound makes it so that `FromBytes` is still object |
1826 | | // safe. |
1827 | | #[doc(hidden)] |
1828 | | fn only_derive_is_allowed_to_implement_this_trait() |
1829 | | where |
1830 | | Self: Sized; |
1831 | | |
1832 | | /// Interprets the given `bytes` as a `&Self` without copying. |
1833 | | /// |
1834 | | /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to |
1835 | | /// `align_of::<Self>()`, this returns `None`. |
1836 | | /// |
1837 | | /// # Examples |
1838 | | /// |
1839 | | /// ``` |
1840 | | /// use zerocopy::FromBytes; |
1841 | | /// # use zerocopy_derive::*; |
1842 | | /// |
1843 | | /// #[derive(FromZeroes, FromBytes)] |
1844 | | /// #[repr(C)] |
1845 | | /// struct PacketHeader { |
1846 | | /// src_port: [u8; 2], |
1847 | | /// dst_port: [u8; 2], |
1848 | | /// length: [u8; 2], |
1849 | | /// checksum: [u8; 2], |
1850 | | /// } |
1851 | | /// |
1852 | | /// // These bytes encode a `PacketHeader`. |
1853 | | /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice(); |
1854 | | /// |
1855 | | /// let header = PacketHeader::ref_from(bytes).unwrap(); |
1856 | | /// |
1857 | | /// assert_eq!(header.src_port, [0, 1]); |
1858 | | /// assert_eq!(header.dst_port, [2, 3]); |
1859 | | /// assert_eq!(header.length, [4, 5]); |
1860 | | /// assert_eq!(header.checksum, [6, 7]); |
1861 | | /// ``` |
1862 | | #[inline] |
1863 | | fn ref_from(bytes: &[u8]) -> Option<&Self> |
1864 | | where |
1865 | | Self: Sized, |
1866 | | { |
1867 | | Ref::<&[u8], Self>::new(bytes).map(Ref::into_ref) |
1868 | | } |
1869 | | |
1870 | | /// Interprets the prefix of the given `bytes` as a `&Self` without copying. |
1871 | | /// |
1872 | | /// `ref_from_prefix` returns a reference to the first `size_of::<Self>()` |
1873 | | /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not |
1874 | | /// aligned to `align_of::<Self>()`, this returns `None`. |
1875 | | /// |
1876 | | /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use |
1877 | | /// [`Ref::into_ref`] to get a `&Self` with the same lifetime. |
1878 | | /// |
1879 | | /// # Examples |
1880 | | /// |
1881 | | /// ``` |
1882 | | /// use zerocopy::FromBytes; |
1883 | | /// # use zerocopy_derive::*; |
1884 | | /// |
1885 | | /// #[derive(FromZeroes, FromBytes)] |
1886 | | /// #[repr(C)] |
1887 | | /// struct PacketHeader { |
1888 | | /// src_port: [u8; 2], |
1889 | | /// dst_port: [u8; 2], |
1890 | | /// length: [u8; 2], |
1891 | | /// checksum: [u8; 2], |
1892 | | /// } |
1893 | | /// |
1894 | | /// // These are more bytes than are needed to encode a `PacketHeader`. |
1895 | | /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); |
1896 | | /// |
1897 | | /// let header = PacketHeader::ref_from_prefix(bytes).unwrap(); |
1898 | | /// |
1899 | | /// assert_eq!(header.src_port, [0, 1]); |
1900 | | /// assert_eq!(header.dst_port, [2, 3]); |
1901 | | /// assert_eq!(header.length, [4, 5]); |
1902 | | /// assert_eq!(header.checksum, [6, 7]); |
1903 | | /// ``` |
1904 | | #[inline] |
1905 | | fn ref_from_prefix(bytes: &[u8]) -> Option<&Self> |
1906 | | where |
1907 | | Self: Sized, |
1908 | | { |
1909 | | Ref::<&[u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_ref()) |
1910 | | } |
1911 | | |
1912 | | /// Interprets the suffix of the given `bytes` as a `&Self` without copying. |
1913 | | /// |
1914 | | /// `ref_from_suffix` returns a reference to the last `size_of::<Self>()` |
1915 | | /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of |
1916 | | /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`. |
1917 | | /// |
1918 | | /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then, use |
1919 | | /// [`Ref::into_ref`] to get a `&Self` with the same lifetime. |
1920 | | /// |
1921 | | /// # Examples |
1922 | | /// |
1923 | | /// ``` |
1924 | | /// use zerocopy::FromBytes; |
1925 | | /// # use zerocopy_derive::*; |
1926 | | /// |
1927 | | /// #[derive(FromZeroes, FromBytes)] |
1928 | | /// #[repr(C)] |
1929 | | /// struct PacketTrailer { |
1930 | | /// frame_check_sequence: [u8; 4], |
1931 | | /// } |
1932 | | /// |
1933 | | /// // These are more bytes than are needed to encode a `PacketTrailer`. |
1934 | | /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); |
1935 | | /// |
1936 | | /// let trailer = PacketTrailer::ref_from_suffix(bytes).unwrap(); |
1937 | | /// |
1938 | | /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); |
1939 | | /// ``` |
1940 | | #[inline] |
1941 | | fn ref_from_suffix(bytes: &[u8]) -> Option<&Self> |
1942 | | where |
1943 | | Self: Sized, |
1944 | | { |
1945 | | Ref::<&[u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_ref()) |
1946 | | } |
1947 | | |
1948 | | /// Interprets the given `bytes` as a `&mut Self` without copying. |
1949 | | /// |
1950 | | /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to |
1951 | | /// `align_of::<Self>()`, this returns `None`. |
1952 | | /// |
1953 | | /// # Examples |
1954 | | /// |
1955 | | /// ``` |
1956 | | /// use zerocopy::FromBytes; |
1957 | | /// # use zerocopy_derive::*; |
1958 | | /// |
1959 | | /// #[derive(AsBytes, FromZeroes, FromBytes)] |
1960 | | /// #[repr(C)] |
1961 | | /// struct PacketHeader { |
1962 | | /// src_port: [u8; 2], |
1963 | | /// dst_port: [u8; 2], |
1964 | | /// length: [u8; 2], |
1965 | | /// checksum: [u8; 2], |
1966 | | /// } |
1967 | | /// |
1968 | | /// // These bytes encode a `PacketHeader`. |
1969 | | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..]; |
1970 | | /// |
1971 | | /// let header = PacketHeader::mut_from(bytes).unwrap(); |
1972 | | /// |
1973 | | /// assert_eq!(header.src_port, [0, 1]); |
1974 | | /// assert_eq!(header.dst_port, [2, 3]); |
1975 | | /// assert_eq!(header.length, [4, 5]); |
1976 | | /// assert_eq!(header.checksum, [6, 7]); |
1977 | | /// |
1978 | | /// header.checksum = [0, 0]; |
1979 | | /// |
1980 | | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]); |
1981 | | /// ``` |
1982 | | #[inline] |
1983 | | fn mut_from(bytes: &mut [u8]) -> Option<&mut Self> |
1984 | | where |
1985 | | Self: Sized + AsBytes, |
1986 | | { |
1987 | | Ref::<&mut [u8], Self>::new(bytes).map(Ref::into_mut) |
1988 | | } |
1989 | | |
1990 | | /// Interprets the prefix of the given `bytes` as a `&mut Self` without |
1991 | | /// copying. |
1992 | | /// |
1993 | | /// `mut_from_prefix` returns a reference to the first `size_of::<Self>()` |
1994 | | /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not |
1995 | | /// aligned to `align_of::<Self>()`, this returns `None`. |
1996 | | /// |
1997 | | /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use |
1998 | | /// [`Ref::into_mut`] to get a `&mut Self` with the same lifetime. |
1999 | | /// |
2000 | | /// # Examples |
2001 | | /// |
2002 | | /// ``` |
2003 | | /// use zerocopy::FromBytes; |
2004 | | /// # use zerocopy_derive::*; |
2005 | | /// |
2006 | | /// #[derive(AsBytes, FromZeroes, FromBytes)] |
2007 | | /// #[repr(C)] |
2008 | | /// struct PacketHeader { |
2009 | | /// src_port: [u8; 2], |
2010 | | /// dst_port: [u8; 2], |
2011 | | /// length: [u8; 2], |
2012 | | /// checksum: [u8; 2], |
2013 | | /// } |
2014 | | /// |
2015 | | /// // These are more bytes than are needed to encode a `PacketHeader`. |
2016 | | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
2017 | | /// |
2018 | | /// let header = PacketHeader::mut_from_prefix(bytes).unwrap(); |
2019 | | /// |
2020 | | /// assert_eq!(header.src_port, [0, 1]); |
2021 | | /// assert_eq!(header.dst_port, [2, 3]); |
2022 | | /// assert_eq!(header.length, [4, 5]); |
2023 | | /// assert_eq!(header.checksum, [6, 7]); |
2024 | | /// |
2025 | | /// header.checksum = [0, 0]; |
2026 | | /// |
2027 | | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 8, 9]); |
2028 | | /// ``` |
2029 | | #[inline] |
2030 | | fn mut_from_prefix(bytes: &mut [u8]) -> Option<&mut Self> |
2031 | | where |
2032 | | Self: Sized + AsBytes, |
2033 | | { |
2034 | | Ref::<&mut [u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_mut()) |
2035 | | } |
2036 | | |
2037 | | /// Interprets the suffix of the given `bytes` as a `&mut Self` without copying. |
2038 | | /// |
2039 | | /// `mut_from_suffix` returns a reference to the last `size_of::<Self>()` |
2040 | | /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of |
2041 | | /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`. |
2042 | | /// |
2043 | | /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then, |
2044 | | /// use [`Ref::into_mut`] to get a `&mut Self` with the same lifetime. |
2045 | | /// |
2046 | | /// # Examples |
2047 | | /// |
2048 | | /// ``` |
2049 | | /// use zerocopy::FromBytes; |
2050 | | /// # use zerocopy_derive::*; |
2051 | | /// |
2052 | | /// #[derive(AsBytes, FromZeroes, FromBytes)] |
2053 | | /// #[repr(C)] |
2054 | | /// struct PacketTrailer { |
2055 | | /// frame_check_sequence: [u8; 4], |
2056 | | /// } |
2057 | | /// |
2058 | | /// // These are more bytes than are needed to encode a `PacketTrailer`. |
2059 | | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
2060 | | /// |
2061 | | /// let trailer = PacketTrailer::mut_from_suffix(bytes).unwrap(); |
2062 | | /// |
2063 | | /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); |
2064 | | /// |
2065 | | /// trailer.frame_check_sequence = [0, 0, 0, 0]; |
2066 | | /// |
2067 | | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]); |
2068 | | /// ``` |
2069 | | #[inline] |
2070 | | fn mut_from_suffix(bytes: &mut [u8]) -> Option<&mut Self> |
2071 | | where |
2072 | | Self: Sized + AsBytes, |
2073 | | { |
2074 | | Ref::<&mut [u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_mut()) |
2075 | | } |
2076 | | |
2077 | | /// Interprets the given `bytes` as a `&[Self]` without copying. |
2078 | | /// |
2079 | | /// If `bytes.len() % size_of::<Self>() != 0` or `bytes` is not aligned to |
2080 | | /// `align_of::<Self>()`, this returns `None`. |
2081 | | /// |
2082 | | /// If you need to convert a specific number of slice elements, see |
2083 | | /// [`slice_from_prefix`](FromBytes::slice_from_prefix) or |
2084 | | /// [`slice_from_suffix`](FromBytes::slice_from_suffix). |
2085 | | /// |
2086 | | /// # Panics |
2087 | | /// |
2088 | | /// If `Self` is a zero-sized type. |
2089 | | /// |
2090 | | /// # Examples |
2091 | | /// |
2092 | | /// ``` |
2093 | | /// use zerocopy::FromBytes; |
2094 | | /// # use zerocopy_derive::*; |
2095 | | /// |
2096 | | /// # #[derive(Debug, PartialEq, Eq)] |
2097 | | /// #[derive(FromZeroes, FromBytes)] |
2098 | | /// #[repr(C)] |
2099 | | /// struct Pixel { |
2100 | | /// r: u8, |
2101 | | /// g: u8, |
2102 | | /// b: u8, |
2103 | | /// a: u8, |
2104 | | /// } |
2105 | | /// |
2106 | | /// // These bytes encode two `Pixel`s. |
2107 | | /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice(); |
2108 | | /// |
2109 | | /// let pixels = Pixel::slice_from(bytes).unwrap(); |
2110 | | /// |
2111 | | /// assert_eq!(pixels, &[ |
2112 | | /// Pixel { r: 0, g: 1, b: 2, a: 3 }, |
2113 | | /// Pixel { r: 4, g: 5, b: 6, a: 7 }, |
2114 | | /// ]); |
2115 | | /// ``` |
2116 | | #[inline] |
2117 | | fn slice_from(bytes: &[u8]) -> Option<&[Self]> |
2118 | | where |
2119 | | Self: Sized, |
2120 | | { |
2121 | | Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_slice()) |
2122 | | } |
2123 | | |
2124 | | /// Interprets the prefix of the given `bytes` as a `&[Self]` with length |
2125 | | /// equal to `count` without copying. |
2126 | | /// |
2127 | | /// This method verifies that `bytes.len() >= size_of::<T>() * count` |
2128 | | /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the |
2129 | | /// first `size_of::<T>() * count` bytes from `bytes` to construct a |
2130 | | /// `&[Self]`, and returns the remaining bytes to the caller. It also |
2131 | | /// ensures that `size_of::<T>() * count` does not overflow a `usize`. |
2132 | | /// If any of the length, alignment, or overflow checks fail, it returns |
2133 | | /// `None`. |
2134 | | /// |
2135 | | /// # Panics |
2136 | | /// |
2137 | | /// If `T` is a zero-sized type. |
2138 | | /// |
2139 | | /// # Examples |
2140 | | /// |
2141 | | /// ``` |
2142 | | /// use zerocopy::FromBytes; |
2143 | | /// # use zerocopy_derive::*; |
2144 | | /// |
2145 | | /// # #[derive(Debug, PartialEq, Eq)] |
2146 | | /// #[derive(FromZeroes, FromBytes)] |
2147 | | /// #[repr(C)] |
2148 | | /// struct Pixel { |
2149 | | /// r: u8, |
2150 | | /// g: u8, |
2151 | | /// b: u8, |
2152 | | /// a: u8, |
2153 | | /// } |
2154 | | /// |
2155 | | /// // These are more bytes than are needed to encode two `Pixel`s. |
2156 | | /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); |
2157 | | /// |
2158 | | /// let (pixels, rest) = Pixel::slice_from_prefix(bytes, 2).unwrap(); |
2159 | | /// |
2160 | | /// assert_eq!(pixels, &[ |
2161 | | /// Pixel { r: 0, g: 1, b: 2, a: 3 }, |
2162 | | /// Pixel { r: 4, g: 5, b: 6, a: 7 }, |
2163 | | /// ]); |
2164 | | /// |
2165 | | /// assert_eq!(rest, &[8, 9]); |
2166 | | /// ``` |
2167 | | #[inline] |
2168 | | fn slice_from_prefix(bytes: &[u8], count: usize) -> Option<(&[Self], &[u8])> |
2169 | | where |
2170 | | Self: Sized, |
2171 | | { |
2172 | | Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_slice(), b)) |
2173 | | } |
2174 | | |
2175 | | /// Interprets the suffix of the given `bytes` as a `&[Self]` with length |
2176 | | /// equal to `count` without copying. |
2177 | | /// |
2178 | | /// This method verifies that `bytes.len() >= size_of::<T>() * count` |
2179 | | /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the |
2180 | | /// last `size_of::<T>() * count` bytes from `bytes` to construct a |
2181 | | /// `&[Self]`, and returns the preceding bytes to the caller. It also |
2182 | | /// ensures that `size_of::<T>() * count` does not overflow a `usize`. |
2183 | | /// If any of the length, alignment, or overflow checks fail, it returns |
2184 | | /// `None`. |
2185 | | /// |
2186 | | /// # Panics |
2187 | | /// |
2188 | | /// If `T` is a zero-sized type. |
2189 | | /// |
2190 | | /// # Examples |
2191 | | /// |
2192 | | /// ``` |
2193 | | /// use zerocopy::FromBytes; |
2194 | | /// # use zerocopy_derive::*; |
2195 | | /// |
2196 | | /// # #[derive(Debug, PartialEq, Eq)] |
2197 | | /// #[derive(FromZeroes, FromBytes)] |
2198 | | /// #[repr(C)] |
2199 | | /// struct Pixel { |
2200 | | /// r: u8, |
2201 | | /// g: u8, |
2202 | | /// b: u8, |
2203 | | /// a: u8, |
2204 | | /// } |
2205 | | /// |
2206 | | /// // These are more bytes than are needed to encode two `Pixel`s. |
2207 | | /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); |
2208 | | /// |
2209 | | /// let (rest, pixels) = Pixel::slice_from_suffix(bytes, 2).unwrap(); |
2210 | | /// |
2211 | | /// assert_eq!(rest, &[0, 1]); |
2212 | | /// |
2213 | | /// assert_eq!(pixels, &[ |
2214 | | /// Pixel { r: 2, g: 3, b: 4, a: 5 }, |
2215 | | /// Pixel { r: 6, g: 7, b: 8, a: 9 }, |
2216 | | /// ]); |
2217 | | /// ``` |
2218 | | #[inline] |
2219 | | fn slice_from_suffix(bytes: &[u8], count: usize) -> Option<(&[u8], &[Self])> |
2220 | | where |
2221 | | Self: Sized, |
2222 | | { |
2223 | | Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_slice())) |
2224 | | } |
2225 | | |
2226 | | /// Interprets the given `bytes` as a `&mut [Self]` without copying. |
2227 | | /// |
2228 | | /// If `bytes.len() % size_of::<T>() != 0` or `bytes` is not aligned to |
2229 | | /// `align_of::<T>()`, this returns `None`. |
2230 | | /// |
2231 | | /// If you need to convert a specific number of slice elements, see |
2232 | | /// [`mut_slice_from_prefix`](FromBytes::mut_slice_from_prefix) or |
2233 | | /// [`mut_slice_from_suffix`](FromBytes::mut_slice_from_suffix). |
2234 | | /// |
2235 | | /// # Panics |
2236 | | /// |
2237 | | /// If `T` is a zero-sized type. |
2238 | | /// |
2239 | | /// # Examples |
2240 | | /// |
2241 | | /// ``` |
2242 | | /// use zerocopy::FromBytes; |
2243 | | /// # use zerocopy_derive::*; |
2244 | | /// |
2245 | | /// # #[derive(Debug, PartialEq, Eq)] |
2246 | | /// #[derive(AsBytes, FromZeroes, FromBytes)] |
2247 | | /// #[repr(C)] |
2248 | | /// struct Pixel { |
2249 | | /// r: u8, |
2250 | | /// g: u8, |
2251 | | /// b: u8, |
2252 | | /// a: u8, |
2253 | | /// } |
2254 | | /// |
2255 | | /// // These bytes encode two `Pixel`s. |
2256 | | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..]; |
2257 | | /// |
2258 | | /// let pixels = Pixel::mut_slice_from(bytes).unwrap(); |
2259 | | /// |
2260 | | /// assert_eq!(pixels, &[ |
2261 | | /// Pixel { r: 0, g: 1, b: 2, a: 3 }, |
2262 | | /// Pixel { r: 4, g: 5, b: 6, a: 7 }, |
2263 | | /// ]); |
2264 | | /// |
2265 | | /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; |
2266 | | /// |
2267 | | /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]); |
2268 | | /// ``` |
2269 | | #[inline] |
2270 | | fn mut_slice_from(bytes: &mut [u8]) -> Option<&mut [Self]> |
2271 | | where |
2272 | | Self: Sized + AsBytes, |
2273 | | { |
2274 | | Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_mut_slice()) |
2275 | | } |
2276 | | |
2277 | | /// Interprets the prefix of the given `bytes` as a `&mut [Self]` with length |
2278 | | /// equal to `count` without copying. |
2279 | | /// |
2280 | | /// This method verifies that `bytes.len() >= size_of::<T>() * count` |
2281 | | /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the |
2282 | | /// first `size_of::<T>() * count` bytes from `bytes` to construct a |
2283 | | /// `&[Self]`, and returns the remaining bytes to the caller. It also |
2284 | | /// ensures that `size_of::<T>() * count` does not overflow a `usize`. |
2285 | | /// If any of the length, alignment, or overflow checks fail, it returns |
2286 | | /// `None`. |
2287 | | /// |
2288 | | /// # Panics |
2289 | | /// |
2290 | | /// If `T` is a zero-sized type. |
2291 | | /// |
2292 | | /// # Examples |
2293 | | /// |
2294 | | /// ``` |
2295 | | /// use zerocopy::FromBytes; |
2296 | | /// # use zerocopy_derive::*; |
2297 | | /// |
2298 | | /// # #[derive(Debug, PartialEq, Eq)] |
2299 | | /// #[derive(AsBytes, FromZeroes, FromBytes)] |
2300 | | /// #[repr(C)] |
2301 | | /// struct Pixel { |
2302 | | /// r: u8, |
2303 | | /// g: u8, |
2304 | | /// b: u8, |
2305 | | /// a: u8, |
2306 | | /// } |
2307 | | /// |
2308 | | /// // These are more bytes than are needed to encode two `Pixel`s. |
2309 | | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
2310 | | /// |
2311 | | /// let (pixels, rest) = Pixel::mut_slice_from_prefix(bytes, 2).unwrap(); |
2312 | | /// |
2313 | | /// assert_eq!(pixels, &[ |
2314 | | /// Pixel { r: 0, g: 1, b: 2, a: 3 }, |
2315 | | /// Pixel { r: 4, g: 5, b: 6, a: 7 }, |
2316 | | /// ]); |
2317 | | /// |
2318 | | /// assert_eq!(rest, &[8, 9]); |
2319 | | /// |
2320 | | /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; |
2321 | | /// |
2322 | | /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 8, 9]); |
2323 | | /// ``` |
2324 | | #[inline] |
2325 | | fn mut_slice_from_prefix(bytes: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])> |
2326 | | where |
2327 | | Self: Sized + AsBytes, |
2328 | | { |
2329 | | Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_mut_slice(), b)) |
2330 | | } |
2331 | | |
2332 | | /// Interprets the suffix of the given `bytes` as a `&mut [Self]` with length |
2333 | | /// equal to `count` without copying. |
2334 | | /// |
2335 | | /// This method verifies that `bytes.len() >= size_of::<T>() * count` |
2336 | | /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the |
2337 | | /// last `size_of::<T>() * count` bytes from `bytes` to construct a |
2338 | | /// `&[Self]`, and returns the preceding bytes to the caller. It also |
2339 | | /// ensures that `size_of::<T>() * count` does not overflow a `usize`. |
2340 | | /// If any of the length, alignment, or overflow checks fail, it returns |
2341 | | /// `None`. |
2342 | | /// |
2343 | | /// # Panics |
2344 | | /// |
2345 | | /// If `T` is a zero-sized type. |
2346 | | /// |
2347 | | /// # Examples |
2348 | | /// |
2349 | | /// ``` |
2350 | | /// use zerocopy::FromBytes; |
2351 | | /// # use zerocopy_derive::*; |
2352 | | /// |
2353 | | /// # #[derive(Debug, PartialEq, Eq)] |
2354 | | /// #[derive(AsBytes, FromZeroes, FromBytes)] |
2355 | | /// #[repr(C)] |
2356 | | /// struct Pixel { |
2357 | | /// r: u8, |
2358 | | /// g: u8, |
2359 | | /// b: u8, |
2360 | | /// a: u8, |
2361 | | /// } |
2362 | | /// |
2363 | | /// // These are more bytes than are needed to encode two `Pixel`s. |
2364 | | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
2365 | | /// |
2366 | | /// let (rest, pixels) = Pixel::mut_slice_from_suffix(bytes, 2).unwrap(); |
2367 | | /// |
2368 | | /// assert_eq!(rest, &[0, 1]); |
2369 | | /// |
2370 | | /// assert_eq!(pixels, &[ |
2371 | | /// Pixel { r: 2, g: 3, b: 4, a: 5 }, |
2372 | | /// Pixel { r: 6, g: 7, b: 8, a: 9 }, |
2373 | | /// ]); |
2374 | | /// |
2375 | | /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; |
2376 | | /// |
2377 | | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]); |
2378 | | /// ``` |
2379 | | #[inline] |
2380 | | fn mut_slice_from_suffix(bytes: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])> |
2381 | | where |
2382 | | Self: Sized + AsBytes, |
2383 | | { |
2384 | | Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_mut_slice())) |
2385 | | } |
2386 | | |
2387 | | /// Reads a copy of `Self` from `bytes`. |
2388 | | /// |
2389 | | /// If `bytes.len() != size_of::<Self>()`, `read_from` returns `None`. |
2390 | | /// |
2391 | | /// # Examples |
2392 | | /// |
2393 | | /// ``` |
2394 | | /// use zerocopy::FromBytes; |
2395 | | /// # use zerocopy_derive::*; |
2396 | | /// |
2397 | | /// #[derive(FromZeroes, FromBytes)] |
2398 | | /// #[repr(C)] |
2399 | | /// struct PacketHeader { |
2400 | | /// src_port: [u8; 2], |
2401 | | /// dst_port: [u8; 2], |
2402 | | /// length: [u8; 2], |
2403 | | /// checksum: [u8; 2], |
2404 | | /// } |
2405 | | /// |
2406 | | /// // These bytes encode a `PacketHeader`. |
2407 | | /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice(); |
2408 | | /// |
2409 | | /// let header = PacketHeader::read_from(bytes).unwrap(); |
2410 | | /// |
2411 | | /// assert_eq!(header.src_port, [0, 1]); |
2412 | | /// assert_eq!(header.dst_port, [2, 3]); |
2413 | | /// assert_eq!(header.length, [4, 5]); |
2414 | | /// assert_eq!(header.checksum, [6, 7]); |
2415 | | /// ``` |
2416 | | #[inline] |
2417 | 0 | fn read_from(bytes: &[u8]) -> Option<Self> |
2418 | 0 | where |
2419 | 0 | Self: Sized, |
2420 | | { |
2421 | 0 | Ref::<_, Unalign<Self>>::new_unaligned(bytes).map(|r| r.read().into_inner()) |
2422 | 0 | } |
2423 | | |
2424 | | /// Reads a copy of `Self` from the prefix of `bytes`. |
2425 | | /// |
2426 | | /// `read_from_prefix` reads a `Self` from the first `size_of::<Self>()` |
2427 | | /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns |
2428 | | /// `None`. |
2429 | | /// |
2430 | | /// # Examples |
2431 | | /// |
2432 | | /// ``` |
2433 | | /// use zerocopy::FromBytes; |
2434 | | /// # use zerocopy_derive::*; |
2435 | | /// |
2436 | | /// #[derive(FromZeroes, FromBytes)] |
2437 | | /// #[repr(C)] |
2438 | | /// struct PacketHeader { |
2439 | | /// src_port: [u8; 2], |
2440 | | /// dst_port: [u8; 2], |
2441 | | /// length: [u8; 2], |
2442 | | /// checksum: [u8; 2], |
2443 | | /// } |
2444 | | /// |
2445 | | /// // These are more bytes than are needed to encode a `PacketHeader`. |
2446 | | /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); |
2447 | | /// |
2448 | | /// let header = PacketHeader::read_from_prefix(bytes).unwrap(); |
2449 | | /// |
2450 | | /// assert_eq!(header.src_port, [0, 1]); |
2451 | | /// assert_eq!(header.dst_port, [2, 3]); |
2452 | | /// assert_eq!(header.length, [4, 5]); |
2453 | | /// assert_eq!(header.checksum, [6, 7]); |
2454 | | /// ``` |
2455 | | #[inline] |
2456 | | fn read_from_prefix(bytes: &[u8]) -> Option<Self> |
2457 | | where |
2458 | | Self: Sized, |
2459 | | { |
2460 | | Ref::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes) |
2461 | | .map(|(r, _)| r.read().into_inner()) |
2462 | | } |
2463 | | |
2464 | | /// Reads a copy of `Self` from the suffix of `bytes`. |
2465 | | /// |
2466 | | /// `read_from_suffix` reads a `Self` from the last `size_of::<Self>()` |
2467 | | /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns |
2468 | | /// `None`. |
2469 | | /// |
2470 | | /// # Examples |
2471 | | /// |
2472 | | /// ``` |
2473 | | /// use zerocopy::FromBytes; |
2474 | | /// # use zerocopy_derive::*; |
2475 | | /// |
2476 | | /// #[derive(FromZeroes, FromBytes)] |
2477 | | /// #[repr(C)] |
2478 | | /// struct PacketTrailer { |
2479 | | /// frame_check_sequence: [u8; 4], |
2480 | | /// } |
2481 | | /// |
2482 | | /// // These are more bytes than are needed to encode a `PacketTrailer`. |
2483 | | /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); |
2484 | | /// |
2485 | | /// let trailer = PacketTrailer::read_from_suffix(bytes).unwrap(); |
2486 | | /// |
2487 | | /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); |
2488 | | /// ``` |
2489 | | #[inline] |
2490 | | fn read_from_suffix(bytes: &[u8]) -> Option<Self> |
2491 | | where |
2492 | | Self: Sized, |
2493 | | { |
2494 | | Ref::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes) |
2495 | | .map(|(_, r)| r.read().into_inner()) |
2496 | | } |
2497 | | } |
2498 | | |
2499 | | /// Analyzes whether a type is [`AsBytes`]. |
2500 | | /// |
2501 | | /// This derive analyzes, at compile time, whether the annotated type satisfies |
2502 | | /// the [safety conditions] of `AsBytes` and implements `AsBytes` if it is |
2503 | | /// sound to do so. This derive can be applied to structs, enums, and unions; |
2504 | | /// e.g.: |
2505 | | /// |
2506 | | /// ``` |
2507 | | /// # use zerocopy_derive::{AsBytes}; |
2508 | | /// #[derive(AsBytes)] |
2509 | | /// #[repr(C)] |
2510 | | /// struct MyStruct { |
2511 | | /// # /* |
2512 | | /// ... |
2513 | | /// # */ |
2514 | | /// } |
2515 | | /// |
2516 | | /// #[derive(AsBytes)] |
2517 | | /// #[repr(u8)] |
2518 | | /// enum MyEnum { |
2519 | | /// # Variant, |
2520 | | /// # /* |
2521 | | /// ... |
2522 | | /// # */ |
2523 | | /// } |
2524 | | /// |
2525 | | /// #[derive(AsBytes)] |
2526 | | /// #[repr(C)] |
2527 | | /// union MyUnion { |
2528 | | /// # variant: u8, |
2529 | | /// # /* |
2530 | | /// ... |
2531 | | /// # */ |
2532 | | /// } |
2533 | | /// ``` |
2534 | | /// |
2535 | | /// [safety conditions]: trait@AsBytes#safety |
2536 | | /// |
2537 | | /// # Error Messages |
2538 | | /// |
2539 | | /// Due to the way that the custom derive for `AsBytes` is implemented, you may |
2540 | | /// get an error like this: |
2541 | | /// |
2542 | | /// ```text |
2543 | | /// error[E0277]: the trait bound `HasPadding<Foo, true>: ShouldBe<false>` is not satisfied |
2544 | | /// --> lib.rs:23:10 |
2545 | | /// | |
2546 | | /// 1 | #[derive(AsBytes)] |
2547 | | /// | ^^^^^^^ the trait `ShouldBe<false>` is not implemented for `HasPadding<Foo, true>` |
2548 | | /// | |
2549 | | /// = help: the trait `ShouldBe<VALUE>` is implemented for `HasPadding<T, VALUE>` |
2550 | | /// ``` |
2551 | | /// |
2552 | | /// This error indicates that the type being annotated has padding bytes, which |
2553 | | /// is illegal for `AsBytes` types. Consider reducing the alignment of some |
2554 | | /// fields by using types in the [`byteorder`] module, adding explicit struct |
2555 | | /// fields where those padding bytes would be, or using `#[repr(packed)]`. See |
2556 | | /// the Rust Reference's page on [type layout] for more information |
2557 | | /// about type layout and padding. |
2558 | | /// |
2559 | | /// [type layout]: https://doc.rust-lang.org/reference/type-layout.html |
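| | ///
| | /// For example, the following sketch (the type names are illustrative, not
| | /// from this crate) has one padding byte after `a`, so the derive rejects it:
| | ///
| | /// ```compile_fail
| | /// # use zerocopy_derive::AsBytes;
| | /// #[derive(AsBytes)]
| | /// #[repr(C)]
| | /// struct Padded {
| | ///     a: u8,
| | ///     b: u16, // alignment 2, so one padding byte follows `a`
| | /// }
| | /// ```
| | ///
| | /// Making that byte an explicit field removes the padding:
| | ///
| | /// ```
| | /// # use zerocopy_derive::AsBytes;
| | /// #[derive(AsBytes)]
| | /// #[repr(C)]
| | /// struct Unpadded {
| | ///     a: u8,
| | ///     _pad: u8, // occupies the byte the compiler would otherwise insert
| | ///     b: u16,
| | /// }
| | /// ```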
2560 | | /// |
2561 | | /// # Analysis |
2562 | | /// |
2563 | | /// *This section describes, roughly, the analysis performed by this derive to |
2564 | | /// determine whether it is sound to implement `AsBytes` for a given type. |
2565 | | /// Unless you are modifying the implementation of this derive, or attempting to |
2566 | | /// manually implement `AsBytes` for a type yourself, you don't need to read |
2567 | | /// this section.* |
2568 | | /// |
2569 | | /// If a type has the following properties, then this derive can implement |
2570 | | /// `AsBytes` for that type: |
2571 | | /// |
2572 | | /// - If the type is a struct: |
2573 | | /// - It must have a defined representation (`repr(C)`, `repr(transparent)`, |
2574 | | /// or `repr(packed)`). |
2575 | | /// - All of its fields must be `AsBytes`. |
2576 | | /// - Its layout must have no padding. This is always true for |
2577 | | /// `repr(transparent)` and `repr(packed)`. For `repr(C)`, see the layout |
2578 | | /// algorithm described in the [Rust Reference]. |
2579 | | /// - If the type is an enum: |
2580 | | /// - It must be a C-like enum (meaning that all variants have no fields). |
2581 | | /// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`, |
2582 | | /// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`). |
2583 | | /// - The type must not contain any [`UnsafeCell`]s (this is required in order |
2584 | | /// for it to be sound to construct a `&[u8]` and a `&T` to the same region of |
2585 | | /// memory). The type may contain references or pointers to `UnsafeCell`s so |
2586 | | /// long as those values can themselves be initialized from zeroes (`AsBytes` |
2587 | | /// is not currently implemented for, e.g., `Option<&UnsafeCell<_>>`, but it |
2588 | | /// could be one day). |
2589 | | /// |
2590 | | /// [`UnsafeCell`]: core::cell::UnsafeCell |
2591 | | /// |
2592 | | /// This analysis is subject to change. Unsafe code may *only* rely on the |
2593 | | /// documented [safety conditions] of `AsBytes`, and must *not* rely on the
2594 | | /// implementation details of this derive. |
2595 | | /// |
2596 | | /// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html |
2597 | | #[cfg(any(feature = "derive", test))] |
2598 | | #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] |
2599 | | pub use zerocopy_derive::AsBytes; |
2600 | | |
2601 | | /// Types that can be viewed as an immutable slice of initialized bytes. |
2602 | | /// |
2603 | | /// Any `AsBytes` type can be viewed as a slice of initialized bytes of the same |
2604 | | /// size. This is useful for efficiently serializing structured data as raw |
2605 | | /// bytes. |
2606 | | /// |
2607 | | /// # Implementation |
2608 | | /// |
2609 | | /// **Do not implement this trait yourself!** Instead, use |
2610 | | /// [`#[derive(AsBytes)]`][derive] (requires the `derive` Cargo feature); e.g.: |
2611 | | /// |
2612 | | /// ``` |
2613 | | /// # use zerocopy_derive::AsBytes; |
2614 | | /// #[derive(AsBytes)] |
2615 | | /// #[repr(C)] |
2616 | | /// struct MyStruct { |
2617 | | /// # /* |
2618 | | /// ... |
2619 | | /// # */ |
2620 | | /// } |
2621 | | /// |
2622 | | /// #[derive(AsBytes)] |
2623 | | /// #[repr(u8)] |
2624 | | /// enum MyEnum { |
2625 | | /// # Variant0, |
2626 | | /// # /* |
2627 | | /// ... |
2628 | | /// # */ |
2629 | | /// } |
2630 | | /// |
2631 | | /// #[derive(AsBytes)] |
2632 | | /// #[repr(C)] |
2633 | | /// union MyUnion { |
2634 | | /// # variant: u8, |
2635 | | /// # /* |
2636 | | /// ... |
2637 | | /// # */ |
2638 | | /// } |
2639 | | /// ``` |
2640 | | /// |
2641 | | /// This derive performs a sophisticated, compile-time safety analysis to |
2642 | | /// determine whether a type is `AsBytes`. See the [derive |
2643 | | /// documentation][derive] for guidance on how to interpret error messages |
2644 | | /// produced by the derive's analysis. |
2645 | | /// |
2646 | | /// # Safety |
2647 | | /// |
2648 | | /// *This section describes what is required in order for `T: AsBytes`, and |
2649 | | /// what unsafe code may assume of such types. If you don't plan on implementing |
2650 | | /// `AsBytes` manually, and you don't plan on writing unsafe code that |
2651 | | /// operates on `AsBytes` types, then you don't need to read this section.* |
2652 | | /// |
2653 | | /// If `T: AsBytes`, then unsafe code may assume that: |
2654 | | /// - It is sound to treat any `t: T` as an immutable `[u8]` of length |
2655 | | /// `size_of_val(t)`. |
2656 | | /// - Given `t: &T`, it is sound to construct a `b: &[u8]` where `b.len() == |
2657 | | /// size_of_val(t)` at the same address as `t`, and it is sound for both `b` |
2658 | | /// and `t` to be live at the same time. |
2659 | | /// |
2660 | | /// If a type is marked as `AsBytes` which violates this contract, it may cause |
2661 | | /// undefined behavior. |
2662 | | /// |
2663 | | /// `#[derive(AsBytes)]` only permits [types which satisfy these |
2664 | | /// requirements][derive-analysis]. |
2665 | | /// |
2666 | | #[cfg_attr( |
2667 | | feature = "derive", |
2668 | | doc = "[derive]: zerocopy_derive::AsBytes", |
2669 | | doc = "[derive-analysis]: zerocopy_derive::AsBytes#analysis" |
2670 | | )] |
2671 | | #[cfg_attr( |
2672 | | not(feature = "derive"), |
2673 | | doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html"), |
2674 | | doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html#analysis"), |
2675 | | )] |
2676 | | pub unsafe trait AsBytes { |
2677 | | // The `Self: Sized` bound makes it so that this function doesn't prevent |
2678 | | // `AsBytes` from being object safe. Note that other `AsBytes` methods |
2679 | | // prevent object safety, but those provide a benefit in exchange for object |
2680 | | // safety. If at some point we remove those methods, change their type |
2681 | | // signatures, or move them out of this trait so that `AsBytes` is object |
2682 | | // safe again, it's important that this function not prevent object safety. |
2683 | | #[doc(hidden)] |
2684 | | fn only_derive_is_allowed_to_implement_this_trait() |
2685 | | where |
2686 | | Self: Sized; |
2687 | | |
2688 | | /// Gets the bytes of this value. |
2689 | | /// |
2690 | | /// `as_bytes` provides access to the bytes of this value as an immutable |
2691 | | /// byte slice. |
2692 | | /// |
2693 | | /// # Examples |
2694 | | /// |
2695 | | /// ``` |
2696 | | /// use zerocopy::AsBytes; |
2697 | | /// # use zerocopy_derive::*; |
2698 | | /// |
2699 | | /// #[derive(AsBytes)] |
2700 | | /// #[repr(C)] |
2701 | | /// struct PacketHeader { |
2702 | | /// src_port: [u8; 2], |
2703 | | /// dst_port: [u8; 2], |
2704 | | /// length: [u8; 2], |
2705 | | /// checksum: [u8; 2], |
2706 | | /// } |
2707 | | /// |
2708 | | /// let header = PacketHeader { |
2709 | | /// src_port: [0, 1], |
2710 | | /// dst_port: [2, 3], |
2711 | | /// length: [4, 5], |
2712 | | /// checksum: [6, 7], |
2713 | | /// }; |
2714 | | /// |
2715 | | /// let bytes = header.as_bytes(); |
2716 | | /// |
2717 | | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); |
2718 | | /// ``` |
2719 | | #[inline(always)] |
2720 | 0 | fn as_bytes(&self) -> &[u8] { |
2721 | | // Note that this method does not have a `Self: Sized` bound; |
2722 | | // `size_of_val` works for unsized values too. |
2723 | 0 | let len = mem::size_of_val(self); |
2724 | 0 | let slf: *const Self = self; |
2725 | | |
2726 | | // SAFETY: |
2727 | | // - `slf.cast::<u8>()` is valid for reads for `len * |
2728 | | // mem::size_of::<u8>()` many bytes because... |
2729 | | // - `slf` is the same pointer as `self`, and `self` is a reference |
2730 | | // which points to an object whose size is `len`. Thus... |
2731 | | // - The entire region of `len` bytes starting at `slf` is contained |
2732 | | // within a single allocation. |
2733 | | // - `slf` is non-null. |
2734 | | // - `slf` is trivially aligned to `align_of::<u8>() == 1`. |
2735 | | // - `Self: AsBytes` ensures that all of the bytes of `slf` are |
2736 | | // initialized. |
2737 | | // - Since `slf` is derived from `self`, and `self` is an immutable |
2738 | | // reference, the only other references to this memory region that |
2739 | | // could exist are other immutable references, and those don't allow |
2740 | | // mutation. `AsBytes` prohibits types which contain `UnsafeCell`s, |
2741 | | // which are the only types for which this rule wouldn't be sufficient. |
2742 | | // - The total size of the resulting slice is no larger than |
2743 | | // `isize::MAX` because no allocation produced by safe code can be |
2744 | | // larger than `isize::MAX`. |
2745 | | // |
2746 | | // TODO(#429): Add references to docs and quotes. |
2747 | 0 | unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) } |
2748 | 0 | }
2749 | | |
2750 | | /// Gets the bytes of this value mutably. |
2751 | | /// |
2752 | | /// `as_bytes_mut` provides access to the bytes of this value as a mutable |
2753 | | /// byte slice. |
2754 | | /// |
2755 | | /// # Examples |
2756 | | /// |
2757 | | /// ``` |
2758 | | /// use zerocopy::AsBytes; |
2759 | | /// # use zerocopy_derive::*; |
2760 | | /// |
2761 | | /// # #[derive(Eq, PartialEq, Debug)] |
2762 | | /// #[derive(AsBytes, FromZeroes, FromBytes)] |
2763 | | /// #[repr(C)] |
2764 | | /// struct PacketHeader { |
2765 | | /// src_port: [u8; 2], |
2766 | | /// dst_port: [u8; 2], |
2767 | | /// length: [u8; 2], |
2768 | | /// checksum: [u8; 2], |
2769 | | /// } |
2770 | | /// |
2771 | | /// let mut header = PacketHeader { |
2772 | | /// src_port: [0, 1], |
2773 | | /// dst_port: [2, 3], |
2774 | | /// length: [4, 5], |
2775 | | /// checksum: [6, 7], |
2776 | | /// }; |
2777 | | /// |
2778 | | /// let bytes = header.as_bytes_mut(); |
2779 | | /// |
2780 | | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); |
2781 | | /// |
2782 | | /// bytes.reverse(); |
2783 | | /// |
2784 | | /// assert_eq!(header, PacketHeader { |
2785 | | /// src_port: [7, 6], |
2786 | | /// dst_port: [5, 4], |
2787 | | /// length: [3, 2], |
2788 | | /// checksum: [1, 0], |
2789 | | /// }); |
2790 | | /// ``` |
2791 | | #[inline(always)] |
2792 | | fn as_bytes_mut(&mut self) -> &mut [u8] |
2793 | | where |
2794 | | Self: FromBytes, |
2795 | | { |
2796 | | // Note that this method does not have a `Self: Sized` bound; |
2797 | | // `size_of_val` works for unsized values too. |
2798 | | let len = mem::size_of_val(self); |
2799 | | let slf: *mut Self = self; |
2800 | | |
2801 | | // SAFETY: |
2802 | | // - `slf.cast::<u8>()` is valid for reads and writes for `len * |
2803 | | // mem::size_of::<u8>()` many bytes because... |
2804 | | // - `slf` is the same pointer as `self`, and `self` is a reference |
2805 | | // which points to an object whose size is `len`. Thus... |
2806 | | // - The entire region of `len` bytes starting at `slf` is contained |
2807 | | // within a single allocation. |
2808 | | // - `slf` is non-null. |
2809 | | // - `slf` is trivially aligned to `align_of::<u8>() == 1`. |
2810 | | // - `Self: AsBytes` ensures that all of the bytes of `slf` are |
2811 | | // initialized. |
2812 | | // - `Self: FromBytes` ensures that no write to this memory region |
2813 | | // could result in it containing an invalid `Self`. |
2814 | | // - Since `slf` is derived from `self`, and `self` is a mutable |
2815 | | // reference, no other references to this memory region can exist. |
2816 | | // - The total size of the resulting slice is no larger than |
2817 | | // `isize::MAX` because no allocation produced by safe code can be |
2818 | | // larger than `isize::MAX`. |
2819 | | // |
2820 | | // TODO(#429): Add references to docs and quotes. |
2821 | | unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) } |
2822 | | } |
2823 | | |
2824 | | /// Writes a copy of `self` to `bytes`. |
2825 | | /// |
2826 | | /// If `bytes.len() != size_of_val(self)`, `write_to` returns `None`. |
2827 | | /// |
2828 | | /// # Examples |
2829 | | /// |
2830 | | /// ``` |
2831 | | /// use zerocopy::AsBytes; |
2832 | | /// # use zerocopy_derive::*; |
2833 | | /// |
2834 | | /// #[derive(AsBytes)] |
2835 | | /// #[repr(C)] |
2836 | | /// struct PacketHeader { |
2837 | | /// src_port: [u8; 2], |
2838 | | /// dst_port: [u8; 2], |
2839 | | /// length: [u8; 2], |
2840 | | /// checksum: [u8; 2], |
2841 | | /// } |
2842 | | /// |
2843 | | /// let header = PacketHeader { |
2844 | | /// src_port: [0, 1], |
2845 | | /// dst_port: [2, 3], |
2846 | | /// length: [4, 5], |
2847 | | /// checksum: [6, 7], |
2848 | | /// }; |
2849 | | /// |
2850 | | /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0]; |
2851 | | /// |
2852 | | /// header.write_to(&mut bytes[..]); |
2853 | | /// |
2854 | | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); |
2855 | | /// ``` |
2856 | | /// |
2857 | | /// If too many or too few target bytes are provided, `write_to` returns |
2858 | | /// `None` and leaves the target bytes unmodified: |
2859 | | /// |
2860 | | /// ``` |
2861 | | /// # use zerocopy::AsBytes; |
2862 | | /// # let header = u128::MAX; |
2863 | | /// let mut excessive_bytes = &mut [0u8; 128][..]; |
2864 | | /// |
2865 | | /// let write_result = header.write_to(excessive_bytes); |
2866 | | /// |
2867 | | /// assert!(write_result.is_none()); |
2868 | | /// assert_eq!(excessive_bytes, [0u8; 128]); |
2869 | | /// ``` |
2870 | | #[inline] |
2871 | | fn write_to(&self, bytes: &mut [u8]) -> Option<()> { |
2872 | | if bytes.len() != mem::size_of_val(self) { |
2873 | | return None; |
2874 | | } |
2875 | | |
2876 | | bytes.copy_from_slice(self.as_bytes()); |
2877 | | Some(()) |
2878 | | } |
2879 | | |
2880 | | /// Writes a copy of `self` to the prefix of `bytes`. |
2881 | | /// |
2882 | | /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes |
2883 | | /// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`. |
2884 | | /// |
2885 | | /// # Examples |
2886 | | /// |
2887 | | /// ``` |
2888 | | /// use zerocopy::AsBytes; |
2889 | | /// # use zerocopy_derive::*; |
2890 | | /// |
2891 | | /// #[derive(AsBytes)] |
2892 | | /// #[repr(C)] |
2893 | | /// struct PacketHeader { |
2894 | | /// src_port: [u8; 2], |
2895 | | /// dst_port: [u8; 2], |
2896 | | /// length: [u8; 2], |
2897 | | /// checksum: [u8; 2], |
2898 | | /// } |
2899 | | /// |
2900 | | /// let header = PacketHeader { |
2901 | | /// src_port: [0, 1], |
2902 | | /// dst_port: [2, 3], |
2903 | | /// length: [4, 5], |
2904 | | /// checksum: [6, 7], |
2905 | | /// }; |
2906 | | /// |
2907 | | /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; |
2908 | | /// |
2909 | | /// header.write_to_prefix(&mut bytes[..]); |
2910 | | /// |
2911 | | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]); |
2912 | | /// ``` |
2913 | | /// |
2914 | | /// If insufficient target bytes are provided, `write_to_prefix` returns |
2915 | | /// `None` and leaves the target bytes unmodified: |
2916 | | /// |
2917 | | /// ``` |
2918 | | /// # use zerocopy::AsBytes; |
2919 | | /// # let header = u128::MAX; |
2920 | | /// let mut insufficient_bytes = &mut [0, 0][..];
2921 | | ///
2922 | | /// let write_result = header.write_to_prefix(insufficient_bytes);
2923 | | ///
2924 | | /// assert!(write_result.is_none());
2925 | | /// assert_eq!(insufficient_bytes, [0, 0]);
2926 | | /// ``` |
2927 | | #[inline] |
2928 | | fn write_to_prefix(&self, bytes: &mut [u8]) -> Option<()> { |
2929 | | let size = mem::size_of_val(self); |
2930 | | bytes.get_mut(..size)?.copy_from_slice(self.as_bytes()); |
2931 | | Some(()) |
2932 | | } |
2933 | | |
2934 | | /// Writes a copy of `self` to the suffix of `bytes`. |
2935 | | /// |
2936 | | /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of |
2937 | | /// `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`. |
2938 | | /// |
2939 | | /// # Examples |
2940 | | /// |
2941 | | /// ``` |
2942 | | /// use zerocopy::AsBytes; |
2943 | | /// # use zerocopy_derive::*; |
2944 | | /// |
2945 | | /// #[derive(AsBytes)] |
2946 | | /// #[repr(C)] |
2947 | | /// struct PacketHeader { |
2948 | | /// src_port: [u8; 2], |
2949 | | /// dst_port: [u8; 2], |
2950 | | /// length: [u8; 2], |
2951 | | /// checksum: [u8; 2], |
2952 | | /// } |
2953 | | /// |
2954 | | /// let header = PacketHeader { |
2955 | | /// src_port: [0, 1], |
2956 | | /// dst_port: [2, 3], |
2957 | | /// length: [4, 5], |
2958 | | /// checksum: [6, 7], |
2959 | | /// }; |
2960 | | /// |
2961 | | /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; |
2962 | | /// |
2963 | | /// header.write_to_suffix(&mut bytes[..]); |
2964 | | /// |
2965 | | /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); |
2973 | | /// ``` |
2974 | | /// |
2975 | | /// If insufficient target bytes are provided, `write_to_suffix` returns |
2976 | | /// `None` and leaves the target bytes unmodified: |
2977 | | /// |
2978 | | /// ``` |
2979 | | /// # use zerocopy::AsBytes; |
2980 | | /// # let header = u128::MAX; |
2981 | | /// let mut insufficient_bytes = &mut [0, 0][..];
2982 | | ///
2983 | | /// let write_result = header.write_to_suffix(insufficient_bytes);
2984 | | ///
2985 | | /// assert!(write_result.is_none());
2986 | | /// assert_eq!(insufficient_bytes, [0, 0]);
2987 | | /// ``` |
2988 | | #[inline] |
2989 | | fn write_to_suffix(&self, bytes: &mut [u8]) -> Option<()> { |
2990 | | let start = bytes.len().checked_sub(mem::size_of_val(self))?; |
2991 | | bytes |
2992 | | .get_mut(start..) |
2993 | | .expect("`start` should be in-bounds of `bytes`") |
2994 | | .copy_from_slice(self.as_bytes()); |
2995 | | Some(()) |
2996 | | } |
2997 | | } |
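| |
| | // A round-trip sketch (hypothetical test, not from the original source):
| | // for a type that is both `AsBytes` and `FromBytes`, such as `u32` via the
| | // impls later in this file, `as_bytes` and `read_from` invert one another.
| | #[cfg(test)]
| | #[test]
| | fn as_bytes_read_from_round_trip_sketch() {
| |     let x = 0x1234_5678u32;
| |     assert_eq!(u32::read_from(x.as_bytes()), Some(x));
| | }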
2998 | | |
2999 | | /// Types with no alignment requirement. |
3000 | | /// |
3001 | | /// WARNING: Do not implement this trait yourself! Instead, use |
3002 | | /// `#[derive(Unaligned)]` (requires the `derive` Cargo feature). |
3003 | | /// |
3004 | | /// If `T: Unaligned`, then `align_of::<T>() == 1`. |
3005 | | /// |
3006 | | /// # Safety |
3007 | | /// |
3008 | | /// *This section describes what is required in order for `T: Unaligned`, and |
3009 | | /// what unsafe code may assume of such types. `#[derive(Unaligned)]` only |
3010 | | /// permits types which satisfy these requirements. If you don't plan on |
3011 | | /// implementing `Unaligned` manually, and you don't plan on writing unsafe code |
3012 | | /// that operates on `Unaligned` types, then you don't need to read this |
3013 | | /// section.* |
3014 | | /// |
3015 | | /// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a |
3016 | | /// reference to `T` at any memory location regardless of alignment. If a type |
3017 | | /// is marked as `Unaligned` which violates this contract, it may cause |
3018 | | /// undefined behavior. |
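| | ///
| | /// # Examples
| | ///
| | /// A minimal sketch (the type name is illustrative): deriving `Unaligned`
| | /// for a `#[repr(C)]` struct requires every field to be `Unaligned` itself.
| | ///
| | /// ```
| | /// # use zerocopy_derive::Unaligned;
| | /// #[derive(Unaligned)]
| | /// #[repr(C)]
| | /// struct Frame {
| | ///     kind: u8,
| | ///     payload: [u8; 3],
| | /// }
| | /// ```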
3019 | | pub unsafe trait Unaligned { |
3020 | | // The `Self: Sized` bound makes it so that `Unaligned` is still object |
3021 | | // safe. |
3022 | | #[doc(hidden)] |
3023 | | fn only_derive_is_allowed_to_implement_this_trait() |
3024 | | where |
3025 | | Self: Sized; |
3026 | | } |
3027 | | |
3028 | | safety_comment! { |
3029 | | /// SAFETY: |
3030 | | /// Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a |
3031 | | /// zero-sized type to have a size of 0 and an alignment of 1." |
3032 | | /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There |
3033 | | /// is only one possible sequence of 0 bytes, and `()` is inhabited. |
3034 | | /// - `AsBytes`: Since `()` has size 0, it contains no padding bytes. |
3035 | | /// - `Unaligned`: `()` has alignment 1. |
3036 | | /// |
3037 | | /// [1] https://doc.rust-lang.org/reference/type-layout.html#tuple-layout |
3038 | | unsafe_impl!((): TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
3039 | | assert_unaligned!(()); |
3040 | | } |
3041 | | |
3042 | | safety_comment! { |
3043 | | /// SAFETY: |
3044 | | /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: all bit |
3045 | | /// patterns are valid for numeric types [1] |
3046 | | /// - `AsBytes`: numeric types have no padding bytes [1] |
3047 | | /// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size |
3048 | | /// of `u8` and `i8` as 1 byte. We also know that: |
3049 | | /// - Alignment is >= 1 [3] |
3050 | | /// - Size is an integer multiple of alignment [4] |
3051 | | /// - The only value >= 1 for which 1 is an integer multiple is 1 |
3052 | | /// Therefore, the only possible alignment for `u8` and `i8` is 1. |
3053 | | /// |
3054 | | /// [1] Per https://doc.rust-lang.org/beta/reference/types/numeric.html#bit-validity: |
3055 | | /// |
3056 | | /// For every numeric type, `T`, the bit validity of `T` is equivalent to |
3057 | | /// the bit validity of `[u8; size_of::<T>()]`. An uninitialized byte is |
3058 | | /// not a valid `u8`. |
3059 | | /// |
3060 | | /// TODO(https://github.com/rust-lang/reference/pull/1392): Once this text |
3061 | | /// is available on the Stable docs, cite those instead. |
3062 | | /// |
3063 | | /// [2] https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout |
3064 | | /// |
3065 | | /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: |
3066 | | /// |
3067 | | /// Alignment is measured in bytes, and must be at least 1. |
3068 | | /// |
3069 | | /// [4] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: |
3070 | | /// |
3071 | | /// The size of a value is always a multiple of its alignment. |
3072 | | /// |
3073 | | /// TODO(#278): Once we've updated the trait docs to refer to `u8`s rather |
3074 | | /// than bits or bytes, update this comment, especially the reference to |
3075 | | /// [1]. |
3076 | | unsafe_impl!(u8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
3077 | | unsafe_impl!(i8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
3078 | | assert_unaligned!(u8, i8); |
3079 | | unsafe_impl!(u16: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3080 | | unsafe_impl!(i16: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3081 | | unsafe_impl!(u32: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3082 | | unsafe_impl!(i32: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3083 | | unsafe_impl!(u64: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3084 | | unsafe_impl!(i64: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3085 | | unsafe_impl!(u128: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3086 | | unsafe_impl!(i128: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3087 | | unsafe_impl!(usize: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3088 | | unsafe_impl!(isize: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3089 | | unsafe_impl!(f32: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3090 | | unsafe_impl!(f64: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3091 | | } |
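| |
| | // Sketch (hypothetical test, not from the original source) of the property
| | // the comment above relies on: every initialized bit pattern is a valid
| | // numeric value, so `read_from` succeeds on any correctly sized buffer.
| | #[cfg(test)]
| | #[test]
| | fn numeric_from_bytes_sketch() {
| |     let parsed = u16::read_from(&[0xAB, 0xCD][..]);
| |     assert_eq!(parsed, Some(u16::from_ne_bytes([0xAB, 0xCD])));
| | }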
3092 | | |
3093 | | safety_comment! { |
3094 | | /// SAFETY: |
3095 | | /// - `FromZeroes`: Valid since "[t]he value false has the bit pattern |
3096 | | /// 0x00" [1]. |
3097 | | /// - `AsBytes`: Since "the boolean type has a size and alignment of 1 each" |
3098 | | /// and "The value false has the bit pattern 0x00 and the value true has |
3099 | | /// the bit pattern 0x01" [1]. Thus, the only byte of the bool is always |
3100 | | /// initialized. |
3101 | | /// - `Unaligned`: Per the reference [1], "[a]n object with the boolean type |
3102 | | /// has a size and alignment of 1 each." |
3103 | | /// |
3104 | | /// [1] https://doc.rust-lang.org/reference/types/boolean.html |
3105 | | unsafe_impl!(bool: FromZeroes, AsBytes, Unaligned); |
3106 | | assert_unaligned!(bool); |
3107 | | /// SAFETY: |
3108 | | /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` |
3109 | | /// closure: |
3110 | | /// - Given `t: *mut bool` and `let r = *mut u8`, `r` refers to an object |
3111 | | /// of the same size as that referred to by `t`. This is true because |
3112 | | /// `bool` and `u8` have the same size (1 byte) [1]. |
3113 | | /// - Since the closure takes a `&u8` argument, given a `Ptr<'a, bool>` |
3114 | | /// which satisfies the preconditions of |
3115 | | /// `TryFromBytes::<bool>::is_bit_valid`, it must be guaranteed that the |
3116 | | /// memory referenced by that `Ptr` always contains a valid `u8`. Since |
3117 | | /// `bool`'s single byte is always initialized, `is_bit_valid`'s |
3118 | | /// precondition requires that the same is true of its argument. Since |
3119 | | /// `u8`'s only bit validity invariant is that its single byte must be |
3120 | | /// initialized, this memory is guaranteed to contain a valid `u8`. |
3121 | | /// - The alignment of `bool` is equal to the alignment of `u8`. [1] [2] |
3122 | | /// - The impl must only return `true` for its argument if the original |
3123 | | /// `Ptr<bool>` refers to a valid `bool`. We only return true if the |
3124 | | /// `u8` value is 0 or 1, and both of these are valid values for `bool`. |
3125 | | /// [3] |
3126 | | /// |
3127 | | /// [1] Per https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout: |
3128 | | /// |
3129 | | /// The size of most primitives is given in this table. |
3130 | | /// |
3131 | | /// | Type | `size_of::<Type>() ` | |
3132 | | /// |-----------|----------------------| |
3133 | | /// | `bool` | 1 | |
3134 | | /// | `u8`/`i8` | 1 | |
3135 | | /// |
3136 | | /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: |
3137 | | /// |
3138 | | /// The size of a value is always a multiple of its alignment. |
3139 | | /// |
3140 | | /// [3] Per https://doc.rust-lang.org/reference/types/boolean.html: |
3141 | | /// |
3142 | | /// The value false has the bit pattern 0x00 and the value true has the |
3143 | | /// bit pattern 0x01. |
3144 | | unsafe_impl!(bool: TryFromBytes; |byte: &u8| *byte < 2); |
3145 | | } |
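| |
| | // Sketch (hypothetical test, not from the original source): `bool:
| | // FromZeroes` means the all-zeroes instance is `false`, matching the bit
| | // pattern cited above.
| | #[cfg(test)]
| | #[test]
| | fn bool_from_zeroes_sketch() {
| |     assert_eq!(bool::new_zeroed(), false);
| | }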
3146 | | safety_comment! { |
3147 | | /// SAFETY: |
3148 | | /// - `FromZeroes`: Per reference [1], "[a] value of type char is a Unicode |
3149 | | /// scalar value (i.e. a code point that is not a surrogate), represented |
3150 | | /// as a 32-bit unsigned word in the 0x0000 to 0xD7FF or 0xE000 to |
3151 | | /// 0x10FFFF range" which contains 0x0000. |
3152 | | /// - `AsBytes`: `char` is per reference [1] "represented as a 32-bit |
3153 | | /// unsigned word" (`u32`) which is `AsBytes`. Note that unlike `u32`, not |
3154 | | /// all bit patterns are valid for `char`. |
3155 | | /// |
3156 | | /// [1] https://doc.rust-lang.org/reference/types/textual.html |
3157 | | unsafe_impl!(char: FromZeroes, AsBytes); |
3158 | | /// SAFETY: |
3159 | | /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` |
3160 | | /// closure: |
3161 | | /// - Given `t: *mut char` and `let r = *mut u32`, `r` refers to an object |
3162 | | /// of the same size as that referred to by `t`. This is true because |
3163 | | /// `char` and `u32` have the same size [1]. |
3164 | | /// - Since the closure takes a `&u32` argument, given a `Ptr<'a, char>` |
3165 | | /// which satisfies the preconditions of |
3166 | | /// `TryFromBytes::<char>::is_bit_valid`, it must be guaranteed that the |
3167 | | /// memory referenced by that `Ptr` always contains a valid `u32`. Since |
3168 | | /// `char`'s bytes are always initialized [2], `is_bit_valid`'s |
3169 | | /// precondition requires that the same is true of its argument. Since |
3170 | | /// `u32`'s only bit validity invariant is that its bytes must be |
3171 | | /// initialized, this memory is guaranteed to contain a valid `u32`. |
3172 | | /// - The alignment of `char` is equal to the alignment of `u32`. [1] |
3173 | | /// - The impl must only return `true` for its argument if the original |
3174 | | /// `Ptr<char>` refers to a valid `char`. `char::from_u32` guarantees |
3175 | | /// that it returns `None` if its input is not a valid `char`. [3] |
3176 | | /// |
3177 | | /// [1] Per https://doc.rust-lang.org/nightly/reference/types/textual.html#layout-and-bit-validity: |
3178 | | /// |
3179 | | /// `char` is guaranteed to have the same size and alignment as `u32` on |
3180 | | /// all platforms. |
3181 | | /// |
3182 | | /// [2] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32: |
3183 | | /// |
3184 | | /// Every byte of a `char` is guaranteed to be initialized. |
3185 | | /// |
3186 | | /// [3] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32: |
3187 | | /// |
3188 | | /// `from_u32()` will return `None` if the input is not a valid value for |
3189 | | /// a `char`. |
3190 | | unsafe_impl!(char: TryFromBytes; |candidate: &u32| char::from_u32(*candidate).is_some()); |
3191 | | } |
3192 | | safety_comment! { |
3193 | | /// SAFETY: |
3194 | | /// - `FromZeroes`, `AsBytes`, `Unaligned`: Per the reference [1], `str` |
3195 | | /// has the same layout as `[u8]`, and `[u8]` is `FromZeroes`, `AsBytes`, |
3196 | | /// and `Unaligned`. |
3197 | | /// |
3198 | | /// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!` |
3199 | | /// uses `align_of`, which only works for `Sized` types. |
3200 | | /// |
3201 | | /// TODO(#429): Add quotes from documentation. |
3202 | | /// |
3203 | | /// [1] https://doc.rust-lang.org/reference/type-layout.html#str-layout |
3204 | | unsafe_impl!(str: FromZeroes, AsBytes, Unaligned); |
3205 | | /// SAFETY: |
3206 | | /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` |
3207 | | /// closure: |
3208 | | /// - Given `t: *mut str` and `let r = *mut [u8]`, `r` refers to an object |
3209 | | /// of the same size as that referred to by `t`. This is true because |
3210 | | /// `str` and `[u8]` have the same representation. [1] |
3211 | | /// - Since the closure takes a `&[u8]` argument, given a `Ptr<'a, str>` |
3212 | | /// which satisfies the preconditions of |
3213 | | /// `TryFromBytes::<str>::is_bit_valid`, it must be guaranteed that the |
3214 | | /// memory referenced by that `Ptr` always contains a valid `[u8]`. |
3215 | | /// Since `str`'s bytes are always initialized [1], `is_bit_valid`'s |
3216 | | /// precondition requires that the same is true of its argument. Since |
3217 | | /// `[u8]`'s only bit validity invariant is that its bytes must be |
3218 | | /// initialized, this memory is guaranteed to contain a valid `[u8]`. |
3219 | | /// - The alignment of `str` is equal to the alignment of `[u8]`. [1] |
3220 | | /// - The impl must only return `true` for its argument if the original |
3221 | | /// `Ptr<str>` refers to a valid `str`. `str::from_utf8` guarantees that |
3222 | | /// it returns `Err` if its input is not a valid `str`. [2] |
3223 | | /// |
3224 | | /// [1] Per https://doc.rust-lang.org/reference/types/textual.html: |
3225 | | /// |
3226 | | /// A value of type `str` is represented the same way as `[u8]`.
3227 | | /// |
3228 | | /// [2] Per https://doc.rust-lang.org/core/str/fn.from_utf8.html#errors: |
3229 | | /// |
3230 | | /// Returns `Err` if the slice is not UTF-8. |
3231 | | unsafe_impl!(str: TryFromBytes; |candidate: &[u8]| core::str::from_utf8(candidate).is_ok()); |
3232 | | } |
3233 | | |
3234 | | safety_comment! { |
3235 | | // `NonZeroXxx` is `AsBytes`, but not `FromZeroes` or `FromBytes`. |
3236 | | // |
3237 | | /// SAFETY: |
3238 | | /// - `AsBytes`: `NonZeroXxx` has the same layout as its associated |
3239 | | /// primitive. Since it is the same size, this guarantees it has no |
3240 | | /// padding - integers have no padding, and there's no room for padding |
3241 | | /// if it can represent all of the same values except 0. |
3242 | | /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that |
3243 | | /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2] |
3244 | | /// This is worded in a way that makes it unclear whether it's meant as a |
3245 | | /// guarantee, but given the purpose of those types, it's virtually |
3246 | | /// unthinkable that that would ever change. `Option` cannot be smaller |
3247 | | /// than its contained type, which implies that `Option<NonZeroX8>` and
3248 | | /// `NonZeroX8` are of size 1 or 0. `NonZeroX8` can represent multiple states, so they cannot
3249 | | /// be 0 bytes, which means that they must be 1 byte. The only valid |
3250 | | /// alignment for a 1-byte type is 1. |
3251 | | /// |
3252 | | /// TODO(#429): Add quotes from documentation. |
3253 | | /// |
3254 | | /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html |
3255 | | /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html |
3256 | | /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation |
3257 | | /// that layout is the same as primitive layout. |
3258 | | unsafe_impl!(NonZeroU8: AsBytes, Unaligned); |
3259 | | unsafe_impl!(NonZeroI8: AsBytes, Unaligned); |
3260 | | assert_unaligned!(NonZeroU8, NonZeroI8); |
3261 | | unsafe_impl!(NonZeroU16: AsBytes); |
3262 | | unsafe_impl!(NonZeroI16: AsBytes); |
3263 | | unsafe_impl!(NonZeroU32: AsBytes); |
3264 | | unsafe_impl!(NonZeroI32: AsBytes); |
3265 | | unsafe_impl!(NonZeroU64: AsBytes); |
3266 | | unsafe_impl!(NonZeroI64: AsBytes); |
3267 | | unsafe_impl!(NonZeroU128: AsBytes); |
3268 | | unsafe_impl!(NonZeroI128: AsBytes); |
3269 | | unsafe_impl!(NonZeroUsize: AsBytes); |
3270 | | unsafe_impl!(NonZeroIsize: AsBytes); |
3271 | | /// SAFETY: |
3272 | | /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` |
3273 | | /// closure: |
3274 | | /// - Given `t: *mut NonZeroXxx` and `let r = *mut xxx`, `r` refers to an |
3275 | | /// object of the same size as that referred to by `t`. This is true |
3276 | | /// because `NonZeroXxx` and `xxx` have the same size. [1] |
3277 | | /// - Since the closure takes a `&xxx` argument, given a `Ptr<'a, |
3278 | | /// NonZeroXxx>` which satisfies the preconditions of |
3279 | | /// `TryFromBytes::<NonZeroXxx>::is_bit_valid`, it must be guaranteed |
3280 | | /// that the memory referenced by that `Ptr` always contains a valid |
3281 | | /// `xxx`. Since `NonZeroXxx`'s bytes are always initialized [1], |
3282 | | /// `is_bit_valid`'s precondition requires that the same is true of its |
3283 | | /// argument. Since `xxx`'s only bit validity invariant is that its |
3284 | | /// bytes must be initialized, this memory is guaranteed to contain a |
3285 | | /// valid `xxx`. |
3286 | | /// - The alignment of `NonZeroXxx` is equal to the alignment of `xxx`. |
3287 | | /// [1] |
3288 | | /// - The impl must only return `true` for its argument if the original |
3289 | | /// `Ptr<NonZeroXxx>` refers to a valid `NonZeroXxx`. The only `xxx` |
3290 | | /// which is not also a valid `NonZeroXxx` is 0. [1] |
3291 | | /// |
3292 | | /// [1] Per https://doc.rust-lang.org/core/num/struct.NonZeroU16.html: |
3293 | | /// |
3294 | | /// `NonZeroU16` is guaranteed to have the same layout and bit validity as |
3295 | | /// `u16` with the exception that `0` is not a valid instance. |
3296 | | unsafe_impl!(NonZeroU8: TryFromBytes; |n: &u8| *n != 0); |
3297 | | unsafe_impl!(NonZeroI8: TryFromBytes; |n: &i8| *n != 0); |
3298 | | unsafe_impl!(NonZeroU16: TryFromBytes; |n: &u16| *n != 0); |
3299 | | unsafe_impl!(NonZeroI16: TryFromBytes; |n: &i16| *n != 0); |
3300 | | unsafe_impl!(NonZeroU32: TryFromBytes; |n: &u32| *n != 0); |
3301 | | unsafe_impl!(NonZeroI32: TryFromBytes; |n: &i32| *n != 0); |
3302 | | unsafe_impl!(NonZeroU64: TryFromBytes; |n: &u64| *n != 0); |
3303 | | unsafe_impl!(NonZeroI64: TryFromBytes; |n: &i64| *n != 0); |
3304 | | unsafe_impl!(NonZeroU128: TryFromBytes; |n: &u128| *n != 0); |
3305 | | unsafe_impl!(NonZeroI128: TryFromBytes; |n: &i128| *n != 0); |
3306 | | unsafe_impl!(NonZeroUsize: TryFromBytes; |n: &usize| *n != 0); |
3307 | | unsafe_impl!(NonZeroIsize: TryFromBytes; |n: &isize| *n != 0); |
3308 | | } |
3309 | | safety_comment! { |
3310 | | /// SAFETY: |
3311 | | /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`, |
3312 | | /// `AsBytes`: The Rust compiler reuses the `0` value to represent `None`, so
3313 | | /// `size_of::<Option<NonZeroXxx>>() == size_of::<xxx>()`; see |
3314 | | /// `NonZeroXxx` documentation. |
3315 | | /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that |
3316 | | /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2] |
3317 | | /// This is worded in a way that makes it unclear whether it's meant as a |
3318 | | /// guarantee, but given the purpose of those types, it's virtually |
3319 | | /// unthinkable that that would ever change. The only valid alignment for |
3320 | | /// a 1-byte type is 1. |
3321 | | /// |
3322 | | /// TODO(#429): Add quotes from documentation. |
3323 | | /// |
3324 | | /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html |
3325 | | /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html |
3326 | | /// |
3327 | | /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation |
3328 | | /// for layout guarantees. |
3329 | | unsafe_impl!(Option<NonZeroU8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
3330 | | unsafe_impl!(Option<NonZeroI8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
3331 | | assert_unaligned!(Option<NonZeroU8>, Option<NonZeroI8>); |
3332 | | unsafe_impl!(Option<NonZeroU16>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3333 | | unsafe_impl!(Option<NonZeroI16>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3334 | | unsafe_impl!(Option<NonZeroU32>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3335 | | unsafe_impl!(Option<NonZeroI32>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3336 | | unsafe_impl!(Option<NonZeroU64>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3337 | | unsafe_impl!(Option<NonZeroI64>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3338 | | unsafe_impl!(Option<NonZeroU128>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3339 | | unsafe_impl!(Option<NonZeroI128>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3340 | | unsafe_impl!(Option<NonZeroUsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3341 | | unsafe_impl!(Option<NonZeroIsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes); |
3342 | | } |
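| |
| | // Sketch (hypothetical test, not from the original source) of the niche
| | // guarantee cited above: `Option<NonZeroU8>` occupies a single byte, and
| | // the all-zeroes pattern decodes as `None`.
| | #[cfg(test)]
| | #[test]
| | fn option_nonzero_niche_sketch() {
| |     assert_eq!(core::mem::size_of::<Option<NonZeroU8>>(), 1);
| |     assert_eq!(Option::<NonZeroU8>::read_from(&[0u8][..]), Some(None));
| | }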
3343 | | |
3344 | | safety_comment! { |
3345 | | /// SAFETY: |
3346 | | /// The following types can be transmuted from `[0u8; size_of::<T>()]`. [1] |
3347 | | /// None of them contain `UnsafeCell`s, and so they all soundly implement |
3348 | | /// `FromZeroes`. |
3349 | | /// |
3350 | | /// [1] Per |
3351 | | /// https://doc.rust-lang.org/nightly/core/option/index.html#representation: |
3352 | | /// |
3353 | | /// Rust guarantees to optimize the following types `T` such that |
3354 | | /// [`Option<T>`] has the same size and alignment as `T`. In some of these |
3355 | | /// cases, Rust further guarantees that `transmute::<_, Option<T>>([0u8; |
3356 | | /// size_of::<T>()])` is sound and produces `Option::<T>::None`. These |
3357 | | /// cases are identified by the second column: |
3358 | | /// |
3359 | | /// | `T` | `transmute::<_, Option<T>>([0u8; size_of::<T>()])` sound? | |
3360 | | /// |-----------------------|-----------------------------------------------------------| |
3361 | | /// | [`Box<U>`] | when `U: Sized` | |
3362 | | /// | `&U` | when `U: Sized` | |
3363 | | /// | `&mut U` | when `U: Sized` | |
3364 | | /// | [`ptr::NonNull<U>`] | when `U: Sized` | |
3365 | | /// | `fn`, `extern "C" fn` | always | |
3366 | | /// |
3367 | | /// TODO(#429), TODO(https://github.com/rust-lang/rust/pull/115333): Cite |
3368 | | /// the Stable docs once they're available. |
3369 | | #[cfg(feature = "alloc")] |
3370 | | unsafe_impl!( |
3371 | | #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] |
3372 | | T => FromZeroes for Option<Box<T>> |
3373 | | ); |
3374 | | unsafe_impl!(T => FromZeroes for Option<&'_ T>); |
3375 | | unsafe_impl!(T => FromZeroes for Option<&'_ mut T>); |
3376 | | unsafe_impl!(T => FromZeroes for Option<NonNull<T>>); |
3377 | | unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_fn!(...)); |
3378 | | unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_extern_c_fn!(...)); |
3379 | | } |
3380 | | |
3381 | | safety_comment! { |
3382 | | /// SAFETY: |
3383 | | /// Per reference [1]: |
3384 | | /// "For all T, the following are guaranteed: |
3385 | | /// size_of::<PhantomData<T>>() == 0 |
3386 | | /// align_of::<PhantomData<T>>() == 1". |
3387 | | /// This gives: |
3388 | | /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There |
3389 | | /// is only one possible sequence of 0 bytes, and `PhantomData` is |
3390 | | /// inhabited. |
3391 | | /// - `AsBytes`: Since `PhantomData` has size 0, it contains no padding |
3392 | | /// bytes. |
3393 | | /// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment |
3394 | | /// 1. |
3395 | | /// |
3396 | | /// [1] https://doc.rust-lang.org/std/marker/struct.PhantomData.html#layout-1 |
3397 | | unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData<T>); |
3398 | | unsafe_impl!(T: ?Sized => FromZeroes for PhantomData<T>); |
3399 | | unsafe_impl!(T: ?Sized => FromBytes for PhantomData<T>); |
3400 | | unsafe_impl!(T: ?Sized => AsBytes for PhantomData<T>); |
3401 | | unsafe_impl!(T: ?Sized => Unaligned for PhantomData<T>); |
3402 | | assert_unaligned!(PhantomData<()>, PhantomData<u8>, PhantomData<u64>); |
3403 | | } |
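| |
| | // Sketch (hypothetical test, not from the original source): `PhantomData`
| | // is zero-sized, so its `AsBytes` view is the empty slice.
| | #[cfg(test)]
| | #[test]
| | fn phantom_data_zero_sized_sketch() {
| |     assert!(PhantomData::<u64>.as_bytes().is_empty());
| | }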
3404 | | safety_comment! { |
3405 | | /// SAFETY: |
3406 | | /// `Wrapping<T>` is guaranteed by its docs [1] to have the same layout and |
3407 | | /// bit validity as `T`. Also, `Wrapping<T>` is `#[repr(transparent)]`, and |
3408 | | /// has a single field, which is `pub`. Per the reference [2], this means |
3409 | | /// that the `#[repr(transparent)]` attribute is "considered part of the |
3410 | | /// public ABI". |
3411 | | /// |
3412 | | /// - `TryFromBytes`: The safety requirements for `unsafe_impl!` with an |
3413 | | /// `is_bit_valid` closure: |
3414 | | /// - Given `t: *mut Wrapping<T>` and `let r = *mut T`, `r` refers to an |
3415 | | /// object of the same size as that referred to by `t`. This is true |
3416 | | /// because `Wrapping<T>` and `T` have the same layout |
3417 | | /// - The alignment of `Wrapping<T>` is equal to the alignment of `T`. |
3418 | | /// - The impl must only return `true` for its argument if the original |
3419 | | /// `Ptr<Wrapping<T>>` refers to a valid `Wrapping<T>`. Since |
3420 | | /// `Wrapping<T>` has the same bit validity as `T`, and since our impl |
3421 | | /// just calls `T::is_bit_valid`, our impl returns `true` exactly when |
3422 | | /// its argument contains a valid `Wrapping<T>`. |
3423 | | /// - `FromBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if |
3424 | | /// `T: FromBytes`, then all initialized byte sequences are valid |
3425 | | /// instances of `Wrapping<T>`. Similarly, if `T: FromBytes`, then |
3426 | | /// `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl FromBytes |
3427 | | /// for Wrapping<T> where T: FromBytes` is a sound impl. |
3428 | | /// - `AsBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if |
3429 | | /// `T: AsBytes`, then all valid instances of `Wrapping<T>` have all of |
3430 | | /// their bytes initialized. Similarly, if `T: AsBytes`, then |
3431 | | /// `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl AsBytes |
3432 | | /// for Wrapping<T> where T: AsBytes` is a valid impl. |
3433 | | /// - `Unaligned`: Since `Wrapping<T>` has the same layout as `T`, |
3434 | | /// `Wrapping<T>` has alignment 1 exactly when `T` does. |
3435 | | /// |
3441 | | /// TODO(#429): Add quotes from documentation. |
3442 | | /// |
3443 | | /// [1] TODO(https://doc.rust-lang.org/nightly/core/num/struct.Wrapping.html#layout-1): |
3444 | | /// Reference this documentation once it's available on stable. |
3445 | | /// |
3446 | | /// [2] https://doc.rust-lang.org/nomicon/other-reprs.html#reprtransparent |
3447 | | unsafe_impl!(T: TryFromBytes => TryFromBytes for Wrapping<T>; |candidate: Ptr<T>| { |
3448 | | // SAFETY: |
3449 | | // - Since `T` and `Wrapping<T>` have the same layout and bit validity |
3450 | | // and contain the same fields, `T` contains `UnsafeCell`s exactly |
3451 | | // where `Wrapping<T>` does. Thus, all memory and `UnsafeCell` |
3452 | | // preconditions of `T::is_bit_valid` hold exactly when the same |
3453 | | // preconditions for `Wrapping<T>::is_bit_valid` hold. |
3454 | | // - By the same token, since `candidate` is guaranteed to have its |
3455 | | // bytes initialized where there are always initialized bytes in |
3456 | | // `Wrapping<T>`, the same is true for `T`. |
3457 | | unsafe { T::is_bit_valid(candidate) } |
3458 | | }); |
3459 | | unsafe_impl!(T: FromZeroes => FromZeroes for Wrapping<T>); |
3460 | | unsafe_impl!(T: FromBytes => FromBytes for Wrapping<T>); |
3461 | | unsafe_impl!(T: AsBytes => AsBytes for Wrapping<T>); |
3462 | | unsafe_impl!(T: Unaligned => Unaligned for Wrapping<T>); |
3463 | | assert_unaligned!(Wrapping<()>, Wrapping<u8>); |
3464 | | } |
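| |
| | // Sketch (hypothetical test, not from the original source): `Wrapping<u16>`
| | // parses from the same bytes as `u16`, per the shared-layout argument above.
| | #[cfg(test)]
| | #[test]
| | fn wrapping_same_layout_sketch() {
| |     let w = Wrapping::<u16>::read_from(&[0xFF, 0xFF][..]).unwrap();
| |     assert_eq!(w, Wrapping(u16::MAX));
| | }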
3465 | | safety_comment! { |
3466 | | // `MaybeUninit<T>` is `FromZeroes` and `FromBytes`, but never `AsBytes` |
3467 | | // since it may contain uninitialized bytes. |
3468 | | // |
3469 | | /// SAFETY: |
3470 | | /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: |
3471 | | /// `MaybeUninit<T>` has no restrictions on its contents. Unfortunately, |
3472 | | /// in addition to bit validity, `TryFromBytes`, `FromZeroes` and |
3473 | | /// `FromBytes` also require that implementers contain no `UnsafeCell`s. |
3474 | | /// Thus, we require `T: Trait` in order to ensure that `T` - and thus |
3475 | | /// `MaybeUninit<T>` - contains no `UnsafeCell`s. Thus, requiring that `T`
3476 | | /// implement each of these traits is sufficient. |
3477 | | /// - `Unaligned`: "MaybeUninit<T> is guaranteed to have the same size, |
3478 | | /// alignment, and ABI as T" [1] |
3479 | | /// |
3480 | | /// [1] https://doc.rust-lang.org/stable/core/mem/union.MaybeUninit.html#layout-1 |
3481 | | /// |
3482 | | /// TODO(https://github.com/google/zerocopy/issues/251): If we split |
3483 | | /// `FromBytes` and `RefFromBytes`, or if we introduce a separate |
3484 | | /// `NoCell`/`Freeze` trait, we can relax the trait bounds for `FromZeroes` |
3485 | | /// and `FromBytes`. |
3486 | | unsafe_impl!(T: TryFromBytes => TryFromBytes for MaybeUninit<T>); |
3487 | | unsafe_impl!(T: FromZeroes => FromZeroes for MaybeUninit<T>); |
3488 | | unsafe_impl!(T: FromBytes => FromBytes for MaybeUninit<T>); |
3489 | | unsafe_impl!(T: Unaligned => Unaligned for MaybeUninit<T>); |
3490 | | assert_unaligned!(MaybeUninit<()>, MaybeUninit<u8>); |
3491 | | } |
3492 | | safety_comment! { |
3493 | | /// SAFETY: |
3494 | | /// `ManuallyDrop` has the same layout and bit validity as `T` [1], and |
3495 | | /// accessing the inner value is safe (meaning that it's unsound to leave |
3496 | | /// the inner value uninitialized while exposing the `ManuallyDrop` to safe |
3497 | | /// code). |
3498 | | /// - `FromZeroes`, `FromBytes`: Since it has the same layout as `T`, any |
3499 | | /// valid `T` is a valid `ManuallyDrop<T>`. If `T: FromZeroes`, a sequence |
3500 | | /// of zero bytes is a valid `T`, and thus a valid `ManuallyDrop<T>`. If |
3501 | | /// `T: FromBytes`, any sequence of bytes is a valid `T`, and thus a valid |
3502 | | /// `ManuallyDrop<T>`. |
3503 | | /// - `AsBytes`: Since it has the same layout as `T`, and since it's unsound |
3504 | | /// to let safe code access a `ManuallyDrop` whose inner value is |
3505 | | /// uninitialized, safe code can only ever access a `ManuallyDrop` whose |
3506 | | /// contents are a valid `T`. Since `T: AsBytes`, this means that safe |
3507 | | /// code can only ever access a `ManuallyDrop` with all initialized bytes. |
3508 | | /// - `Unaligned`: `ManuallyDrop` has the same layout (and thus alignment) |
3509 | | /// as `T`, and `T: Unaligned` guarantees that that alignment is 1. |
3510 | | /// |
3511 | | /// `ManuallyDrop<T>` is guaranteed to have the same layout and bit |
3512 | | /// validity as `T` |
3513 | | /// |
3514 | | /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html: |
3515 | | /// |
3516 | | /// TODO(#429): |
3517 | | /// - Add quotes from docs. |
3518 | | /// - Once [1] (added in |
3519 | | /// https://github.com/rust-lang/rust/pull/115522) is available on stable, |
3520 | | /// quote the stable docs instead of the nightly docs. |
3521 | | unsafe_impl!(T: ?Sized + FromZeroes => FromZeroes for ManuallyDrop<T>); |
3522 | | unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop<T>); |
3523 | | unsafe_impl!(T: ?Sized + AsBytes => AsBytes for ManuallyDrop<T>); |
3524 | | unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop<T>); |
3525 | | assert_unaligned!(ManuallyDrop<()>, ManuallyDrop<u8>); |
3526 | | } |
3527 | | safety_comment! { |
3528 | | /// SAFETY: |
3529 | | /// Per the reference [1]: |
3530 | | /// |
3531 | | /// An array of `[T; N]` has a size of `size_of::<T>() * N` and the same |
3532 | | /// alignment of `T`. Arrays are laid out so that the zero-based `nth` |
3533 | | /// element of the array is offset from the start of the array by `n * |
3534 | | /// size_of::<T>()` bytes. |
3535 | | /// |
3536 | | /// ... |
3537 | | /// |
3538 | | /// Slices have the same layout as the section of the array they slice. |
3539 | | /// |
3540 | | /// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s |
3541 | | /// laid out back-to-back with no bytes in between. Therefore, `[T]` or `[T; |
3542 | | /// N]` are `TryFromBytes`, `FromZeroes`, `FromBytes`, and `AsBytes` if `T` |
3543 | | /// is (respectively). Furthermore, since an array/slice has "the same |
3544 | | /// alignment of `T`", `[T]` and `[T; N]` are `Unaligned` if `T` is. |
3545 | | /// |
3546 | | /// Note that we don't `assert_unaligned!` for slice types because |
3547 | | /// `assert_unaligned!` uses `align_of`, which only works for `Sized` types. |
3548 | | /// |
3549 | | /// [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout |
3550 | | unsafe_impl!(const N: usize, T: FromZeroes => FromZeroes for [T; N]); |
3551 | | unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]); |
3552 | | unsafe_impl!(const N: usize, T: AsBytes => AsBytes for [T; N]); |
3553 | | unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]); |
3554 | | assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]); |
3555 | | unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c: Ptr<[T]>| { |
3556 | | // SAFETY: Assuming the preconditions of `is_bit_valid` are satisfied, |
3557 | | // so too will the postcondition: that, if `is_bit_valid(candidate)` |
3558 | | // returns true, `*candidate` contains a valid `Self`. Per the reference |
3559 | | // [1]: |
3560 | | // |
3561 | | // An array of `[T; N]` has a size of `size_of::<T>() * N` and the |
3562 | | // same alignment of `T`. Arrays are laid out so that the zero-based |
3563 | | // `nth` element of the array is offset from the start of the array by |
3564 | | // `n * size_of::<T>()` bytes. |
3565 | | // |
3566 | | // ... |
3567 | | // |
3568 | | // Slices have the same layout as the section of the array they slice. |
3569 | | // |
3570 | | // In other words, the layout of a `[T]` is a sequence of `T`s laid out
3571 | | // back-to-back with no bytes in between. If all elements in `candidate` |
3572 | | // are `is_bit_valid`, so too is `candidate`. |
3573 | | // |
3574 | | // Note that any of the below calls may panic, but that would still be
3575 | | // sound. `is_bit_valid` does not promise that it will
3576 | | // not panic (in fact, it explicitly warns that it's a possibility), and |
3577 | | // we have not violated any safety invariants that we must fix before |
3578 | | // returning. |
3579 | | c.iter().all(|elem| |
3580 | | // SAFETY: We uphold the safety contract of `is_bit_valid(elem)`, by |
3581 | | // precondition on the surrounding call to `is_bit_valid`. The |
3582 | | // memory referenced by `elem` is contained entirely within `c`, and |
3583 | | // satisfies the same preconditions as `c`. By axiom, we assume
3584 | | // that `Iterator::all` does not invalidate these preconditions
3585 | | // (e.g., by writing to `elem`). Since `elem` is derived from `c`,
3586 | | // it is only possible for uninitialized bytes to occur in `elem` at |
3587 | | // the same bytes they occur within `c`. |
3588 | | unsafe { <T as TryFromBytes>::is_bit_valid(elem) } |
3589 | | ) |
3590 | | }); |
3591 | | unsafe_impl!(T: FromZeroes => FromZeroes for [T]); |
3592 | | unsafe_impl!(T: FromBytes => FromBytes for [T]); |
3593 | | unsafe_impl!(T: AsBytes => AsBytes for [T]); |
3594 | | unsafe_impl!(T: Unaligned => Unaligned for [T]); |
3595 | | } |
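  | | // For illustration, a minimal sketch of what these impls enable: because
  | | // element-level guarantees lift to arrays and slices, an array of a
  | | // multi-byte primitive can be viewed directly as raw bytes.
  | | //
  | | //     use zerocopy::AsBytes;
  | | //
  | | //     let words = [0x0102u16, 0x0304];
  | | //     // Two `u16`s occupy four bytes; the byte values depend on endianness.
  | | //     assert_eq!(words.as_bytes().len(), 4);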
3596 | | safety_comment! { |
3597 | | /// SAFETY: |
3598 | | /// - `FromZeroes`: For thin pointers (note that `T: Sized`), the zero |
3599 | | /// pointer is considered "null". [1] No operations which require |
3600 | | /// provenance are legal on null pointers, so this is not a footgun. |
3601 | | /// |
3602 | | /// NOTE(#170): Implementing `FromBytes` and `AsBytes` for raw pointers |
3603 | | /// would be sound, but carries provenance footguns. We want to support |
3604 | | /// `FromBytes` and `AsBytes` for raw pointers eventually, but we are |
3605 | | /// holding off until we can figure out how to address those footguns. |
3606 | | /// |
3607 | | /// [1] TODO(https://github.com/rust-lang/rust/pull/116988): Cite the |
3608 | | /// documentation once this PR lands. |
3609 | | unsafe_impl!(T => FromZeroes for *const T); |
3610 | | unsafe_impl!(T => FromZeroes for *mut T); |
3611 | | } |
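  | | // For illustration, a minimal sketch of what these impls enable: the
  | | // all-zeroes bit pattern yields a null raw pointer.
  | | //
  | | //     use zerocopy::FromZeroes;
  | | //
  | | //     let p: *const u8 = FromZeroes::new_zeroed();
  | | //     assert!(p.is_null());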
3612 | | |
3613 | | // SIMD support |
3614 | | // |
3615 | | // Per the Unsafe Code Guidelines Reference [1]: |
3616 | | // |
3617 | | // Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs |
3618 | | // containing `N` elements of type `T` where `N` is a power-of-two and the |
3619 | | // size and alignment requirements of `T` are equal: |
3620 | | // |
3621 | | // ```rust |
3622 | | // #[repr(simd)] |
3623 | | // struct Vector<T, N>(T_0, ..., T_(N - 1)); |
3624 | | // ``` |
3625 | | // |
3626 | | // ... |
3627 | | // |
3628 | | // The size of `Vector` is `N * size_of::<T>()` and its alignment is an |
3629 | | // implementation-defined function of `T` and `N` greater than or equal to |
3630 | | // `align_of::<T>()`. |
3631 | | // |
3632 | | // ... |
3633 | | // |
3634 | | // Vector elements are laid out in source field order, enabling random access |
3635 | | // to vector elements by reinterpreting the vector as an array: |
3636 | | // |
3637 | | // ```rust |
3638 | | // union U { |
3639 | | // vec: Vector<T, N>, |
3640 | | // arr: [T; N] |
3641 | | // } |
3642 | | // |
3643 | | // assert_eq!(size_of::<Vector<T, N>>(), size_of::<[T; N]>()); |
3644 | | // assert!(align_of::<Vector<T, N>>() >= align_of::<[T; N]>()); |
3645 | | // |
3646 | | // unsafe { |
3647 | | // let u = U { vec: Vector<T, N>(t_0, ..., t_(N - 1)) }; |
3648 | | // |
3649 | | // assert_eq!(u.vec.0, u.arr[0]); |
3650 | | // // ... |
3651 | | // assert_eq!(u.vec.(N - 1), u.arr[N - 1]); |
3652 | | // } |
3653 | | // ``` |
3654 | | // |
3655 | | // Given this background, we can observe that: |
3656 | | // - The size and bit pattern requirements of a SIMD type are the same as those
3657 | | //   of the corresponding array type. Thus, for any SIMD type whose primitive `T` is
3658 | | // `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes`, that SIMD type is |
3659 | | // also `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` respectively. |
3660 | | // - Since no upper bound is placed on the alignment, no SIMD type can be |
3661 | | // guaranteed to be `Unaligned`. |
3662 | | // |
3663 | | // Also per [1]: |
3664 | | // |
3665 | | // This chapter represents the consensus from issue #38. The statements in |
3666 | | // here are not (yet) "guaranteed" not to change until an RFC ratifies them. |
3667 | | // |
3668 | | // See issue #38 [2]. While this behavior is not technically guaranteed, the |
3669 | | // likelihood that the behavior will change such that SIMD types are no longer |
3670 | | // `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` is next to zero, as |
3671 | | // that would defeat the entire purpose of SIMD types. Nonetheless, we put this |
3672 | | // behavior behind the `simd` Cargo feature, which requires consumers to opt |
3673 | | // into this stability hazard. |
3674 | | // |
3675 | | // [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html |
3676 | | // [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38 |
3677 | | #[cfg(feature = "simd")] |
3678 | | #[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))] |
3679 | | mod simd { |
3680 | | /// Defines a module which implements `TryFromBytes`, `FromZeroes`, |
3681 | | /// `FromBytes`, and `AsBytes` for a set of types from a module in |
3682 | | /// `core::arch`. |
3683 | | /// |
3684 | | /// `$arch` is the name of the module in `core::arch`, `$mod` is the name of
3685 | | /// the module defined here, and `$typ` is the list of items from that module
3686 | | /// to implement `TryFromBytes`, `FromZeroes`, `FromBytes`, and `AsBytes` for.
3687 | | #[allow(unused_macros)] // `allow(unused_macros)` is needed because some |
3688 | | // target/feature combinations don't emit any impls |
3689 | | // and thus don't use this macro. |
3690 | | macro_rules! simd_arch_mod { |
3691 | | (#[cfg $cfg:tt] $arch:ident, $mod:ident, $($typ:ident),*) => { |
3692 | | #[cfg $cfg] |
3693 | | #[cfg_attr(doc_cfg, doc(cfg $cfg))] |
3694 | | mod $mod { |
3695 | | use core::arch::$arch::{$($typ),*}; |
3696 | | |
3697 | | use crate::*; |
3698 | | impl_known_layout!($($typ),*); |
3699 | | safety_comment! { |
3700 | | /// SAFETY: |
3701 | | /// See comment on module definition for justification. |
3702 | | $( unsafe_impl!($typ: TryFromBytes, FromZeroes, FromBytes, AsBytes); )* |
3703 | | } |
3704 | | } |
3705 | | }; |
3706 | | } |
3707 | | |
3708 | | #[rustfmt::skip] |
3709 | | const _: () = { |
3710 | | simd_arch_mod!( |
3711 | | #[cfg(target_arch = "x86")] |
3712 | | x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i |
3713 | | ); |
3714 | | simd_arch_mod!( |
3715 | | #[cfg(all(feature = "simd-nightly", target_arch = "x86"))] |
3716 | | x86, x86_nightly, __m512bh, __m512, __m512d, __m512i |
3717 | | ); |
3718 | | simd_arch_mod!( |
3719 | | #[cfg(target_arch = "x86_64")] |
3720 | | x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i |
3721 | | ); |
3722 | | simd_arch_mod!( |
3723 | | #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))] |
3724 | | x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i |
3725 | | ); |
3726 | | simd_arch_mod!( |
3727 | | #[cfg(target_arch = "wasm32")] |
3728 | | wasm32, wasm32, v128 |
3729 | | ); |
3730 | | simd_arch_mod!( |
3731 | | #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] |
3732 | | powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long |
3733 | | ); |
3734 | | simd_arch_mod!( |
3735 | | #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] |
3736 | | powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long |
3737 | | ); |
3738 | | simd_arch_mod!( |
3739 | | #[cfg(target_arch = "aarch64")] |
3740 | | aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, |
3741 | | int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t, |
3742 | | int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, |
3743 | | poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, |
3744 | | poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, |
3745 | | uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, |
3746 | | uint64x1_t, uint64x2_t |
3747 | | ); |
3748 | | simd_arch_mod!( |
3749 | | #[cfg(all(feature = "simd-nightly", target_arch = "arm"))] |
3750 | | arm, arm, int8x4_t, uint8x4_t |
3751 | | ); |
3752 | | }; |
3753 | | } |
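  | | // For illustration, a minimal sketch of what the impls in the module above
  | | // enable (gated on the `simd` feature and, here, an x86_64 target):
  | | //
  | | //     #[cfg(all(feature = "simd", target_arch = "x86_64"))]
  | | //     fn zeroed_m128i() -> core::arch::x86_64::__m128i {
  | | //         use zerocopy::FromBytes;
  | | //         // `__m128i` is 16 bytes wide, so exactly 16 source bytes are needed.
  | | //         FromBytes::read_from(&[0u8; 16][..]).unwrap()
  | | //     }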
3754 | | |
3755 | | /// Safely transmutes a value of one type to a value of another type of the same |
3756 | | /// size. |
3757 | | /// |
3758 | | /// The expression `$e` must have a concrete type, `T`, which implements |
3759 | | /// `AsBytes`. The `transmute!` expression must also have a concrete type, `U` |
3760 | | /// (`U` is inferred from the calling context), and `U` must implement |
3761 | | /// `FromBytes`. |
3762 | | /// |
3763 | | /// Note that the `T` produced by the expression `$e` will *not* be dropped. |
3764 | | /// Semantically, its bits will be copied into a new value of type `U`, the |
3765 | | /// original `T` will be forgotten, and the value of type `U` will be returned. |
3766 | | /// |
3767 | | /// # Examples |
3768 | | /// |
3769 | | /// ``` |
3770 | | /// # use zerocopy::transmute; |
3771 | | /// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; |
3772 | | /// |
3773 | | /// let two_dimensional: [[u8; 4]; 2] = transmute!(one_dimensional); |
3774 | | /// |
3775 | | /// assert_eq!(two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]); |
3776 | | /// ``` |
3777 | | #[macro_export] |
3778 | | macro_rules! transmute { |
3779 | | ($e:expr) => {{ |
3780 | | // NOTE: This must be a macro (rather than a function with trait bounds) |
3781 | | // because there's no way, in a generic context, to enforce that two |
3782 | | // types have the same size. `core::mem::transmute` uses compiler magic |
3783 | | // to enforce this so long as the types are concrete. |
3784 | | |
3785 | | let e = $e; |
3786 | | if false { |
3787 | | // This branch, though never taken, ensures that the type of `e` is |
3788 | | // `AsBytes` and that the type of this macro invocation expression |
3789 | | // is `FromBytes`. |
3790 | | |
3791 | | struct AssertIsAsBytes<T: $crate::AsBytes>(T); |
3792 | | let _ = AssertIsAsBytes(e); |
3793 | | |
3794 | | struct AssertIsFromBytes<U: $crate::FromBytes>(U); |
3795 | | #[allow(unused, unreachable_code)] |
3796 | | let u = AssertIsFromBytes(loop {}); |
3797 | | u.0 |
3798 | | } else { |
3799 | | // SAFETY: `core::mem::transmute` ensures that the type of `e` and |
3800 | | // the type of this macro invocation expression have the same size. |
3801 | | // We know this transmute is safe thanks to the `AsBytes` and |
3802 | | // `FromBytes` bounds enforced by the `false` branch. |
3803 | | // |
3804 | | // We use this reexport of `core::mem::transmute` because we know it |
3805 | | // will always be available for crates which are using the 2015 |
3806 | | // edition of Rust. By contrast, if we were to use |
3807 | | // `std::mem::transmute`, this macro would not work for such crates |
3808 | | // in `no_std` contexts, and if we were to use |
3809 | | // `core::mem::transmute`, this macro would not work in `std` |
3810 | | // contexts in which `core` was not manually imported. This is not a |
3811 | | // problem for 2018 edition crates. |
3812 | | unsafe { |
3813 | | // Clippy: It's okay to transmute a type to itself. |
3814 | | #[allow(clippy::useless_transmute, clippy::missing_transmute_annotations)] |
3815 | | $crate::macro_util::core_reexport::mem::transmute(e) |
3816 | | } |
3817 | | } |
3818 | | }} |
3819 | | } |
3820 | | |
3821 | | /// Safely transmutes a mutable or immutable reference of one type to an |
3822 | | /// immutable reference of another type of the same size. |
3823 | | /// |
3824 | | /// The expression `$e` must have a concrete type, `&T` or `&mut T`, where `T: |
3825 | | /// Sized + AsBytes`. The `transmute_ref!` expression must also have a concrete |
3826 | | /// type, `&U` (`U` is inferred from the calling context), where `U: Sized + |
3827 | | /// FromBytes`. It must be the case that `align_of::<T>() >= align_of::<U>()`. |
3828 | | /// |
3829 | | /// The lifetime of the input type, `&T` or `&mut T`, must be the same as or |
3830 | | /// outlive the lifetime of the output type, `&U`. |
3831 | | /// |
3832 | | /// # Examples |
3833 | | /// |
3834 | | /// ``` |
3835 | | /// # use zerocopy::transmute_ref; |
3836 | | /// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; |
3837 | | /// |
3838 | | /// let two_dimensional: &[[u8; 4]; 2] = transmute_ref!(&one_dimensional); |
3839 | | /// |
3840 | | /// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]); |
3841 | | /// ``` |
3842 | | /// |
3843 | | /// # Alignment increase error message |
3844 | | /// |
3845 | | /// Because of limitations on macros, the error message generated when |
3846 | | /// `transmute_ref!` is used to transmute from a type of lower alignment to a |
3847 | | /// type of higher alignment is somewhat confusing. For example, the following |
3848 | | /// code: |
3849 | | /// |
3850 | | /// ```compile_fail |
3851 | | /// const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]); |
3852 | | /// ``` |
3853 | | /// |
3854 | | /// ...generates the following error: |
3855 | | /// |
3856 | | /// ```text |
3857 | | /// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types |
3858 | | /// --> src/lib.rs:1524:34 |
3859 | | /// | |
3860 | | /// 5 | const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]); |
3861 | | /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
3862 | | /// | |
3863 | | /// = note: source type: `AlignOf<[u8; 2]>` (8 bits) |
3864 | | /// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits) |
3865 | | /// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) |
3866 | | /// ``` |
3867 | | /// |
3868 | | /// This is saying that `max(align_of::<T>(), align_of::<U>()) != |
3869 | | /// align_of::<T>()`, which is equivalent to `align_of::<T>() < |
3870 | | /// align_of::<U>()`. |
3871 | | #[macro_export] |
3872 | | macro_rules! transmute_ref { |
3873 | | ($e:expr) => {{ |
3874 | | // NOTE: This must be a macro (rather than a function with trait bounds) |
3875 | | // because there's no way, in a generic context, to enforce that two |
3876 | | // types have the same size or alignment. |
3877 | | |
3878 | | // Ensure that the source type is a reference or a mutable reference |
3879 | | // (note that mutable references are implicitly reborrowed here). |
3880 | | let e: &_ = $e; |
3881 | | |
3882 | | #[allow(unused, clippy::diverging_sub_expression)] |
3883 | | if false { |
3884 | | // This branch, though never taken, ensures that the type of `e` is |
3885 | | // `&T` where `T: 't + Sized + AsBytes`, that the type of this macro |
3886 | | // expression is `&U` where `U: 'u + Sized + FromBytes`, and that |
3887 | | // `'t` outlives `'u`. |
3888 | | |
3889 | | struct AssertIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T); |
3890 | | let _ = AssertIsAsBytes(e); |
3891 | | |
3892 | | struct AssertIsFromBytes<'a, U: ::core::marker::Sized + $crate::FromBytes>(&'a U); |
3893 | | #[allow(unused, unreachable_code)] |
3894 | | let u = AssertIsFromBytes(loop {}); |
3895 | | u.0 |
3896 | | } else if false { |
3897 | | // This branch, though never taken, ensures that `size_of::<T>() == |
3898 | | // size_of::<U>()` and that `align_of::<T>() >=
3899 | | // align_of::<U>()`. |
3900 | | |
3901 | | // `t` is inferred to have type `T` because `&t` is assigned to `e`,
3902 | | // which has type `&T`.
3903 | | let mut t = unreachable!(); |
3904 | | e = &t; |
3905 | | |
3906 | | // `u` is inferred to have type `U` because it's used as `&u` as the |
3907 | | // value returned from this branch. |
3908 | | let u; |
3909 | | |
3910 | | $crate::assert_size_eq!(t, u); |
3911 | | $crate::assert_align_gt_eq!(t, u); |
3912 | | |
3913 | | &u |
3914 | | } else { |
3915 | | // SAFETY: For source type `Src` and destination type `Dst`: |
3916 | | // - We know that `Src: AsBytes` and `Dst: FromBytes` thanks to the |
3917 | | // uses of `AssertIsAsBytes` and `AssertIsFromBytes` above. |
3918 | | // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to |
3919 | | // the use of `assert_size_eq!` above. |
3920 | | // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to |
3921 | | // the use of `assert_align_gt_eq!` above. |
3922 | | unsafe { $crate::macro_util::transmute_ref(e) } |
3923 | | } |
3924 | | }} |
3925 | | } |
3926 | | |
3927 | | /// Safely transmutes a mutable reference of one type to a mutable reference of
3928 | | /// another type of the same size. |
3929 | | /// |
3930 | | /// The expression `$e` must have a concrete type, `&mut T`, where `T: Sized + |
3931 | | /// AsBytes`. The `transmute_mut!` expression must also have a concrete type, |
3932 | | /// `&mut U` (`U` is inferred from the calling context), where `U: Sized + |
3933 | | /// FromBytes`. It must be the case that `align_of::<T>() >= align_of::<U>()`. |
3934 | | /// |
3935 | | /// The lifetime of the input type, `&mut T`, must be the same as or outlive the |
3936 | | /// lifetime of the output type, `&mut U`. |
3937 | | /// |
3938 | | /// # Examples |
3939 | | /// |
3940 | | /// ``` |
3941 | | /// # use zerocopy::transmute_mut; |
3942 | | /// let mut one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; |
3943 | | /// |
3944 | | /// let two_dimensional: &mut [[u8; 4]; 2] = transmute_mut!(&mut one_dimensional); |
3945 | | /// |
3946 | | /// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]); |
3947 | | /// |
3948 | | /// two_dimensional.reverse(); |
3949 | | /// |
3950 | | /// assert_eq!(one_dimensional, [4, 5, 6, 7, 0, 1, 2, 3]); |
3951 | | /// ``` |
3952 | | /// |
3953 | | /// # Alignment increase error message |
3954 | | /// |
3955 | | /// Because of limitations on macros, the error message generated when |
3956 | | /// `transmute_mut!` is used to transmute from a type of lower alignment to a |
3957 | | /// type of higher alignment is somewhat confusing. For example, the following |
3958 | | /// code: |
3959 | | /// |
3960 | | /// ```compile_fail |
3961 | | /// const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]); |
3962 | | /// ``` |
3963 | | /// |
3964 | | /// ...generates the following error: |
3965 | | /// |
3966 | | /// ```text |
3967 | | /// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types |
3968 | | /// --> src/lib.rs:1524:34 |
3969 | | /// | |
3970 | | /// 5 | const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]); |
3971 | | /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
3972 | | /// | |
3973 | | /// = note: source type: `AlignOf<[u8; 2]>` (8 bits) |
3974 | | /// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits) |
3975 | | /// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) |
3976 | | /// ``` |
3977 | | /// |
3978 | | /// This is saying that `max(align_of::<T>(), align_of::<U>()) != |
3979 | | /// align_of::<T>()`, which is equivalent to `align_of::<T>() < |
3980 | | /// align_of::<U>()`. |
3981 | | #[macro_export] |
3982 | | macro_rules! transmute_mut { |
3983 | | ($e:expr) => {{ |
3984 | | // NOTE: This must be a macro (rather than a function with trait bounds) |
3985 | | // because there's no way, in a generic context, to enforce that two |
3986 | | // types have the same size or alignment. |
3987 | | |
3988 | | // Ensure that the source type is a mutable reference. |
3989 | | let e: &mut _ = $e; |
3990 | | |
3991 | | #[allow(unused, clippy::diverging_sub_expression)] |
3992 | | if false { |
3993 | | // This branch, though never taken, ensures that the type of `e` is |
3994 | | // `&mut T` where `T: 't + Sized + FromBytes + AsBytes`, and that the
3995 | | // type of this macro expression is `&mut U` where `U: 'u + Sized +
3996 | | // FromBytes + AsBytes`.
3997 | | |
3998 | | // We use immutable references here rather than mutable so that, if |
3999 | | // this macro is used in a const context (in which, as of this |
4000 | | // writing, mutable references are banned), the error message |
4001 | | // appears to originate in the user's code rather than in the |
4002 | | // internals of this macro. |
4003 | | struct AssertSrcIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T); |
4004 | | struct AssertSrcIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T); |
4005 | | struct AssertDstIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T); |
4006 | | struct AssertDstIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T); |
4007 | | |
4008 | | if true { |
4009 | | let _ = AssertSrcIsFromBytes(&*e); |
4010 | | } else { |
4011 | | let _ = AssertSrcIsAsBytes(&*e); |
4012 | | } |
4013 | | |
4014 | | if true { |
4015 | | #[allow(unused, unreachable_code)] |
4016 | | let u = AssertDstIsFromBytes(loop {}); |
4017 | | &mut *u.0 |
4018 | | } else { |
4019 | | #[allow(unused, unreachable_code)] |
4020 | | let u = AssertDstIsAsBytes(loop {}); |
4021 | | &mut *u.0 |
4022 | | } |
4023 | | } else if false { |
4024 | | // This branch, though never taken, ensures that `size_of::<T>() == |
4025 | | // size_of::<U>()` and that `align_of::<T>() >=
4026 | | // align_of::<U>()`. |
4027 | | |
4028 | | // `t` is inferred to have type `T` because `&mut t` is assigned to
4029 | | // `e`, which has type `&mut T`.
4030 | | let mut t = unreachable!(); |
4031 | | e = &mut t; |
4032 | | |
4033 | | // `u` is inferred to have type `U` because it's used as `&mut u` as |
4034 | | // the value returned from this branch. |
4035 | | let u; |
4036 | | |
4037 | | $crate::assert_size_eq!(t, u); |
4038 | | $crate::assert_align_gt_eq!(t, u); |
4039 | | |
4040 | | &mut u |
4041 | | } else { |
4042 | | // SAFETY: For source type `Src` and destination type `Dst`: |
4043 | | // - We know that `Src: FromBytes + AsBytes` and `Dst: FromBytes + |
4044 | | // AsBytes` thanks to the uses of `AssertSrcIsFromBytes`, |
4045 | | // `AssertSrcIsAsBytes`, `AssertDstIsFromBytes`, and |
4046 | | // `AssertDstIsAsBytes` above. |
4047 | | // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to |
4048 | | // the use of `assert_size_eq!` above. |
4049 | | // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to |
4050 | | // the use of `assert_align_gt_eq!` above. |
4051 | | unsafe { $crate::macro_util::transmute_mut(e) } |
4052 | | } |
4053 | | }} |
4054 | | } |
4055 | | |
4056 | | /// Includes a file and safely transmutes it to a value of an arbitrary type. |
4057 | | /// |
4058 | | /// The file will be included as a byte array, `[u8; N]`, which will be |
4059 | | /// transmuted to another type, `T`. `T` is inferred from the calling context, |
4060 | | /// and must implement [`FromBytes`]. |
4061 | | /// |
4062 | | /// The file is located relative to the current file (similarly to how modules |
4063 | | /// are found). The provided path is interpreted in a platform-specific way at |
4064 | | /// compile time. So, for instance, an invocation with a Windows path containing |
4065 | | /// backslashes `\` would not compile correctly on Unix. |
4066 | | /// |
4067 | | /// `include_value!` is ignorant of byte order. For byte order-aware types, see |
4068 | | /// the [`byteorder`] module. |
4069 | | /// |
4070 | | /// # Examples |
4071 | | /// |
4072 | | /// Assume there are two files in the same directory with the following |
4073 | | /// contents: |
4074 | | /// |
4075 | | /// File `data` (no trailing newline): |
4076 | | /// |
4077 | | /// ```text |
4078 | | /// abcd |
4079 | | /// ``` |
4080 | | /// |
4081 | | /// File `main.rs`: |
4082 | | /// |
4083 | | /// ```rust |
4084 | | /// use zerocopy::include_value; |
4085 | | /// # macro_rules! include_value { |
4086 | | /// # ($file:expr) => { zerocopy::include_value!(concat!("../testdata/include_value/", $file)) }; |
4087 | | /// # } |
4088 | | /// |
4089 | | /// fn main() { |
4090 | | /// let as_u32: u32 = include_value!("data"); |
4091 | | /// assert_eq!(as_u32, u32::from_ne_bytes([b'a', b'b', b'c', b'd'])); |
4092 | | /// let as_i32: i32 = include_value!("data"); |
4093 | | /// assert_eq!(as_i32, i32::from_ne_bytes([b'a', b'b', b'c', b'd'])); |
4094 | | /// } |
4095 | | /// ``` |
4096 | | #[doc(alias("include_bytes", "include_data", "include_type"))] |
4097 | | #[macro_export] |
4098 | | macro_rules! include_value { |
4099 | | ($file:expr $(,)?) => { |
4100 | | $crate::transmute!(*::core::include_bytes!($file)) |
4101 | | }; |
4102 | | } |
4103 | | |
4104 | | /// A typed reference derived from a byte slice. |
4105 | | /// |
4106 | | /// A `Ref<B, T>` is a reference to a `T` which is stored in a byte slice, `B`. |
4107 | | /// Unlike a native reference (`&T` or `&mut T`), `Ref<B, T>` has the same |
4108 | | /// mutability as the byte slice it was constructed from (`B`). |
4109 | | /// |
4110 | | /// # Examples |
4111 | | /// |
4112 | | /// `Ref` can be used to treat a sequence of bytes as a structured type, and to |
4113 | | /// read and write the fields of that type as if the byte slice reference were |
4114 | | /// simply a reference to that type. |
4115 | | /// |
4116 | | /// ```rust |
4117 | | /// # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them |
4118 | | /// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, FromZeroes, Ref, Unaligned}; |
4119 | | /// |
4120 | | /// #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] |
4121 | | /// #[repr(C)] |
4122 | | /// struct UdpHeader { |
4123 | | /// src_port: [u8; 2], |
4124 | | /// dst_port: [u8; 2], |
4125 | | /// length: [u8; 2], |
4126 | | /// checksum: [u8; 2], |
4127 | | /// } |
4128 | | /// |
4129 | | /// struct UdpPacket<B> { |
4130 | | /// header: Ref<B, UdpHeader>, |
4131 | | /// body: B, |
4132 | | /// } |
4133 | | /// |
4134 | | /// impl<B: ByteSlice> UdpPacket<B> { |
4135 | | /// pub fn parse(bytes: B) -> Option<UdpPacket<B>> { |
4136 | | /// let (header, body) = Ref::new_unaligned_from_prefix(bytes)?; |
4137 | | /// Some(UdpPacket { header, body }) |
4138 | | /// } |
4139 | | /// |
4140 | | /// pub fn get_src_port(&self) -> [u8; 2] { |
4141 | | /// self.header.src_port |
4142 | | /// } |
4143 | | /// } |
4144 | | /// |
4145 | | /// impl<B: ByteSliceMut> UdpPacket<B> { |
4146 | | /// pub fn set_src_port(&mut self, src_port: [u8; 2]) { |
4147 | | /// self.header.src_port = src_port; |
4148 | | /// } |
4149 | | /// } |
4150 | | /// # } |
4151 | | /// ``` |
4152 | | pub struct Ref<B, T: ?Sized>(B, PhantomData<T>); |
4153 | | |
4154 | | /// Deprecated: prefer [`Ref`] instead. |
4155 | | #[deprecated(since = "0.7.0", note = "LayoutVerified has been renamed to Ref")] |
4156 | | #[doc(hidden)] |
4157 | | pub type LayoutVerified<B, T> = Ref<B, T>; |
4158 | | |
4159 | | impl<B, T> Ref<B, T> |
4160 | | where |
4161 | | B: ByteSlice, |
4162 | | { |
4163 | | /// Constructs a new `Ref`. |
4164 | | /// |
4165 | | /// `new` verifies that `bytes.len() == size_of::<T>()` and that `bytes` is |
4166 | | /// aligned to `align_of::<T>()`, and constructs a new `Ref`. If either of |
4167 | | /// these checks fails, it returns `None`.
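  | | ///
  | | /// # Examples
  | | ///
  | | /// A minimal illustrative sketch:
  | | ///
  | | /// ```
  | | /// use zerocopy::Ref;
  | | ///
  | | /// let bytes = [0u8, 1, 2, 3];
  | | /// let r: Ref<&[u8], [u8; 4]> = Ref::new(&bytes[..]).unwrap();
  | | /// assert_eq!(*r, [0, 1, 2, 3]);
  | | /// ```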
4168 | | #[inline] |
4169 | 0 | pub fn new(bytes: B) -> Option<Ref<B, T>> { |
4170 | 0 | if bytes.len() != mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) { |
4171 | 0 | return None; |
4172 | 0 | } |
4173 | 0 | Some(Ref(bytes, PhantomData)) |
4174 | 0 | }
  ------------------
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<td_shim_interface::acpi::Ccel>>>::new
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<td_shim::e820::E820Entry>>>::new
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::CcEventHeader>>>::new
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgEfiSpecIdevent>>>::new
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgPcrEventHeader>>>::new
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<migtd::migration::data::ServiceQueryResponse>>>::new
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::CcEventHeader>>>::new
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgEfiSpecIdevent>>>::new
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgPcrEventHeader>>>::new
  ------------------
4175 | | |
4176 | | /// Constructs a new `Ref` from the prefix of a byte slice. |
4177 | | /// |
4178 | | /// `new_from_prefix` verifies that `bytes.len() >= size_of::<T>()` and that |
4179 | | /// `bytes` is aligned to `align_of::<T>()`. It consumes the first |
4180 | | /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns |
4181 | | /// the remaining bytes to the caller. If either the length or alignment |
4182 | | /// check fails, it returns `None`.
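  | | ///
  | | /// # Examples
  | | ///
  | | /// A minimal illustrative sketch:
  | | ///
  | | /// ```
  | | /// use zerocopy::Ref;
  | | ///
  | | /// let bytes = [0u8, 1, 2, 3, 4, 5];
  | | /// let (r, rest) = Ref::<_, [u8; 4]>::new_from_prefix(&bytes[..]).unwrap();
  | | /// assert_eq!(*r, [0, 1, 2, 3]);
  | | /// assert_eq!(rest, &[4, 5]);
  | | /// ```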
4183 | | #[inline] |
4184 | | pub fn new_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> { |
4185 | | if bytes.len() < mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) { |
4186 | | return None; |
4187 | | } |
4188 | | let (bytes, suffix) = bytes.split_at(mem::size_of::<T>()); |
4189 | | Some((Ref(bytes, PhantomData), suffix)) |
4190 | | } |
4191 | | |
4192 | | /// Constructs a new `Ref` from the suffix of a byte slice. |
4193 | | /// |
4194 | | /// `new_from_suffix` verifies that `bytes.len() >= size_of::<T>()` and that |
4195 | | /// the last `size_of::<T>()` bytes of `bytes` are aligned to |
4196 | | /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from |
4197 | | /// `bytes` to construct a `Ref`, and returns the preceding bytes to the |
4198 | | /// caller. If either the length or alignment check fails, it returns
4199 | | /// `None`. |
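  | | ///
  | | /// # Examples
  | | ///
  | | /// A minimal illustrative sketch:
  | | ///
  | | /// ```
  | | /// use zerocopy::Ref;
  | | ///
  | | /// let bytes = [0u8, 1, 2, 3, 4, 5];
  | | /// let (prefix, r) = Ref::<_, [u8; 4]>::new_from_suffix(&bytes[..]).unwrap();
  | | /// assert_eq!(prefix, &[0, 1]);
  | | /// assert_eq!(*r, [2, 3, 4, 5]);
  | | /// ```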
4200 | | #[inline] |
4201 | | pub fn new_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> { |
4202 | | let bytes_len = bytes.len(); |
4203 | | let split_at = bytes_len.checked_sub(mem::size_of::<T>())?; |
4204 | | let (prefix, bytes) = bytes.split_at(split_at); |
4205 | | if !util::aligned_to::<_, T>(bytes.deref()) { |
4206 | | return None; |
4207 | | } |
4208 | | Some((prefix, Ref(bytes, PhantomData))) |
4209 | | } |
4210 | | } |
4211 | | |
4212 | | impl<B, T> Ref<B, [T]> |
4213 | | where |
4214 | | B: ByteSlice, |
4215 | | { |
4216 | | /// Constructs a new `Ref` of a slice type. |
4217 | | /// |
4218 | | /// `new_slice` verifies that `bytes.len()` is a multiple of |
4219 | | /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and |
4220 | | /// constructs a new `Ref`. If either of these checks fails, it returns
4221 | | /// `None`. |
4222 | | /// |
4223 | | /// # Panics |
4224 | | /// |
4225 | | /// `new_slice` panics if `T` is a zero-sized type. |
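  | | ///
  | | /// # Examples
  | | ///
  | | /// A minimal illustrative sketch; deriving the byte slice from a `u16`
  | | /// array guarantees the alignment check passes:
  | | ///
  | | /// ```
  | | /// use zerocopy::{AsBytes, Ref};
  | | ///
  | | /// let data = [1u16, 2, 3];
  | | /// let bytes = data.as_bytes(); // 6 bytes, aligned to `u16`
  | | /// let slice: Ref<&[u8], [u16]> = Ref::new_slice(bytes).unwrap();
  | | /// assert_eq!(&*slice, &[1, 2, 3]);
  | | /// ```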
4226 | | #[inline] |
4227 | | pub fn new_slice(bytes: B) -> Option<Ref<B, [T]>> { |
4228 | | let remainder = bytes |
4229 | | .len() |
4230 | | .checked_rem(mem::size_of::<T>()) |
4231 | | .expect("Ref::new_slice called on a zero-sized type"); |
4232 | | if remainder != 0 || !util::aligned_to::<_, T>(bytes.deref()) { |
4233 | | return None; |
4234 | | } |
4235 | | Some(Ref(bytes, PhantomData)) |
4236 | | } |
4237 | | |
4238 | | /// Constructs a new `Ref` of a slice type from the prefix of a byte slice. |
4239 | | /// |
4240 | | /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::<T>() * |
4241 | | /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the |
4242 | | /// first `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`, |
4243 | | /// and returns the remaining bytes to the caller. It also ensures that |
4244 | | /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
4245 | | /// length, alignment, or overflow checks fail, it returns `None`. |
4246 | | /// |
4247 | | /// # Panics |
4248 | | /// |
4249 | | /// `new_slice_from_prefix` panics if `T` is a zero-sized type. |
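  | | ///
  | | /// # Examples
  | | ///
  | | /// A minimal illustrative sketch:
  | | ///
  | | /// ```
  | | /// use zerocopy::{AsBytes, Ref};
  | | ///
  | | /// let data = [1u16, 2, 3];
  | | /// let bytes = data.as_bytes(); // 6 bytes, aligned to `u16`
  | | /// let (prefix, rest) = Ref::<_, [u16]>::new_slice_from_prefix(bytes, 2).unwrap();
  | | /// assert_eq!(&*prefix, &[1, 2]);
  | | /// assert_eq!(rest.len(), 2);
  | | /// ```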
4250 | | #[inline] |
4251 | | pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> { |
4252 | | let expected_len = match mem::size_of::<T>().checked_mul(count) { |
4253 | | Some(len) => len, |
4254 | | None => return None, |
4255 | | }; |
4256 | | if bytes.len() < expected_len { |
4257 | | return None; |
4258 | | } |
4259 | | let (prefix, bytes) = bytes.split_at(expected_len); |
4260 | | Self::new_slice(prefix).map(move |l| (l, bytes)) |
4261 | | } |
4262 | | |
4263 | | /// Constructs a new `Ref` of a slice type from the suffix of a byte slice. |
4264 | | /// |
4265 | | /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::<T>() * |
4266 | | /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the |
4267 | | /// last `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`, |
4268 | | /// and returns the preceding bytes to the caller. It also ensures that |
4269 | | /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
4270 | | /// length, alignment, or overflow checks fail, it returns `None`. |
4271 | | /// |
4272 | | /// # Panics |
4273 | | /// |
4274 | | /// `new_slice_from_suffix` panics if `T` is a zero-sized type. |
4275 | | #[inline] |
4276 | | pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> { |
4277 | | let expected_len = match mem::size_of::<T>().checked_mul(count) { |
4278 | | Some(len) => len, |
4279 | | None => return None, |
4280 | | }; |
4281 | | let split_at = bytes.len().checked_sub(expected_len)?; |
4282 | | let (bytes, suffix) = bytes.split_at(split_at); |
4283 | | Self::new_slice(suffix).map(move |l| (bytes, l)) |
4284 | | } |
4285 | | } |
4286 | | |
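  | | // These private helpers zero the bytes underlying a freshly-constructed
  | | // `Ref` (or the `Ref` element of a (`Ref`, remainder) pair); they back the
  | | // `*_zeroed` constructors below.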
4287 | | fn map_zeroed<B: ByteSliceMut, T: ?Sized>(opt: Option<Ref<B, T>>) -> Option<Ref<B, T>> { |
4288 | | match opt { |
4289 | | Some(mut r) => { |
4290 | | r.0.fill(0); |
4291 | | Some(r) |
4292 | | } |
4293 | | None => None, |
4294 | | } |
4295 | | } |
4296 | | |
4297 | | fn map_prefix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>( |
4298 | | opt: Option<(Ref<B, T>, B)>, |
4299 | | ) -> Option<(Ref<B, T>, B)> { |
4300 | | match opt { |
4301 | | Some((mut r, rest)) => { |
4302 | | r.0.fill(0); |
4303 | | Some((r, rest)) |
4304 | | } |
4305 | | None => None, |
4306 | | } |
4307 | | } |
4308 | | |
4309 | | fn map_suffix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>( |
4310 | | opt: Option<(B, Ref<B, T>)>, |
4311 | | ) -> Option<(B, Ref<B, T>)> { |
4312 | | map_prefix_tuple_zeroed(opt.map(|(a, b)| (b, a))).map(|(a, b)| (b, a)) |
4313 | | } |
4314 | | |
4315 | | impl<B, T> Ref<B, T> |
4316 | | where |
4317 | | B: ByteSliceMut, |
4318 | | { |
4319 | | /// Constructs a new `Ref` after zeroing the bytes. |
4320 | | /// |
4321 | | /// `new_zeroed` verifies that `bytes.len() == size_of::<T>()` and that |
4322 | | /// `bytes` is aligned to `align_of::<T>()`, and constructs a new `Ref`. If |
4323 | | /// either of these checks fails, it returns `None`.
4324 | | /// |
4325 | | /// If the checks succeed, then `bytes` will be initialized to zero. This |
4326 | | /// can be useful when re-using buffers to ensure that sensitive data |
4327 | | /// previously stored in the buffer is not leaked. |
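  | | ///
  | | /// # Examples
  | | ///
  | | /// A minimal illustrative sketch:
  | | ///
  | | /// ```
  | | /// use zerocopy::Ref;
  | | ///
  | | /// let mut bytes = [0xffu8; 4];
  | | /// let r: Ref<&mut [u8], [u8; 4]> = Ref::new_zeroed(&mut bytes[..]).unwrap();
  | | /// drop(r);
  | | /// assert_eq!(bytes, [0, 0, 0, 0]);
  | | /// ```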
4328 | | #[inline(always)] |
4329 | | pub fn new_zeroed(bytes: B) -> Option<Ref<B, T>> { |
4330 | | map_zeroed(Self::new(bytes)) |
4331 | | } |
4332 | | |
4333 | | /// Constructs a new `Ref` from the prefix of a byte slice, zeroing the |
4334 | | /// prefix. |
4335 | | /// |
4336 | | /// `new_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>()` |
4337 | | /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the first |
4338 | | /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns |
4339 | | /// the remaining bytes to the caller. If either the length or alignment |
4340 | | /// check fails, it returns `None`.
4341 | | /// |
4342 | | /// If the checks succeed, then the prefix which is consumed will be |
4343 | | /// initialized to zero. This can be useful when re-using buffers to ensure |
4344 | | /// that sensitive data previously stored in the buffer is not leaked. |
4345 | | #[inline(always)] |
4346 | | pub fn new_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> { |
4347 | | map_prefix_tuple_zeroed(Self::new_from_prefix(bytes)) |
4348 | | } |
4349 | | |
4350 | | /// Constructs a new `Ref` from the suffix of a byte slice, zeroing the |
4351 | | /// suffix. |
4352 | | /// |
4353 | | /// `new_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>()` |
4354 | | /// and that the last `size_of::<T>()` bytes of `bytes` are aligned to |
4355 | | /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from |
4356 | | /// `bytes` to construct a `Ref`, and returns the preceding bytes to the |
4357 | | /// caller. If either the length or alignment check fails, it returns
4358 | | /// `None`. |
4359 | | /// |
4360 | | /// If the checks succeed, then the suffix which is consumed will be |
4361 | | /// initialized to zero. This can be useful when re-using buffers to ensure |
4362 | | /// that sensitive data previously stored in the buffer is not leaked. |
4363 | | #[inline(always)] |
4364 | | pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> { |
4365 | | map_suffix_tuple_zeroed(Self::new_from_suffix(bytes)) |
4366 | | } |
4367 | | } |
4368 | | |
4369 | | impl<B, T> Ref<B, [T]> |
4370 | | where |
4371 | | B: ByteSliceMut, |
4372 | | { |
4373 | | /// Constructs a new `Ref` of a slice type after zeroing the bytes. |
4374 | | /// |
4375 | | /// `new_slice_zeroed` verifies that `bytes.len()` is a multiple of |
4376 | | /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and |
4377 | | /// constructs a new `Ref`. If either of these checks fails, it returns
4378 | | /// `None`. |
4379 | | /// |
4380 | | /// If the checks succeed, then `bytes` will be initialized to zero. This |
4381 | | /// can be useful when re-using buffers to ensure that sensitive data |
4382 | | /// previously stored in the buffer is not leaked. |
4383 | | /// |
4384 | | /// # Panics |
4385 | | /// |
4386 | | /// `new_slice_zeroed` panics if `T` is a zero-sized type.
4387 | | #[inline(always)] |
4388 | | pub fn new_slice_zeroed(bytes: B) -> Option<Ref<B, [T]>> { |
4389 | | map_zeroed(Self::new_slice(bytes)) |
4390 | | } |
4391 | | |
4392 | | /// Constructs a new `Ref` of a slice type from the prefix of a byte slice, |
4393 | | /// after zeroing the bytes. |
4394 | | /// |
4395 | | /// `new_slice_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
4396 | | /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the |
4397 | | /// first `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`, |
4398 | | /// and returns the remaining bytes to the caller. It also ensures that |
4399 | | /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
4400 | | /// length, alignment, or overflow checks fail, it returns `None`. |
4401 | | /// |
4402 | | /// If the checks succeed, then the prefix which is consumed will be
4403 | | /// initialized to zero. This can be useful when re-using buffers to ensure |
4404 | | /// that sensitive data previously stored in the buffer is not leaked. |
4405 | | /// |
4406 | | /// # Panics |
4407 | | /// |
4408 | | /// `new_slice_from_prefix_zeroed` panics if `T` is a zero-sized type. |
4409 | | #[inline(always)] |
4410 | | pub fn new_slice_from_prefix_zeroed(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> { |
4411 | | map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count)) |
4412 | | } |
4413 | | |
4414 | | /// Constructs a new `Ref` of a slice type from the suffix of a byte slice,
4415 | | /// after zeroing the bytes.
4416 | | /// |
4417 | | /// `new_slice_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
4418 | | /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the |
4419 | | /// last `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`, |
4420 | | /// and returns the preceding bytes to the caller. It also ensures that |
4421 | | /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
4422 | | /// length, alignment, or overflow checks fail, it returns `None`. |
4423 | | /// |
4424 | | /// If the checks succeed, then the consumed suffix will be initialized to |
4425 | | /// zero. This can be useful when re-using buffers to ensure that sensitive |
4426 | | /// data previously stored in the buffer is not leaked. |
4427 | | /// |
4428 | | /// # Panics |
4429 | | /// |
4430 | | /// `new_slice_from_suffix_zeroed` panics if `T` is a zero-sized type. |
4431 | | #[inline(always)] |
4432 | | pub fn new_slice_from_suffix_zeroed(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> { |
4433 | | map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count)) |
4434 | | } |
4435 | | } |
4436 | | |
4437 | | impl<B, T> Ref<B, T> |
4438 | | where |
4439 | | B: ByteSlice, |
4440 | | T: Unaligned, |
4441 | | { |
4442 | | /// Constructs a new `Ref` for a type with no alignment requirement. |
4443 | | /// |
4444 | | /// `new_unaligned` verifies that `bytes.len() == size_of::<T>()` and |
4445 | | /// constructs a new `Ref`. If the check fails, it returns `None`. |
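  | | ///
  | | /// # Examples
  | | ///
  | | /// A minimal illustrative sketch; `[u8; 2]` is `Unaligned`, so only the
  | | /// length is checked:
  | | ///
  | | /// ```
  | | /// use zerocopy::Ref;
  | | ///
  | | /// let bytes = [1u8, 2];
  | | /// let r: Ref<&[u8], [u8; 2]> = Ref::new_unaligned(&bytes[..]).unwrap();
  | | /// assert_eq!(*r, [1, 2]);
  | | /// ```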
4446 | | #[inline(always)] |
4447 | 0 | pub fn new_unaligned(bytes: B) -> Option<Ref<B, T>> { |
4448 | 0 | Ref::new(bytes) |
4449 | 0 | }
  ------------------
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<td_shim_interface::acpi::Ccel>>>::new_unaligned
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<td_shim::e820::E820Entry>>>::new_unaligned
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::CcEventHeader>>>::new_unaligned
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgEfiSpecIdevent>>>::new_unaligned
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgPcrEventHeader>>>::new_unaligned
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<migtd::migration::data::ServiceQueryResponse>>>::new_unaligned
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::CcEventHeader>>>::new_unaligned
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgEfiSpecIdevent>>>::new_unaligned
  | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgPcrEventHeader>>>::new_unaligned
  ------------------
4450 | | |
4451 | | /// Constructs a new `Ref` from the prefix of a byte slice for a type with |
4452 | | /// no alignment requirement. |
4453 | | /// |
4454 | | /// `new_unaligned_from_prefix` verifies that `bytes.len() >= |
4455 | | /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from |
4456 | | /// `bytes` to construct a `Ref`, and returns the remaining bytes to the |
4457 | | /// caller. If the length check fails, it returns `None`. |
4458 | | #[inline(always)] |
4459 | | pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> { |
4460 | | Ref::new_from_prefix(bytes) |
4461 | | } |
4462 | | |
4463 | | /// Constructs a new `Ref` from the suffix of a byte slice for a type with |
4464 | | /// no alignment requirement. |
4465 | | /// |
4466 | | /// `new_unaligned_from_suffix` verifies that `bytes.len() >= |
4467 | | /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from |
4468 | | /// `bytes` to construct a `Ref`, and returns the preceding bytes to the |
4469 | | /// caller. If the length check fails, it returns `None`. |
4470 | | #[inline(always)] |
4471 | | pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> { |
4472 | | Ref::new_from_suffix(bytes) |
4473 | | } |
4474 | | } |
4475 | | |
4476 | | impl<B, T> Ref<B, [T]> |
4477 | | where |
4478 | | B: ByteSlice, |
4479 | | T: Unaligned, |
4480 | | { |
4481 | | /// Constructs a new `Ref` of a slice type with no alignment requirement. |
4482 | | /// |
4483 | | /// `new_slice_unaligned` verifies that `bytes.len()` is a multiple of |
4484 | | /// `size_of::<T>()` and constructs a new `Ref`. If the check fails, it |
4485 | | /// returns `None`. |
4486 | | /// |
4487 | | /// # Panics |
4488 | | /// |
4489 | | /// `new_slice_unaligned` panics if `T` is a zero-sized type.
4490 | | #[inline(always)] |
4491 | | pub fn new_slice_unaligned(bytes: B) -> Option<Ref<B, [T]>> { |
4492 | | Ref::new_slice(bytes) |
4493 | | } |
4494 | | |
4495 | | /// Constructs a new `Ref` of a slice type with no alignment requirement |
4496 | | /// from the prefix of a byte slice. |
4497 | | /// |
4498 | | /// `new_slice_unaligned_from_prefix` verifies that `bytes.len() >= size_of::<T>() *
4499 | | /// count`. It consumes the first `size_of::<T>() * count` bytes from |
4500 | | /// `bytes` to construct a `Ref`, and returns the remaining bytes to the |
4501 | | /// caller. It also ensures that `size_of::<T>() * count` does not overflow a
4502 | | /// `usize`. If either the length or overflow check fails, it returns
4503 | | /// `None`. |
4504 | | /// |
4505 | | /// # Panics |
4506 | | /// |
4507 | | /// `new_slice_unaligned_from_prefix` panics if `T` is a zero-sized type. |
4508 | | #[inline(always)] |
4509 | | pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> { |
4510 | | Ref::new_slice_from_prefix(bytes, count) |
4511 | | } |
4512 | | |
4513 | | /// Constructs a new `Ref` of a slice type with no alignment requirement |
4514 | | /// from the suffix of a byte slice. |
4515 | | /// |
4516 | | /// `new_slice_unaligned_from_suffix` verifies that `bytes.len() >= size_of::<T>() *
4517 | | /// count`. It consumes the last `size_of::<T>() * count` bytes from `bytes`
4518 | | /// to construct a `Ref`, and returns the preceding bytes to the caller. It
4519 | | /// also ensures that `size_of::<T>() * count` does not overflow a `usize`.
4520 | | /// If either the length or overflow check fails, it returns `None`.
4521 | | /// |
4522 | | /// # Panics |
4523 | | /// |
4524 | | /// `new_slice_unaligned_from_suffix` panics if `T` is a zero-sized type. |
4525 | | #[inline(always)] |
4526 | | pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> { |
4527 | | Ref::new_slice_from_suffix(bytes, count) |
4528 | | } |
4529 | | } |
4530 | | |
4531 | | impl<B, T> Ref<B, T> |
4532 | | where |
4533 | | B: ByteSliceMut, |
4534 | | T: Unaligned, |
4535 | | { |
4536 | | /// Constructs a new `Ref` for a type with no alignment requirement, zeroing |
4537 | | /// the bytes. |
4538 | | /// |
4539 | | /// `new_unaligned_zeroed` verifies that `bytes.len() == size_of::<T>()` and |
4540 | | /// constructs a new `Ref`. If the check fails, it returns `None`. |
4541 | | /// |
4542 | | /// If the check succeeds, then `bytes` will be initialized to zero. This |
4543 | | /// can be useful when re-using buffers to ensure that sensitive data |
4544 | | /// previously stored in the buffer is not leaked. |
4545 | | #[inline(always)] |
4546 | | pub fn new_unaligned_zeroed(bytes: B) -> Option<Ref<B, T>> { |
4547 | | map_zeroed(Self::new_unaligned(bytes)) |
4548 | | } |
4549 | | |
4550 | | /// Constructs a new `Ref` from the prefix of a byte slice for a type with |
4551 | | /// no alignment requirement, zeroing the prefix. |
4552 | | /// |
4553 | | /// `new_unaligned_from_prefix_zeroed` verifies that `bytes.len() >= |
4554 | | /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from |
4555 | | /// `bytes` to construct a `Ref`, and returns the remaining bytes to the |
4556 | | /// caller. If the length check fails, it returns `None`. |
4557 | | /// |
4558 | | /// If the check succeeds, then the prefix which is consumed will be |
4559 | | /// initialized to zero. This can be useful when re-using buffers to ensure |
4560 | | /// that sensitive data previously stored in the buffer is not leaked. |
4561 | | #[inline(always)] |
4562 | | pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> { |
4563 | | map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes)) |
4564 | | } |
4565 | | |
4566 | | /// Constructs a new `Ref` from the suffix of a byte slice for a type with |
4567 | | /// no alignment requirement, zeroing the suffix. |
4568 | | /// |
4569 | | /// `new_unaligned_from_suffix_zeroed` verifies that `bytes.len() >= |
4570 | | /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from |
4571 | | /// `bytes` to construct a `Ref`, and returns the preceding bytes to the |
4572 | | /// caller. If the length check fails, it returns `None`. |
4573 | | /// |
4574 | | /// If the check succeeds, then the suffix which is consumed will be |
4575 | | /// initialized to zero. This can be useful when re-using buffers to ensure |
4576 | | /// that sensitive data previously stored in the buffer is not leaked. |
4577 | | #[inline(always)] |
4578 | | pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> { |
4579 | | map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes)) |
4580 | | } |
4581 | | } |
4582 | | |
4583 | | impl<B, T> Ref<B, [T]> |
4584 | | where |
4585 | | B: ByteSliceMut, |
4586 | | T: Unaligned, |
4587 | | { |
4588 | | /// Constructs a new `Ref` for a slice type with no alignment requirement, |
4589 | | /// zeroing the bytes. |
4590 | | /// |
4591 | | /// `new_slice_unaligned_zeroed` verifies that `bytes.len()` is a multiple |
4592 | | /// of `size_of::<T>()` and constructs a new `Ref`. If the check fails, it |
4593 | | /// returns `None`. |
4594 | | /// |
4595 | | /// If the check succeeds, then `bytes` will be initialized to zero. This |
4596 | | /// can be useful when re-using buffers to ensure that sensitive data |
4597 | | /// previously stored in the buffer is not leaked. |
4598 | | /// |
4599 | | /// # Panics |
4600 | | /// |
4601 | | /// `new_slice_unaligned_zeroed` panics if `T` is a zero-sized type.
4602 | | #[inline(always)] |
4603 | | pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<Ref<B, [T]>> { |
4604 | | map_zeroed(Self::new_slice_unaligned(bytes)) |
4605 | | } |
4606 | | |
4607 | | /// Constructs a new `Ref` of a slice type with no alignment requirement |
4608 | | /// from the prefix of a byte slice, after zeroing the bytes. |
4609 | | /// |
4610 | | /// `new_slice_unaligned_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
4611 | | /// count`. It consumes the first `size_of::<T>() * count` bytes from |
4612 | | /// `bytes` to construct a `Ref`, and returns the remaining bytes to the |
4613 | | /// caller. It also ensures that `size_of::<T>() * count` does not overflow a
4614 | | /// `usize`. If either the length or overflow check fails, it returns
4615 | | /// `None`. |
4616 | | /// |
4617 | | /// If the checks succeed, then the prefix will be initialized to zero. This |
4618 | | /// can be useful when re-using buffers to ensure that sensitive data |
4619 | | /// previously stored in the buffer is not leaked. |
4620 | | /// |
4621 | | /// # Panics |
4622 | | /// |
4623 | | /// `new_slice_unaligned_from_prefix_zeroed` panics if `T` is a zero-sized |
4624 | | /// type. |
4625 | | #[inline(always)] |
4626 | | pub fn new_slice_unaligned_from_prefix_zeroed( |
4627 | | bytes: B, |
4628 | | count: usize, |
4629 | | ) -> Option<(Ref<B, [T]>, B)> { |
4630 | | map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count)) |
4631 | | } |
4632 | | |
4633 | | /// Constructs a new `Ref` of a slice type with no alignment requirement |
4634 | | /// from the suffix of a byte slice, after zeroing the bytes. |
4635 | | /// |
4636 | | /// `new_slice_unaligned_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
4637 | | /// count`. It consumes the last `size_of::<T>() * count` bytes from `bytes`
4638 | | /// to construct a `Ref`, and returns the preceding bytes to the caller. It
4639 | | /// also ensures that `size_of::<T>() * count` does not overflow a `usize`.
4640 | | /// If either the length or overflow check fails, it returns `None`.
4641 | | /// |
4642 | | /// If the checks succeed, then the suffix will be initialized to zero. This |
4643 | | /// can be useful when re-using buffers to ensure that sensitive data |
4644 | | /// previously stored in the buffer is not leaked. |
4645 | | /// |
4646 | | /// # Panics |
4647 | | /// |
4648 | | /// `new_slice_unaligned_from_suffix_zeroed` panics if `T` is a zero-sized |
4649 | | /// type. |
4650 | | #[inline(always)] |
4651 | | pub fn new_slice_unaligned_from_suffix_zeroed( |
4652 | | bytes: B, |
4653 | | count: usize, |
4654 | | ) -> Option<(B, Ref<B, [T]>)> { |
4655 | | map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count)) |
4656 | | } |
4657 | | } |
4658 | | |
4659 | | impl<'a, B, T> Ref<B, T> |
4660 | | where |
4661 | | B: 'a + ByteSlice, |
4662 | | T: FromBytes, |
4663 | | { |
4664 | | /// Converts this `Ref` into a reference. |
4665 | | /// |
4666 | | /// `into_ref` consumes the `Ref`, and returns a reference to `T`. |
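  | | ///
  | | /// # Examples
  | | ///
  | | /// A minimal illustrative sketch:
  | | ///
  | | /// ```
  | | /// use zerocopy::Ref;
  | | ///
  | | /// let bytes = [1u8, 2, 3, 4];
  | | /// let r: Ref<&[u8], [u8; 4]> = Ref::new(&bytes[..]).unwrap();
  | | /// let arr: &[u8; 4] = r.into_ref();
  | | /// assert_eq!(arr, &[1, 2, 3, 4]);
  | | /// ```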
4667 | | #[inline(always)] |
4668 | | pub fn into_ref(self) -> &'a T { |
4669 | | assert!(B::INTO_REF_INTO_MUT_ARE_SOUND); |
4670 | | |
4671 | | // SAFETY: According to the safety preconditions on |
4672 | | // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert |
4673 | | // ensures that, given `B: 'a`, it is sound to drop `self` and still |
4674 | | // access the underlying memory using reads for `'a`. |
4675 | | unsafe { self.deref_helper() } |
4676 | | } |
4677 | | } |
4678 | | |
4679 | | impl<'a, B, T> Ref<B, T> |
4680 | | where |
4681 | | B: 'a + ByteSliceMut, |
4682 | | T: FromBytes + AsBytes, |
4683 | | { |
4684 | | /// Converts this `Ref` into a mutable reference. |
4685 | | /// |
4686 | | /// `into_mut` consumes the `Ref`, and returns a mutable reference to `T`. |
4687 | | #[inline(always)] |
4688 | | pub fn into_mut(mut self) -> &'a mut T { |
4689 | | assert!(B::INTO_REF_INTO_MUT_ARE_SOUND); |
4690 | | |
4691 | | // SAFETY: According to the safety preconditions on |
4692 | | // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert |
4693 | | // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop |
4694 | | // `self` and still access the underlying memory using both reads and |
4695 | | // writes for `'a`. |
4696 | | unsafe { self.deref_mut_helper() } |
4697 | | } |
4698 | | } |
4699 | | |
4700 | | impl<'a, B, T> Ref<B, [T]> |
4701 | | where |
4702 | | B: 'a + ByteSlice, |
4703 | | T: FromBytes, |
4704 | | { |
4705 | | /// Converts this `Ref` into a slice reference. |
4706 | | /// |
4707 | | /// `into_slice` consumes the `Ref`, and returns a reference to `[T]`. |
4708 | | #[inline(always)] |
4709 | | pub fn into_slice(self) -> &'a [T] { |
4710 | | assert!(B::INTO_REF_INTO_MUT_ARE_SOUND); |
4711 | | |
4712 | | // SAFETY: According to the safety preconditions on |
4713 | | // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert |
4714 | | // ensures that, given `B: 'a`, it is sound to drop `self` and still |
4715 | | // access the underlying memory using reads for `'a`. |
4716 | | unsafe { self.deref_slice_helper() } |
4717 | | } |
4718 | | } |
4719 | | |
4720 | | impl<'a, B, T> Ref<B, [T]> |
4721 | | where |
4722 | | B: 'a + ByteSliceMut, |
4723 | | T: FromBytes + AsBytes, |
4724 | | { |
4725 | | /// Converts this `Ref` into a mutable slice reference. |
4726 | | /// |
4727 | | /// `into_mut_slice` consumes the `Ref`, and returns a mutable reference to |
4728 | | /// `[T]`. |
4729 | | #[inline(always)] |
4730 | | pub fn into_mut_slice(mut self) -> &'a mut [T] { |
4731 | | assert!(B::INTO_REF_INTO_MUT_ARE_SOUND); |
4732 | | |
4733 | | // SAFETY: According to the safety preconditions on |
4734 | | // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert |
4735 | | // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop |
4736 | | // `self` and still access the underlying memory using both reads and |
4737 | | // writes for `'a`. |
4738 | | unsafe { self.deref_mut_slice_helper() } |
4739 | | } |
4740 | | } |
4741 | | |
4742 | | impl<B, T> Ref<B, T> |
4743 | | where |
4744 | | B: ByteSlice, |
4745 | | T: FromBytes, |
4746 | | { |
4747 | | /// Creates an immutable reference to `T` with a specific lifetime. |
4748 | | /// |
4749 | | /// # Safety |
4750 | | /// |
4751 | | /// The type bounds on this method guarantee that it is safe to create an |
4752 | | /// immutable reference to `T` from `self`. However, since the lifetime `'a` |
4753 | | /// is not required to be shorter than the lifetime of the reference to |
4754 | | /// `self`, the caller must guarantee that the lifetime `'a` is valid for |
4755 | | /// this reference. In particular, the referent must exist for all of `'a`, |
4756 | | /// and no mutable references to the same memory may be constructed during |
4757 | | /// `'a`. |
4758 | | unsafe fn deref_helper<'a>(&self) -> &'a T { |
4759 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
4760 | | #[allow(clippy::undocumented_unsafe_blocks)] |
4761 | | unsafe { |
4762 | | &*self.0.as_ptr().cast::<T>() |
4763 | | } |
4764 | | } |
4765 | | } |
4766 | | |
4767 | | impl<B, T> Ref<B, T> |
4768 | | where |
4769 | | B: ByteSliceMut, |
4770 | | T: FromBytes + AsBytes, |
4771 | | { |
4772 | | /// Creates a mutable reference to `T` with a specific lifetime. |
4773 | | /// |
4774 | | /// # Safety |
4775 | | /// |
4776 | | /// The type bounds on this method guarantee that it is safe to create a |
4777 | | /// mutable reference to `T` from `self`. However, since the lifetime `'a` |
4778 | | /// is not required to be shorter than the lifetime of the reference to |
4779 | | /// `self`, the caller must guarantee that the lifetime `'a` is valid for |
4780 | | /// this reference. In particular, the referent must exist for all of `'a`, |
4781 | | /// and no other references - mutable or immutable - to the same memory may |
4782 | | /// be constructed during `'a`. |
4783 | | unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T { |
4784 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
4785 | | #[allow(clippy::undocumented_unsafe_blocks)] |
4786 | | unsafe { |
4787 | | &mut *self.0.as_mut_ptr().cast::<T>() |
4788 | | } |
4789 | | } |
4790 | | } |
4791 | | |
4792 | | impl<B, T> Ref<B, [T]> |
4793 | | where |
4794 | | B: ByteSlice, |
4795 | | T: FromBytes, |
4796 | | { |
4797 | | /// Creates an immutable reference to `[T]` with a specific lifetime. |
4798 | | /// |
4799 | | /// # Safety |
4800 | | /// |
4801 | | /// `deref_slice_helper` has the same safety requirements as `deref_helper`. |
4802 | | unsafe fn deref_slice_helper<'a>(&self) -> &'a [T] { |
4803 | | let len = self.0.len(); |
4804 | | let elem_size = mem::size_of::<T>(); |
4805 | | debug_assert_ne!(elem_size, 0); |
4806 | | // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`. |
4807 | | // Thus, neither the mod nor division operations here can panic. |
4808 | | #[allow(clippy::arithmetic_side_effects)] |
4809 | | let elems = { |
4810 | | debug_assert_eq!(len % elem_size, 0); |
4811 | | len / elem_size |
4812 | | }; |
4813 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
4814 | | #[allow(clippy::undocumented_unsafe_blocks)] |
4815 | | unsafe { |
4816 | | slice::from_raw_parts(self.0.as_ptr().cast::<T>(), elems) |
4817 | | } |
4818 | | } |
4819 | | } |
4820 | | |
4821 | | impl<B, T> Ref<B, [T]> |
4822 | | where |
4823 | | B: ByteSliceMut, |
4824 | | T: FromBytes + AsBytes, |
4825 | | { |
4826 | | /// Creates a mutable reference to `[T]` with a specific lifetime. |
4827 | | /// |
4828 | | /// # Safety |
4829 | | /// |
4830 | | /// `deref_mut_slice_helper` has the same safety requirements as |
4831 | | /// `deref_mut_helper`. |
4832 | | unsafe fn deref_mut_slice_helper<'a>(&mut self) -> &'a mut [T] { |
4833 | | let len = self.0.len(); |
4834 | | let elem_size = mem::size_of::<T>(); |
4835 | | debug_assert_ne!(elem_size, 0); |
4836 | | // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`. |
4837 | | // Thus, neither the mod nor division operations here can panic. |
4838 | | #[allow(clippy::arithmetic_side_effects)] |
4839 | | let elems = { |
4840 | | debug_assert_eq!(len % elem_size, 0); |
4841 | | len / elem_size |
4842 | | }; |
4843 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
4844 | | #[allow(clippy::undocumented_unsafe_blocks)] |
4845 | | unsafe { |
4846 | | slice::from_raw_parts_mut(self.0.as_mut_ptr().cast::<T>(), elems) |
4847 | | } |
4848 | | } |
4849 | | } |
4850 | | |
4851 | | impl<B, T> Ref<B, T> |
4852 | | where |
4853 | | B: ByteSlice, |
4854 | | T: ?Sized, |
4855 | | { |
4856 | | /// Gets the underlying bytes. |
4857 | | #[inline] |
4858 | | pub fn bytes(&self) -> &[u8] { |
4859 | | &self.0 |
4860 | | } |
4861 | | } |
4862 | | |
4863 | | impl<B, T> Ref<B, T> |
4864 | | where |
4865 | | B: ByteSliceMut, |
4866 | | T: ?Sized, |
4867 | | { |
4868 | | /// Gets the underlying bytes mutably. |
4869 | | #[inline] |
4870 | | pub fn bytes_mut(&mut self) -> &mut [u8] { |
4871 | | &mut self.0 |
4872 | | } |
4873 | | } |
4874 | | |
4875 | | impl<B, T> Ref<B, T> |
4876 | | where |
4877 | | B: ByteSlice, |
4878 | | T: FromBytes, |
4879 | | { |
4880 | | /// Reads a copy of `T`. |
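     | | ///
     | | /// # Example
     | | ///
     | | /// A short sketch; a byte-array type (alignment 1) is used so that
     | | /// construction cannot fail on alignment:
     | | ///
     | | /// ```
     | | /// use zerocopy::Ref;
     | | ///
     | | /// let bytes = [1u8, 2, 3, 4];
     | | /// let r = Ref::<_, [u8; 4]>::new(&bytes[..]).unwrap();
     | | /// assert_eq!(r.read(), [1, 2, 3, 4]);
     | | /// ```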
4881 | | #[inline] |
4882 | 0 | pub fn read(&self) -> T { |
4883 | | // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is |
4884 | | // at least `size_of::<T>()` bytes long, and that it is at least as |
4885 | | // aligned as `align_of::<T>()`. Because `T: FromBytes`, it is sound to |
4886 | | // interpret these bytes as a `T`. |
4887 | 0 | unsafe { ptr::read(self.0.as_ptr().cast::<T>()) } |
4888 | 0 | }
     | | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<td_shim_interface::acpi::Ccel>>>::read
     | | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<td_shim::e820::E820Entry>>>::read
     | | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::CcEventHeader>>>::read
     | | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgEfiSpecIdevent>>>::read
     | | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<cc_measurement::TcgPcrEventHeader>>>::read
     | | Unexecuted instantiation: <zerocopy::Ref<&[u8], zerocopy::wrappers::Unalign<migtd::migration::data::ServiceQueryResponse>>>::read
4889 | | } |
4890 | | |
4891 | | impl<B, T> Ref<B, T> |
4892 | | where |
4893 | | B: ByteSliceMut, |
4894 | | T: AsBytes, |
4895 | | { |
4896 | | /// Writes the bytes of `t` and then forgets `t`. |
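     | | ///
     | | /// # Example
     | | ///
     | | /// A short sketch, again with an alignment-1 type:
     | | ///
     | | /// ```
     | | /// use zerocopy::Ref;
     | | ///
     | | /// let mut bytes = [0u8; 4];
     | | /// let mut r = Ref::<_, [u8; 4]>::new(&mut bytes[..]).unwrap();
     | | /// r.write([1, 2, 3, 4]);
     | | /// assert_eq!(bytes, [1, 2, 3, 4]);
     | | /// ```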
4897 | | #[inline] |
4898 | | pub fn write(&mut self, t: T) { |
4899 | | // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is |
4900 | | // at least `size_of::<T>()` bytes long, and that it is at least as |
4901 | | // aligned as `align_of::<T>()`. Writing `t` to the buffer will allow |
4902 | | // all of the bytes of `t` to be accessed as a `[u8]`, but because `T: |
4903 | | // AsBytes`, we know this is sound. |
4904 | | unsafe { ptr::write(self.0.as_mut_ptr().cast::<T>(), t) } |
4905 | | } |
4906 | | } |
4907 | | |
4908 | | impl<B, T> Deref for Ref<B, T> |
4909 | | where |
4910 | | B: ByteSlice, |
4911 | | T: FromBytes, |
4912 | | { |
4913 | | type Target = T; |
4914 | | #[inline] |
4915 | | fn deref(&self) -> &T { |
4916 | | // SAFETY: This is sound because the lifetime of `self` is the same as |
4917 | | // the lifetime of the return value, meaning that a) the returned |
4918 | | // reference cannot outlive `self` and, b) no mutable methods on `self` |
4919 | | // can be called during the lifetime of the returned reference. See the |
4920 | | // documentation on `deref_helper` for what invariants we are required |
4921 | | // to uphold. |
4922 | | unsafe { self.deref_helper() } |
4923 | | } |
4924 | | } |
4925 | | |
4926 | | impl<B, T> DerefMut for Ref<B, T> |
4927 | | where |
4928 | | B: ByteSliceMut, |
4929 | | T: FromBytes + AsBytes, |
4930 | | { |
4931 | | #[inline] |
4932 | | fn deref_mut(&mut self) -> &mut T { |
4933 | | // SAFETY: This is sound because the lifetime of `self` is the same as |
4934 | | // the lifetime of the return value, meaning that a) the returned |
4935 | | // reference cannot outlive `self` and, b) no other methods on `self` |
4936 | | // can be called during the lifetime of the returned reference. See the |
4937 | | // documentation on `deref_mut_helper` for what invariants we are |
4938 | | // required to uphold. |
4939 | | unsafe { self.deref_mut_helper() } |
4940 | | } |
4941 | | } |
4942 | | |
4943 | | impl<B, T> Deref for Ref<B, [T]> |
4944 | | where |
4945 | | B: ByteSlice, |
4946 | | T: FromBytes, |
4947 | | { |
4948 | | type Target = [T]; |
4949 | | #[inline] |
4950 | | fn deref(&self) -> &[T] { |
4951 | | // SAFETY: This is sound because the lifetime of `self` is the same as |
4952 | | // the lifetime of the return value, meaning that a) the returned |
4953 | | // reference cannot outlive `self` and, b) no mutable methods on `self` |
4954 | | // can be called during the lifetime of the returned reference. See the |
4955 | | // documentation on `deref_slice_helper` for what invariants we are |
4956 | | // required to uphold. |
4957 | | unsafe { self.deref_slice_helper() } |
4958 | | } |
4959 | | } |
4960 | | |
4961 | | impl<B, T> DerefMut for Ref<B, [T]> |
4962 | | where |
4963 | | B: ByteSliceMut, |
4964 | | T: FromBytes + AsBytes, |
4965 | | { |
4966 | | #[inline] |
4967 | | fn deref_mut(&mut self) -> &mut [T] { |
4968 | | // SAFETY: This is sound because the lifetime of `self` is the same as |
4969 | | // the lifetime of the return value, meaning that a) the returned |
4970 | | // reference cannot outlive `self` and, b) no other methods on `self` |
4971 | | // can be called during the lifetime of the returned reference. See the |
4972 | | // documentation on `deref_mut_slice_helper` for what invariants we are |
4973 | | // required to uphold. |
4974 | | unsafe { self.deref_mut_slice_helper() } |
4975 | | } |
4976 | | } |
4977 | | |
4978 | | impl<T, B> Display for Ref<B, T> |
4979 | | where |
4980 | | B: ByteSlice, |
4981 | | T: FromBytes + Display, |
4982 | | { |
4983 | | #[inline] |
4984 | | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { |
4985 | | let inner: &T = self; |
4986 | | inner.fmt(fmt) |
4987 | | } |
4988 | | } |
4989 | | |
4990 | | impl<T, B> Display for Ref<B, [T]> |
4991 | | where |
4992 | | B: ByteSlice, |
4993 | | T: FromBytes, |
4994 | | [T]: Display, |
4995 | | { |
4996 | | #[inline] |
4997 | | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { |
4998 | | let inner: &[T] = self; |
4999 | | inner.fmt(fmt) |
5000 | | } |
5001 | | } |
5002 | | |
5003 | | impl<T, B> Debug for Ref<B, T> |
5004 | | where |
5005 | | B: ByteSlice, |
5006 | | T: FromBytes + Debug, |
5007 | | { |
5008 | | #[inline] |
5009 | | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { |
5010 | | let inner: &T = self; |
5011 | | fmt.debug_tuple("Ref").field(&inner).finish() |
5012 | | } |
5013 | | } |
5014 | | |
5015 | | impl<T, B> Debug for Ref<B, [T]> |
5016 | | where |
5017 | | B: ByteSlice, |
5018 | | T: FromBytes + Debug, |
5019 | | { |
5020 | | #[inline] |
5021 | | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { |
5022 | | let inner: &[T] = self; |
5023 | | fmt.debug_tuple("Ref").field(&inner).finish() |
5024 | | } |
5025 | | } |
5026 | | |
5027 | | impl<T, B> Eq for Ref<B, T> |
5028 | | where |
5029 | | B: ByteSlice, |
5030 | | T: FromBytes + Eq, |
5031 | | { |
5032 | | } |
5033 | | |
5034 | | impl<T, B> Eq for Ref<B, [T]> |
5035 | | where |
5036 | | B: ByteSlice, |
5037 | | T: FromBytes + Eq, |
5038 | | { |
5039 | | } |
5040 | | |
5041 | | impl<T, B> PartialEq for Ref<B, T> |
5042 | | where |
5043 | | B: ByteSlice, |
5044 | | T: FromBytes + PartialEq, |
5045 | | { |
5046 | | #[inline] |
5047 | | fn eq(&self, other: &Self) -> bool { |
5048 | | self.deref().eq(other.deref()) |
5049 | | } |
5050 | | } |
5051 | | |
5052 | | impl<T, B> PartialEq for Ref<B, [T]> |
5053 | | where |
5054 | | B: ByteSlice, |
5055 | | T: FromBytes + PartialEq, |
5056 | | { |
5057 | | #[inline] |
5058 | | fn eq(&self, other: &Self) -> bool { |
5059 | | self.deref().eq(other.deref()) |
5060 | | } |
5061 | | } |
5062 | | |
5063 | | impl<T, B> Ord for Ref<B, T> |
5064 | | where |
5065 | | B: ByteSlice, |
5066 | | T: FromBytes + Ord, |
5067 | | { |
5068 | | #[inline] |
5069 | | fn cmp(&self, other: &Self) -> Ordering { |
5070 | | let inner: &T = self; |
5071 | | let other_inner: &T = other; |
5072 | | inner.cmp(other_inner) |
5073 | | } |
5074 | | } |
5075 | | |
5076 | | impl<T, B> Ord for Ref<B, [T]> |
5077 | | where |
5078 | | B: ByteSlice, |
5079 | | T: FromBytes + Ord, |
5080 | | { |
5081 | | #[inline] |
5082 | | fn cmp(&self, other: &Self) -> Ordering { |
5083 | | let inner: &[T] = self; |
5084 | | let other_inner: &[T] = other; |
5085 | | inner.cmp(other_inner) |
5086 | | } |
5087 | | } |
5088 | | |
5089 | | impl<T, B> PartialOrd for Ref<B, T> |
5090 | | where |
5091 | | B: ByteSlice, |
5092 | | T: FromBytes + PartialOrd, |
5093 | | { |
5094 | | #[inline] |
5095 | | fn partial_cmp(&self, other: &Self) -> Option<Ordering> { |
5096 | | let inner: &T = self; |
5097 | | let other_inner: &T = other; |
5098 | | inner.partial_cmp(other_inner) |
5099 | | } |
5100 | | } |
5101 | | |
5102 | | impl<T, B> PartialOrd for Ref<B, [T]> |
5103 | | where |
5104 | | B: ByteSlice, |
5105 | | T: FromBytes + PartialOrd, |
5106 | | { |
5107 | | #[inline] |
5108 | | fn partial_cmp(&self, other: &Self) -> Option<Ordering> { |
5109 | | let inner: &[T] = self; |
5110 | | let other_inner: &[T] = other; |
5111 | | inner.partial_cmp(other_inner) |
5112 | | } |
5113 | | } |
5114 | | |
5115 | | mod sealed { |
5116 | | pub trait ByteSliceSealed {} |
5117 | | } |
5118 | | |
5119 | | // ByteSlice and ByteSliceMut abstract over [u8] references (&[u8], &mut [u8], |
5120 | | // Ref<[u8]>, RefMut<[u8]>, etc). We rely on various behaviors of these |
5121 | | // references such as that a given reference will never change its length
5122 | | // between calls to deref() or deref_mut(), and that split_at() works as |
5123 | | // expected. If ByteSlice or ByteSliceMut were not sealed, consumers could |
5124 | | // implement them in a way that violated these behaviors, and would break our |
5125 | | // unsafe code. Thus, we seal them and implement them only for known-good
5126 | | // reference types. For the same reason, they're unsafe traits. |
5127 | | |
5128 | | #[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068) |
5129 | | /// A mutable or immutable reference to a byte slice. |
5130 | | /// |
5131 | | /// `ByteSlice` abstracts over the mutability of a byte slice reference, and is |
5132 | | /// implemented for various special reference types such as `Ref<[u8]>` and |
5133 | | /// `RefMut<[u8]>`. |
5134 | | /// |
5135 | | /// Note that, while it would be technically possible, `ByteSlice` is not |
5136 | | /// implemented for [`Vec<u8>`], as the only way to implement the [`split_at`] |
5137 | | /// method would involve reallocation, and `split_at` must be a very cheap |
5138 | | /// operation in order for the utilities in this crate to perform as designed. |
5139 | | /// |
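     | | /// # Example
     | | ///
     | | /// A small sketch of code written against `ByteSlice` generically (the
     | | /// `halves` helper is an illustrative assumption):
     | | ///
     | | /// ```
     | | /// use zerocopy::ByteSlice;
     | | ///
     | | /// // Works for `&[u8]`, `&mut [u8]`, `cell::Ref<[u8]>`, etc.
     | | /// fn halves<B: ByteSlice>(bytes: B) -> (B, B) {
     | | ///     let mid = bytes.len() / 2; // `ByteSlice: Deref<Target = [u8]>`
     | | ///     bytes.split_at(mid)
     | | /// }
     | | ///
     | | /// let (left, right) = halves(&[0u8, 1, 2, 3][..]);
     | | /// assert_eq!(left, [0, 1]);
     | | /// assert_eq!(right, [2, 3]);
     | | /// ```
     | | ///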
5140 | | /// [`split_at`]: crate::ByteSlice::split_at |
5141 | | // It may seem overkill to go to this length to ensure that this doc link never |
5142 | | // breaks. We do this because it simplifies CI - it means that generating docs |
5143 | | // always succeeds, so we don't need special logic to only generate docs under |
5144 | | // certain features. |
5145 | | #[cfg_attr(feature = "alloc", doc = "[`Vec<u8>`]: alloc::vec::Vec")] |
5146 | | #[cfg_attr( |
5147 | | not(feature = "alloc"), |
5148 | | doc = "[`Vec<u8>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html" |
5149 | | )] |
5150 | | pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized + sealed::ByteSliceSealed { |
5151 | | /// Are the [`Ref::into_ref`] and [`Ref::into_mut`] methods sound when used |
5152 | | /// with `Self`? If not, evaluating this constant must panic at compile |
5153 | | /// time. |
5154 | | /// |
5155 | | /// This exists to work around #716 on versions of zerocopy prior to 0.8. |
5156 | | /// |
5157 | | /// # Safety |
5158 | | /// |
5159 | | /// This may only be set to `true` if the following holds. Given:
5161 | | /// - `Self: 'a` |
5162 | | /// - `bytes: Self` |
5163 | | /// - `let ptr = bytes.as_ptr()` |
5164 | | /// |
5165 | | /// ...then: |
5166 | | /// - Using `ptr` to read the memory previously addressed by `bytes` is |
5167 | | /// sound for `'a` even after `bytes` has been dropped. |
5168 | | /// - If `Self: ByteSliceMut`, using `ptr` to write the memory previously |
5169 | | /// addressed by `bytes` is sound for `'a` even after `bytes` has been |
5170 | | /// dropped. |
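     | | ///
     | | /// For example, this cannot soundly be `true` for `cell::Ref<'a, [u8]>`:
     | | /// dropping the `cell::Ref` releases the `RefCell`'s shared borrow, after
     | | /// which `borrow_mut` can succeed and mutate the same bytes while a
     | | /// reference derived from `ptr` is still live (see #716).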
5171 | | #[doc(hidden)] |
5172 | | const INTO_REF_INTO_MUT_ARE_SOUND: bool; |
5173 | | |
5174 | | /// Gets a raw pointer to the first byte in the slice. |
5175 | | #[inline] |
5176 | 0 | fn as_ptr(&self) -> *const u8 { |
5177 | 0 | <[u8]>::as_ptr(self) |
5178 | 0 | }
     | | Unexecuted instantiation: <&[u8] as zerocopy::ByteSlice>::as_ptr
5179 | | |
5180 | | /// Splits the slice at the midpoint. |
5181 | | /// |
5182 | | /// `x.split_at(mid)` returns `x[..mid]` and `x[mid..]`. |
5183 | | /// |
5184 | | /// # Panics |
5185 | | /// |
5186 | | /// `x.split_at(mid)` panics if `mid > x.len()`. |
5187 | | fn split_at(self, mid: usize) -> (Self, Self); |
5188 | | } |
5189 | | |
5190 | | #[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068) |
5191 | | /// A mutable reference to a byte slice. |
5192 | | /// |
5193 | | /// `ByteSliceMut` abstracts over various ways of storing a mutable reference to |
5194 | | /// a byte slice, and is implemented for various special reference types such as |
5195 | | /// `RefMut<[u8]>`. |
5196 | | pub unsafe trait ByteSliceMut: ByteSlice + DerefMut { |
5197 | | /// Gets a mutable raw pointer to the first byte in the slice. |
5198 | | #[inline] |
5199 | | fn as_mut_ptr(&mut self) -> *mut u8 { |
5200 | | <[u8]>::as_mut_ptr(self) |
5201 | | } |
5202 | | } |
5203 | | |
5204 | | impl<'a> sealed::ByteSliceSealed for &'a [u8] {} |
5205 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
5206 | | #[allow(clippy::undocumented_unsafe_blocks)] |
5207 | | unsafe impl<'a> ByteSlice for &'a [u8] { |
5208 | | // SAFETY: If `&'b [u8]: 'a`, then the underlying memory is treated as |
5209 | | // borrowed immutably for `'a` even if the slice itself is dropped. |
5210 | | const INTO_REF_INTO_MUT_ARE_SOUND: bool = true; |
5211 | | |
5212 | | #[inline] |
5213 | | fn split_at(self, mid: usize) -> (Self, Self) { |
5214 | | <[u8]>::split_at(self, mid) |
5215 | | } |
5216 | | } |
5217 | | |
5218 | | impl<'a> sealed::ByteSliceSealed for &'a mut [u8] {} |
5219 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
5220 | | #[allow(clippy::undocumented_unsafe_blocks)] |
5221 | | unsafe impl<'a> ByteSlice for &'a mut [u8] { |
5222 | | // SAFETY: If `&'b mut [u8]: 'a`, then the underlying memory is treated as |
5223 | | // borrowed mutably for `'a` even if the slice itself is dropped. |
5224 | | const INTO_REF_INTO_MUT_ARE_SOUND: bool = true; |
5225 | | |
5226 | | #[inline] |
5227 | | fn split_at(self, mid: usize) -> (Self, Self) { |
5228 | | <[u8]>::split_at_mut(self, mid) |
5229 | | } |
5230 | | } |
5231 | | |
5232 | | impl<'a> sealed::ByteSliceSealed for cell::Ref<'a, [u8]> {} |
5233 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
5234 | | #[allow(clippy::undocumented_unsafe_blocks)] |
5235 | | unsafe impl<'a> ByteSlice for cell::Ref<'a, [u8]> { |
5236 | | const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) { |
5237 | | panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::Ref; see https://github.com/google/zerocopy/issues/716") |
5238 | | } else { |
5239 | | // When compiling documentation, allow the evaluation of this constant |
5240 | | // to succeed. This doesn't represent a soundness hole - it just delays |
5241 | | // any error to runtime. The reason we need this is that, otherwise, |
5242 | | // `rustdoc` will fail when trying to document this item. |
5243 | | false |
5244 | | }; |
5245 | | |
5246 | | #[inline] |
5247 | | fn split_at(self, mid: usize) -> (Self, Self) { |
5248 | | cell::Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid)) |
5249 | | } |
5250 | | } |
5251 | | |
5252 | | impl<'a> sealed::ByteSliceSealed for RefMut<'a, [u8]> {} |
5253 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
5254 | | #[allow(clippy::undocumented_unsafe_blocks)] |
5255 | | unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> { |
5256 | | const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) { |
5257 | | panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::RefMut; see https://github.com/google/zerocopy/issues/716") |
5258 | | } else { |
5259 | | // When compiling documentation, allow the evaluation of this constant |
5260 | | // to succeed. This doesn't represent a soundness hole - it just delays |
5261 | | // any error to runtime. The reason we need this is that, otherwise, |
5262 | | // `rustdoc` will fail when trying to document this item. |
5263 | | false |
5264 | | }; |
5265 | | |
5266 | | #[inline] |
5267 | | fn split_at(self, mid: usize) -> (Self, Self) { |
5268 | | RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid)) |
5269 | | } |
5270 | | } |
5271 | | |
5272 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
5273 | | #[allow(clippy::undocumented_unsafe_blocks)] |
5274 | | unsafe impl<'a> ByteSliceMut for &'a mut [u8] {} |
5275 | | |
5276 | | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
5277 | | #[allow(clippy::undocumented_unsafe_blocks)] |
5278 | | unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {} |
5279 | | |
5280 | | #[cfg(feature = "alloc")] |
5281 | | #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] |
5282 | | mod alloc_support { |
5283 | | use alloc::vec::Vec; |
5284 | | |
5285 | | use super::*; |
5286 | | |
5287 | | /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the |
5288 | | /// vector. The new items are initialized with zeroes. |
5289 | | /// |
5290 | | /// # Panics |
5291 | | /// |
5292 | | /// Panics if `Vec::reserve(additional)` fails to reserve enough memory. |
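     | | ///
     | | /// # Example
     | | ///
     | | /// A short sketch mirroring the tests below:
     | | ///
     | | /// ```
     | | /// # use zerocopy::extend_vec_zeroed;
     | | /// let mut v = vec![100u64, 200];
     | | /// extend_vec_zeroed(&mut v, 3);
     | | /// assert_eq!(&*v, &[100, 200, 0, 0, 0]);
     | | /// ```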
5293 | | #[inline(always)] |
5294 | | pub fn extend_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, additional: usize) { |
5295 | | insert_vec_zeroed(v, v.len(), additional); |
5296 | | } |
5297 | | |
5298 | | /// Inserts `additional` new items into `Vec<T>` at `position`. |
5299 | | /// The new items are initialized with zeroes. |
5300 | | /// |
5301 | | /// # Panics |
5302 | | /// |
5303 | | /// * Panics if `position > v.len()`. |
5304 | | /// * Panics if `Vec::reserve(additional)` fails to reserve enough memory. |
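     | | ///
     | | /// # Example
     | | ///
     | | /// A short sketch mirroring the tests below:
     | | ///
     | | /// ```
     | | /// # use zerocopy::insert_vec_zeroed;
     | | /// let mut v = vec![100u64, 200, 300];
     | | /// insert_vec_zeroed(&mut v, 1, 1);
     | | /// assert_eq!(&*v, &[100, 0, 200, 300]);
     | | /// ```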
5305 | | #[inline] |
5306 | | pub fn insert_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, position: usize, additional: usize) { |
5307 | | assert!(position <= v.len()); |
5308 | | v.reserve(additional); |
5309 | | // SAFETY: The `reserve` call guarantees that these cannot overflow: |
5310 | | // * `ptr.add(position)` |
5311 | | // * `position + additional` |
5312 | | // * `v.len() + additional` |
5313 | | // |
5314 | | // `v.len() - position` cannot overflow because we asserted that |
5315 | | // `position <= v.len()`. |
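     | | //
     | | // For example, with `v == [a, b, c]`, `position == 1`, and
     | | // `additional == 2`: the copy below shifts `[b, c]` from index 1 to
     | | // index 3, `write_bytes` zeroes indices 1..3, and the result is
     | | // `[a, 0, 0, b, c]`.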
5316 | | unsafe { |
5317 | | // This is a potentially overlapping copy. |
5318 | | let ptr = v.as_mut_ptr(); |
5319 | | #[allow(clippy::arithmetic_side_effects)] |
5320 | | ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position); |
5321 | | ptr.add(position).write_bytes(0, additional); |
5322 | | #[allow(clippy::arithmetic_side_effects)] |
5323 | | v.set_len(v.len() + additional); |
5324 | | } |
5325 | | } |
5326 | | |
5327 | | #[cfg(test)] |
5328 | | mod tests { |
5329 | | use core::convert::TryFrom as _; |
5330 | | |
5331 | | use super::*; |
5332 | | |
5333 | | #[test] |
5334 | | fn test_extend_vec_zeroed() { |
5335 | | // Test extending when there is an existing allocation. |
5336 | | let mut v = vec![100u64, 200, 300]; |
5337 | | extend_vec_zeroed(&mut v, 3); |
5338 | | assert_eq!(v.len(), 6); |
5339 | | assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]); |
5340 | | drop(v); |
5341 | | |
5342 | | // Test extending when there is no existing allocation. |
5343 | | let mut v: Vec<u64> = Vec::new(); |
5344 | | extend_vec_zeroed(&mut v, 3); |
5345 | | assert_eq!(v.len(), 3); |
5346 | | assert_eq!(&*v, &[0, 0, 0]); |
5347 | | drop(v); |
5348 | | } |
5349 | | |
5350 | | #[test] |
5351 | | fn test_extend_vec_zeroed_zst() { |
5352 | | // Test extending when there is an existing (fake) allocation. |
5353 | | let mut v = vec![(), (), ()]; |
5354 | | extend_vec_zeroed(&mut v, 3); |
5355 | | assert_eq!(v.len(), 6); |
5356 | | assert_eq!(&*v, &[(), (), (), (), (), ()]); |
5357 | | drop(v); |
5358 | | |
5359 | | // Test extending when there is no existing (fake) allocation. |
5360 | | let mut v: Vec<()> = Vec::new(); |
5361 | | extend_vec_zeroed(&mut v, 3); |
5362 | | assert_eq!(&*v, &[(), (), ()]); |
5363 | | drop(v); |
5364 | | } |
5365 | | |
5366 | | #[test] |
5367 | | fn test_insert_vec_zeroed() { |
5368 | | // Insert at start (no existing allocation). |
5369 | | let mut v: Vec<u64> = Vec::new(); |
5370 | | insert_vec_zeroed(&mut v, 0, 2); |
5371 | | assert_eq!(v.len(), 2); |
5372 | | assert_eq!(&*v, &[0, 0]); |
5373 | | drop(v); |
5374 | | |
5375 | | // Insert at start. |
5376 | | let mut v = vec![100u64, 200, 300]; |
5377 | | insert_vec_zeroed(&mut v, 0, 2); |
5378 | | assert_eq!(v.len(), 5); |
5379 | | assert_eq!(&*v, &[0, 0, 100, 200, 300]); |
5380 | | drop(v); |
5381 | | |
5382 | | // Insert at middle. |
5383 | | let mut v = vec![100u64, 200, 300]; |
5384 | | insert_vec_zeroed(&mut v, 1, 1); |
5385 | | assert_eq!(v.len(), 4); |
5386 | | assert_eq!(&*v, &[100, 0, 200, 300]); |
5387 | | drop(v); |
5388 | | |
5389 | | // Insert at end. |
5390 | | let mut v = vec![100u64, 200, 300]; |
5391 | | insert_vec_zeroed(&mut v, 3, 1); |
5392 | | assert_eq!(v.len(), 4); |
5393 | | assert_eq!(&*v, &[100, 200, 300, 0]); |
5394 | | drop(v); |
5395 | | } |
5396 | | |
5397 | | #[test] |
5398 | | fn test_insert_vec_zeroed_zst() { |
5399 | | // Insert at start (no existing fake allocation). |
5400 | | let mut v: Vec<()> = Vec::new(); |
5401 | | insert_vec_zeroed(&mut v, 0, 2); |
5402 | | assert_eq!(v.len(), 2); |
5403 | | assert_eq!(&*v, &[(), ()]); |
5404 | | drop(v); |
5405 | | |
5406 | | // Insert at start. |
5407 | | let mut v = vec![(), (), ()]; |
5408 | | insert_vec_zeroed(&mut v, 0, 2); |
5409 | | assert_eq!(v.len(), 5); |
5410 | | assert_eq!(&*v, &[(), (), (), (), ()]); |
5411 | | drop(v); |
5412 | | |
5413 | | // Insert at middle. |
5414 | | let mut v = vec![(), (), ()]; |
5415 | | insert_vec_zeroed(&mut v, 1, 1); |
5416 | | assert_eq!(v.len(), 4); |
5417 | | assert_eq!(&*v, &[(), (), (), ()]); |
5418 | | drop(v); |
5419 | | |
5420 | | // Insert at end. |
5421 | | let mut v = vec![(), (), ()]; |
5422 | | insert_vec_zeroed(&mut v, 3, 1); |
5423 | | assert_eq!(v.len(), 4); |
5424 | | assert_eq!(&*v, &[(), (), (), ()]); |
5425 | | drop(v); |
5426 | | } |
5427 | | |
5428 | | #[test] |
5429 | | fn test_new_box_zeroed() { |
5430 | | assert_eq!(*u64::new_box_zeroed(), 0); |
5431 | | } |
5432 | | |
5433 | | #[test] |
5434 | | fn test_new_box_zeroed_array() { |
5435 | | drop(<[u32; 0x1000]>::new_box_zeroed()); |
5436 | | } |
5437 | | |
5438 | | #[test] |
5439 | | fn test_new_box_zeroed_zst() { |
5440 | | // This test exists in order to exercise unsafe code, especially |
5441 | | // when running under Miri. |
5442 | | #[allow(clippy::unit_cmp)] |
5443 | | { |
5444 | | assert_eq!(*<()>::new_box_zeroed(), ()); |
5445 | | } |
5446 | | } |
5447 | | |
5448 | | #[test] |
5449 | | fn test_new_box_slice_zeroed() { |
5450 | | let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3); |
5451 | | assert_eq!(s.len(), 3); |
5452 | | assert_eq!(&*s, &[0, 0, 0]); |
5453 | | s[1] = 3; |
5454 | | assert_eq!(&*s, &[0, 3, 0]); |
5455 | | } |
5456 | | |
5457 | | #[test] |
5458 | | fn test_new_box_slice_zeroed_empty() { |
5459 | | let s: Box<[u64]> = u64::new_box_slice_zeroed(0); |
5460 | | assert_eq!(s.len(), 0); |
5461 | | } |
5462 | | |
5463 | | #[test] |
5464 | | fn test_new_box_slice_zeroed_zst() { |
5465 | | let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3); |
5466 | | assert_eq!(s.len(), 3); |
5467 | | assert!(s.get(10).is_none()); |
5468 | | // This test exists in order to exercise unsafe code, especially |
5469 | | // when running under Miri. |
5470 | | #[allow(clippy::unit_cmp)] |
5471 | | { |
5472 | | assert_eq!(s[1], ()); |
5473 | | } |
5474 | | s[2] = (); |
5475 | | } |
5476 | | |
5477 | | #[test] |
5478 | | fn test_new_box_slice_zeroed_zst_empty() { |
5479 | | let s: Box<[()]> = <()>::new_box_slice_zeroed(0); |
5480 | | assert_eq!(s.len(), 0); |
5481 | | } |
5482 | | |
5483 | | #[test] |
5484 | | #[should_panic(expected = "mem::size_of::<Self>() * len overflows `usize`")] |
5485 | | fn test_new_box_slice_zeroed_panics_mul_overflow() { |
5486 | | let _ = u16::new_box_slice_zeroed(usize::MAX); |
5487 | | } |
5488 | | |
5489 | | #[test] |
5490 | | #[should_panic(expected = "assertion failed: size <= max_alloc")] |
5491 | | fn test_new_box_slice_zeroed_panics_isize_overflow() { |
5492 | | let max = usize::try_from(isize::MAX).unwrap(); |
5493 | | let _ = u16::new_box_slice_zeroed((max / mem::size_of::<u16>()) + 1); |
5494 | | } |
5495 | | } |
5496 | | } |
5497 | | |
5498 | | #[cfg(feature = "alloc")] |
5499 | | #[doc(inline)] |
5500 | | pub use alloc_support::*; |
5501 | | |
5502 | | #[cfg(test)] |
5503 | | mod tests { |
5504 | | #![allow(clippy::unreadable_literal)] |
5505 | | |
5506 | | use core::{cell::UnsafeCell, convert::TryInto as _, ops::Deref}; |
5507 | | |
5508 | | use static_assertions::assert_impl_all; |
5509 | | |
5510 | | use super::*; |
5511 | | use crate::util::testutil::*; |
5512 | | |
5513 | | // An unsized type. |
5514 | | // |
5515 | | // This is used to test the custom derives of our traits. The `[u8]` type |
5516 | | // gets a hand-rolled impl, so it doesn't exercise our custom derives. |
5517 | | #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes, Unaligned)] |
5518 | | #[repr(transparent)] |
5519 | | struct Unsized([u8]); |
5520 | | |
5521 | | impl Unsized { |
5522 | | fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized { |
5523 | | // SAFETY: This is *probably* sound - since the layouts of `[u8]` and
5524 | | // `Unsized` are the same, so are the layouts of `&mut [u8]` and |
5525 | | // `&mut Unsized`. [1] Even if it turns out that this isn't actually |
5526 | | // guaranteed by the language spec, we can just change this since |
5527 | | // it's in test code. |
5528 | | // |
5529 | | // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375 |
5530 | | unsafe { mem::transmute(slc) } |
5531 | | } |
5532 | | } |
5533 | | |
5534 | | /// Tests of when a sized `DstLayout` is extended with a sized field. |
5535 | | #[allow(clippy::decimal_literal_representation)] |
5536 | | #[test] |
5537 | | fn test_dst_layout_extend_sized_with_sized() { |
5538 | | // This macro constructs a layout corresponding to a `u8` and extends it |
5539 | | // with a zero-sized trailing field of given alignment `n`. The macro |
5540 | | // tests that the resulting layout has both size and alignment `min(n, |
5541 | | // P)` for all valid values of `repr(packed(P))`. |
5542 | | macro_rules! test_align_is_size { |
5543 | | ($n:expr) => { |
5544 | | let base = DstLayout::for_type::<u8>(); |
5545 | | let trailing_field = DstLayout::for_type::<elain::Align<$n>>(); |
5546 | | |
5547 | | let packs = |
5548 | | core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p)))); |
5549 | | |
5550 | | for pack in packs { |
5551 | | let composite = base.extend(trailing_field, pack); |
5552 | | let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN); |
5553 | | let align = $n.min(max_align.get()); |
5554 | | assert_eq!( |
5555 | | composite, |
5556 | | DstLayout { |
5557 | | align: NonZeroUsize::new(align).unwrap(), |
5558 | | size_info: SizeInfo::Sized { _size: align } |
5559 | | } |
5560 | | ) |
5561 | | } |
5562 | | }; |
5563 | | } |
5564 | | |
5565 | | test_align_is_size!(1); |
5566 | | test_align_is_size!(2); |
5567 | | test_align_is_size!(4); |
5568 | | test_align_is_size!(8); |
5569 | | test_align_is_size!(16); |
5570 | | test_align_is_size!(32); |
5571 | | test_align_is_size!(64); |
5572 | | test_align_is_size!(128); |
5573 | | test_align_is_size!(256); |
5574 | | test_align_is_size!(512); |
5575 | | test_align_is_size!(1024); |
5576 | | test_align_is_size!(2048); |
5577 | | test_align_is_size!(4096); |
5578 | | test_align_is_size!(8192); |
5579 | | test_align_is_size!(16384); |
5580 | | test_align_is_size!(32768); |
5581 | | test_align_is_size!(65536); |
5582 | | test_align_is_size!(131072); |
5583 | | test_align_is_size!(262144); |
5584 | | test_align_is_size!(524288); |
5585 | | test_align_is_size!(1048576); |
5586 | | test_align_is_size!(2097152); |
5587 | | test_align_is_size!(4194304); |
5588 | | test_align_is_size!(8388608); |
5589 | | test_align_is_size!(16777216); |
5590 | | test_align_is_size!(33554432); |
5591 | | test_align_is_size!(67108864); |
5593 | | test_align_is_size!(134217728); |
5594 | | test_align_is_size!(268435456); |
5595 | | } |
5596 | | |
5597 | | /// Tests of when a sized `DstLayout` is extended with a DST field. |
5598 | | #[test] |
5599 | | fn test_dst_layout_extend_sized_with_dst() { |
5600 | | // Test that, for all combinations of real-world alignments and
5601 | | // `repr_packed` values, the extension of a sized `DstLayout` with
5602 | | // a DST field correctly computes the trailing offset in the composite |
5603 | | // layout. |
5604 | | |
5605 | | let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()); |
5606 | | let packs = core::iter::once(None).chain(aligns.clone().map(Some)); |
5607 | | |
5608 | | for align in aligns { |
5609 | | for pack in packs.clone() { |
5610 | | let base = DstLayout::for_type::<u8>(); |
5611 | | let elem_size = 42; |
5612 | | let trailing_field_offset = 11; |
5613 | | |
5614 | | let trailing_field = DstLayout { |
5615 | | align, |
5616 | | size_info: SizeInfo::SliceDst(TrailingSliceLayout { |
5617 | | _elem_size: elem_size, |
5618 | | _offset: trailing_field_offset,
5619 | | }), |
5620 | | }; |
5621 | | |
5622 | | let composite = base.extend(trailing_field, pack); |
5623 | | |
5624 | | let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get(); |
5625 | | |
5626 | | let align = align.get().min(max_align); |
5627 | | |
5628 | | assert_eq!( |
5629 | | composite, |
5630 | | DstLayout { |
5631 | | align: NonZeroUsize::new(align).unwrap(), |
5632 | | size_info: SizeInfo::SliceDst(TrailingSliceLayout { |
5633 | | _elem_size: elem_size, |
5634 | | _offset: align + trailing_field_offset, |
5635 | | }), |
5636 | | } |
5637 | | ) |
5638 | | } |
5639 | | } |
5640 | | } |
5641 | | |
5642 | | /// Tests that calling `pad_to_align` on a sized `DstLayout` adds the |
5643 | | /// expected amount of trailing padding. |
5644 | | #[test] |
5645 | | fn test_dst_layout_pad_to_align_with_sized() { |
5646 | | // For all valid alignments `align`, construct a one-byte layout aligned |
5647 | | // to `align`, call `pad_to_align`, and assert that the size of the |
5648 | | // resulting layout is equal to `align`. |
5649 | | for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { |
5650 | | let layout = DstLayout { align, size_info: SizeInfo::Sized { _size: 1 } }; |
5651 | | |
5652 | | assert_eq!( |
5653 | | layout.pad_to_align(), |
5654 | | DstLayout { align, size_info: SizeInfo::Sized { _size: align.get() } } |
5655 | | ); |
5656 | | } |
5657 | | |
5658 | | // Test explicitly-provided combinations of unpadded and padded |
5659 | | // counterparts. |
5660 | | |
5661 | | macro_rules! test { |
5662 | | (unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr } |
5663 | | => padded { size: $padded_size:expr, align: $padded_align:expr }) => { |
5664 | | let unpadded = DstLayout { |
5665 | | align: NonZeroUsize::new($unpadded_align).unwrap(), |
5666 | | size_info: SizeInfo::Sized { _size: $unpadded_size }, |
5667 | | }; |
5668 | | let padded = unpadded.pad_to_align(); |
5669 | | |
5670 | | assert_eq!( |
5671 | | padded, |
5672 | | DstLayout { |
5673 | | align: NonZeroUsize::new($padded_align).unwrap(), |
5674 | | size_info: SizeInfo::Sized { _size: $padded_size }, |
5675 | | } |
5676 | | ); |
5677 | | }; |
5678 | | } |
5679 | | |
5680 | | test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 }); |
5681 | | test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 }); |
5682 | | test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 }); |
5683 | | test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 }); |
5684 | | test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 }); |
5685 | | test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 }); |
5686 | | test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 }); |
5687 | | test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 }); |
5688 | | test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 }); |
5689 | | |
5690 | | let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get(); |
5691 | | |
5692 | | test!(unpadded { size: 1, align: current_max_align } |
5693 | | => padded { size: current_max_align, align: current_max_align }); |
5694 | | |
5695 | | test!(unpadded { size: current_max_align + 1, align: current_max_align } |
5696 | | => padded { size: current_max_align * 2, align: current_max_align }); |
5697 | | } |
5698 | | |
5699 | | /// Tests that calling `pad_to_align` on a DST `DstLayout` is a no-op. |
5700 | | #[test] |
5701 | | fn test_dst_layout_pad_to_align_with_dst() { |
5702 | | for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { |
5703 | | for offset in 0..10 { |
5704 | | for elem_size in 0..10 { |
5705 | | let layout = DstLayout { |
5706 | | align, |
5707 | | size_info: SizeInfo::SliceDst(TrailingSliceLayout { |
5708 | | _offset: offset, |
5709 | | _elem_size: elem_size, |
5710 | | }), |
5711 | | }; |
5712 | | assert_eq!(layout.pad_to_align(), layout); |
5713 | | } |
5714 | | } |
5715 | | } |
5716 | | } |
5717 | | |
5718 | | // This test takes a long time when running under Miri, so we skip it in |
5719 | | // that case. This is acceptable because this is a logic test that doesn't |
5720 | | // attempt to expose UB. |
5721 | | #[test] |
5722 | | #[cfg_attr(miri, ignore)] |
5723 | | fn test_validate_cast_and_convert_metadata() {
5724 | | impl From<usize> for SizeInfo { |
5725 | | fn from(_size: usize) -> SizeInfo { |
5726 | | SizeInfo::Sized { _size } |
5727 | | } |
5728 | | } |
5729 | | |
5730 | | impl From<(usize, usize)> for SizeInfo { |
5731 | | fn from((_offset, _elem_size): (usize, usize)) -> SizeInfo { |
5732 | | SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) |
5733 | | } |
5734 | | } |
5735 | | |
5736 | | fn layout<S: Into<SizeInfo>>(s: S, align: usize) -> DstLayout { |
5737 | | DstLayout { size_info: s.into(), align: NonZeroUsize::new(align).unwrap() } |
5738 | | } |
5739 | | |
5740 | | /// This macro accepts arguments in the form of: |
5741 | | /// |
5742 | | /// layout(_, _, _).validate(_, _, _), Ok(Some((_, _))) |
5743 | | /// | | | | | | | | |
5744 | | /// base_size ----+ | | | | | | | |
5745 | | /// align -----------+ | | | | | | |
5746 | | /// trailing_size ------+ | | | | | |
5747 | | /// addr ---------------------------+ | | | | |
5748 | | /// bytes_len -------------------------+ | | | |
5749 | | /// cast_type ----------------------------+ | | |
5750 | | /// elems ---------------------------------------------+ | |
5751 | | /// split_at ---------------------------------------------+ |
5752 | | /// |
5753 | | /// `.validate` is shorthand for `.validate_cast_and_convert_metadata` |
5754 | | /// for brevity. |
5755 | | /// |
5756 | | /// Each argument can either be an iterator or a wildcard. Each |
5757 | | /// wildcarded variable is implicitly replaced by an iterator over a |
5758 | | /// representative sample of values for that variable. Each `test!` |
5759 | | /// invocation iterates over every combination of values provided by |
5760 | | /// each variable's iterator (ie, the cartesian product) and validates |
5761 | | /// that the results are expected. |
5762 | | /// |
5763 | | /// The final argument uses the same syntax, but it has a different |
5764 | | /// meaning: |
5765 | | /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to |
5766 | | /// `assert_matches!` to validate the computed result for each |
5767 | | /// combination of input values. |
5768 | | /// - If it is `Err(msg)`, then `test!` validates that the call to |
5769 | | /// `validate_cast_and_convert_metadata` panics with the given panic |
5770 | | /// message. |
5771 | | /// |
5772 | | /// Note that the meta-variables that match these variables have the |
5773 | | /// `tt` type, and some valid expressions are not valid `tt`s (such as |
5774 | | /// `a..b`). In this case, wrap the expression in parentheses, and it |
5775 | | /// will become a valid `tt`.
5776 | | macro_rules! test { |
5777 | | ($(:$sizes:expr =>)? |
5778 | | layout($size:tt, $align:tt) |
5779 | | .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)? |
5780 | | ) => { |
5781 | | itertools::iproduct!( |
5782 | | test!(@generate_size $size), |
5783 | | test!(@generate_align $align), |
5784 | | test!(@generate_usize $addr), |
5785 | | test!(@generate_usize $bytes_len), |
5786 | | test!(@generate_cast_type $cast_type) |
5787 | | ).for_each(|(size_info, align, addr, bytes_len, cast_type)| { |
5788 | | // Temporarily disable the panic hook installed by the test |
5789 | | // harness. If we don't do this, all panic messages will be |
5790 | | // kept in an internal log. On its own, this isn't a |
5791 | | // problem, but if a non-caught panic ever happens (ie, in |
5792 | | // code later in this test not in this macro), all of the |
5793 | | // previously-buffered messages will be dumped, hiding the |
5794 | | // real culprit. |
5795 | | let previous_hook = std::panic::take_hook(); |
5796 | | // I don't understand why, but this seems to be required in |
5797 | | // addition to the previous line. |
5798 | | std::panic::set_hook(Box::new(|_| {})); |
5799 | | let actual = std::panic::catch_unwind(|| { |
5800 | | layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type) |
5801 | | }).map_err(|d| { |
5802 | | *d.downcast::<&'static str>().expect("expected string panic message").as_ref() |
5803 | | }); |
5804 | | std::panic::set_hook(previous_hook); |
5805 | | |
5806 | | assert_matches::assert_matches!( |
5807 | | actual, $expect, |
5808 | | "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?})", |
5809 | | ); |
5810 | | }); |
5811 | | }; |
5812 | | (@generate_usize _) => { 0..8 }; |
5813 | | // Generate sizes for both Sized and !Sized types. |
5814 | | (@generate_size _) => { |
5815 | | test!(@generate_size (_)).chain(test!(@generate_size (_, _))) |
5816 | | }; |
5817 | | // Generate sizes for both Sized and !Sized types by chaining |
5818 | | // specified iterators for each. |
5819 | | (@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => { |
5820 | | test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes)) |
5821 | | }; |
5822 | | // Generate sizes for Sized types. |
5823 | | (@generate_size (_)) => { test!(@generate_size (0..8)) }; |
5824 | | (@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::<SizeInfo>::into) }; |
5825 | | // Generate sizes for !Sized types. |
5826 | | (@generate_size ($min_sizes:tt, $elem_sizes:tt)) => { |
5827 | | itertools::iproduct!( |
5828 | | test!(@generate_min_size $min_sizes), |
5829 | | test!(@generate_elem_size $elem_sizes) |
5830 | | ).map(Into::<SizeInfo>::into) |
5831 | | }; |
5832 | | (@generate_fixed_size _) => { (0..8).into_iter().map(Into::<SizeInfo>::into) }; |
5833 | | (@generate_min_size _) => { 0..8 }; |
5834 | | (@generate_elem_size _) => { 1..8 }; |
5835 | | (@generate_align _) => { [1, 2, 4, 8, 16] }; |
5836 | | (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some).into_iter()) }; |
5837 | | (@generate_cast_type _) => { [_CastType::_Prefix, _CastType::_Suffix] }; |
5838 | | (@generate_cast_type $variant:ident) => { [_CastType::$variant] }; |
5839 | | // Some expressions need to be wrapped in parentheses in order to be |
5840 | | // valid `tt`s (required by the top match pattern). See the comment |
5841 | | // below for more details. This arm removes these parentheses to |
5842 | | // avoid generating an `unused_parens` warning. |
5843 | | (@$_:ident ($vals:expr)) => { $vals }; |
5844 | | (@$_:ident $vals:expr) => { $vals }; |
5845 | | } |
5846 | | |
5847 | | const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14]; |
5848 | | const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15]; |
5849 | | |
5850 | | // base_size is too big for the memory region. |
5851 | | test!(layout(((1..8) | ((1..8), (1..8))), _).validate(_, [0], _), Ok(None)); |
5852 | | test!(layout(((2..8) | ((2..8), (2..8))), _).validate(_, [1], _), Ok(None)); |
5853 | | |
5854 | | // addr is unaligned for prefix cast |
5855 | | test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None)); |
5857 | | |
5858 | | // addr is aligned, but end of buffer is unaligned for suffix cast |
5859 | | test!(layout(_, [2]).validate(EVENS, ODDS, _Suffix), Ok(None)); |
5861 | | |
5862 | | // Unfortunately, these constants cannot easily be used in the |
5863 | | // implementation of `validate_cast_and_convert_metadata`, since |
5864 | | // `panic!` consumes a string literal, not an expression. |
5865 | | // |
5866 | | // It's important that these messages be in a separate module. If they |
5867 | | // were at the function's top level, we'd pass them to `test!` as, e.g., |
5868 | | // `Err(TRAILING)`, which would run into a subtle Rust footgun - the |
5869 | | // `TRAILING` identifier would be treated as a pattern to match rather |
5870 | | // than a value to check for equality. |
5871 | | mod msgs { |
5872 | | pub(super) const TRAILING: &str = |
5873 | | "attempted to cast to slice type with zero-sized element"; |
5874 | | pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX"; |
5875 | | } |
5876 | | |
5877 | | // casts with ZST trailing element types are unsupported |
5878 | | test!(layout((_, [0]), _).validate(_, _, _), Err(msgs::TRAILING),); |
5879 | | |
5880 | | // addr + bytes_len must not overflow usize |
5881 | | test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(msgs::OVERFLOW)); |
5882 | | test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(msgs::OVERFLOW)); |
5883 | | test!( |
5884 | | layout(_, _).validate( |
5885 | | [usize::MAX / 2 + 1, usize::MAX], |
5886 | | [usize::MAX / 2 + 1, usize::MAX], |
5887 | | _ |
5888 | | ), |
5889 | | Err(msgs::OVERFLOW) |
5890 | | ); |
5891 | | |
5892 | | // Validates that `validate_cast_and_convert_metadata` satisfies its own |
5893 | | // documented safety postconditions, and also a few other properties |
5894 | | // that aren't documented but we want to guarantee anyway. |
5895 | | fn validate_behavior( |
5896 | | (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, _CastType), |
5897 | | ) { |
5898 | | if let Some((elems, split_at)) = |
5899 | | layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type) |
5900 | | { |
5901 | | let (size_info, align) = (layout.size_info, layout.align); |
5902 | | let debug_str = format!( |
5903 | | "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?}) => ({elems}, {split_at})", |
5904 | | ); |
5905 | | |
5906 | | // If this is a sized type (no trailing slice), then `elems` is |
5907 | | // meaningless, but in practice we set it to 0. Callers are not |
5908 | | // allowed to rely on this, but a lot of math is nicer if |
5909 | | // they're able to, and some callers might accidentally do that. |
5910 | | let sized = matches!(layout.size_info, SizeInfo::Sized { .. }); |
5911 | | assert!(!(sized && elems != 0), "{}", debug_str); |
5912 | | |
5913 | | let resulting_size = match layout.size_info { |
5914 | | SizeInfo::Sized { _size } => _size, |
5915 | | SizeInfo::SliceDst(TrailingSliceLayout { |
5916 | | _offset: offset, |
5917 | | _elem_size: elem_size, |
5918 | | }) => { |
5919 | | let padded_size = |elems| { |
5920 | | let without_padding = offset + elems * elem_size; |
5921 | | without_padding |
5922 | | + util::core_layout::padding_needed_for(without_padding, align) |
5923 | | }; |
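     | | // For example, with `offset == 4`, `elem_size == 4`, and
     | | // `align == 8`, `padded_size(2)` is `4 + 2 * 4 == 12`, rounded up
     | | // to the next multiple of 8, i.e., 16.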
5924 | | |
5925 | | let resulting_size = padded_size(elems); |
5926 | | // Test that `validate_cast_and_convert_metadata` |
5927 | | // computed the largest possible value that fits in the |
5928 | | // given range. |
5929 | | assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str); |
5930 | | resulting_size |
5931 | | } |
5932 | | }; |
5933 | | |
5934 | | // Test safety postconditions guaranteed by |
5935 | | // `validate_cast_and_convert_metadata`. |
5936 | | assert!(resulting_size <= bytes_len, "{}", debug_str); |
5937 | | match cast_type { |
5938 | | _CastType::_Prefix => { |
5939 | | assert_eq!(addr % align, 0, "{}", debug_str); |
5940 | | assert_eq!(resulting_size, split_at, "{}", debug_str); |
5941 | | } |
5942 | | _CastType::_Suffix => { |
5943 | | assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str); |
5944 | | assert_eq!((addr + split_at) % align, 0, "{}", debug_str); |
5945 | | } |
5946 | | } |
5947 | | } else { |
5948 | | let min_size = match layout.size_info { |
5949 | | SizeInfo::Sized { _size } => _size, |
5950 | | SizeInfo::SliceDst(TrailingSliceLayout { _offset, .. }) => { |
5951 | | _offset + util::core_layout::padding_needed_for(_offset, layout.align) |
5952 | | } |
5953 | | }; |
5954 | | |
5955 | | // If a cast is invalid, it is either because... |
5956 | | // 1. there are insufficient bytes in the given region for the type:
5957 | | let insufficient_bytes = bytes_len < min_size; |
5958 | | // 2. performing the cast would misalign the type:
5959 | | let base = match cast_type { |
5960 | | _CastType::_Prefix => 0, |
5961 | | _CastType::_Suffix => bytes_len, |
5962 | | }; |
5963 | | let misaligned = (base + addr) % layout.align != 0; |
5964 | | |
5965 | | assert!(insufficient_bytes || misaligned); |
5966 | | } |
5967 | | } |
5968 | | |
5969 | | let sizes = 0..8; |
5970 | | let elem_sizes = 1..8; |
5971 | | let size_infos = sizes |
5972 | | .clone() |
5973 | | .map(Into::<SizeInfo>::into) |
5974 | | .chain(itertools::iproduct!(sizes, elem_sizes).map(Into::<SizeInfo>::into)); |
5975 | | let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32]) |
5976 | | .filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { _size } if _size % align != 0)) |
5977 | | .map(|(size_info, align)| layout(size_info, align)); |
5978 | | itertools::iproduct!(layouts, 0..8, 0..8, [_CastType::_Prefix, _CastType::_Suffix]) |
5979 | | .for_each(validate_behavior); |
5980 | | } |
5981 | | |
5982 | | #[test] |
5983 | | #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] |
5984 | | fn test_validate_rust_layout() { |
5985 | | use core::ptr::NonNull; |
5986 | | |
5987 | | // This test synthesizes pointers with various metadata and uses Rust's |
5988 | | // built-in APIs to confirm that Rust makes decisions about type layout |
5989 | | // which are consistent with what we believe is guaranteed by the |
5990 | | // language. If this test fails, it doesn't just mean our code is wrong |
5991 | | // - it means we're misunderstanding the language's guarantees. |
5992 | | |
5993 | | #[derive(Debug)] |
5994 | | struct MacroArgs { |
5995 | | offset: usize, |
5996 | | align: NonZeroUsize, |
5997 | | elem_size: Option<usize>, |
5998 | | } |
5999 | | |
6000 | | /// # Safety |
6001 | | /// |
6002 | | /// `test` promises to only call `addr_of_slice_field` on a `NonNull<T>` |
6003 | | /// which points to a valid `T`. |
6004 | | /// |
6005 | | /// `with_elems` must produce a pointer which points to a valid `T`. |
6006 | | fn test<T: ?Sized, W: Fn(usize) -> NonNull<T>>( |
6007 | | args: MacroArgs, |
6008 | | with_elems: W, |
6009 | | addr_of_slice_field: Option<fn(NonNull<T>) -> NonNull<u8>>, |
6010 | | ) { |
6011 | | let dst = args.elem_size.is_some(); |
6012 | | let layout = { |
6013 | | let size_info = match args.elem_size { |
6014 | | Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout { |
6015 | | _offset: args.offset, |
6016 | | _elem_size: elem_size, |
6017 | | }), |
6018 | | None => SizeInfo::Sized { |
6019 | | // Rust only supports types whose sizes are a multiple |
6020 | | // of their alignment. If the macro created a type like |
6021 | | // this: |
6022 | | // |
6023 | | // #[repr(C, align(2))] |
6024 | | // struct Foo([u8; 1]); |
6025 | | // |
6026 | | // ...then Rust will automatically round the type's size |
6027 | | // up to 2. |
6028 | | _size: args.offset |
6029 | | + util::core_layout::padding_needed_for(args.offset, args.align), |
6030 | | }, |
6031 | | }; |
6032 | | DstLayout { size_info, align: args.align } |
6033 | | }; |
6034 | | |
6035 | | for elems in 0..128 { |
6036 | | let ptr = with_elems(elems); |
6037 | | |
6038 | | if let Some(addr_of_slice_field) = addr_of_slice_field { |
6039 | | let slc_field_ptr = addr_of_slice_field(ptr).as_ptr(); |
6040 | | // SAFETY: Both `slc_field_ptr` and `ptr` are pointers to |
6041 | | // the same valid Rust object. |
6042 | | let offset: usize = |
6043 | | unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() }; |
6044 | | assert_eq!(offset, args.offset); |
6045 | | } |
6046 | | |
6047 | | // SAFETY: `ptr` points to a valid `T`. |
6048 | | let (size, align) = unsafe { |
6049 | | (mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr())) |
6050 | | }; |
6051 | | |
6052 | | // Avoid expensive allocation when running under Miri. |
6053 | | let assert_msg = if !cfg!(miri) { |
6054 | | format!("\n{args:?}\nsize:{size}, align:{align}") |
6055 | | } else { |
6056 | | String::new() |
6057 | | }; |
6058 | | |
6059 | | let without_padding = |
6060 | | args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0); |
6061 | | assert!(size >= without_padding, "{}", assert_msg); |
6062 | | assert_eq!(align, args.align.get(), "{}", assert_msg); |
6063 | | |
6064 | | // This encodes the most important part of the test: our |
6065 | | // understanding of how Rust determines the layout of repr(C) |
6066 | | // types. Sized repr(C) types are trivial, but DST types have |
6067 | | // some subtlety. Note that: |
6068 | | // - For sized types, `without_padding` is just the size of the |
6069 | | // type that we constructed for `Foo`. Since we may have |
6070 | | // requested a larger alignment, `Foo` may actually be larger |
6071 | | // than this, hence `padding_needed_for`. |
6072 | | // - For unsized types, `without_padding` is dynamically |
6073 | | // computed from the offset, the element size, and element |
6074 | | // count. We expect that the size of the object should be |
6075 | | // `offset + elem_size * elems` rounded up to the next |
6076 | | // alignment. |
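     | | // For example: `offset == 1`, `elem_size == 2`, `elems == 3`, and
     | | // `align == 4` give `without_padding == 1 + 2 * 3 == 7`, which
     | | // rounds up to an expected size of 8.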
6077 | | let expected_size = without_padding |
6078 | | + util::core_layout::padding_needed_for(without_padding, args.align); |
6079 | | assert_eq!(expected_size, size, "{}", assert_msg); |
6080 | | |
6081 | | // For zero-sized element types, |
6082 | | // `validate_cast_and_convert_metadata` just panics, so we skip |
6083 | | // testing those types. |
6084 | | if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) { |
6085 | | let addr = ptr.addr().get(); |
6086 | | let (got_elems, got_split_at) = layout |
6087 | | .validate_cast_and_convert_metadata(addr, size, _CastType::_Prefix) |
6088 | | .unwrap(); |
6089 | | // Avoid expensive allocation when running under Miri. |
6090 | | let assert_msg = if !cfg!(miri) { |
6091 | | format!( |
6092 | | "{}\nvalidate_cast_and_convert_metadata({addr}, {size})", |
6093 | | assert_msg |
6094 | | ) |
6095 | | } else { |
6096 | | String::new() |
6097 | | }; |
6098 | | assert_eq!(got_split_at, size, "{}", assert_msg); |
6099 | | if dst { |
6100 | | assert!(got_elems >= elems, "{}", assert_msg); |
6101 | | if got_elems != elems { |
6102 | | // If `validate_cast_and_convert_metadata` |
6103 | | // returned more elements than `elems`, that |
6104 | | // means that `elems` is not the maximum number |
6105 | | // of elements that can fit in `size` - in other |
6106 | | // words, there is enough padding at the end of |
6107 | | // the value to fit at least one more element. |
6108 | | // If we use this metadata to synthesize a |
6109 | | // pointer, despite having a different element |
6110 | | // count, we still expect it to have the same |
6111 | | // size. |
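     | |                     //
     | |                     // (E.g., sketching with offset == 1, elem_size == 1,
     | |                     // align == 2, elems == 2: the value occupies 3 bytes
     | |                     // padded to a size of 4, so the padding byte fits one
     | |                     // more element and got_elems == 3 -- yet a 3-element
     | |                     // value is also 4 bytes.)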
6112 | | let got_ptr = with_elems(got_elems); |
6113 | | // SAFETY: `got_ptr` is a pointer to a valid `T`. |
6114 | | let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) }; |
6115 | | assert_eq!(size_of_got_ptr, size, "{}", assert_msg); |
6116 | | } |
6117 | | } else { |
6118 | | // For sized casts, the returned element value is |
6119 | | // technically meaningless, and we don't guarantee any |
6120 | | // particular value. In practice, it's always zero. |
6121 | | assert_eq!(got_elems, 0, "{}", assert_msg) |
6122 | | } |
6123 | | } |
6124 | | } |
6125 | | } |
6126 | | |
6127 | | macro_rules! validate_against_rust { |
6128 | | ($offset:literal, $align:literal $(, $elem_size:literal)?) => {{ |
6129 | | #[repr(C, align($align))] |
6130 | | struct Foo([u8; $offset]$(, [[u8; $elem_size]])?); |
6131 | | |
6132 | | let args = MacroArgs { |
6133 | | offset: $offset, |
6134 | | align: $align.try_into().unwrap(), |
6135 | | elem_size: { |
6136 | | #[allow(unused)] |
6137 | | let ret = None::<usize>; |
6138 | | $(let ret = Some($elem_size);)? |
6139 | | ret |
6140 | | } |
6141 | | }; |
6142 | | |
6143 | | #[repr(C, align($align))] |
6144 | | struct FooAlign; |
6145 | | // Create an aligned buffer to use in order to synthesize |
6146 | | // pointers to `Foo`. We don't ever load values from these |
6147 | | // pointers - we just do arithmetic on them - so having a "real" |
6148 | | // block of memory as opposed to a validly-aligned-but-dangling |
6149 | | // pointer is only necessary to make Miri happy since we run it |
6150 | | // with "strict provenance" checking enabled. |
6151 | | let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]); |
6152 | | let with_elems = |elems| { |
6153 | | let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems); |
6154 | | #[allow(clippy::as_conversions)] |
6155 | | NonNull::new(slc.as_ptr() as *mut Foo).unwrap() |
6156 | | }; |
6157 | | let addr_of_slice_field = { |
6158 | | #[allow(unused)] |
6159 | | let f = None::<fn(NonNull<Foo>) -> NonNull<u8>>; |
6160 | | $( |
6161 | | // SAFETY: `test` promises to only call `f` with a `ptr` |
6162 | | // to a valid `Foo`. |
6163 | | let f: Option<fn(NonNull<Foo>) -> NonNull<u8>> = Some(|ptr: NonNull<Foo>| unsafe { |
6164 | | NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::<u8>() |
6165 | | }); |
6166 | | let _ = $elem_size; |
6167 | | )? |
6168 | | f |
6169 | | }; |
6170 | | |
6171 | | test::<Foo, _>(args, with_elems, addr_of_slice_field); |
6172 | | }}; |
6173 | | } |
6174 | | |
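     | |         // As an illustration, `validate_against_rust!(1, 2, 4)` checks the
     | |         // computed layout against (roughly) this generated type:
     | |         //
     | |         //     #[repr(C, align(2))]
     | |         //     struct Foo([u8; 1], [[u8; 4]]);
     | |         //
     | |         // i.e., a DST with a 1-byte prefix at offset 0, alignment 2, and a
     | |         // trailing slice of 4-byte elements.
     | |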
6175 | | // Every permutation of: |
6176 | | // - offset in [0, 4] |
6177 | | // - align in [1, 16] |
6178 | | // - elem_size in [0, 4] (plus no elem_size) |
6179 | | validate_against_rust!(0, 1); |
6180 | | validate_against_rust!(0, 1, 0); |
6181 | | validate_against_rust!(0, 1, 1); |
6182 | | validate_against_rust!(0, 1, 2); |
6183 | | validate_against_rust!(0, 1, 3); |
6184 | | validate_against_rust!(0, 1, 4); |
6185 | | validate_against_rust!(0, 2); |
6186 | | validate_against_rust!(0, 2, 0); |
6187 | | validate_against_rust!(0, 2, 1); |
6188 | | validate_against_rust!(0, 2, 2); |
6189 | | validate_against_rust!(0, 2, 3); |
6190 | | validate_against_rust!(0, 2, 4); |
6191 | | validate_against_rust!(0, 4); |
6192 | | validate_against_rust!(0, 4, 0); |
6193 | | validate_against_rust!(0, 4, 1); |
6194 | | validate_against_rust!(0, 4, 2); |
6195 | | validate_against_rust!(0, 4, 3); |
6196 | | validate_against_rust!(0, 4, 4); |
6197 | | validate_against_rust!(0, 8); |
6198 | | validate_against_rust!(0, 8, 0); |
6199 | | validate_against_rust!(0, 8, 1); |
6200 | | validate_against_rust!(0, 8, 2); |
6201 | | validate_against_rust!(0, 8, 3); |
6202 | | validate_against_rust!(0, 8, 4); |
6203 | | validate_against_rust!(0, 16); |
6204 | | validate_against_rust!(0, 16, 0); |
6205 | | validate_against_rust!(0, 16, 1); |
6206 | | validate_against_rust!(0, 16, 2); |
6207 | | validate_against_rust!(0, 16, 3); |
6208 | | validate_against_rust!(0, 16, 4); |
6209 | | validate_against_rust!(1, 1); |
6210 | | validate_against_rust!(1, 1, 0); |
6211 | | validate_against_rust!(1, 1, 1); |
6212 | | validate_against_rust!(1, 1, 2); |
6213 | | validate_against_rust!(1, 1, 3); |
6214 | | validate_against_rust!(1, 1, 4); |
6215 | | validate_against_rust!(1, 2); |
6216 | | validate_against_rust!(1, 2, 0); |
6217 | | validate_against_rust!(1, 2, 1); |
6218 | | validate_against_rust!(1, 2, 2); |
6219 | | validate_against_rust!(1, 2, 3); |
6220 | | validate_against_rust!(1, 2, 4); |
6221 | | validate_against_rust!(1, 4); |
6222 | | validate_against_rust!(1, 4, 0); |
6223 | | validate_against_rust!(1, 4, 1); |
6224 | | validate_against_rust!(1, 4, 2); |
6225 | | validate_against_rust!(1, 4, 3); |
6226 | | validate_against_rust!(1, 4, 4); |
6227 | | validate_against_rust!(1, 8); |
6228 | | validate_against_rust!(1, 8, 0); |
6229 | | validate_against_rust!(1, 8, 1); |
6230 | | validate_against_rust!(1, 8, 2); |
6231 | | validate_against_rust!(1, 8, 3); |
6232 | | validate_against_rust!(1, 8, 4); |
6233 | | validate_against_rust!(1, 16); |
6234 | | validate_against_rust!(1, 16, 0); |
6235 | | validate_against_rust!(1, 16, 1); |
6236 | | validate_against_rust!(1, 16, 2); |
6237 | | validate_against_rust!(1, 16, 3); |
6238 | | validate_against_rust!(1, 16, 4); |
6239 | | validate_against_rust!(2, 1); |
6240 | | validate_against_rust!(2, 1, 0); |
6241 | | validate_against_rust!(2, 1, 1); |
6242 | | validate_against_rust!(2, 1, 2); |
6243 | | validate_against_rust!(2, 1, 3); |
6244 | | validate_against_rust!(2, 1, 4); |
6245 | | validate_against_rust!(2, 2); |
6246 | | validate_against_rust!(2, 2, 0); |
6247 | | validate_against_rust!(2, 2, 1); |
6248 | | validate_against_rust!(2, 2, 2); |
6249 | | validate_against_rust!(2, 2, 3); |
6250 | | validate_against_rust!(2, 2, 4); |
6251 | | validate_against_rust!(2, 4); |
6252 | | validate_against_rust!(2, 4, 0); |
6253 | | validate_against_rust!(2, 4, 1); |
6254 | | validate_against_rust!(2, 4, 2); |
6255 | | validate_against_rust!(2, 4, 3); |
6256 | | validate_against_rust!(2, 4, 4); |
6257 | | validate_against_rust!(2, 8); |
6258 | | validate_against_rust!(2, 8, 0); |
6259 | | validate_against_rust!(2, 8, 1); |
6260 | | validate_against_rust!(2, 8, 2); |
6261 | | validate_against_rust!(2, 8, 3); |
6262 | | validate_against_rust!(2, 8, 4); |
6263 | | validate_against_rust!(2, 16); |
6264 | | validate_against_rust!(2, 16, 0); |
6265 | | validate_against_rust!(2, 16, 1); |
6266 | | validate_against_rust!(2, 16, 2); |
6267 | | validate_against_rust!(2, 16, 3); |
6268 | | validate_against_rust!(2, 16, 4); |
6269 | | validate_against_rust!(3, 1); |
6270 | | validate_against_rust!(3, 1, 0); |
6271 | | validate_against_rust!(3, 1, 1); |
6272 | | validate_against_rust!(3, 1, 2); |
6273 | | validate_against_rust!(3, 1, 3); |
6274 | | validate_against_rust!(3, 1, 4); |
6275 | | validate_against_rust!(3, 2); |
6276 | | validate_against_rust!(3, 2, 0); |
6277 | | validate_against_rust!(3, 2, 1); |
6278 | | validate_against_rust!(3, 2, 2); |
6279 | | validate_against_rust!(3, 2, 3); |
6280 | | validate_against_rust!(3, 2, 4); |
6281 | | validate_against_rust!(3, 4); |
6282 | | validate_against_rust!(3, 4, 0); |
6283 | | validate_against_rust!(3, 4, 1); |
6284 | | validate_against_rust!(3, 4, 2); |
6285 | | validate_against_rust!(3, 4, 3); |
6286 | | validate_against_rust!(3, 4, 4); |
6287 | | validate_against_rust!(3, 8); |
6288 | | validate_against_rust!(3, 8, 0); |
6289 | | validate_against_rust!(3, 8, 1); |
6290 | | validate_against_rust!(3, 8, 2); |
6291 | | validate_against_rust!(3, 8, 3); |
6292 | | validate_against_rust!(3, 8, 4); |
6293 | | validate_against_rust!(3, 16); |
6294 | | validate_against_rust!(3, 16, 0); |
6295 | | validate_against_rust!(3, 16, 1); |
6296 | | validate_against_rust!(3, 16, 2); |
6297 | | validate_against_rust!(3, 16, 3); |
6298 | | validate_against_rust!(3, 16, 4); |
6299 | | validate_against_rust!(4, 1); |
6300 | | validate_against_rust!(4, 1, 0); |
6301 | | validate_against_rust!(4, 1, 1); |
6302 | | validate_against_rust!(4, 1, 2); |
6303 | | validate_against_rust!(4, 1, 3); |
6304 | | validate_against_rust!(4, 1, 4); |
6305 | | validate_against_rust!(4, 2); |
6306 | | validate_against_rust!(4, 2, 0); |
6307 | | validate_against_rust!(4, 2, 1); |
6308 | | validate_against_rust!(4, 2, 2); |
6309 | | validate_against_rust!(4, 2, 3); |
6310 | | validate_against_rust!(4, 2, 4); |
6311 | | validate_against_rust!(4, 4); |
6312 | | validate_against_rust!(4, 4, 0); |
6313 | | validate_against_rust!(4, 4, 1); |
6314 | | validate_against_rust!(4, 4, 2); |
6315 | | validate_against_rust!(4, 4, 3); |
6316 | | validate_against_rust!(4, 4, 4); |
6317 | | validate_against_rust!(4, 8); |
6318 | | validate_against_rust!(4, 8, 0); |
6319 | | validate_against_rust!(4, 8, 1); |
6320 | | validate_against_rust!(4, 8, 2); |
6321 | | validate_against_rust!(4, 8, 3); |
6322 | | validate_against_rust!(4, 8, 4); |
6323 | | validate_against_rust!(4, 16); |
6324 | | validate_against_rust!(4, 16, 0); |
6325 | | validate_against_rust!(4, 16, 1); |
6326 | | validate_against_rust!(4, 16, 2); |
6327 | | validate_against_rust!(4, 16, 3); |
6328 | | validate_against_rust!(4, 16, 4); |
6329 | | } |
6330 | | |
6331 | | #[test] |
6332 | | fn test_known_layout() { |
6333 | | // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout. |
6334 | | // Test that `PhantomData<$ty>` has the same layout as `()` regardless |
6335 | | // of `$ty`. |
6336 | | macro_rules! test { |
6337 | | ($ty:ty, $expect:expr) => { |
6338 | | let expect = $expect; |
6339 | | assert_eq!(<$ty as KnownLayout>::LAYOUT, expect); |
6340 | | assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect); |
6341 | | assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT); |
6342 | | }; |
6343 | | } |
6344 | | |
6345 | | let layout = |offset, align, _trailing_slice_elem_size| DstLayout { |
6346 | | align: NonZeroUsize::new(align).unwrap(), |
6347 | | size_info: match _trailing_slice_elem_size { |
6348 | | None => SizeInfo::Sized { _size: offset }, |
6349 | | Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout { |
6350 | | _offset: offset, |
6351 | | _elem_size: elem_size, |
6352 | | }), |
6353 | | }, |
6354 | | }; |
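     | |         // For instance, `layout(8, 8, None)` describes a sized type with
     | |         // size 8 and alignment 8, while `layout(0, 1, Some(1))` describes a
     | |         // bare slice DST like `[u8]`.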
6355 | | |
6356 | | test!((), layout(0, 1, None)); |
6357 | | test!(u8, layout(1, 1, None)); |
6358 | | // Use `align_of` because `u64` alignment may be smaller than 8 on some |
6359 | | // platforms. |
6360 | | test!(u64, layout(8, mem::align_of::<u64>(), None)); |
6361 | | test!(AU64, layout(8, 8, None)); |
6362 | | |
6363 | | test!(Option<&'static ()>, usize::LAYOUT); |
6364 | | |
6365 | | test!([()], layout(0, 1, Some(0))); |
6366 | | test!([u8], layout(0, 1, Some(1))); |
6367 | | test!(str, layout(0, 1, Some(1))); |
6368 | | } |
6369 | | |
6370 | | #[cfg(feature = "derive")] |
6371 | | #[test] |
6372 | | fn test_known_layout_derive() { |
6373 | | // In this and other files (`late_compile_pass.rs`, |
6374 | | // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure |
6375 | | // modes of `derive(KnownLayout)` for the following combination of |
6376 | | // properties: |
6377 | | // |
6378 | | // +------------+--------------------------------------+-----------+ |
6379 | | // | | trailing field properties | | |
6380 | | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
6381 | | // |------------+----------+----------------+----------+-----------| |
6382 | | // | N | N | N | N | KL00 | |
6383 | | // | N | N | N | Y | KL01 | |
6384 | | // | N | N | Y | N | KL02 | |
6385 | | // | N | N | Y | Y | KL03 | |
6386 | | // | N | Y | N | N | KL04 | |
6387 | | // | N | Y | N | Y | KL05 | |
6388 | | // | N | Y | Y | N | KL06 | |
6389 | | // | N | Y | Y | Y | KL07 | |
6390 | | // | Y | N | N | N | KL08 | |
6391 | | // | Y | N | N | Y | KL09 | |
6392 | | // | Y | N | Y | N | KL10 | |
6393 | | // | Y | N | Y | Y | KL11 | |
6394 | | // | Y | Y | N | N | KL12 | |
6395 | | // | Y | Y | N | Y | KL13 | |
6396 | | // | Y | Y | Y | N | KL14 | |
6397 | | // | Y | Y | Y | Y | KL15 | |
6398 | | // +------------+----------+----------------+----------+-----------+ |
6399 | | |
6400 | | struct NotKnownLayout<T = ()> { |
6401 | | _t: T, |
6402 | | } |
6403 | | |
6404 | | #[derive(KnownLayout)] |
6405 | | #[repr(C)] |
6406 | | struct AlignSize<const ALIGN: usize, const SIZE: usize> |
6407 | | where |
6408 | | elain::Align<ALIGN>: elain::Alignment, |
6409 | | { |
6410 | | _align: elain::Align<ALIGN>, |
6411 | | _size: [u8; SIZE], |
6412 | | } |
6413 | | |
6414 | | type AU16 = AlignSize<2, 2>; |
6415 | | type AU32 = AlignSize<4, 4>; |
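     | |         // (`AU16` and `AU32` thus model types with the size and alignment of
     | |         // `u16` and `u32` respectively, while remaining distinct types.)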
6416 | | |
6417 | | fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {} |
6418 | | |
6419 | | let sized_layout = |align, size| DstLayout { |
6420 | | align: NonZeroUsize::new(align).unwrap(), |
6421 | | size_info: SizeInfo::Sized { _size: size }, |
6422 | | }; |
6423 | | |
6424 | | let unsized_layout = |align, elem_size, offset| DstLayout { |
6425 | | align: NonZeroUsize::new(align).unwrap(), |
6426 | | size_info: SizeInfo::SliceDst(TrailingSliceLayout { |
6427 | | _offset: offset, |
6428 | | _elem_size: elem_size, |
6429 | | }), |
6430 | | }; |
6431 | | |
6432 | | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
6433 | | // | N | N | N | Y | KL01 | |
6434 | | #[derive(KnownLayout)] |
6435 | | #[allow(dead_code)] // fields are never read |
6436 | | struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>); |
6437 | | |
6438 | | let expected = DstLayout::for_type::<KL01>(); |
6439 | | |
6440 | | assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected); |
6441 | | assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8)); |
6442 | | |
6443 | | // ...with `align(N)`: |
6444 | | #[derive(KnownLayout)] |
6445 | | #[repr(align(64))] |
6446 | | #[allow(dead_code)] // fields are never read |
6447 | | struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>); |
6448 | | |
6449 | | let expected = DstLayout::for_type::<KL01Align>(); |
6450 | | |
6451 | | assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected); |
6452 | | assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64)); |
6453 | | |
6454 | | // ...with `packed`: |
6455 | | #[derive(KnownLayout)] |
6456 | | #[repr(packed)] |
6457 | | #[allow(dead_code)] // fields are never read |
6458 | | struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>); |
6459 | | |
6460 | | let expected = DstLayout::for_type::<KL01Packed>(); |
6461 | | |
6462 | | assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected); |
6463 | | assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6)); |
6464 | | |
6465 | | // ...with `packed(N)`: |
6466 | | #[derive(KnownLayout)] |
6467 | | #[repr(packed(2))] |
6468 | | #[allow(dead_code)] // fields are never read |
6469 | | struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>); |
6470 | | |
6471 | | assert_impl_all!(KL01PackedN: KnownLayout); |
6472 | | |
6473 | | let expected = DstLayout::for_type::<KL01PackedN>(); |
6474 | | |
6475 | | assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected); |
6476 | | assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6)); |
6477 | | |
6478 | | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
6479 | | // | N | N | Y | Y | KL03 | |
6480 | | #[derive(KnownLayout)] |
6481 | | #[allow(dead_code)] // fields are never read |
6482 | | struct KL03(NotKnownLayout, u8); |
6483 | | |
6484 | | let expected = DstLayout::for_type::<KL03>(); |
6485 | | |
6486 | | assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected); |
6487 | | assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1)); |
6488 | | |
6489 | | // ... with `align(N)` |
6490 | | #[derive(KnownLayout)] |
6491 | | #[repr(align(64))] |
6492 | | #[allow(dead_code)] // fields are never read |
6493 | | struct KL03Align(NotKnownLayout<AU32>, u8); |
6494 | | |
6495 | | let expected = DstLayout::for_type::<KL03Align>(); |
6496 | | |
6497 | | assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected); |
6498 | | assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64)); |
6499 | | |
6500 | | // ... with `packed`: |
6501 | | #[derive(KnownLayout)] |
6502 | | #[repr(packed)] |
6503 | | #[allow(dead_code)] // fields are never read |
6504 | | struct KL03Packed(NotKnownLayout<AU32>, u8); |
6505 | | |
6506 | | let expected = DstLayout::for_type::<KL03Packed>(); |
6507 | | |
6508 | | assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected); |
6509 | | assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5)); |
6510 | | |
6511 | | // ... with `packed(N)` |
6512 | | #[derive(KnownLayout)] |
6513 | | #[repr(packed(2))] |
6514 | | #[allow(dead_code)] // fields are never read |
6515 | | struct KL03PackedN(NotKnownLayout<AU32>, u8); |
6516 | | |
6517 | | assert_impl_all!(KL03PackedN: KnownLayout); |
6518 | | |
6519 | | let expected = DstLayout::for_type::<KL03PackedN>(); |
6520 | | |
6521 | | assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected); |
6522 | | assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6)); |
6523 | | |
6524 | | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
6525 | | // | N | Y | N | Y | KL05 | |
6526 | | #[derive(KnownLayout)] |
6527 | | #[allow(dead_code)] // fields are never read |
6528 | | struct KL05<T>(u8, T); |
6529 | | |
6530 | | fn _test_kl05<T>(t: T) -> impl KnownLayout { |
6531 | | KL05(0u8, t) |
6532 | | } |
6533 | | |
6534 | | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
6535 | | // | N | Y | Y | Y | KL07 | |
6536 | | #[derive(KnownLayout)] |
6537 | | #[allow(dead_code)] // fields are never read |
6538 | | struct KL07<T: KnownLayout>(u8, T); |
6539 | | |
6540 | | fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout { |
6541 | | let _ = KL07(0u8, t); |
6542 | | } |
6543 | | |
6544 | | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
6545 | | // | Y | N | Y | N | KL10 | |
6546 | | #[derive(KnownLayout)] |
6547 | | #[repr(C)] |
6548 | | struct KL10(NotKnownLayout<AU32>, [u8]); |
6549 | | |
6550 | | let expected = DstLayout::new_zst(None) |
6551 | | .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None) |
6552 | | .extend(<[u8] as KnownLayout>::LAYOUT, None) |
6553 | | .pad_to_align(); |
6554 | | |
6555 | | assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected); |
6556 | | assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4)); |
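     | |         // (Sketch of that computation: extending a ZST by
     | |         // `NotKnownLayout<AU32>` (size 4, align 4) puts the trailing `[u8]`
     | |         // (element size 1) at offset 4, so the overall layout is
     | |         // `unsized_layout(4, 1, 4)`: align 4, element size 1, offset 4.)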
6557 | | |
6558 | | // ...with `align(N)`: |
6559 | | #[derive(KnownLayout)] |
6560 | | #[repr(C, align(64))] |
6561 | | struct KL10Align(NotKnownLayout<AU32>, [u8]); |
6562 | | |
6563 | | let repr_align = NonZeroUsize::new(64); |
6564 | | |
6565 | | let expected = DstLayout::new_zst(repr_align) |
6566 | | .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None) |
6567 | | .extend(<[u8] as KnownLayout>::LAYOUT, None) |
6568 | | .pad_to_align(); |
6569 | | |
6570 | | assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected); |
6571 | | assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4)); |
6572 | | |
6573 | | // ...with `packed`: |
6574 | | #[derive(KnownLayout)] |
6575 | | #[repr(C, packed)] |
6576 | | struct KL10Packed(NotKnownLayout<AU32>, [u8]); |
6577 | | |
6578 | | let repr_packed = NonZeroUsize::new(1); |
6579 | | |
6580 | | let expected = DstLayout::new_zst(None) |
6581 | | .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed) |
6582 | | .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed) |
6583 | | .pad_to_align(); |
6584 | | |
6585 | | assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected); |
6586 | | assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4)); |
6587 | | |
6588 | | // ...with `packed(N)`: |
6589 | | #[derive(KnownLayout)] |
6590 | | #[repr(C, packed(2))] |
6591 | | struct KL10PackedN(NotKnownLayout<AU32>, [u8]); |
6592 | | |
6593 | | let repr_packed = NonZeroUsize::new(2); |
6594 | | |
6595 | | let expected = DstLayout::new_zst(None) |
6596 | | .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed) |
6597 | | .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed) |
6598 | | .pad_to_align(); |
6599 | | |
6600 | | assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected); |
6601 | | assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4)); |
6602 | | |
6603 | | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
6604 | | // | Y | N | Y | Y | KL11 | |
6605 | | #[derive(KnownLayout)] |
6606 | | #[repr(C)] |
6607 | | struct KL11(NotKnownLayout<AU64>, u8); |
6608 | | |
6609 | | let expected = DstLayout::new_zst(None) |
6610 | | .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None) |
6611 | | .extend(<u8 as KnownLayout>::LAYOUT, None) |
6612 | | .pad_to_align(); |
6613 | | |
6614 | | assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected); |
6615 | | assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16)); |
6616 | | |
6617 | | // ...with `align(N)`: |
6618 | | #[derive(KnownLayout)] |
6619 | | #[repr(C, align(64))] |
6620 | | struct KL11Align(NotKnownLayout<AU64>, u8); |
6621 | | |
6622 | | let repr_align = NonZeroUsize::new(64); |
6623 | | |
6624 | | let expected = DstLayout::new_zst(repr_align) |
6625 | | .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None) |
6626 | | .extend(<u8 as KnownLayout>::LAYOUT, None) |
6627 | | .pad_to_align(); |
6628 | | |
6629 | | assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected); |
6630 | | assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64)); |
6631 | | |
6632 | | // ...with `packed`: |
6633 | | #[derive(KnownLayout)] |
6634 | | #[repr(C, packed)] |
6635 | | struct KL11Packed(NotKnownLayout<AU64>, u8); |
6636 | | |
6637 | | let repr_packed = NonZeroUsize::new(1); |
6638 | | |
6639 | | let expected = DstLayout::new_zst(None) |
6640 | | .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed) |
6641 | | .extend(<u8 as KnownLayout>::LAYOUT, repr_packed) |
6642 | | .pad_to_align(); |
6643 | | |
6644 | | assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected); |
6645 | | assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9)); |
6646 | | |
6647 | | // ...with `packed(N)`: |
6648 | | #[derive(KnownLayout)] |
6649 | | #[repr(C, packed(2))] |
6650 | | struct KL11PackedN(NotKnownLayout<AU64>, u8); |
6651 | | |
6652 | | let repr_packed = NonZeroUsize::new(2); |
6653 | | |
6654 | | let expected = DstLayout::new_zst(None) |
6655 | | .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed) |
6656 | | .extend(<u8 as KnownLayout>::LAYOUT, repr_packed) |
6657 | | .pad_to_align(); |
6658 | | |
6659 | | assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected); |
6660 | | assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10)); |
6661 | | |
6662 | | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
6663 | | // | Y | Y | Y | N | KL14 | |
6664 | | #[derive(KnownLayout)] |
6665 | | #[repr(C)] |
6666 | | struct KL14<T: ?Sized + KnownLayout>(u8, T); |
6667 | | |
6668 | | fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) { |
6669 | | _assert_kl(kl) |
6670 | | } |
6671 | | |
6672 | | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
6673 | | // | Y | Y | Y | Y | KL15 | |
6674 | | #[derive(KnownLayout)] |
6675 | | #[repr(C)] |
6676 | | struct KL15<T: KnownLayout>(u8, T); |
6677 | | |
6678 | | fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout { |
6679 | | let _ = KL15(0u8, t); |
6680 | | } |
6681 | | |
6682 | | // Test a variety of combinations of field types: |
6683 | | // - () |
6684 | | // - u8 |
6685 | | // - AU16 |
6686 | | // - [()] |
6687 | | // - [u8] |
6688 | | // - [AU16] |
6689 | | |
6690 | | #[allow(clippy::upper_case_acronyms)] |
6691 | | #[derive(KnownLayout)] |
6692 | | #[repr(C)] |
6693 | | struct KLTU<T, U: ?Sized>(T, U); |
6694 | | |
6695 | | assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0)); |
6696 | | |
6697 | | assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1)); |
6698 | | |
6699 | | assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2)); |
6700 | | |
6701 | | assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0)); |
6702 | | |
6703 | | assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0)); |
6704 | | |
6705 | | assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0)); |
6706 | | |
6707 | | assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1)); |
6708 | | |
6709 | | assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2)); |
6710 | | |
6711 | | assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4)); |
6712 | | |
6713 | | assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1)); |
6714 | | |
6715 | | assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1)); |
6716 | | |
6717 | | assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2)); |
6718 | | |
6719 | | assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2)); |
6720 | | |
6721 | | assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4)); |
6722 | | |
6723 | | assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4)); |
6724 | | |
6725 | | assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2)); |
6726 | | |
6727 | | assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2)); |
6728 | | |
6729 | | assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2)); |
6730 | | |
6731 | | // Test a variety of field counts. |
6732 | | |
6733 | | #[derive(KnownLayout)] |
6734 | | #[repr(C)] |
6735 | | struct KLF0; |
6736 | | |
6737 | | assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0)); |
6738 | | |
6739 | | #[derive(KnownLayout)] |
6740 | | #[repr(C)] |
6741 | | struct KLF1([u8]); |
6742 | | |
6743 | | assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0)); |
6744 | | |
6745 | | #[derive(KnownLayout)] |
6746 | | #[repr(C)] |
6747 | | struct KLF2(NotKnownLayout<u8>, [u8]); |
6748 | | |
6749 | | assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1)); |
6750 | | |
6751 | | #[derive(KnownLayout)] |
6752 | | #[repr(C)] |
6753 | | struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]); |
6754 | | |
6755 | | assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4)); |
6756 | | |
6757 | | #[derive(KnownLayout)] |
6758 | | #[repr(C)] |
6759 | | struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]); |
6760 | | |
6761 | | assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8)); |
6762 | | } |
6763 | | |
6764 | | #[test] |
6765 | | fn test_object_safety() { |
6766 | | fn _takes_from_zeroes(_: &dyn FromZeroes) {} |
6767 | | fn _takes_from_bytes(_: &dyn FromBytes) {} |
6768 | | fn _takes_unaligned(_: &dyn Unaligned) {} |
6769 | | } |
6770 | | |
6771 | | #[test] |
6772 | | fn test_from_zeroes_only() { |
6773 | | // Test types that implement `FromZeroes` but not `FromBytes`. |
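     | |         //
     | |         // (`bool` and `char` qualify because their all-zeroes bit patterns --
     | |         // `false` and `'\0'` -- are valid, while most other bit patterns are
     | |         // not.)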
6774 | | |
6775 | | assert!(!bool::new_zeroed()); |
6776 | | assert_eq!(char::new_zeroed(), '\0'); |
6777 | | |
6778 | | #[cfg(feature = "alloc")] |
6779 | | { |
6780 | | assert_eq!(bool::new_box_zeroed(), Box::new(false)); |
6781 | | assert_eq!(char::new_box_zeroed(), Box::new('\0')); |
6782 | | |
6783 | | assert_eq!(bool::new_box_slice_zeroed(3).as_ref(), [false, false, false]); |
6784 | | assert_eq!(char::new_box_slice_zeroed(3).as_ref(), ['\0', '\0', '\0']); |
6785 | | |
6786 | | assert_eq!(bool::new_vec_zeroed(3).as_ref(), [false, false, false]); |
6787 | | assert_eq!(char::new_vec_zeroed(3).as_ref(), ['\0', '\0', '\0']); |
6788 | | } |
6789 | | |
6790 | | let mut string = "hello".to_string(); |
6791 | | let s: &mut str = string.as_mut(); |
6792 | | assert_eq!(s, "hello"); |
6793 | | s.zero(); |
6794 | | assert_eq!(s, "\0\0\0\0\0"); |
6795 | | } |
6796 | | |
6797 | | #[test] |
6798 | | fn test_read_write() { |
6799 | | const VAL: u64 = 0x12345678; |
6800 | | #[cfg(target_endian = "big")] |
6801 | | const VAL_BYTES: [u8; 8] = VAL.to_be_bytes(); |
6802 | | #[cfg(target_endian = "little")] |
6803 | | const VAL_BYTES: [u8; 8] = VAL.to_le_bytes(); |
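     | |         // (`read_from` and `write_to` operate on native-endian bytes, so
     | |         // `VAL_BYTES` is defined per-endianness to always round-trip to
     | |         // `VAL`.)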
6804 | | |
6805 | | // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`. |
6806 | | |
6807 | | assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL)); |
6808 | | // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all |
6809 | | // zeroes. |
6810 | | let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]); |
6811 | | assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Some(VAL)); |
6812 | | assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Some(0)); |
6813 | | // The first 8 bytes are all zeroes and the second 8 bytes are from |
6814 | |         // `VAL_BYTES`.
6815 | | let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]); |
6816 | | assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0)); |
6817 | | assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL)); |
6818 | | |
6819 | | // Test `AsBytes::{write_to, write_to_prefix, write_to_suffix}`. |
6820 | | |
6821 | | let mut bytes = [0u8; 8]; |
6822 | | assert_eq!(VAL.write_to(&mut bytes[..]), Some(())); |
6823 | | assert_eq!(bytes, VAL_BYTES); |
6824 | | let mut bytes = [0u8; 16]; |
6825 | | assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Some(())); |
6826 | | let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]); |
6827 | | assert_eq!(bytes, want); |
6828 | | let mut bytes = [0u8; 16]; |
6829 | | assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Some(())); |
6830 | | let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]); |
6831 | | assert_eq!(bytes, want); |
6832 | | } |
6833 | | |
6834 | | #[test] |
6835 | | fn test_transmute() { |
6836 | | // Test that memory is transmuted as expected. |
6837 | | let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; |
6838 | | let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; |
6839 | | let x: [[u8; 2]; 4] = transmute!(array_of_u8s); |
6840 | | assert_eq!(x, array_of_arrays); |
6841 | | let x: [u8; 8] = transmute!(array_of_arrays); |
6842 | | assert_eq!(x, array_of_u8s); |
6843 | | |
6844 | | // Test that the source expression's value is forgotten rather than |
6845 | | // dropped. |
6846 | | #[derive(AsBytes)] |
6847 | | #[repr(transparent)] |
6848 | | struct PanicOnDrop(()); |
6849 | | impl Drop for PanicOnDrop { |
6850 | | fn drop(&mut self) { |
6851 | | panic!("PanicOnDrop::drop"); |
6852 | | } |
6853 | | } |
6854 | | #[allow(clippy::let_unit_value)] |
6855 | | let _: () = transmute!(PanicOnDrop(())); |
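     | |         // (If `transmute!` dropped its argument rather than forgetting it,
     | |         // `PanicOnDrop::drop` would run here and panic the test.)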
6856 | | |
6857 | | // Test that `transmute!` is legal in a const context. |
6858 | | const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7]; |
6859 | | const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]]; |
6860 | | const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S); |
6861 | | assert_eq!(X, ARRAY_OF_ARRAYS); |
6862 | | } |
6863 | | |
6864 | | #[test] |
6865 | | fn test_transmute_ref() { |
6866 | | // Test that memory is transmuted as expected. |
6867 | | let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; |
6868 | | let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; |
6869 | | let x: &[[u8; 2]; 4] = transmute_ref!(&array_of_u8s); |
6870 | | assert_eq!(*x, array_of_arrays); |
6871 | | let x: &[u8; 8] = transmute_ref!(&array_of_arrays); |
6872 | | assert_eq!(*x, array_of_u8s); |
6873 | | |
6874 | | // Test that `transmute_ref!` is legal in a const context. |
6875 | | const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7]; |
6876 | | const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]]; |
6877 | | #[allow(clippy::redundant_static_lifetimes)] |
6878 | | const X: &'static [[u8; 2]; 4] = transmute_ref!(&ARRAY_OF_U8S); |
6879 | | assert_eq!(*X, ARRAY_OF_ARRAYS); |
6880 | | |
6881 | | // Test that it's legal to transmute a reference while shrinking the |
6882 | | // lifetime (note that `X` has the lifetime `'static`). |
6883 | | let x: &[u8; 8] = transmute_ref!(X); |
6884 | | assert_eq!(*x, ARRAY_OF_U8S); |
6885 | | |
6886 | | // Test that `transmute_ref!` supports decreasing alignment. |
6887 | | let u = AU64(0); |
6888 | | let array = [0, 0, 0, 0, 0, 0, 0, 0]; |
6889 | | let x: &[u8; 8] = transmute_ref!(&u); |
6890 | | assert_eq!(*x, array); |
6891 | | |
6892 | | // Test that a mutable reference can be turned into an immutable one. |
6893 | | let mut x = 0u8; |
6894 | | #[allow(clippy::useless_transmute)] |
6895 | | let y: &u8 = transmute_ref!(&mut x); |
6896 | | assert_eq!(*y, 0); |
6897 | | } |
6898 | | |
6899 | | #[test] |
6900 | | fn test_transmute_mut() { |
6901 | | // Test that memory is transmuted as expected. |
6902 | | let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; |
6903 | | let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; |
6904 | | let x: &mut [[u8; 2]; 4] = transmute_mut!(&mut array_of_u8s); |
6905 | | assert_eq!(*x, array_of_arrays); |
6906 | | let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays); |
6907 | | assert_eq!(*x, array_of_u8s); |
6908 | | |
6909 | | { |
6910 | | // Test that it's legal to transmute a reference while shrinking the |
6911 | | // lifetime. |
6912 | | let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays); |
6913 | | assert_eq!(*x, array_of_u8s); |
6914 | | } |
6915 | | // Test that `transmute_mut!` supports decreasing alignment. |
6916 | | let mut u = AU64(0); |
6917 | | let array = [0, 0, 0, 0, 0, 0, 0, 0]; |
6918 | | let x: &[u8; 8] = transmute_mut!(&mut u); |
6919 | | assert_eq!(*x, array); |
6920 | | |
6921 | | // Test that a mutable reference can be turned into an immutable one. |
6922 | | let mut x = 0u8; |
6923 | | #[allow(clippy::useless_transmute)] |
6924 | | let y: &u8 = transmute_mut!(&mut x); |
6925 | | assert_eq!(*y, 0); |
6926 | | } |
6927 | | |
6928 | | #[test] |
6929 | | fn test_macros_evaluate_args_once() { |
6930 | | let mut ctr = 0; |
6931 | | let _: usize = transmute!({ |
6932 | | ctr += 1; |
6933 | | 0usize |
6934 | | }); |
6935 | | assert_eq!(ctr, 1); |
6936 | | |
6937 | | let mut ctr = 0; |
6938 | | let _: &usize = transmute_ref!({ |
6939 | | ctr += 1; |
6940 | | &0usize |
6941 | | }); |
6942 | | assert_eq!(ctr, 1); |
6943 | | } |
6944 | | |
6945 | | #[test] |
6946 | | fn test_include_value() { |
6947 | | const AS_U32: u32 = include_value!("../testdata/include_value/data"); |
6948 | | assert_eq!(AS_U32, u32::from_ne_bytes([b'a', b'b', b'c', b'd'])); |
6949 | | const AS_I32: i32 = include_value!("../testdata/include_value/data"); |
6950 | | assert_eq!(AS_I32, i32::from_ne_bytes([b'a', b'b', b'c', b'd'])); |
6951 | | } |
6952 | | |
6953 | | #[test] |
6954 | | fn test_address() { |
6955 | | // Test that the `Deref` and `DerefMut` implementations return a |
6956 | | // reference which points to the right region of memory. |
6957 | | |
6958 | | let buf = [0]; |
6959 | | let r = Ref::<_, u8>::new(&buf[..]).unwrap(); |
6960 | | let buf_ptr = buf.as_ptr(); |
6961 | | let deref_ptr: *const u8 = r.deref(); |
6962 | | assert_eq!(buf_ptr, deref_ptr); |
6963 | | |
6964 | | let buf = [0]; |
6965 | | let r = Ref::<_, [u8]>::new_slice(&buf[..]).unwrap(); |
6966 | | let buf_ptr = buf.as_ptr(); |
6967 | | let deref_ptr = r.deref().as_ptr(); |
6968 | | assert_eq!(buf_ptr, deref_ptr); |
6969 | | } |
6970 | | |
6971 | | // Verify that values written to a `Ref` are properly shared between the |
6972 | | // typed and untyped representations, that reads via `deref` and `read` |
6973 | | // behave the same, and that writes via `deref_mut` and `write` behave the |
6974 | | // same. |
6975 | | fn test_new_helper(mut r: Ref<&mut [u8], AU64>) { |
6976 | | // assert that the value starts at 0 |
6977 | |         // Assert that the value starts out zeroed.
6978 | | assert_eq!(r.read(), AU64(0)); |
6979 | | |
6980 | | // Assert that values written to the typed value are reflected in the |
6981 | | // byte slice. |
6982 | | const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); |
6983 | | *r = VAL1; |
6984 | | assert_eq!(r.bytes(), &VAL1.to_bytes()); |
6985 | | *r = AU64(0); |
6986 | | r.write(VAL1); |
6987 | | assert_eq!(r.bytes(), &VAL1.to_bytes()); |
6988 | | |
6989 | | // Assert that values written to the byte slice are reflected in the |
6990 | | // typed value. |
6991 | | const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1` |
6992 | | r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]); |
6993 | | assert_eq!(*r, VAL2); |
6994 | | assert_eq!(r.read(), VAL2); |
6995 | | } |
6996 | | |
6997 | | // Verify that values written to a `Ref` are properly shared between the |
6998 | | // typed and untyped representations; pass a value with `typed_len` `AU64`s |
6999 | | // backed by an array of `typed_len * 8` bytes. |
7000 | | fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) { |
7001 | | // Assert that the value starts out zeroed. |
7002 | | assert_eq!(&*r, vec![AU64(0); typed_len].as_slice()); |
7003 | | |
7004 | |         // Check that the backing storage is the exact same slice.
7005 | | let untyped_len = typed_len * 8; |
7006 | | assert_eq!(r.bytes().len(), untyped_len); |
7007 | | assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::<u8>()); |
7008 | | |
7009 | | // Assert that values written to the typed value are reflected in the |
7010 | | // byte slice. |
7011 | | const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); |
7012 | | for typed in &mut *r { |
7013 | | *typed = VAL1; |
7014 | | } |
7015 | | assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice()); |
7016 | | |
7017 | | // Assert that values written to the byte slice are reflected in the |
7018 | | // typed value. |
7019 | | const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1 |
7020 | | r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len)); |
7021 | | assert!(r.iter().copied().all(|x| x == VAL2)); |
7022 | | } |
7023 | | |
7024 | | // Verify that values written to a `Ref` are properly shared between the |
7025 | | // typed and untyped representations, that reads via `deref` and `read` |
7026 | | // behave the same, and that writes via `deref_mut` and `write` behave the |
7027 | | // same. |
7028 | | fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) { |
7029 | | // assert that the value starts at 0 |
7030 | |         // Assert that the value starts out zeroed.
7031 | | assert_eq!(r.read(), [0; 8]); |
7032 | | |
7033 | | // Assert that values written to the typed value are reflected in the |
7034 | | // byte slice. |
7035 | | const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00]; |
7036 | | *r = VAL1; |
7037 | | assert_eq!(r.bytes(), &VAL1); |
7038 | | *r = [0; 8]; |
7039 | | r.write(VAL1); |
7040 | | assert_eq!(r.bytes(), &VAL1); |
7041 | | |
7042 | | // Assert that values written to the byte slice are reflected in the |
7043 | | // typed value. |
7044 | | const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1 |
7045 | | r.bytes_mut().copy_from_slice(&VAL2[..]); |
7046 | | assert_eq!(*r, VAL2); |
7047 | | assert_eq!(r.read(), VAL2); |
7048 | | } |
7049 | | |
7050 | | // Verify that values written to a `Ref` are properly shared between the |
7051 | | // typed and untyped representations; pass a value with `len` `u8`s backed |
7052 | | // by an array of `len` bytes. |
7053 | | fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) { |
7054 | | // Assert that the value starts out zeroed. |
7055 | | assert_eq!(&*r, vec![0u8; len].as_slice()); |
7056 | | |
7057 | |         // Check that the backing storage is the exact same slice.
7058 | | assert_eq!(r.bytes().len(), len); |
7059 | | assert_eq!(r.bytes().as_ptr(), r.as_ptr()); |
7060 | | |
7061 | | // Assert that values written to the typed value are reflected in the |
7062 | | // byte slice. |
7063 | | let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>(); |
7064 | | r.copy_from_slice(&expected_bytes); |
7065 | | assert_eq!(r.bytes(), expected_bytes.as_slice()); |
7066 | | |
7067 | | // Assert that values written to the byte slice are reflected in the |
7068 | | // typed value. |
7069 | | for byte in &mut expected_bytes { |
7070 | |             *byte = !*byte; // different from `expected_bytes`
7071 | | } |
7072 | | r.bytes_mut().copy_from_slice(&expected_bytes); |
7073 | | assert_eq!(&*r, expected_bytes.as_slice()); |
7074 | | } |
7075 | | |
7076 | | #[test] |
7077 | | fn test_new_aligned_sized() { |
7078 | | // Test that a properly-aligned, properly-sized buffer works for new, |
7079 | | // new_from_prefix, and new_from_suffix, and that new_from_prefix and |
7080 | | // new_from_suffix return empty slices. Test that a properly-aligned |
7081 | | // buffer whose length is a multiple of the element size works for |
7082 | | // new_slice. Test that xxx_zeroed behaves the same, and zeroes the |
7083 | | // memory. |
7084 | | |
7085 | | // A buffer with an alignment of 8. |
7086 | | let mut buf = Align::<[u8; 8], AU64>::default(); |
7087 | | // `buf.t` should be aligned to 8, so this should always succeed. |
7088 | | test_new_helper(Ref::<_, AU64>::new(&mut buf.t[..]).unwrap()); |
7089 | | let ascending: [u8; 8] = (0..8).collect::<Vec<_>>().try_into().unwrap(); |
7090 | | buf.t = ascending; |
7091 | | test_new_helper(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).unwrap()); |
7092 | | { |
7093 | | // In a block so that `r` and `suffix` don't live too long. |
7094 | | buf.set_default(); |
7095 | | let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap(); |
7096 | | assert!(suffix.is_empty()); |
7097 | | test_new_helper(r); |
7098 | | } |
7099 | | { |
7100 | | buf.t = ascending; |
7101 | | let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap(); |
7102 | | assert!(suffix.is_empty()); |
7103 | | test_new_helper(r); |
7104 | | } |
7105 | | { |
7106 | | buf.set_default(); |
7107 | | let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap(); |
7108 | | assert!(prefix.is_empty()); |
7109 | | test_new_helper(r); |
7110 | | } |
7111 | | { |
7112 | | buf.t = ascending; |
7113 | | let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap(); |
7114 | | assert!(prefix.is_empty()); |
7115 | | test_new_helper(r); |
7116 | | } |
7117 | | |
7118 | | // A buffer with alignment 8 and length 24. We choose this length very |
7119 | | // intentionally: if we instead used length 16, then the prefix and |
7120 | | // suffix lengths would be identical. In the past, we used length 16, |
7121 | | // which resulted in this test failing to discover the bug uncovered in |
7122 | | // #506. |
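     | |         //
     | |         // (Concretely: with a 16-byte buffer, a one-element prefix and a
     | |         // one-element suffix would both split at index 8, so confusing the
     | |         // two split computations could go unnoticed; with 24 bytes the
     | |         // correct split points are 8 and 16.)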
7123 | | let mut buf = Align::<[u8; 24], AU64>::default(); |
7124 | | // `buf.t` should be aligned to 8 and have a length which is a multiple |
7125 | | // of `size_of::<AU64>()`, so this should always succeed. |
7126 | | test_new_helper_slice(Ref::<_, [AU64]>::new_slice(&mut buf.t[..]).unwrap(), 3); |
7127 | | let ascending: [u8; 24] = (0..24).collect::<Vec<_>>().try_into().unwrap(); |
7128 | | // 16 ascending bytes followed by 8 zeros. |
7129 | | let mut ascending_prefix = ascending; |
7130 | | ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]); |
7131 | | // 8 zeros followed by 16 ascending bytes. |
7132 | | let mut ascending_suffix = ascending; |
7133 | | ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]); |
7134 | | test_new_helper_slice(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).unwrap(), 3); |
7135 | | |
7136 | | { |
7137 | | buf.t = ascending_suffix; |
7138 | | let (r, suffix) = Ref::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap(); |
7139 | | assert_eq!(suffix, &ascending[8..]); |
7140 | | test_new_helper_slice(r, 1); |
7141 | | } |
7142 | | { |
7143 | | buf.t = ascending_suffix; |
7144 | | let (r, suffix) = |
7145 | | Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 1).unwrap(); |
7146 | | assert_eq!(suffix, &ascending[8..]); |
7147 | | test_new_helper_slice(r, 1); |
7148 | | } |
7149 | | { |
7150 | | buf.t = ascending_prefix; |
7151 | | let (prefix, r) = Ref::<_, [AU64]>::new_slice_from_suffix(&mut buf.t[..], 1).unwrap(); |
7152 | | assert_eq!(prefix, &ascending[..16]); |
7153 | | test_new_helper_slice(r, 1); |
7154 | | } |
7155 | | { |
7156 | | buf.t = ascending_prefix; |
7157 | | let (prefix, r) = |
7158 | | Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 1).unwrap(); |
7159 | | assert_eq!(prefix, &ascending[..16]); |
7160 | | test_new_helper_slice(r, 1); |
7161 | | } |
7162 | | } |
7163 | | |
7164 | | #[test] |
7165 | | fn test_new_unaligned_sized() { |
7166 | | // Test that an unaligned, properly-sized buffer works for |
7167 | | // `new_unaligned`, `new_unaligned_from_prefix`, and |
7168 | |         // `new_unaligned_from_suffix`, and that `new_unaligned_from_prefix`
7169 | |         // and `new_unaligned_from_suffix` return empty slices. Test that an
7170 | |         // unaligned buffer whose length is a multiple of the element size
7171 | |         // works for `new_slice_unaligned`. Test that `xxx_zeroed` behaves the
7172 | |         // same, and zeroes the memory.
7173 | | |
7174 | | let mut buf = [0u8; 8]; |
7175 | | test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap()); |
7176 | | buf = [0xFFu8; 8]; |
7177 | | test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap()); |
7178 | | { |
7179 | | // In a block so that `r` and `suffix` don't live too long. |
7180 | | buf = [0u8; 8]; |
7181 | | let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap(); |
7182 | | assert!(suffix.is_empty()); |
7183 | | test_new_helper_unaligned(r); |
7184 | | } |
7185 | | { |
7186 | | buf = [0xFFu8; 8]; |
7187 | | let (r, suffix) = |
7188 | | Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap(); |
7189 | | assert!(suffix.is_empty()); |
7190 | | test_new_helper_unaligned(r); |
7191 | | } |
7192 | | { |
7193 | | buf = [0u8; 8]; |
7194 | | let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap(); |
7195 | | assert!(prefix.is_empty()); |
7196 | | test_new_helper_unaligned(r); |
7197 | | } |
7198 | | { |
7199 | | buf = [0xFFu8; 8]; |
7200 | | let (prefix, r) = |
7201 | | Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap(); |
7202 | | assert!(prefix.is_empty()); |
7203 | | test_new_helper_unaligned(r); |
7204 | | } |
7205 | | |
7206 | | let mut buf = [0u8; 16]; |
7207 | |         // `new_slice_unaligned` imposes no alignment requirement, and any
7208 | |         // length is a multiple of `size_of::<u8>()`, so this should always succeed.
7209 | | test_new_helper_slice_unaligned( |
7210 | | Ref::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(), |
7211 | | 16, |
7212 | | ); |
7213 | | buf = [0xFFu8; 16]; |
7214 | | test_new_helper_slice_unaligned( |
7215 | | Ref::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(), |
7216 | | 16, |
7217 | | ); |
7218 | | |
7219 | | { |
7220 | | buf = [0u8; 16]; |
7221 | | let (r, suffix) = |
7222 | | Ref::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8).unwrap(); |
7223 | | assert_eq!(suffix, [0; 8]); |
7224 | | test_new_helper_slice_unaligned(r, 8); |
7225 | | } |
7226 | | { |
7227 | | buf = [0xFFu8; 16]; |
7228 | | let (r, suffix) = |
7229 | | Ref::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8).unwrap(); |
7230 | | assert_eq!(suffix, [0xFF; 8]); |
7231 | | test_new_helper_slice_unaligned(r, 8); |
7232 | | } |
7233 | | { |
7234 | | buf = [0u8; 16]; |
7235 | | let (prefix, r) = |
7236 | | Ref::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8).unwrap(); |
7237 | | assert_eq!(prefix, [0; 8]); |
7238 | | test_new_helper_slice_unaligned(r, 8); |
7239 | | } |
7240 | | { |
7241 | | buf = [0xFFu8; 16]; |
7242 | | let (prefix, r) = |
7243 | | Ref::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8).unwrap(); |
7244 | | assert_eq!(prefix, [0xFF; 8]); |
7245 | | test_new_helper_slice_unaligned(r, 8); |
7246 | | } |
7247 | | } |
7248 | | |
7249 | | #[test] |
7250 | | fn test_new_oversized() { |
7251 | | // Test that a properly-aligned, overly-sized buffer works for |
7252 | | // `new_from_prefix` and `new_from_suffix`, and that they return the |
7253 | | // remainder and prefix of the slice respectively. Test that |
7254 | | // `xxx_zeroed` behaves the same, and zeroes the memory. |
7255 | | |
7256 | | let mut buf = Align::<[u8; 16], AU64>::default(); |
7257 | | { |
7258 | | // In a block so that `r` and `suffix` don't live too long. `buf.t` |
7259 | | // should be aligned to 8, so this should always succeed. |
7260 | | let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap(); |
7261 | | assert_eq!(suffix.len(), 8); |
7262 | | test_new_helper(r); |
7263 | | } |
7264 | | { |
7265 | | buf.t = [0xFFu8; 16]; |
7266 | | // `buf.t` should be aligned to 8, so this should always succeed. |
7267 | | let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap(); |
7268 | | // Assert that the suffix wasn't zeroed. |
7269 | | assert_eq!(suffix, &[0xFFu8; 8]); |
7270 | | test_new_helper(r); |
7271 | | } |
7272 | | { |
7273 | | buf.set_default(); |
7274 | | // `buf.t` should be aligned to 8, so this should always succeed. |
7275 | | let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap(); |
7276 | | assert_eq!(prefix.len(), 8); |
7277 | | test_new_helper(r); |
7278 | | } |
7279 | | { |
7280 | | buf.t = [0xFFu8; 16]; |
7281 | | // `buf.t` should be aligned to 8, so this should always succeed. |
7282 | | let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap(); |
7283 | | // Assert that the prefix wasn't zeroed. |
7284 | | assert_eq!(prefix, &[0xFFu8; 8]); |
7285 | | test_new_helper(r); |
7286 | | } |
7287 | | } |
7288 | | |
7289 | | #[test] |
7290 | | fn test_new_unaligned_oversized() { |
7291 | |         // Test that an unaligned, overly-sized buffer works for
7292 | | // `new_unaligned_from_prefix` and `new_unaligned_from_suffix`, and that |
7293 | | // they return the remainder and prefix of the slice respectively. Test |
7294 | | // that `xxx_zeroed` behaves the same, and zeroes the memory. |
7295 | | |
7296 | | let mut buf = [0u8; 16]; |
7297 | | { |
7298 | | // In a block so that `r` and `suffix` don't live too long. |
7299 | | let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap(); |
7300 | | assert_eq!(suffix.len(), 8); |
7301 | | test_new_helper_unaligned(r); |
7302 | | } |
7303 | | { |
7304 | | buf = [0xFFu8; 16]; |
7305 | | let (r, suffix) = |
7306 | | Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap(); |
7307 | | // Assert that the suffix wasn't zeroed. |
7308 | | assert_eq!(suffix, &[0xFF; 8]); |
7309 | | test_new_helper_unaligned(r); |
7310 | | } |
7311 | | { |
7312 | | buf = [0u8; 16]; |
7313 | | let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap(); |
7314 | | assert_eq!(prefix.len(), 8); |
7315 | | test_new_helper_unaligned(r); |
7316 | | } |
7317 | | { |
7318 | | buf = [0xFFu8; 16]; |
7319 | | let (prefix, r) = |
7320 | | Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap(); |
7321 | | // Assert that the prefix wasn't zeroed. |
7322 | | assert_eq!(prefix, &[0xFF; 8]); |
7323 | | test_new_helper_unaligned(r); |
7324 | | } |
7325 | | } |
7326 | | |
7327 | | #[test] |
7328 | | fn test_ref_from_mut_from() { |
7329 | |         // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success cases.
7330 | |         // These helper methods defer to `Ref`, whose tests above provide
7331 | |         // exhaustive coverage.
7332 | | |
7333 | | let mut buf = |
7334 | | Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); |
7335 | | |
7336 | | assert_eq!( |
7337 | | AU64::ref_from(&buf.t[8..]).unwrap().0.to_ne_bytes(), |
7338 | | [8, 9, 10, 11, 12, 13, 14, 15] |
7339 | | ); |
7340 | | let suffix = AU64::mut_from(&mut buf.t[8..]).unwrap(); |
7341 | | suffix.0 = 0x0101010101010101; |
7342 | |         // `[u8; 9]` is deliberately not half the size of the full buffer; this would
7343 | |         // catch `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
7344 | | assert_eq!(<[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]); |
7345 | | let suffix = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap(); |
7346 | | suffix.0 = 0x0202020202020202; |
7347 | | <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap()[0] = 42; |
7348 | | assert_eq!(<[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), &[0, 1, 2, 3, 4, 5, 42, 7, 2]); |
7349 | | <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap()[1] = 30; |
7350 | | assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]); |
7351 | | } |
7352 | | |
7353 | | #[test] |
7354 | | fn test_ref_from_mut_from_error() { |
7355 | | // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` error cases. |
7356 | | |
7357 | | // Fail because the buffer is too large. |
7358 | | let mut buf = Align::<[u8; 16], AU64>::default(); |
7359 | | // `buf.t` should be aligned to 8, so only the length check should fail. |
7360 | | assert!(AU64::ref_from(&buf.t[..]).is_none()); |
7361 | | assert!(AU64::mut_from(&mut buf.t[..]).is_none()); |
7362 | | assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none()); |
7363 | | assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none()); |
7364 | | |
7365 | | // Fail because the buffer is too small. |
7366 | | let mut buf = Align::<[u8; 4], AU64>::default(); |
7367 | | assert!(AU64::ref_from(&buf.t[..]).is_none()); |
7368 | | assert!(AU64::mut_from(&mut buf.t[..]).is_none()); |
7369 | | assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none()); |
7370 | | assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none()); |
7371 | | assert!(AU64::ref_from_prefix(&buf.t[..]).is_none()); |
7372 | | assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_none()); |
7373 | | assert!(AU64::ref_from_suffix(&buf.t[..]).is_none()); |
7374 | | assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none()); |
7375 | | assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_none()); |
7376 | | assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_none()); |
7377 | | assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_none()); |
7378 | | assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_none()); |
7379 | | |
7380 | | // Fail because the alignment is insufficient. |
7381 | | let mut buf = Align::<[u8; 13], AU64>::default(); |
7382 | | assert!(AU64::ref_from(&buf.t[1..]).is_none()); |
7383 | | assert!(AU64::mut_from(&mut buf.t[1..]).is_none()); |
7384 | | assert!(AU64::ref_from(&buf.t[1..]).is_none()); |
7385 | | assert!(AU64::mut_from(&mut buf.t[1..]).is_none()); |
7386 | | assert!(AU64::ref_from_prefix(&buf.t[1..]).is_none()); |
7387 | | assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_none()); |
7388 | | assert!(AU64::ref_from_suffix(&buf.t[..]).is_none()); |
7389 | | assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none()); |
7390 | | } |
7391 | | |
7392 | | #[test] |
7393 | | #[allow(clippy::cognitive_complexity)] |
7394 | | fn test_new_error() { |
7395 | | // Fail because the buffer is too large. |
7396 | | |
7397 | | // A buffer with an alignment of 8. |
7398 | | let mut buf = Align::<[u8; 16], AU64>::default(); |
7399 | | // `buf.t` should be aligned to 8, so only the length check should fail. |
7400 | | assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none()); |
7401 | | assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none()); |
7402 | | assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none()); |
7403 | | assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none()); |
7404 | | |
7405 | | // Fail because the buffer is too small. |
7406 | | |
7407 | | // A buffer with an alignment of 8. |
7408 | | let mut buf = Align::<[u8; 4], AU64>::default(); |
7409 | | // `buf.t` should be aligned to 8, so only the length check should fail. |
7410 | | assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none()); |
7411 | | assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none()); |
7412 | | assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none()); |
7413 | | assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none()); |
7414 | | assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[..]).is_none()); |
7415 | | assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).is_none()); |
7416 | | assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none()); |
7417 | | assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none()); |
7418 | | assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.t[..]).is_none()); |
7419 | | assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.t[..]).is_none()); |
7420 | | assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.t[..]).is_none()); |
7421 | | assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.t[..]).is_none()); |
7422 | | |
7423 | | // Fail because the length is not a multiple of the element size. |
7424 | | |
7425 | | let mut buf = Align::<[u8; 12], AU64>::default(); |
7426 | | // `buf.t` has length 12, but element size is 8. |
7427 | | assert!(Ref::<_, [AU64]>::new_slice(&buf.t[..]).is_none()); |
7428 | | assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).is_none()); |
7429 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned(&buf.t[..]).is_none()); |
7430 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.t[..]).is_none()); |
7431 | | |
7432 | | // Fail because the buffer is too short. |
7433 | | let mut buf = Align::<[u8; 12], AU64>::default(); |
7434 | | // `buf.t` has length 12, but the element size is 8 (and we're expecting |
7435 | | // two of them). |
7436 | | assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], 2).is_none()); |
7437 | | assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 2).is_none()); |
7438 | | assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], 2).is_none()); |
7439 | | assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 2).is_none()); |
7440 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], 2).is_none()); |
7441 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(&mut buf.t[..], 2) |
7442 | | .is_none()); |
7443 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], 2).is_none()); |
7444 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(&mut buf.t[..], 2) |
7445 | | .is_none()); |
7446 | | |
7447 | | // Fail because the alignment is insufficient. |
7448 | | |
7449 | | // A buffer with an alignment of 8. An odd buffer size is chosen so that |
7450 | | // the last byte of the buffer has odd alignment. |
7451 | | let mut buf = Align::<[u8; 13], AU64>::default(); |
7452 | | // Slicing from 1, we get a buffer with size 12 (so the length check |
7453 | | // should succeed) but an alignment of only 1, which is insufficient. |
7454 | | assert!(Ref::<_, AU64>::new(&buf.t[1..]).is_none()); |
7455 | | assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[1..]).is_none()); |
7456 | | assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[1..]).is_none()); |
7457 | | assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[1..]).is_none()); |
7458 | | assert!(Ref::<_, [AU64]>::new_slice(&buf.t[1..]).is_none()); |
7459 | | assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[1..]).is_none()); |
7460 | | assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[1..], 1).is_none()); |
7461 | | assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[1..], 1).is_none()); |
7462 | | assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[1..], 1).is_none()); |
7463 | | assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[1..], 1).is_none()); |
7464 | | // Slicing is unnecessary here because `new_from_suffix[_zeroed]` use |
7465 | | // the suffix of the slice, which has odd alignment. |
7466 | | assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none()); |
7467 | | assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none()); |
7468 | | |
7469 | | // Fail due to arithmetic overflow. |
7470 | | |
7471 | | let mut buf = Align::<[u8; 16], AU64>::default(); |
7472 | | let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1; |
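     | | // (`unreasonable_len * mem::size_of::<AU64>()` exceeds `usize::MAX`, so
     | | // the requested byte length is unrepresentable and every constructor
     | | // below must return `None` rather than attempt the conversion.)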
7473 | | assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], unreasonable_len).is_none()); |
7474 | | assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], unreasonable_len) |
7475 | | .is_none()); |
7476 | | assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], unreasonable_len).is_none()); |
7477 | | assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], unreasonable_len) |
7478 | | .is_none()); |
7479 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], unreasonable_len) |
7480 | | .is_none()); |
7481 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed( |
7482 | | &mut buf.t[..], |
7483 | | unreasonable_len |
7484 | | ) |
7485 | | .is_none()); |
7486 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], unreasonable_len) |
7487 | | .is_none()); |
7488 | | assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed( |
7489 | | &mut buf.t[..], |
7490 | | unreasonable_len |
7491 | | ) |
7492 | | .is_none()); |
7493 | | } |
7494 | | |
7495 | | // Tests for ensuring that, if a ZST is passed into a slice-like function, |
7496 | | // we always panic. Since these tests need to be separate per-function, and |
7497 | | // they tend to take up a lot of space, we generate them using a macro in a |
7498 | | // submodule instead. The submodule ensures that we can just re-use the name |
7499 | | // of the function under test for the name of the test itself. |
7500 | | mod test_zst_panics { |
7501 | | macro_rules! zst_test { |
7502 | | ($name:ident($($tt:tt)*), $constructor_in_panic_msg:tt) => { |
7503 | | #[test] |
7504 | | #[should_panic = concat!("Ref::", $constructor_in_panic_msg, " called on a zero-sized type")] |
7505 | | fn $name() { |
7506 | | let mut buffer = [0u8]; |
7507 | | let r = $crate::Ref::<_, [()]>::$name(&mut buffer[..], $($tt)*); |
7508 | | unreachable!("should have panicked, got {:?}", r); |
7509 | | } |
7510 | | } |
7511 | | } |
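     | | // For illustration, the first invocation below expands to roughly the
     | | // following (a sketch, not verbatim macro output):
     | | //
     | | //     #[test]
     | | //     #[should_panic = "Ref::new_slice called on a zero-sized type"]
     | | //     fn new_slice() {
     | | //         let mut buffer = [0u8];
     | | //         let r = crate::Ref::<_, [()]>::new_slice(&mut buffer[..]);
     | | //         unreachable!("should have panicked, got {:?}", r);
     | | //     }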
7512 | | zst_test!(new_slice(), "new_slice"); |
7513 | | zst_test!(new_slice_zeroed(), "new_slice"); |
7514 | | zst_test!(new_slice_from_prefix(1), "new_slice"); |
7515 | | zst_test!(new_slice_from_prefix_zeroed(1), "new_slice"); |
7516 | | zst_test!(new_slice_from_suffix(1), "new_slice"); |
7517 | | zst_test!(new_slice_from_suffix_zeroed(1), "new_slice"); |
7518 | | zst_test!(new_slice_unaligned(), "new_slice_unaligned"); |
7519 | | zst_test!(new_slice_unaligned_zeroed(), "new_slice_unaligned"); |
7520 | | zst_test!(new_slice_unaligned_from_prefix(1), "new_slice_unaligned"); |
7521 | | zst_test!(new_slice_unaligned_from_prefix_zeroed(1), "new_slice_unaligned"); |
7522 | | zst_test!(new_slice_unaligned_from_suffix(1), "new_slice_unaligned"); |
7523 | | zst_test!(new_slice_unaligned_from_suffix_zeroed(1), "new_slice_unaligned"); |
7524 | | } |
7525 | | |
7526 | | #[test] |
7527 | | fn test_as_bytes_methods() { |
7528 | | /// Run a series of tests by calling `AsBytes` methods on `t`. |
7529 | | /// |
7530 | | /// `bytes` is the expected byte sequence returned from `t.as_bytes()` |
7531 | | /// before `t` has been modified. `post_mutation` is the expected |
7532 | | /// sequence returned from `t.as_bytes()` after `t.as_bytes_mut()[0]` |
7533 | | /// has had its bits flipped (by applying `^= 0xFF`). |
7534 | | /// |
7535 | | /// `N` is the size of `t` in bytes. |
7536 | | fn test<T: FromBytes + AsBytes + Debug + Eq + ?Sized, const N: usize>( |
7537 | | t: &mut T, |
7538 | | bytes: &[u8], |
7539 | | post_mutation: &T, |
7540 | | ) { |
7541 | | // Test that we can access the underlying bytes, and that we get the |
7542 | | // right bytes and the right number of bytes. |
7543 | | assert_eq!(t.as_bytes(), bytes); |
7544 | | |
7545 | | // Test that changes to the underlying byte slices are reflected in |
7546 | | // the original object. |
7547 | | t.as_bytes_mut()[0] ^= 0xFF; |
7548 | | assert_eq!(t, post_mutation); |
7549 | | t.as_bytes_mut()[0] ^= 0xFF; |
7550 | | |
7551 | | // `write_to` rejects slices that are too small or too large. |
7552 | | assert_eq!(t.write_to(&mut vec![0; N - 1][..]), None); |
7553 | | assert_eq!(t.write_to(&mut vec![0; N + 1][..]), None); |
7554 | | |
7555 | | // `write_to` works as expected. |
7556 | | let mut bytes = [0; N]; |
7557 | | assert_eq!(t.write_to(&mut bytes[..]), Some(())); |
7558 | | assert_eq!(bytes, t.as_bytes()); |
7559 | | |
7560 | | // `write_to_prefix` rejects slices that are too small. |
7561 | | assert_eq!(t.write_to_prefix(&mut vec![0; N - 1][..]), None); |
7562 | | |
7563 | | // `write_to_prefix` works with exact-sized slices. |
7564 | | let mut bytes = [0; N]; |
7565 | | assert_eq!(t.write_to_prefix(&mut bytes[..]), Some(())); |
7566 | | assert_eq!(bytes, t.as_bytes()); |
7567 | | |
7568 | | // `write_to_prefix` works with too-large slices, and any bytes past |
7569 | | // the prefix aren't modified. |
7570 | | let mut too_many_bytes = vec![0; N + 1]; |
7571 | | too_many_bytes[N] = 123; |
7572 | | assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Some(())); |
7573 | | assert_eq!(&too_many_bytes[..N], t.as_bytes()); |
7574 | | assert_eq!(too_many_bytes[N], 123); |
7575 | | |
7576 | | // `write_to_suffix` rejects slices that are too small. |
7577 | | assert_eq!(t.write_to_suffix(&mut vec![0; N - 1][..]), None); |
7578 | | |
7579 | | // `write_to_suffix` works with exact-sized slices. |
7580 | | let mut bytes = [0; N]; |
7581 | | assert_eq!(t.write_to_suffix(&mut bytes[..]), Some(())); |
7582 | | assert_eq!(bytes, t.as_bytes()); |
7583 | | |
7584 | | // `write_to_suffix` works with too-large slices, and any bytes |
7585 | | // before the suffix aren't modified. |
7586 | | let mut too_many_bytes = vec![0; N + 1]; |
7587 | | too_many_bytes[0] = 123; |
7588 | | assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Some(())); |
7589 | | assert_eq!(&too_many_bytes[1..], t.as_bytes()); |
7590 | | assert_eq!(too_many_bytes[0], 123); |
7591 | | } |
7592 | | |
7593 | | #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes)] |
7594 | | #[repr(C)] |
7595 | | struct Foo { |
7596 | | a: u32, |
7597 | | b: Wrapping<u32>, |
7598 | | c: Option<NonZeroU32>, |
7599 | | } |
7600 | | |
7601 | | let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") { |
7602 | | vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0] |
7603 | | } else { |
7604 | | vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0] |
7605 | | }; |
7606 | | let post_mutation_expected_a = |
7607 | | if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 }; |
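     | | // (Rationale: `a` starts as 1 and the test flips the struct's first byte
     | | // with `^= 0xFF`. On little-endian targets the encoding of `a` changes
     | | // from `[1, 0, 0, 0]` to `[0xFE, 0, 0, 0]` = 0x0000_00FE; on big-endian
     | | // targets it changes from `[0, 0, 0, 1]` to `[0xFF, 0, 0, 1]` =
     | | // 0xFF00_0001.)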
7608 | | test::<_, 12>( |
7609 | | &mut Foo { a: 1, b: Wrapping(2), c: None }, |
7610 | | expected_bytes.as_bytes(), |
7611 | | &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None }, |
7612 | | ); |
7613 | | test::<_, 3>( |
7614 | | Unsized::from_mut_slice(&mut [1, 2, 3]), |
7615 | | &[1, 2, 3], |
7616 | | Unsized::from_mut_slice(&mut [0xFE, 2, 3]), |
7617 | | ); |
7618 | | } |
7619 | | |
7620 | | #[test] |
7621 | | fn test_array() { |
7622 | | #[derive(FromZeroes, FromBytes, AsBytes)] |
7623 | | #[repr(C)] |
7624 | | struct Foo { |
7625 | | a: [u16; 33], |
7626 | | } |
7627 | | |
7628 | | let foo = Foo { a: [0xFFFF; 33] }; |
7629 | | let expected = [0xFFu8; 66]; |
7630 | | assert_eq!(foo.as_bytes(), &expected[..]); |
7631 | | } |
7632 | | |
7633 | | #[test] |
7634 | | fn test_display_debug() { |
7635 | | let buf = Align::<[u8; 8], u64>::default(); |
7636 | | let r = Ref::<_, u64>::new(&buf.t[..]).unwrap(); |
7637 | | assert_eq!(format!("{}", r), "0"); |
7638 | | assert_eq!(format!("{:?}", r), "Ref(0)"); |
7639 | | |
7640 | | let buf = Align::<[u8; 8], u64>::default(); |
7641 | | let r = Ref::<_, [u64]>::new_slice(&buf.t[..]).unwrap(); |
7642 | | assert_eq!(format!("{:?}", r), "Ref([0])"); |
7643 | | } |
7644 | | |
7645 | | #[test] |
7646 | | fn test_eq() { |
7647 | | let buf1 = 0_u64; |
7648 | | let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap(); |
7649 | | let buf2 = 0_u64; |
7650 | | let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap(); |
7651 | | assert_eq!(r1, r2); |
7652 | | } |
7653 | | |
7654 | | #[test] |
7655 | | fn test_ne() { |
7656 | | let buf1 = 0_u64; |
7657 | | let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap(); |
7658 | | let buf2 = 1_u64; |
7659 | | let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap(); |
7660 | | assert_ne!(r1, r2); |
7661 | | } |
7662 | | |
7663 | | #[test] |
7664 | | fn test_ord() { |
7665 | | let buf1 = 0_u64; |
7666 | | let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap(); |
7667 | | let buf2 = 1_u64; |
7668 | | let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap(); |
7669 | | assert!(r1 < r2); |
7670 | | } |
7671 | | |
7672 | | #[test] |
7673 | | fn test_new_zeroed() { |
7674 | | assert!(!bool::new_zeroed()); |
7675 | | assert_eq!(u64::new_zeroed(), 0); |
7676 | | // This test exists in order to exercise unsafe code, especially when |
7677 | | // running under Miri. |
7678 | | #[allow(clippy::unit_cmp)] |
7679 | | { |
7680 | | assert_eq!(<()>::new_zeroed(), ()); |
7681 | | } |
7682 | | } |
7683 | | |
7684 | | #[test] |
7685 | | fn test_transparent_packed_generic_struct() { |
7686 | | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] |
7687 | | #[repr(transparent)] |
7688 | | #[allow(dead_code)] // for the unused fields |
7689 | | struct Foo<T> { |
7690 | | _t: T, |
7691 | | _phantom: PhantomData<()>, |
7692 | | } |
7693 | | |
7694 | | assert_impl_all!(Foo<u32>: FromZeroes, FromBytes, AsBytes); |
7695 | | assert_impl_all!(Foo<u8>: Unaligned); |
7696 | | |
7697 | | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] |
7698 | | #[repr(packed)] |
7699 | | #[allow(dead_code)] // for the unused fields |
7700 | | struct Bar<T, U> { |
7701 | | _t: T, |
7702 | | _u: U, |
7703 | | } |
7704 | | |
7705 | | assert_impl_all!(Bar<u8, AU64>: FromZeroes, FromBytes, AsBytes, Unaligned); |
7706 | | } |
7707 | | |
7708 | | #[test] |
7709 | | fn test_impls() { |
7710 | | use core::borrow::Borrow; |
7711 | | |
7712 | | // A type that can supply test cases for testing |
7713 | | // `TryFromBytes::is_bit_valid`. All types passed to `assert_impls!` |
7714 | | // must implement this trait; that macro uses it to generate runtime |
7715 | | // tests for `TryFromBytes` impls. |
7716 | | // |
7717 | | // All `T: FromBytes` types are provided with a blanket impl. Other |
7718 | | // types must implement `TryFromBytesTestable` directly (i.e., using
7719 | | // `impl_try_from_bytes_testable!`). |
7720 | | trait TryFromBytesTestable { |
7721 | | fn with_passing_test_cases<F: Fn(&Self)>(f: F); |
7722 | | fn with_failing_test_cases<F: Fn(&[u8])>(f: F); |
7723 | | } |
7724 | | |
7725 | | impl<T: FromBytes> TryFromBytesTestable for T { |
7726 | | fn with_passing_test_cases<F: Fn(&Self)>(f: F) { |
7727 | | // Test with a zeroed value. |
7728 | | f(&Self::new_zeroed()); |
7729 | | |
7730 | | let ffs = { |
7731 | | let mut t = Self::new_zeroed(); |
7732 | | let ptr: *mut T = &mut t; |
7733 | | // SAFETY: `T: FromBytes` |
7734 | | unsafe { ptr::write_bytes(ptr.cast::<u8>(), 0xFF, mem::size_of::<T>()) }; |
7735 | | t |
7736 | | }; |
7737 | | |
7738 | | // Test with a value initialized with 0xFF. |
7739 | | f(&ffs); |
7740 | | } |
7741 | | |
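     | | // Since `T: FromBytes`, every byte pattern is a valid `T`, so there
     | | // are no failing cases to supply.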
7742 | | fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {} |
7743 | | } |
7744 | | |
7745 | | // Implements `TryFromBytesTestable`. |
7746 | | macro_rules! impl_try_from_bytes_testable { |
7747 | | // Base case for recursion (when the list of types has run out). |
7748 | | (=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {}; |
7749 | | // Implements for type(s) with no type parameters. |
7750 | | ($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => { |
7751 | | impl TryFromBytesTestable for $ty { |
7752 | | impl_try_from_bytes_testable!( |
7753 | | @methods @success $($success_case),* |
7754 | | $(, @failure $($failure_case),*)? |
7755 | | ); |
7756 | | } |
7757 | | impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?); |
7758 | | }; |
7759 | | // Implements for multiple types with no type parameters. |
7760 | | ($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => { |
7761 | | $( |
7762 | | impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*); |
7763 | | )* |
7764 | | }; |
7765 | | // Implements only the methods; caller must invoke this from inside |
7766 | | // an impl block. |
7767 | | (@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => { |
7768 | | fn with_passing_test_cases<F: Fn(&Self)>(_f: F) { |
7769 | | $( |
7770 | | _f($success_case.borrow()); |
7771 | | )* |
7772 | | } |
7773 | | |
7774 | | fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) { |
7775 | | $($( |
7776 | | let case = $failure_case.as_bytes(); |
7777 | | _f(case.as_bytes()); |
7778 | | )*)? |
7779 | | } |
7780 | | }; |
7781 | | } |
7782 | | |
7783 | | // Note that these impls are only for types which are not `FromBytes`. |
7784 | | // `FromBytes` types are covered by a preceding blanket impl. |
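     | | //
     | | // For illustration, the `bool` arm of the invocation below expands to
     | | // roughly the following (a sketch, not verbatim macro output):
     | | //
     | | //     impl TryFromBytesTestable for bool {
     | | //         fn with_passing_test_cases<F: Fn(&Self)>(_f: F) {
     | | //             _f(true.borrow());
     | | //             _f(false.borrow());
     | | //         }
     | | //         fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {
     | | //             let case = 2u8.as_bytes();
     | | //             _f(case.as_bytes());
     | | //             // ...and likewise for `3u8` and `0xFFu8`.
     | | //         }
     | | //     }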
7785 | | impl_try_from_bytes_testable!( |
7786 | | bool => @success true, false, |
7787 | | @failure 2u8, 3u8, 0xFFu8; |
7788 | | char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}', |
7789 | | @failure 0xD800u32, 0xDFFFu32, 0x110000u32; |
7790 | | str => @success "", "hello", "❤️🧡💛💚💙💜", |
7791 | | @failure [0, 159, 146, 150]; |
7792 | | [u8] => @success [], [0, 1, 2]; |
7793 | | NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, |
7794 | | NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, |
7795 | | NonZeroUsize, NonZeroIsize |
7796 | | => @success Self::new(1).unwrap(), |
7797 | | // Doing this instead of `0` ensures that we always satisfy |
7798 | | // the size and alignment requirements of `Self` (whereas |
7799 | | // `0` may be any integer type with a different size or |
7800 | | // alignment than some `NonZeroXxx` types). |
7801 | | @failure Option::<Self>::None; |
7802 | | [bool] |
7803 | | => @success [true, false], [false, true], |
7804 | | @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; |
7805 | | ); |
7806 | | |
7807 | | // Asserts that `$ty` implements any `$trait` and doesn't implement any |
7808 | | // `!$trait`. Note that all `$trait`s must come before any `!$trait`s. |
7809 | | // |
7810 | | // For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success |
7811 | | // and failure cases for `TryFromBytes::is_bit_valid`. |
7812 | | macro_rules! assert_impls { |
7813 | | ($ty:ty: TryFromBytes) => { |
7814 | | <$ty as TryFromBytesTestable>::with_passing_test_cases(|val| { |
7815 | | let c = Ptr::from(val); |
7816 | | // SAFETY: |
7817 | | // - Since `val` is a normal reference, `c` is guaranteed to
7818 | | // be aligned, to point to a single allocation, and to |
7819 | | // have a size which doesn't overflow `isize`. |
7820 | | // - Since `val` is a valid `$ty`, `c`'s referent satisfies |
7821 | | // the bit validity constraints of `is_bit_valid`, which |
7822 | | // are a superset of the bit validity constraints of |
7823 | | // `$ty`. |
7824 | | let res = unsafe { <$ty as TryFromBytes>::is_bit_valid(c) }; |
7825 | | assert!(res, "{}::is_bit_valid({:?}): got false, expected true", stringify!($ty), val); |
7826 | | |
7827 | | // TODO(#5): In addition to testing `is_bit_valid`, test the |
7828 | | // methods built on top of it. This would both allow us to |
7829 | | // test their implementations and actually convert the bytes |
7830 | | // to `$ty`, giving Miri a chance to catch if this is |
7831 | | // unsound (i.e., if our `is_bit_valid` impl is buggy).
7832 | | // |
7833 | | // The following code was tried, but it doesn't work because |
7834 | | // a) some types are not `AsBytes` and, b) some types are |
7835 | | // not `Sized`. |
7836 | | // |
7837 | | // let r = <$ty as TryFromBytes>::try_from_ref(val.as_bytes()).unwrap(); |
7838 | | // assert_eq!(r, &val); |
7839 | | // let r = <$ty as TryFromBytes>::try_from_mut(val.as_bytes_mut()).unwrap(); |
7840 | | // assert_eq!(r, &mut val); |
7841 | | // let v = <$ty as TryFromBytes>::try_read_from(val.as_bytes()).unwrap(); |
7842 | | // assert_eq!(v, val); |
7843 | | }); |
7844 | | #[allow(clippy::as_conversions)] |
7845 | | <$ty as TryFromBytesTestable>::with_failing_test_cases(|c| { |
7846 | | let res = <$ty as TryFromBytes>::try_from_ref(c); |
7847 | | assert!(res.is_none(), "{}::is_bit_valid({:?}): got true, expected false", stringify!($ty), c); |
7848 | | }); |
7849 | | |
7850 | | #[allow(dead_code)] |
7851 | | const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); }; |
7852 | | }; |
7853 | | ($ty:ty: $trait:ident) => { |
7854 | | #[allow(dead_code)] |
7855 | | const _: () = { static_assertions::assert_impl_all!($ty: $trait); }; |
7856 | | }; |
7857 | | ($ty:ty: !$trait:ident) => { |
7858 | | #[allow(dead_code)] |
7859 | | const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); }; |
7860 | | }; |
7861 | | ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => { |
7862 | | $( |
7863 | | assert_impls!($ty: $trait); |
7864 | | )* |
7865 | | |
7866 | | $( |
7867 | | assert_impls!($ty: !$negative_trait); |
7868 | | )* |
7869 | | }; |
7870 | | } |
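     | | // For illustration, `assert_impls!(u16: FromBytes, !Unaligned)` expands
     | | // to roughly the following (a sketch, not verbatim macro output):
     | | //
     | | //     #[allow(dead_code)]
     | | //     const _: () = { static_assertions::assert_impl_all!(u16: FromBytes); };
     | | //     #[allow(dead_code)]
     | | //     const _: () = { static_assertions::assert_not_impl_any!(u16: Unaligned); };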
7871 | | |
7872 | | // NOTE: The negative impl assertions here are not necessarily |
7873 | | // prescriptive. They merely serve as change detectors to make sure |
7874 | | // we're aware of what trait impls are getting added with a given |
7875 | | // change. Of course, some impls would be invalid (e.g., `bool: |
7876 | | // FromBytes`), and so this change detection is very important. |
7877 | | |
7878 | | assert_impls!((): KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
7879 | | assert_impls!(u8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
7880 | | assert_impls!(i8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
7881 | | assert_impls!(u16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7882 | | assert_impls!(i16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7883 | | assert_impls!(u32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7884 | | assert_impls!(i32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7885 | | assert_impls!(u64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7886 | | assert_impls!(i64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7887 | | assert_impls!(u128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7888 | | assert_impls!(i128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7889 | | assert_impls!(usize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7890 | | assert_impls!(isize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7891 | | assert_impls!(f32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7892 | | assert_impls!(f64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7893 | | |
7894 | | assert_impls!(bool: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes); |
7895 | | assert_impls!(char: KnownLayout, TryFromBytes, FromZeroes, AsBytes, !FromBytes, !Unaligned); |
7896 | | assert_impls!(str: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes); |
7897 | | |
7898 | | assert_impls!(NonZeroU8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes); |
7899 | | assert_impls!(NonZeroI8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes); |
7900 | | assert_impls!(NonZeroU16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7901 | | assert_impls!(NonZeroI16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7902 | | assert_impls!(NonZeroU32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7903 | | assert_impls!(NonZeroI32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7904 | | assert_impls!(NonZeroU64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7905 | | assert_impls!(NonZeroI64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7906 | | assert_impls!(NonZeroU128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7907 | | assert_impls!(NonZeroI128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7908 | | assert_impls!(NonZeroUsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7909 | | assert_impls!(NonZeroIsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); |
7910 | | |
7911 | | assert_impls!(Option<NonZeroU8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
7912 | | assert_impls!(Option<NonZeroI8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
7913 | | assert_impls!(Option<NonZeroU16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7914 | | assert_impls!(Option<NonZeroI16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7915 | | assert_impls!(Option<NonZeroU32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7916 | | assert_impls!(Option<NonZeroI32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7917 | | assert_impls!(Option<NonZeroU64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7918 | | assert_impls!(Option<NonZeroI64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7919 | | assert_impls!(Option<NonZeroU128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7920 | | assert_impls!(Option<NonZeroI128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7921 | | assert_impls!(Option<NonZeroUsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7922 | | assert_impls!(Option<NonZeroIsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); |
7923 | | |
7924 | | // Implements none of the ZC traits. |
7925 | | struct NotZerocopy; |
7926 | | |
7927 | | #[rustfmt::skip] |
7928 | | type FnManyArgs = fn( |
7929 | | NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, |
7930 | | ) -> (NotZerocopy, NotZerocopy); |
7931 | | |
7932 | | // Allowed, because we're not actually using this type for FFI. |
7933 | | #[allow(improper_ctypes_definitions)] |
7934 | | #[rustfmt::skip] |
7935 | | type ECFnManyArgs = extern "C" fn( |
7936 | | NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, |
7937 | | ) -> (NotZerocopy, NotZerocopy); |
7938 | | |
7939 | | #[cfg(feature = "alloc")] |
7940 | | assert_impls!(Option<Box<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7941 | | assert_impls!(Option<Box<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7942 | | assert_impls!(Option<&'static UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7943 | | assert_impls!(Option<&'static [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7944 | | assert_impls!(Option<&'static mut UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7945 | | assert_impls!(Option<&'static mut [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7946 | | assert_impls!(Option<NonNull<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7947 | | assert_impls!(Option<NonNull<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7948 | | assert_impls!(Option<fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7949 | | assert_impls!(Option<FnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7950 | | assert_impls!(Option<extern "C" fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7951 | | assert_impls!(Option<ECFnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7952 | | |
7953 | | assert_impls!(PhantomData<NotZerocopy>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
7954 | | assert_impls!(PhantomData<[u8]>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
7955 | | |
7956 | | assert_impls!(ManuallyDrop<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); |
7957 | | assert_impls!(ManuallyDrop<[u8]>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); |
7958 | | assert_impls!(ManuallyDrop<NotZerocopy>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7959 | | assert_impls!(ManuallyDrop<[NotZerocopy]>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7960 | | |
7961 | | assert_impls!(MaybeUninit<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, Unaligned, !AsBytes); |
7962 | | assert_impls!(MaybeUninit<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7963 | | |
7964 | | assert_impls!(Wrapping<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
7965 | | assert_impls!(Wrapping<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7966 | | |
7967 | | assert_impls!(Unalign<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); |
7968 | | assert_impls!(Unalign<NotZerocopy>: Unaligned, !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes); |
7969 | | |
7970 | | assert_impls!([u8]: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); |
7971 | | assert_impls!([bool]: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes); |
7972 | | assert_impls!([NotZerocopy]: !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7973 | | assert_impls!([u8; 0]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); |
7974 | | assert_impls!([NotZerocopy; 0]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7975 | | assert_impls!([u8; 1]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); |
7976 | | assert_impls!([NotZerocopy; 1]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7977 | | |
7978 | | assert_impls!(*const NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7979 | | assert_impls!(*mut NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); |
7980 | | assert_impls!(*const [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7981 | | assert_impls!(*mut [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7982 | | assert_impls!(*const dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7983 | | assert_impls!(*mut dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); |
7984 | | |
7985 | | #[cfg(feature = "simd")] |
7986 | | { |
7987 | | #[allow(unused_macros)] |
7988 | | macro_rules! test_simd_arch_mod { |
7989 | | ($arch:ident, $($typ:ident),*) => { |
7990 | | { |
7991 | | use core::arch::$arch::{$($typ),*}; |
7992 | | use crate::*; |
7993 | | $( assert_impls!($typ: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); )* |
7994 | | } |
7995 | | }; |
7996 | | } |
7997 | | #[cfg(target_arch = "x86")] |
7998 | | test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i); |
7999 | | |
8000 | | #[cfg(all(feature = "simd-nightly", target_arch = "x86"))] |
8001 | | test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i); |
8002 | | |
8003 | | #[cfg(target_arch = "x86_64")] |
8004 | | test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i); |
8005 | | |
8006 | | #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))] |
8007 | | test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i); |
8008 | | |
8009 | | #[cfg(target_arch = "wasm32")] |
8010 | | test_simd_arch_mod!(wasm32, v128); |
8011 | | |
8012 | | #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] |
8013 | | test_simd_arch_mod!( |
8014 | | powerpc, |
8015 | | vector_bool_long, |
8016 | | vector_double, |
8017 | | vector_signed_long, |
8018 | | vector_unsigned_long |
8019 | | ); |
8020 | | |
8021 | | #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] |
8022 | | test_simd_arch_mod!( |
8023 | | powerpc64, |
8024 | | vector_bool_long, |
8025 | | vector_double, |
8026 | | vector_signed_long, |
8027 | | vector_unsigned_long |
8028 | | ); |
8029 | | #[cfg(target_arch = "aarch64")] |
8030 | | #[rustfmt::skip] |
8031 | | test_simd_arch_mod!( |
8032 | | aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, |
8033 | | int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t, |
8034 | | int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, |
8035 | | poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, |
8036 | | poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, |
8037 | | uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, |
8038 | | uint64x1_t, uint64x2_t |
8039 | | ); |
8040 | | #[cfg(all(feature = "simd-nightly", target_arch = "arm"))] |
8041 | | #[rustfmt::skip] |
8042 | | test_simd_arch_mod!(arm, int8x4_t, uint8x4_t); |
8043 | | } |
8044 | | } |
8045 | | } |
8046 | | |
8047 | | #[cfg(kani)] |
8048 | | mod proofs { |
8049 | | use super::*; |
8050 | | |
8051 | | impl kani::Arbitrary for DstLayout { |
8052 | | fn any() -> Self { |
8053 | | let align: NonZeroUsize = kani::any(); |
8054 | | let size_info: SizeInfo = kani::any(); |
8055 | | |
8056 | | kani::assume(align.is_power_of_two()); |
8057 | | kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN); |
8058 | | |
8059 | | // For testing purposes, we care most about instantiations of
8060 | | // `DstLayout` that can correspond to actual Rust types. We use |
8061 | | // `Layout` to verify that our `DstLayout` satisfies the validity |
8062 | | // conditions of Rust layouts. |
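     | | // (`Layout::from_size_align` fails if the alignment is not a power of
     | | // two or if the size, rounded up to the alignment, would overflow
     | | // `isize::MAX`; such candidate layouts are assumed away here.)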
8063 | | kani::assume( |
8064 | | match size_info { |
8065 | | SizeInfo::Sized { _size } => Layout::from_size_align(_size, align.get()), |
8066 | | SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => { |
8067 | | // `SliceDst` cannot encode an exact size, but we know
8068 | | // it is at least `_offset` bytes. |
8069 | | Layout::from_size_align(_offset, align.get()) |
8070 | | } |
8071 | | } |
8072 | | .is_ok(), |
8073 | | ); |
8074 | | |
8075 | | Self { align, size_info }
8076 | | } |
8077 | | } |
8078 | | |
8079 | | impl kani::Arbitrary for SizeInfo { |
8080 | | fn any() -> Self { |
8081 | | let is_sized: bool = kani::any(); |
8082 | | |
8083 | | match is_sized { |
8084 | | true => { |
8085 | | let size: usize = kani::any(); |
8086 | | |
8087 | | kani::assume(size <= isize::MAX as _); |
8088 | | |
8089 | | SizeInfo::Sized { _size: size } |
8090 | | } |
8091 | | false => SizeInfo::SliceDst(kani::any()), |
8092 | | } |
8093 | | } |
8094 | | } |
8095 | | |
8096 | | impl kani::Arbitrary for TrailingSliceLayout { |
8097 | | fn any() -> Self { |
8098 | | let elem_size: usize = kani::any(); |
8099 | | let offset: usize = kani::any(); |
8100 | | |
8101 | | kani::assume(elem_size < isize::MAX as _); |
8102 | | kani::assume(offset < isize::MAX as _); |
8103 | | |
8104 | | TrailingSliceLayout { _elem_size: elem_size, _offset: offset } |
8105 | | } |
8106 | | } |
8107 | | |
8108 | | #[kani::proof] |
8109 | | fn prove_dst_layout_extend() { |
8110 | | use crate::util::{core_layout::padding_needed_for, max, min}; |
8111 | | |
8112 | | let base: DstLayout = kani::any(); |
8113 | | let field: DstLayout = kani::any(); |
8114 | | let packed: Option<NonZeroUsize> = kani::any(); |
8115 | | |
8116 | | if let Some(max_align) = packed { |
8117 | | kani::assume(max_align.is_power_of_two()); |
8118 | | kani::assume(base.align <= max_align); |
8119 | | } |
8120 | | |
8121 | | // The base can only be extended if it's sized. |
8122 | | kani::assume(matches!(base.size_info, SizeInfo::Sized { .. })); |
8123 | | let base_size = if let SizeInfo::Sized { _size: size } = base.size_info { |
8124 | | size |
8125 | | } else { |
8126 | | unreachable!(); |
8127 | | }; |
8128 | | |
8129 | | // Under the above conditions, `DstLayout::extend` will not panic. |
8130 | | let composite = base.extend(field, packed); |
8131 | | |
8132 | | // The field's alignment is clamped by `max_align` (i.e., the |
8133 | | // `packed` attribute, if any) [1]. |
8134 | | // |
8135 | | // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: |
8136 | | // |
8137 | | // The alignments of each field, for the purpose of positioning |
8138 | | // fields, is the smaller of the specified alignment and the |
8139 | | // alignment of the field's type. |
8140 | | let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN)); |
8141 | | |
8142 | | // The struct's alignment is the maximum of its previous alignment and |
8143 | | // `field_align`. |
8144 | | assert_eq!(composite.align, max(base.align, field_align)); |
8145 | | |
8146 | | // Compute the minimum amount of inter-field padding needed to |
8147 | | // satisfy the field's alignment, and offset of the trailing field. |
8148 | | // [1] |
8149 | | // |
8150 | | // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: |
8151 | | // |
8152 | | // Inter-field padding is guaranteed to be the minimum required in |
8153 | | // order to satisfy each field's (possibly altered) alignment. |
8154 | | let padding = padding_needed_for(base_size, field_align); |
8155 | | let offset = base_size + padding; |
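     | | // (Worked example with illustrative numbers: for `base_size = 5` and
     | | // `field_align = 4`, `padding_needed_for(5, 4)` yields 3, placing the
     | | // field at `offset = 5 + 3 = 8`.)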
8156 | | |
8157 | | // For testing purposes, we'll also construct `alloc::Layout` |
8158 | | // stand-ins for `DstLayout`, and show that `extend` behaves |
8159 | | // comparably on both types. |
8160 | | let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap(); |
8161 | | |
8162 | | match field.size_info { |
8163 | | SizeInfo::Sized { _size: field_size } => { |
8164 | | if let SizeInfo::Sized { _size: composite_size } = composite.size_info { |
8165 | | // If the trailing field is sized, the resulting layout |
8166 | | // will be sized. Its size will be the sum of the |
8167 | | // size of the preceding layout, the size of the new field, and the
8168 | | // size of inter-field padding between the two. |
8169 | | assert_eq!(composite_size, offset + field_size); |
8170 | | |
8171 | | let field_analog = |
8172 | | Layout::from_size_align(field_size, field_align.get()).unwrap(); |
8173 | | |
8174 | | if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog) |
8175 | | { |
8176 | | assert_eq!(actual_offset, offset); |
8177 | | assert_eq!(actual_composite.size(), composite_size); |
8178 | | assert_eq!(actual_composite.align(), composite.align.get()); |
8179 | | } else { |
8180 | | // An error here reflects that composite of `base` |
8181 | | // and `field` cannot correspond to a real Rust type |
8182 | | // fragment, because such a fragment would violate |
8183 | | // the basic invariants of a valid Rust layout. At |
8184 | | // the time of writing, `DstLayout` is a little more |
8185 | | // permissive than `Layout`, so we don't assert |
8186 | | // anything in this branch (e.g., unreachability). |
8187 | | } |
8188 | | } else { |
8189 | | panic!("The composite of two sized layouts must be sized.") |
8190 | | } |
8191 | | } |
8192 | | SizeInfo::SliceDst(TrailingSliceLayout { |
8193 | | _offset: field_offset, |
8194 | | _elem_size: field_elem_size, |
8195 | | }) => { |
8196 | | if let SizeInfo::SliceDst(TrailingSliceLayout { |
8197 | | _offset: composite_offset, |
8198 | | _elem_size: composite_elem_size, |
8199 | | }) = composite.size_info |
8200 | | { |
8201 | | // The offset of the trailing slice component is the sum |
8202 | | // of the offset of the trailing field and the trailing |
8203 | | // slice offset within that field. |
8204 | | assert_eq!(composite_offset, offset + field_offset); |
8205 | | // The elem size is unchanged. |
8206 | | assert_eq!(composite_elem_size, field_elem_size); |
8207 | | |
8208 | | let field_analog = |
8209 | | Layout::from_size_align(field_offset, field_align.get()).unwrap(); |
8210 | | |
8211 | | if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog) |
8212 | | { |
8213 | | assert_eq!(actual_offset, offset); |
8214 | | assert_eq!(actual_composite.size(), composite_offset); |
8215 | | assert_eq!(actual_composite.align(), composite.align.get()); |
8216 | | } else { |
8217 | | // An error here reflects that composite of `base` |
8218 | | // and `field` cannot correspond to a real Rust type |
8219 | | // fragment, because such a fragment would violate |
8220 | | // the basic invariants of a valid Rust layout. At |
8221 | | // the time of writing, `DstLayout` is a little more |
8222 | | // permissive than `Layout`, so we don't assert |
8223 | | // anything in this branch (e.g., unreachability). |
8224 | | } |
8225 | | } else { |
8226 | | panic!("The extension of a layout with a DST must result in a DST.") |
8227 | | } |
8228 | | } |
8229 | | } |
8230 | | } |
8231 | | |
8232 | | #[kani::proof] |
8233 | | #[kani::should_panic] |
8234 | | fn prove_dst_layout_extend_dst_panics() { |
8235 | | let base: DstLayout = kani::any(); |
8236 | | let field: DstLayout = kani::any(); |
8237 | | let packed: Option<NonZeroUsize> = kani::any(); |
8238 | | |
8239 | | if let Some(max_align) = packed { |
8240 | | kani::assume(max_align.is_power_of_two()); |
8241 | | kani::assume(base.align <= max_align); |
8242 | | } |
8243 | | |
8244 | | kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..))); |
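     | | // Since `base` already ends in a trailing slice, no field can be
     | | // appended after it, so `extend` is expected to panic.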
8245 | | |
8246 | | let _ = base.extend(field, packed); |
8247 | | } |
8248 | | |
8249 | | #[kani::proof] |
8250 | | fn prove_dst_layout_pad_to_align() { |
8251 | | use crate::util::core_layout::padding_needed_for; |
8252 | | |
8253 | | let layout: DstLayout = kani::any(); |
8254 | | |
8255 | | let padded: DstLayout = layout.pad_to_align(); |
8256 | | |
8257 | | // Calling `pad_to_align` does not alter the `DstLayout`'s alignment. |
8258 | | assert_eq!(padded.align, layout.align); |
8259 | | |
8260 | | if let SizeInfo::Sized { _size: unpadded_size } = layout.size_info { |
8261 | | if let SizeInfo::Sized { _size: padded_size } = padded.size_info { |
8262 | | // If the layout is sized, it will remain sized after padding is |
8263 | | // added. Its size will be the sum of its unpadded size and the
8264 | | // size of the trailing padding needed to satisfy its alignment
8265 | | // requirements.
8266 | | let padding = padding_needed_for(unpadded_size, layout.align); |
8267 | | assert_eq!(padded_size, unpadded_size + padding); |
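     | | // (Illustrative numbers: `unpadded_size = 5` with `layout.align = 8`
     | | // gives `padding = 3` and `padded_size = 8`.)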
8268 | | |
8269 | | // Prove that calling `DstLayout::pad_to_align` behaves |
8270 | | // identically to `Layout::pad_to_align`. |
8271 | | let layout_analog = |
8272 | | Layout::from_size_align(unpadded_size, layout.align.get()).unwrap(); |
8273 | | let padded_analog = layout_analog.pad_to_align(); |
8274 | | assert_eq!(padded_analog.align(), layout.align.get()); |
8275 | | assert_eq!(padded_analog.size(), padded_size); |
8276 | | } else { |
8277 | | panic!("The padding of a sized layout must result in a sized layout.") |
8278 | | } |
8279 | | } else { |
8280 | | // If the layout is a DST, padding cannot be statically added. |
8281 | | assert_eq!(padded.size_info, layout.size_info); |
8282 | | } |
8283 | | } |
8284 | | } |