1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13//   cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! ***<span style="font-size: 140%">Fast, safe, <span
16//! style="color:red;">compile error</span>. Pick two.</span>***
17//!
18//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
19//! so you don't have to.
20//!
21//! *For an overview of what's changed from zerocopy 0.7, check out our [release
22//! notes][release-notes], which include a step-by-step upgrading guide.*
23//!
24//! *Have questions? Need more out of zerocopy? Submit a [customer request
25//! issue][customer-request-issue] or ask the maintainers on
26//! [GitHub][github-q-a] or [Discord][discord]!*
27//!
28//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
29//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
30//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
31//! [discord]: https://discord.gg/MAvWH2R6zk
32//!
33//! # Overview
34//!
35//! ##### Conversion Traits
36//!
37//! Zerocopy provides four derivable traits for zero-cost conversions:
38//! - [`TryFromBytes`] indicates that a type may safely be converted from
39//!   certain byte sequences (conditional on runtime checks)
40//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
41//!   instance of a type
42//! - [`FromBytes`] indicates that a type may safely be converted from an
43//!   arbitrary byte sequence
44//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
45//!   sequence
46//!
47//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
48//!
49//! [slice-dsts]: KnownLayout#dynamically-sized-types
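//!
//! For example (a minimal sketch, assuming either the `derive` feature or a
//! direct dependency on `zerocopy-derive`), a plain-old-data struct can be
//! converted to and from raw bytes without copying:
//!
//! ```
//! use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
//! # use zerocopy_derive::*;
//!
//! #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
//! #[repr(C)]
//! struct Point {
//!     x: u16,
//!     y: u16,
//! }
//!
//! // Parse a `Point` directly out of a byte slice...
//! let point = Point::read_from_bytes(&[1, 0, 2, 0][..]).unwrap();
//! // ...and view it as bytes again; the round trip is lossless.
//! assert_eq!(point.as_bytes(), &[1, 0, 2, 0]);
//! ```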
50//!
51//! ##### Marker Traits
52//!
53//! Zerocopy provides three derivable marker traits that do not provide any
54//! functionality themselves, but are required to call certain methods provided
55//! by the conversion traits:
56//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
57//!   qualities of a type
58//! - [`Immutable`] indicates that a type is free from interior mutability,
59//!   except by ownership or an exclusive (`&mut`) borrow
60//! - [`Unaligned`] indicates that a type's alignment requirement is 1
61//!
62//! You should generally derive these marker traits whenever possible.
63//!
64//! ##### Conversion Macros
65//!
66//! Zerocopy provides six macros for safe casting between types:
67//!
68//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
69//!   one type to a value of another type of the same size
70//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
71//!   mutable reference of one type to a mutable reference of another type of
72//!   the same size
73//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
74//!   mutable or immutable reference of one type to an immutable reference of
75//!   another type of the same size
76//!
77//! These macros perform *compile-time* size and alignment checks, meaning that
78//! unconditional casts have zero cost at runtime. Conditional casts do not need
79//! to validate size or alignment at runtime, but do need to validate contents.
80//!
81//! These macros cannot be used in generic contexts. For generic conversions,
82//! use the methods defined by the [conversion traits](#conversion-traits).
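//!
//! As a brief sketch of the unconditional case, the [`transmute`] macro
//! converts a value between two types of the same size, with the size check
//! performed entirely at compile time:
//!
//! ```
//! use zerocopy::transmute;
//!
//! // `[[u8; 4]; 2]` and `[u8; 8]` have the same size, so this conversion
//! // compiles and has no runtime cost.
//! let one_dimensional: [u8; 8] = transmute!([[0u8; 4]; 2]);
//! assert_eq!(one_dimensional, [0u8; 8]);
//! ```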
83//!
84//! ##### Byteorder-Aware Numerics
85//!
86//! Zerocopy provides byte-order aware integer types that support these
87//! conversions; see the [`byteorder`] module. These types are especially useful
88//! for network parsing.
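//!
//! For instance (a short sketch), a field can be kept in its big-endian wire
//! format and converted to a native integer only when it is read:
//!
//! ```
//! use zerocopy::byteorder::{BigEndian, U16};
//! use zerocopy::IntoBytes;
//!
//! // `U16<BigEndian>` stores big-endian bytes regardless of the host's native
//! // byte order.
//! let length = U16::<BigEndian>::new(0x1234);
//! assert_eq!(length.get(), 0x1234);
//! assert_eq!(length.as_bytes(), &[0x12, 0x34]);
//! ```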
89//!
90//! # Cargo Features
91//!
92//! - **`alloc`**
93//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
94//!   the `alloc` crate is added as a dependency, and some allocation-related
95//!   functionality is added.
96//!
97//! - **`std`**
98//!   By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
99//!   `std` crate is added as a dependency (i.e., `no_std` is disabled), and
100//!   support for some `std` types is added. `std` implies `alloc`.
101//!
102//! - **`derive`**
103//!   Provides derives for the core marker traits via the `zerocopy-derive`
104//!   crate. These derives are re-exported from `zerocopy`, so it is not
105//!   necessary to depend on `zerocopy-derive` directly.
106//!
107//!   However, you may experience better compile times if you instead directly
108//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
109//!   since doing so will allow Rust to compile these crates in parallel. To do
110//!   so, do *not* enable the `derive` feature, and list both dependencies in
111//!   your `Cargo.toml` with the same leading non-zero version number; e.g.:
112//!
113//!   ```toml
114//!   [dependencies]
115//!   zerocopy = "0.X"
116//!   zerocopy-derive = "0.X"
117//!   ```
118//!
119//!   To avoid the risk of [duplicate import errors][duplicate-import-errors] if
120//!   one of your dependencies enables zerocopy's `derive` feature, import
121//!   derives as `use zerocopy_derive::*` rather than by name (e.g., `use
122//!   zerocopy_derive::FromBytes`).
123//!
124//! - **`simd`**
125//!   When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
126//!   `IntoBytes` impls are emitted for all stable SIMD types which exist on the
127//!   target platform. Note that the layout of SIMD types is not yet stabilized,
128//!   so these impls may be removed in the future if layout changes make them
129//!   invalid. For more information, see the Unsafe Code Guidelines Reference
130//!   page on the [layout of packed SIMD vectors][simd-layout].
131//!
132//! - **`simd-nightly`**
133//!   Enables the `simd` feature and adds support for SIMD types which are only
134//!   available on nightly. Since these types are unstable, support for any type
135//!   may be removed at any point in the future.
136//!
137//! - **`float-nightly`**
138//!   Adds support for the unstable `f16` and `f128` types. These types are
139//!   not yet fully implemented and may not be supported on all platforms.
140//!
141//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
142//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
143//!
144//! # Security Ethos
145//!
146//! Zerocopy is expressly designed for use in security-critical contexts. We
146//! strive to ensure that zerocopy code is sound under Rust's current
148//! memory model, and *any future memory model*. We ensure this by:
149//! - **...not 'guessing' about Rust's semantics.**
150//!   We annotate `unsafe` code with a precise rationale for its soundness that
151//!   cites a relevant section of Rust's official documentation. When Rust's
152//!   documented semantics are unclear, we work with the Rust Operational
153//!   Semantics Team to clarify Rust's documentation.
154//! - **...rigorously testing our implementation.**
155//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
156//!   array of supported target platforms of varying endianness and pointer
157//!   width, and across both current and experimental memory models of Rust.
158//! - **...formally proving the correctness of our implementation.**
159//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
160//!   correctness.
161//!
162//! For more information, see our full [soundness policy].
163//!
164//! [Miri]: https://github.com/rust-lang/miri
165//! [Kani]: https://github.com/model-checking/kani
166//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
167//!
168//! # Relationship to Project Safe Transmute
169//!
170//! [Project Safe Transmute] is an official initiative of the Rust Project to
171//! develop language-level support for safer transmutation. The Project consults
172//! with crates like zerocopy to identify aspects of safer transmutation that
173//! would benefit from compiler support, and has developed an [experimental,
174//! compiler-supported analysis][mcp-transmutability] which determines whether,
175//! for a given type, any value of that type may be soundly transmuted into
176//! another type. Once this functionality is sufficiently mature, zerocopy
177//! intends to replace its internal transmutability analysis (implemented by our
178//! custom derives) with the compiler-supported one. This change will likely be
179//! an implementation detail that is invisible to zerocopy's users.
180//!
181//! Project Safe Transmute will not replace the need for most of zerocopy's
182//! higher-level abstractions. The experimental compiler analysis is a tool for
183//! checking the soundness of `unsafe` code, not a tool to avoid writing
184//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
185//! will still be required in order to provide higher-level abstractions on top
186//! of the building block provided by Project Safe Transmute.
187//!
188//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
189//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
190//!
191//! # MSRV
192//!
193//! See our [MSRV policy].
194//!
195//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
196//!
197//! # Changelog
198//!
199//! Zerocopy uses [GitHub Releases].
200//!
201//! [GitHub Releases]: https://github.com/google/zerocopy/releases
202//!
203//! # Thanks
204//!
205//! Zerocopy is maintained by engineers at Google with help from [many wonderful
206//! contributors][contributors]. Thank you to everyone who has lent a hand in
207//! making Rust a little more secure!
208//!
209//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
210
211// Sometimes we want to use lints which were added after our MSRV.
212// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
213// this attribute, any unknown lint would cause a CI failure when testing with
214// our MSRV.
215#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
216#![deny(renamed_and_removed_lints)]
217#![deny(
218    anonymous_parameters,
219    deprecated_in_future,
220    late_bound_lifetime_arguments,
221    missing_copy_implementations,
222    missing_debug_implementations,
223    missing_docs,
224    path_statements,
225    patterns_in_fns_without_body,
226    rust_2018_idioms,
227    trivial_numeric_casts,
228    unreachable_pub,
229    unsafe_op_in_unsafe_fn,
230    unused_extern_crates,
231    // We intentionally choose not to deny `unused_qualifications`. When items
232    // are added to the prelude (e.g., `core::mem::size_of`), this has the
233    // consequence of making some uses trigger this lint on the latest toolchain
234    // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
235    // does not work on older toolchains.
236    //
237    // We tested a more complicated fix in #1413, but ultimately decided that,
238    // since this lint is just a minor style lint, the complexity isn't worth it
239    // - it's fine to occasionally have unused qualifications slip through,
240    // especially since these do not affect our user-facing API in any way.
241    variant_size_differences
242)]
243#![cfg_attr(
244    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
245    deny(fuzzy_provenance_casts, lossy_provenance_casts)
246)]
247#![deny(
248    clippy::all,
249    clippy::alloc_instead_of_core,
250    clippy::arithmetic_side_effects,
251    clippy::as_underscore,
252    clippy::assertions_on_result_states,
253    clippy::as_conversions,
254    clippy::correctness,
255    clippy::dbg_macro,
256    clippy::decimal_literal_representation,
257    clippy::double_must_use,
258    clippy::get_unwrap,
259    clippy::indexing_slicing,
260    clippy::missing_inline_in_public_items,
261    clippy::missing_safety_doc,
262    clippy::multiple_unsafe_ops_per_block,
263    clippy::must_use_candidate,
264    clippy::must_use_unit,
265    clippy::obfuscated_if_else,
266    clippy::perf,
267    clippy::print_stdout,
268    clippy::return_self_not_must_use,
269    clippy::std_instead_of_core,
270    clippy::style,
271    clippy::suspicious,
272    clippy::todo,
273    clippy::undocumented_unsafe_blocks,
274    clippy::unimplemented,
275    clippy::unnested_or_patterns,
276    clippy::unwrap_used,
277    clippy::use_debug
278)]
279// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes
280// has false positives, and we test on our MSRV in CI, so it doesn't help us
281// anyway.
282#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)]
283#![deny(
284    rustdoc::bare_urls,
285    rustdoc::broken_intra_doc_links,
286    rustdoc::invalid_codeblock_attributes,
287    rustdoc::invalid_html_tags,
288    rustdoc::invalid_rust_codeblocks,
289    rustdoc::missing_crate_level_docs,
290    rustdoc::private_intra_doc_links
291)]
292// In test code, it makes sense to weight more heavily towards concise, readable
293// code over correct or debuggable code.
294#![cfg_attr(any(test, kani), allow(
295    // In tests, you get line numbers and have access to source code, so panic
296    // messages are less important. You also often unwrap a lot, which would
297    // make expect'ing instead very verbose.
298    clippy::unwrap_used,
299    // In tests, there's no harm to "panic risks" - the worst that can happen is
300    // that your test will fail, and you'll fix it. By contrast, panic risks in
301// production code introduce the possibility of code panicking unexpectedly "in
302    // the field".
303    clippy::arithmetic_side_effects,
304    clippy::indexing_slicing,
305))]
306#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
307#![cfg_attr(
308    all(feature = "simd-nightly", target_arch = "arm"),
309    feature(stdarch_arm_neon_intrinsics)
310)]
311#![cfg_attr(
312    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
313    feature(stdarch_powerpc)
314)]
315#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
316#![cfg_attr(doc_cfg, feature(doc_cfg))]
317#![cfg_attr(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, feature(coverage_attribute))]
318#![cfg_attr(
319    any(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, miri),
320    feature(layout_for_ptr)
321)]
322#![cfg_attr(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), feature(test))]
323
324// This is a hack to allow zerocopy-derive derives to work in this crate. They
325// assume that zerocopy is linked as an extern crate, so they access items from
326// it as `zerocopy::Xxx`. This makes that still work.
327#[cfg(any(feature = "derive", test))]
328extern crate self as zerocopy;
329
330#[cfg(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS))]
331extern crate test;
332
333#[doc(hidden)]
334#[macro_use]
335pub mod util;
336
337pub mod byte_slice;
338pub mod byteorder;
339mod deprecated;
340
341#[doc(hidden)]
342pub mod doctests;
343
344// This module is `pub` so that zerocopy's error types and error handling
345// documentation is grouped together in a cohesive module. In practice, we
346// expect most users to use the re-export of `error`'s items to avoid identifier
347// stuttering.
348pub mod error;
349mod impls;
350#[doc(hidden)]
351pub mod layout;
352mod macros;
353#[doc(hidden)]
354pub mod pointer;
355mod r#ref;
356mod split_at;
357// FIXME(#252): If we make this pub, come up with a better name.
358mod wrappers;
359
360use core::{
361    cell::{Cell, UnsafeCell},
362    cmp::Ordering,
363    fmt::{self, Debug, Display, Formatter},
364    hash::Hasher,
365    marker::PhantomData,
366    mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
367    num::{
368        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
369        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
370    },
371    ops::{Deref, DerefMut},
372    ptr::{self, NonNull},
373    slice,
374};
375#[cfg(feature = "std")]
376use std::io;
377
378#[doc(hidden)]
379pub use crate::pointer::invariant::{self, BecauseExclusive};
380#[doc(hidden)]
381pub use crate::pointer::PtrInner;
382pub use crate::{
383    byte_slice::*,
384    byteorder::*,
385    error::*,
386    r#ref::*,
387    split_at::{Split, SplitAt},
388    wrappers::*,
389};
390
391#[cfg(any(feature = "alloc", test, kani))]
392extern crate alloc;
393#[cfg(any(feature = "alloc", test))]
394use alloc::{boxed::Box, vec::Vec};
395#[cfg(any(feature = "alloc", test))]
396use core::alloc::Layout;
397
398use util::MetadataOf;
399
400// Used by `KnownLayout`.
401#[doc(hidden)]
402pub use crate::layout::*;
403// Used by `TryFromBytes::is_bit_valid`.
404#[doc(hidden)]
405pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
406// For each trait polyfill, as soon as the corresponding feature is stable, the
407// polyfill import will be unused because method/function resolution will prefer
408// the inherent method/function over a trait method/function. Thus, we suppress
409// the `unused_imports` warning.
410//
411// See the documentation on `util::polyfills` for more information.
412#[allow(unused_imports)]
413use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
414
415#[rustversion::nightly]
416#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)))]
417const _: () = {
418    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS\""]
419    const _WARNING: () = ();
420    #[warn(deprecated)]
421    _WARNING
422};
423
424// These exist so that code which was written against the old names will get
425// less confusing error messages when they upgrade to a more recent version of
426// zerocopy. On our MSRV toolchain, the error messages read, for example:
427//
428//   error[E0603]: trait `FromZeroes` is private
429//       --> examples/deprecated.rs:1:15
430//        |
431//   1    | use zerocopy::FromZeroes;
432//        |               ^^^^^^^^^^ private trait
433//        |
434//   note: the trait `FromZeroes` is defined here
435//       --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
436//        |
437//   1845 | use FromZeros as FromZeroes;
438//        |     ^^^^^^^^^^^^^^^^^^^^^^^
439//
440// The "note" provides enough context to make it easy to figure out how to fix
441// the error.
442/// Implements [`KnownLayout`].
443///
444/// This derive analyzes various aspects of a type's layout that are needed for
445/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
446/// e.g.:
447///
448/// ```
449/// # use zerocopy_derive::KnownLayout;
450/// #[derive(KnownLayout)]
451/// struct MyStruct {
452/// # /*
453///     ...
454/// # */
455/// }
456///
457/// #[derive(KnownLayout)]
458/// enum MyEnum {
459/// #   V00,
460/// # /*
461///     ...
462/// # */
463/// }
464///
465/// #[derive(KnownLayout)]
466/// union MyUnion {
467/// #   variant: u8,
468/// # /*
469///     ...
470/// # */
471/// }
472/// ```
473///
474/// # Limitations
475///
476/// This derive cannot currently be applied to unsized structs without an
477/// explicit `repr` attribute.
478///
479/// Some invocations of this derive run afoul of a [known bug] in Rust's type
480/// privacy checker. For example, this code:
481///
482/// ```compile_fail,E0446
483/// use zerocopy::*;
484/// # use zerocopy_derive::*;
485///
486/// #[derive(KnownLayout)]
487/// #[repr(C)]
488/// pub struct PublicType {
489///     leading: Foo,
490///     trailing: Bar,
491/// }
492///
493/// #[derive(KnownLayout)]
494/// struct Foo;
495///
496/// #[derive(KnownLayout)]
497/// struct Bar;
498/// ```
499///
500/// ...results in a compilation error:
501///
502/// ```text
503/// error[E0446]: private type `Bar` in public interface
504///  --> examples/bug.rs:3:10
505///    |
506/// 3  | #[derive(KnownLayout)]
507///    |          ^^^^^^^^^^^ can't leak private type
508/// ...
509/// 14 | struct Bar;
510///    | ---------- `Bar` declared as private
511///    |
512///    = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
513/// ```
514///
515/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
516/// structs whose trailing field type is less public than the enclosing struct.
517///
518/// To work around this, mark the trailing field type `pub` and annotate it with
519/// `#[doc(hidden)]`; e.g.:
520///
521/// ```no_run
522/// use zerocopy::*;
523/// # use zerocopy_derive::*;
524///
525/// #[derive(KnownLayout)]
526/// #[repr(C)]
527/// pub struct PublicType {
528///     leading: Foo,
529///     trailing: Bar,
530/// }
531///
532/// #[derive(KnownLayout)]
533/// struct Foo;
534///
535/// #[doc(hidden)]
536/// #[derive(KnownLayout)]
537/// pub struct Bar; // <- `Bar` is now also `pub`
538/// ```
539///
540/// [known bug]: https://github.com/rust-lang/rust/issues/45713
541#[cfg(any(feature = "derive", test))]
542#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
543pub use zerocopy_derive::KnownLayout;
544#[allow(unused)]
545use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
546
547/// Indicates that zerocopy can reason about certain aspects of a type's layout.
548///
549/// This trait is required by many of zerocopy's APIs. It supports sized types,
550/// slices, and [slice DSTs](#dynamically-sized-types).
551///
552/// # Implementation
553///
554/// **Do not implement this trait yourself!** Instead, use
555/// [`#[derive(KnownLayout)]`][derive]; e.g.:
556///
557/// ```
558/// # use zerocopy_derive::KnownLayout;
559/// #[derive(KnownLayout)]
560/// struct MyStruct {
561/// # /*
562///     ...
563/// # */
564/// }
565///
566/// #[derive(KnownLayout)]
567/// enum MyEnum {
568/// # /*
569///     ...
570/// # */
571/// }
572///
573/// #[derive(KnownLayout)]
574/// union MyUnion {
575/// #   variant: u8,
576/// # /*
577///     ...
578/// # */
579/// }
580/// ```
581///
582/// This derive performs a sophisticated analysis to deduce the layout
583/// characteristics of types. You **must** implement this trait via the derive.
584///
585/// # Dynamically-sized types
586///
587/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
588///
589/// A slice DST is a type whose trailing field is either a slice or another
590/// slice DST, rather than a type with fixed size. For example:
591///
592/// ```
593/// #[repr(C)]
594/// struct PacketHeader {
595/// # /*
596///     ...
597/// # */
598/// }
599///
600/// #[repr(C)]
601/// struct Packet {
602///     header: PacketHeader,
603///     body: [u8],
604/// }
605/// ```
606///
607/// It can be useful to think of slice DSTs as a generalization of slices - in
608/// other words, a normal slice is just the special case of a slice DST with
609/// zero leading fields. In particular:
610/// - Like slices, slice DSTs can have different lengths at runtime
611/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
612///   or via other indirection such as `Box`
613/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
614///   encodes the number of elements in the trailing slice field
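///
/// For example (a sketch; `Packet` and its fields are illustrative), a
/// length-prefixed packet can be modeled as a slice DST and parsed in place
/// from a byte slice:
///
/// ```
/// use zerocopy::*;
/// # use zerocopy_derive::*;
///
/// #[derive(FromBytes, KnownLayout, Immutable)]
/// #[repr(C)]
/// struct Packet {
///     length: u8,
///     body: [u8],
/// }
///
/// // The trailing `body` slice absorbs however many bytes follow `length`.
/// let bytes = &[3, 4, 5, 6, 7, 8, 9][..];
/// let packet = Packet::ref_from_bytes(bytes).unwrap();
/// assert_eq!(packet.length, 3);
/// assert_eq!(packet.body, [4, 5, 6, 7, 8, 9]);
/// ```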
615///
616/// ## Slice DST layout
617///
618/// Just like other composite Rust types, the layout of a slice DST is not
619/// well-defined unless it is specified using an explicit `#[repr(...)]`
620/// attribute such as `#[repr(C)]`. [Other representations are
621/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
622/// example.
623///
624/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
625/// types][repr-c-structs], but the presence of a variable-length field
626/// introduces the possibility of *dynamic padding*. In particular, it may be
627/// necessary to add trailing padding *after* the trailing slice field in order
628/// to satisfy the outer type's alignment, and the amount of padding required
629/// may be a function of the length of the trailing slice field. This is just a
630/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
631/// but it can result in surprising behavior. For example, consider the
632/// following type:
633///
634/// ```
635/// #[repr(C)]
636/// struct Foo {
637///     a: u32,
638///     b: u8,
639///     z: [u16],
640/// }
641/// ```
642///
643/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
644/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
645/// `Foo`:
646///
647/// ```text
648/// byte offset | 01234567
649///       field | aaaab---
650///                    ><
651/// ```
652///
653/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
654/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
655/// round up to offset 6. This means that there is one byte of padding between
656/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
657/// then two bytes of padding after `z` in order to satisfy the overall
658/// alignment of `Foo`. The size of this instance is 8 bytes.
659///
660/// What about if `z` has length 1?
661///
662/// ```text
663/// byte offset | 01234567
664///       field | aaaab-zz
665/// ```
666///
667/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
668/// that we no longer need padding after `z` in order to satisfy `Foo`'s
669/// alignment. We've now seen two different values of `Foo` with two different
670/// lengths of `z`, but they both have the same size - 8 bytes.
671///
672/// What about if `z` has length 2?
673///
674/// ```text
675/// byte offset | 012345678901
676///       field | aaaab-zzzz--
677/// ```
678///
679/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
680/// size to 10, and so we now need another 2 bytes of padding after `z` to
681/// satisfy `Foo`'s alignment.
682///
683/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
684/// applied to slice DSTs, but it can be surprising that the amount of trailing
685/// padding becomes a function of the trailing slice field's length, and thus
686/// can only be computed at runtime.
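///
/// As a sketch of how this is computed (assuming, as above, that `u32` has
/// alignment 4 on the target), [`KnownLayout::size_for_metadata`] reports the
/// size of `Foo` for a given trailing slice length:
///
/// ```
/// use zerocopy::KnownLayout;
/// # use zerocopy_derive::*;
///
/// #[derive(KnownLayout)]
/// #[repr(C)]
/// struct Foo {
///     a: u32,
///     b: u8,
///     z: [u16],
/// }
///
/// // 0, 1, and 2 trailing elements produce sizes 8, 8, and 12, respectively,
/// // matching the diagrams above.
/// assert_eq!(Foo::size_for_metadata(0), Some(8));
/// assert_eq!(Foo::size_for_metadata(1), Some(8));
/// assert_eq!(Foo::size_for_metadata(2), Some(12));
/// ```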
687///
688/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
689/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
690///
691/// ## What is a valid size?
692///
693/// There are two places in zerocopy's API where we refer to "a valid size" of a
694/// type. In normal casts or conversions, where the source is a byte slice, we
695/// need to know whether the source byte slice is a valid size of the
696/// destination type. In prefix or suffix casts, we need to know whether *there
697/// exists* a valid size of the destination type which fits in the source byte
698/// slice and, if so, what the largest such size is.
699///
700/// As outlined above, a slice DST's size is defined by the number of elements
701/// in its trailing slice field. However, there is not necessarily a 1-to-1
702/// mapping between trailing slice field length and overall size. As we saw in
703/// the previous section with the type `Foo`, instances with both 0 and 1
704/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
705///
706/// When we say "x is a valid size of `T`", we mean one of two things:
707/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
708/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
709///   `T` with `len` trailing slice elements has size `x`
710///
711/// When we say "largest possible size of `T` that fits in a byte slice", we
712/// mean one of two things:
713/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
714///   `size_of::<T>()` bytes long
715/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
716///   that the instance of `T` with `len` trailing slice elements fits in the
717///   byte slice, and to choose the largest such `len`, if any
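///
/// As a concrete sketch, the valid sizes of `[u16]` are exactly the multiples
/// of 2, so the largest size of `[u16]` which fits in a 5-byte slice is 4
/// (corresponding to `len == 2`):
///
/// ```
/// use zerocopy::KnownLayout;
///
/// // With `len == 2`, an instance of `[u16]` occupies 4 bytes - the largest
/// // valid size that fits in a 5-byte slice.
/// assert_eq!(<[u16]>::size_for_metadata(2), Some(4));
/// ```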
718///
719///
720/// # Safety
721///
722/// This trait does not convey any safety guarantees to code outside this crate.
723///
724/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
725/// releases of zerocopy may make backwards-breaking changes to these items,
726/// including changes that only affect soundness, which may cause code which
727/// uses those items to silently become unsound.
728///
729#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
730#[cfg_attr(
731    not(feature = "derive"),
732    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
733)]
734#[cfg_attr(
735    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
736    diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
737)]
738pub unsafe trait KnownLayout {
739    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
740    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
741    // it likely won't be in the future, but there's no reason not to be
742    // forwards-compatible with object safety.
743    #[doc(hidden)]
744    fn only_derive_is_allowed_to_implement_this_trait()
745    where
746        Self: Sized;
747
748    /// The type of metadata stored in a pointer to `Self`.
749    ///
750    /// This is `()` for sized types and `usize` for slice DSTs.
751    type PointerMetadata: PointerMetadata;
752
753    /// A maybe-uninitialized analog of `Self`
754    ///
755    /// # Safety
756    ///
757    /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
758    /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
759    #[doc(hidden)]
760    type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
761
762    /// The layout of `Self`.
763    ///
764    /// # Safety
765    ///
766    /// Callers may assume that `LAYOUT` accurately reflects the layout of
767    /// `Self`. In particular:
768    /// - `LAYOUT.align` is equal to `Self`'s alignment
769    /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
770    ///   where `size == size_of::<Self>()`
771    /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
772    ///   SizeInfo::SliceDst(slice_layout)` where:
773    ///   - The size, `size`, of an instance of `Self` with `elems` trailing
774    ///     slice elements is equal to `slice_layout.offset +
775    ///     slice_layout.elem_size * elems` rounded up to the nearest multiple
776    ///     of `LAYOUT.align`
777    ///   - For such an instance, any bytes in the range `[slice_layout.offset +
778    ///     slice_layout.elem_size * elems, size)` are padding and must not be
779    ///     assumed to be initialized
780    #[doc(hidden)]
781    const LAYOUT: DstLayout;
782
783    /// SAFETY: The returned pointer has the same address and provenance as
784    /// `bytes`. If `Self` is a DST, the returned pointer's referent has the
785    /// number of trailing slice elements specified by `meta`.
786    #[doc(hidden)]
787    fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
788
789    /// Extracts the metadata from a pointer to `Self`.
790    ///
791    /// # Safety
792    ///
793    /// `pointer_to_metadata` always returns the correct metadata stored in
794    /// `ptr`.
795    #[doc(hidden)]
796    fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
797
798    /// Computes the length of the byte range addressed by `ptr`.
799    ///
800    /// Returns `None` if the resulting length would not fit in a `usize`.
801    ///
802    /// # Safety
803    ///
804    /// Callers may assume that `size_of_val_raw` always returns the correct
805    /// size.
806    ///
807    /// Callers may assume that, if `ptr` addresses a byte range whose length
808    /// fits in a `usize`, this will return `Some`.
809    #[doc(hidden)]
810    #[must_use]
811    #[inline(always)]
812    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
813        let meta = Self::pointer_to_metadata(ptr.as_ptr());
814        // SAFETY: `size_for_metadata` promises to only return `None` if the
815        // resulting size would not fit in a `usize`.
816        Self::size_for_metadata(meta)
817    }
818
819    #[doc(hidden)]
820    #[must_use]
821    #[inline(always)]
822    fn raw_dangling() -> NonNull<Self> {
823        let meta = Self::PointerMetadata::from_elem_count(0);
824        Self::raw_from_ptr_len(NonNull::dangling(), meta)
825    }
826
827    /// Computes the size of an object of type `Self` with the given pointer
828    /// metadata.
829    ///
830    /// # Safety
831    ///
832    /// `size_for_metadata` promises to return `None` if and only if the
833    /// resulting size would not fit in a `usize`. Note that the returned size
834    /// could exceed the actual maximum valid size of an allocated object,
835    /// `isize::MAX`.
836    ///
837    /// # Examples
838    ///
839    /// ```
840    /// use zerocopy::KnownLayout;
841    ///
842    /// assert_eq!(u8::size_for_metadata(()), Some(1));
843    /// assert_eq!(u16::size_for_metadata(()), Some(2));
844    /// assert_eq!(<[u8]>::size_for_metadata(42), Some(42));
845    /// assert_eq!(<[u16]>::size_for_metadata(42), Some(84));
846    ///
847    /// // This size exceeds the maximum valid object size (`isize::MAX`):
848    /// assert_eq!(<[u8]>::size_for_metadata(usize::MAX), Some(usize::MAX));
849    ///
850    /// // This size, if computed, would exceed `usize::MAX`:
851    /// assert_eq!(<[u16]>::size_for_metadata(usize::MAX), None);
852    /// ```
853    #[inline(always)]
854    fn size_for_metadata(meta: Self::PointerMetadata) -> Option<usize> {
855        meta.size_for_metadata(Self::LAYOUT)
856    }
857}
858
859/// Efficiently produces the [`TrailingSliceLayout`] of `T`.
860#[inline(always)]
861pub(crate) fn trailing_slice_layout<T>() -> TrailingSliceLayout
862where
863    T: ?Sized + KnownLayout<PointerMetadata = usize>,
864{
865    trait LayoutFacts {
866        const SIZE_INFO: TrailingSliceLayout;
867    }
868
869    impl<T: ?Sized> LayoutFacts for T
870    where
871        T: KnownLayout<PointerMetadata = usize>,
872    {
873        const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info {
874            crate::SizeInfo::Sized { .. } => const_panic!("unreachable"),
875            crate::SizeInfo::SliceDst(info) => info,
876        };
877    }
878
879    T::SIZE_INFO
880}
881
882/// The metadata associated with a [`KnownLayout`] type.
883#[doc(hidden)]
884pub trait PointerMetadata: Copy + Eq + Debug {
885    /// Constructs a `Self` from an element count.
886    ///
887    /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
888    /// `elems`. No other types are currently supported.
889    fn from_elem_count(elems: usize) -> Self;
890
891    /// Converts `self` to an element count.
892    ///
893    /// If `Self = ()`, this returns `0`. If `Self = usize`, this returns
894    /// `self`. No other types are currently supported.
895    fn to_elem_count(self) -> usize;
896
897    /// Computes the size of the object with the given layout and pointer
898    /// metadata.
899    ///
900    /// # Panics
901    ///
902    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
903    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
904    /// panic.
905    ///
906    /// # Safety
907    ///
908    /// `size_for_metadata` promises to only return `None` if the resulting size
909    /// would not fit in a `usize`.
910    fn size_for_metadata(self, layout: DstLayout) -> Option<usize>;
911}
912
913impl PointerMetadata for () {
914    #[inline]
915    #[allow(clippy::unused_unit)]
916    fn from_elem_count(_elems: usize) -> () {}
917
918    #[inline]
919    fn to_elem_count(self) -> usize {
920        0
921    }
922
923    #[inline]
924    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
925        match layout.size_info {
926            SizeInfo::Sized { size } => Some(size),
927            // NOTE: This branch is unreachable, but we return `None` rather
928            // than `unreachable!()` to avoid generating panic paths.
929            SizeInfo::SliceDst(_) => None,
930        }
931    }
932}
933
934impl PointerMetadata for usize {
935    #[inline]
936    fn from_elem_count(elems: usize) -> usize {
937        elems
938    }
939
940    #[inline]
941    fn to_elem_count(self) -> usize {
942        self
943    }
944
945    #[inline]
946    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
947        match layout.size_info {
948            SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
949                let slice_len = elem_size.checked_mul(self)?;
950                let without_padding = offset.checked_add(slice_len)?;
951                without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
952            }
953            // NOTE: This branch is unreachable, but we return `None` rather
954            // than `unreachable!()` to avoid generating panic paths.
955            SizeInfo::Sized { .. } => None,
956        }
957    }
958}
959
960// SAFETY: Delegates safety to `DstLayout::for_slice`.
961unsafe impl<T> KnownLayout for [T] {
962    #[allow(clippy::missing_inline_in_public_items, dead_code)]
963    #[cfg_attr(
964        all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
965        coverage(off)
966    )]
967    fn only_derive_is_allowed_to_implement_this_trait()
968    where
969        Self: Sized,
970    {
971    }
972
973    type PointerMetadata = usize;
974
975    // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
976    // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
977    // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
978    // identical, because they both lack a fixed-sized prefix and because they
979    // inherit the alignments of their inner element type (which are identical)
980    // [2][3].
981    //
982    // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
983    // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
984    // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
985    // back-to-back [2][3].
986    //
987    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
988    //
989    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
990    //   `T`
991    //
992    // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
993    //
994    //   Slices have the same layout as the section of the array they slice.
995    //
996    // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
997    //
998    //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
999    //   alignment of `T`. Arrays are laid out so that the zero-based `nth`
1000    //   element of the array is offset from the start of the array by `n *
1001    //   size_of::<T>()` bytes.
1002    type MaybeUninit = [CoreMaybeUninit<T>];
1003
1004    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();
1005
1006    // SAFETY: `.cast` preserves address and provenance. The returned pointer
1007    // refers to an object with `elems` elements by construction.
1008    #[inline(always)]
1009    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
1010        // FIXME(#67): Remove this allow. See NonNullExt for more details.
1011        #[allow(unstable_name_collisions)]
1012        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
1013    }
1014
1015    #[inline(always)]
1016    fn pointer_to_metadata(ptr: *mut [T]) -> usize {
1017        #[allow(clippy::as_conversions)]
1018        let slc = ptr as *const [()];
1019
1020        // SAFETY:
1021        // - `()` has alignment 1, so `slc` is trivially aligned.
1022        // - `slc` was derived from a non-null pointer.
1023        // - The size is 0 regardless of the length, so it is sound to
1024        //   materialize a reference regardless of location.
1025        // - By invariant, `self.ptr` has valid provenance.
1026        let slc = unsafe { &*slc };
1027
1028        // This is correct because the preceding `as` cast preserves the number
1029        // of slice elements. [1]
1030        //
1031        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
1032        //
1033        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
1034        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
1035        //   elements in this slice. Casts between these raw pointer types
1036        //   preserve the number of elements. ... The same holds for `str` and
1037        //   any compound type whose unsized tail is a slice type, such as
1038        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
1039        slc.len()
1040    }
1041}
1042
1043#[rustfmt::skip]
1044impl_known_layout!(
1045    (),
1046    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
1047    bool, char,
1048    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
1049    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
1050);
1051#[rustfmt::skip]
1052#[cfg(feature = "float-nightly")]
1053impl_known_layout!(
1054    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
1055    f16,
1056    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
1057    f128
1058);
1059#[rustfmt::skip]
1060impl_known_layout!(
1061    T         => Option<T>,
1062    T: ?Sized => PhantomData<T>,
1063    T         => Wrapping<T>,
1064    T         => CoreMaybeUninit<T>,
1065    T: ?Sized => *const T,
1066    T: ?Sized => *mut T,
1067    T: ?Sized => &'_ T,
1068    T: ?Sized => &'_ mut T,
1069);
1070impl_known_layout!(const N: usize, T => [T; N]);
1071
1072// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
1073// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as `T`.
1074//
1075// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
1076//
1077//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
1078//   `T`
1079//
1080// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
1081//
1082//   `UnsafeCell<T>` has the same in-memory representation as its inner type
1083//   `T`.
1084//
1085// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
1086//
1087//   `Cell<T>` has the same in-memory representation as `T`.
1088#[allow(clippy::multiple_unsafe_ops_per_block)]
1089const _: () = unsafe {
1090    unsafe_impl_known_layout!(
1091        #[repr([u8])]
1092        str
1093    );
1094    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
1095    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
1096    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
1097};
1098
1099// SAFETY:
1100// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and
1101//   `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same:
1102//   - Fixed prefix size
1103//   - Alignment
1104//   - (For DSTs) trailing slice element size
1105// - By consequence of the above, `T::MaybeUninit` and `T` require the same
1106//   kind of pointer metadata, and thus it is valid to perform an `as` cast
1107//   from `*mut T` to `*mut T::MaybeUninit`, and this operation preserves
1108//   referent size (i.e., `size_of_val_raw`).
1109const _: () = unsafe {
1110    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>)
1111};
1112
1113// FIXME(#196, #2856): Eventually, we'll want to support enum variants and
1114// union fields being treated uniformly since they behave similarly to each
1115// other in terms of projecting validity – specifically, for a type `T` with
1116// validity `V`, if `T` is a struct type, then its fields straightforwardly also
1117// have validity `V`. By contrast, if `T` is an enum or union type, then
1118// validity is not straightforwardly recursive in this way.
1119#[doc(hidden)]
1120pub const STRUCT_VARIANT_ID: i128 = -1;
1121#[doc(hidden)]
1122pub const UNION_VARIANT_ID: i128 = -2;
1123#[doc(hidden)]
1124pub const REPR_C_UNION_VARIANT_ID: i128 = -3;
1125
1126/// # Safety
1127///
1128/// `Self::ProjectToTag` must satisfy its safety invariant.
1129#[doc(hidden)]
1130pub unsafe trait HasTag {
1131    fn only_derive_is_allowed_to_implement_this_trait()
1132    where
1133        Self: Sized;
1134
1135    /// The type's enum tag, or `()` for non-enum types.
1136    type Tag: Immutable;
1137
1138    /// A pointer projection from `Self` to its tag.
1139    ///
1140    /// # Safety
1141    ///
1142    /// It must be the case that, for all `slf: Ptr<'_, Self, I>`, it is sound
1143    /// to project from `slf` to `Ptr<'_, Self::Tag, I>` using this projection.
1144    type ProjectToTag: pointer::cast::Project<Self, Self::Tag>;
1145}
1146
1147/// Projects a given field from `Self`.
1148///
1149/// All implementations of `HasField` for a particular field `f` in `Self`
1150/// should use the same `Field` type; this ensures that `Field` is inferable
1151/// given an explicit `VARIANT_ID` and `FIELD_ID`.
1152///
1153/// # Safety
1154///
1155/// A field `f` is `HasField` for `Self` if and only if:
1156///
1157/// - If `Self` has the layout of a struct or union type, then `VARIANT_ID` is
1158///   `STRUCT_VARIANT_ID` or `UNION_VARIANT_ID` respectively; otherwise, if
1159///   `Self` has the layout of an enum type, `VARIANT_ID` is the numerical index
1160///   of the enum variant in which `f` appears. Note that `Self` does not need
1161///   to actually *be* such a type – it just needs to have the same layout as
1162///   such a type. For example, a `#[repr(transparent)]` wrapper around an enum
1163///   has the same layout as that enum.
1164/// - If `f` has name `n`, `FIELD_ID` is `zerocopy::ident_id!(n)`; otherwise,
1165///   if `f` is at index `i`, `FIELD_ID` is `zerocopy::ident_id!(i)`.
1166/// - `Field` is a type with the same visibility as `f`.
1167/// - `Type` has the same type as `f`.
1168///
1169/// The caller must **not** assume that a pointer's referent being aligned
1170/// implies that calling `project` on that pointer will result in a pointer to
1171/// an aligned referent. For example, `HasField` may be implemented for
1172/// `#[repr(packed)]` structs.
1173///
1174/// The implementation of `project` must satisfy its safety post-condition.
1175#[doc(hidden)]
1176pub unsafe trait HasField<Field, const VARIANT_ID: i128, const FIELD_ID: i128>:
1177    HasTag
1178{
1179    fn only_derive_is_allowed_to_implement_this_trait()
1180    where
1181        Self: Sized;
1182
1183    /// The type of the field.
1184    type Type: ?Sized;
1185
1186    /// Projects from `slf` to the field.
1187    ///
1188    /// Users should generally not call `project` directly, and instead should
1189    /// use high-level APIs like [`PtrInner::project`] or [`Ptr::project`].
1190    ///
1191    /// # Safety
1192    ///
1193    /// The returned pointer refers to a non-strict subset of the bytes of
1194    /// `slf`'s referent, and has the same provenance as `slf`.
1195    #[must_use]
1196    fn project(slf: PtrInner<'_, Self>) -> *mut Self::Type;
1197}
1198
1199/// Projects a given field from `Self`.
1200///
1201/// Implementations of this trait encode the conditions under which a field can
1202/// be projected from a `Ptr<'_, Self, I>`, and how the invariants of that
1203/// [`Ptr`] (`I`) determine the invariants of pointers projected from it. In
1204/// other words, it is a type-level function over invariants; `I` goes in,
1205/// `Self::Invariants` comes out.
1206///
1207/// # Safety
1208///
1209/// `T: ProjectField<Field, I, VARIANT_ID, FIELD_ID>` if, for a
1210/// `ptr: Ptr<'_, T, I>` such that `T::is_projectable(ptr).is_ok()`,
1211/// `<T as HasField<Field, VARIANT_ID, FIELD_ID>>::project(ptr.as_inner())`
1212/// conforms to `T::Invariants`.
1213#[doc(hidden)]
1214pub unsafe trait ProjectField<Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>:
1215    HasField<Field, VARIANT_ID, FIELD_ID>
1216where
1217    I: invariant::Invariants,
1218{
1219    fn only_derive_is_allowed_to_implement_this_trait()
1220    where
1221        Self: Sized;
1222
1223    /// The invariants of the projected field pointer, with respect to the
1224    /// invariants, `I`, of the containing pointer. The aliasing dimension of
1225    /// the invariants is guaranteed to remain unchanged.
1226    type Invariants: invariant::Invariants<Aliasing = I::Aliasing>;
1227
1228    /// The failure mode of projection. `()` if the projection is fallible,
1229    /// otherwise [`core::convert::Infallible`].
1230    type Error;
1231
1232    /// Is the given field projectable from `ptr`?
1233    ///
1234    /// If a field with [`Self::Invariants`] is projectable from the referent,
1235    /// this function returns `Ok(())`, indicating that the projection may be
1236    /// made; otherwise, it returns `Err`.
1237    ///
1238    /// This method must be overridden if the field's projectability depends on
1239    /// the value of the bytes in `ptr`.
1240    #[inline(always)]
1241    fn is_projectable<'a>(_ptr: Ptr<'a, Self::Tag, I>) -> Result<(), Self::Error> {
1242        trait IsInfallible {
1243            const IS_INFALLIBLE: bool;
1244        }
1245
1246        struct Projection<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>(
1247            PhantomData<(Field, I, T)>,
1248        )
1249        where
1250            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
1251            I: invariant::Invariants;
1252
1253        impl<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128> IsInfallible
1254            for Projection<T, Field, I, VARIANT_ID, FIELD_ID>
1255        where
1256            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
1257            I: invariant::Invariants,
1258        {
1259            const IS_INFALLIBLE: bool = {
1260                let is_infallible = match VARIANT_ID {
1261                    // For nondestructive projections of struct and union
1262                    // fields, the projected field's satisfaction of
1263                    // `Invariants` does not depend on the value of the
1264                    // referent. This default implementation of `is_projectable`
1265                    // is non-destructive, as it does not overwrite any part of
1266                    // the referent.
1267                    crate::STRUCT_VARIANT_ID | crate::UNION_VARIANT_ID => true,
1268                    _enum_variant => {
1269                        use crate::invariant::{Validity, ValidityKind};
1270                        match I::Validity::KIND {
1271                            // The `Uninit` and `Initialized` validity
1272                            // invariants do not depend on the enum's tag. In
1273                            // particular, we don't actually care about what
1274                            // variant is present – we can treat *any* range of
1275                            // uninitialized or initialized memory as containing
1276                            // an uninitialized or initialized instance of *any*
1277                            // type – the type itself is irrelevant.
1278                            ValidityKind::Uninit | ValidityKind::Initialized => true,
1279                            // The projectability of an enum field from an
1280                            // `AsInitialized` or `Valid` state is a dynamic
1281                            // property of its tag.
1282                            ValidityKind::AsInitialized | ValidityKind::Valid => false,
1283                        }
1284                    }
1285                };
1286                const_assert!(is_infallible);
1287                is_infallible
1288            };
1289        }
1290
1291        const_assert!(
1292            <Projection<Self, Field, I, VARIANT_ID, FIELD_ID> as IsInfallible>::IS_INFALLIBLE
1293        );
1294
1295        Ok(())
1296    }
1297}
1298
1299/// Analyzes whether a type is [`FromZeros`].
1300///
1301/// This derive analyzes, at compile time, whether the annotated type satisfies
1302/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
1303/// supertraits if it is sound to do so. This derive can be applied to structs,
1304/// enums, and unions; e.g.:
1305///
1306/// ```
1307/// # use zerocopy_derive::{FromZeros, Immutable};
1308/// #[derive(FromZeros)]
1309/// struct MyStruct {
1310/// # /*
1311///     ...
1312/// # */
1313/// }
1314///
1315/// #[derive(FromZeros)]
1316/// #[repr(u8)]
1317/// enum MyEnum {
1318/// #   Variant0,
1319/// # /*
1320///     ...
1321/// # */
1322/// }
1323///
1324/// #[derive(FromZeros, Immutable)]
1325/// union MyUnion {
1326/// #   variant: u8,
1327/// # /*
1328///     ...
1329/// # */
1330/// }
1331/// ```
1332///
1333/// [safety conditions]: trait@FromZeros#safety
1334///
1335/// # Analysis
1336///
1337/// *This section describes, roughly, the analysis performed by this derive to
1338/// determine whether it is sound to implement `FromZeros` for a given type.
1339/// Unless you are modifying the implementation of this derive, or attempting to
1340/// manually implement `FromZeros` for a type yourself, you don't need to read
1341/// this section.*
1342///
1343/// If a type has the following properties, then this derive can implement
1344/// `FromZeros` for that type:
1345///
1346/// - If the type is a struct, all of its fields must be `FromZeros`.
1347/// - If the type is an enum:
1348///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1349///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
1350///   - It must have a variant with a discriminant/tag of `0`. See [the
1351///     reference] for a description of how discriminant values are
1352///     specified.
1353///   - The fields of that variant must be `FromZeros`.
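///
/// For example (a minimal sketch), the following enum can derive `FromZeros`
/// because its `Idle` variant has tag `0` and carries no fields:
///
/// ```
/// # use zerocopy_derive::FromZeros;
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum Status {
///     // The all-zeros byte pattern is a valid `Status`: it is `Status::Idle`.
///     Idle = 0,
///     Busy = 1,
/// }
/// ```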
1354///
1355/// This analysis is subject to change. Unsafe code may *only* rely on the
1356/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
1357/// implementation details of this derive.
1358///
1359/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
1360///
1361/// ## Why isn't an explicit representation required for structs?
1362///
1363/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
1364/// that structs are marked with `#[repr(C)]`.
1365///
1366/// Per the [Rust reference](reference),
1367///
1368/// > The representation of a type can change the padding between fields, but
1369/// > does not change the layout of the fields themselves.
1370///
1371/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1372///
1373/// Since the layout of structs only consists of padding bytes and field bytes,
1374/// a struct is soundly `FromZeros` if:
1375/// 1. its padding is soundly `FromZeros`, and
1376/// 2. its fields are soundly `FromZeros`.
1377///
1378/// The answer to the first question is always yes: padding bytes do not have
1379/// any validity constraints. A [discussion] of this question in the Unsafe Code
1380/// Guidelines Working Group concluded that it would be virtually unimaginable
1381/// for future versions of rustc to add validity constraints to padding bytes.
1382///
1383/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1384///
1385/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
1386/// its fields are `FromZeros`.
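///
/// For example, the following struct (a hypothetical `Counters`, shown only
/// for illustration) needs no `repr` attribute; the derive accepts it simply
/// because both of its fields are `FromZeros`:
///
/// ```
/// # use zerocopy_derive::FromZeros;
/// #[derive(FromZeros)]
/// struct Counters {
///     hits: u64,
///     misses: u64,
/// }
/// ```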
1387// FIXME(#146): Document why we don't require an enum to have an explicit `repr`
1388// attribute.
1389#[cfg(any(feature = "derive", test))]
1390#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1391pub use zerocopy_derive::FromZeros;
1392/// Analyzes whether a type is [`Immutable`].
1393///
1394/// This derive analyzes, at compile time, whether the annotated type satisfies
1395/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
1396/// sound to do so. This derive can be applied to structs, enums, and unions;
1397/// e.g.:
1398///
1399/// ```
1400/// # use zerocopy_derive::Immutable;
1401/// #[derive(Immutable)]
1402/// struct MyStruct {
1403/// # /*
1404///     ...
1405/// # */
1406/// }
1407///
1408/// #[derive(Immutable)]
1409/// enum MyEnum {
1410/// #   Variant0,
1411/// # /*
1412///     ...
1413/// # */
1414/// }
1415///
1416/// #[derive(Immutable)]
1417/// union MyUnion {
1418/// #   variant: u8,
1419/// # /*
1420///     ...
1421/// # */
1422/// }
1423/// ```
1424///
1425/// # Analysis
1426///
1427/// *This section describes, roughly, the analysis performed by this derive to
1428/// determine whether it is sound to implement `Immutable` for a given type.
1429/// Unless you are modifying the implementation of this derive, you don't need
1430/// to read this section.*
1431///
1432/// If a type has the following properties, then this derive can implement
1433/// `Immutable` for that type:
1434///
1435/// - All fields must be `Immutable` (see the example below).
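///
/// For example, the following struct (a hypothetical `Header`, used purely
/// for illustration) passes this analysis because `u8` and `u16` are both
/// `Immutable`; a field with interior mutability, such as a
/// `core::cell::Cell`, would cause the analysis to fail.
///
/// ```
/// # use zerocopy_derive::Immutable;
/// #[derive(Immutable)]
/// struct Header {
///     version: u8,
///     flags: u8,
///     length: u16,
/// }
/// ```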
1436///
1437/// This analysis is subject to change. Unsafe code may *only* rely on the
1438/// documented [safety conditions] of `Immutable`, and must *not* rely on the
1439/// implementation details of this derive.
1440///
1441/// [safety conditions]: trait@Immutable#safety
1442#[cfg(any(feature = "derive", test))]
1443#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1444pub use zerocopy_derive::Immutable;
1445
1446/// Types which are free from interior mutability.
1447///
1448/// `T: Immutable` indicates that `T` does not permit interior mutation, except
1449/// by ownership or an exclusive (`&mut`) borrow.
1450///
1451/// # Implementation
1452///
1453/// **Do not implement this trait yourself!** Instead, use
1454/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
1455/// e.g.:
1456///
1457/// ```
1458/// # use zerocopy_derive::Immutable;
1459/// #[derive(Immutable)]
1460/// struct MyStruct {
1461/// # /*
1462///     ...
1463/// # */
1464/// }
1465///
1466/// #[derive(Immutable)]
1467/// enum MyEnum {
1468/// # /*
1469///     ...
1470/// # */
1471/// }
1472///
1473/// #[derive(Immutable)]
1474/// union MyUnion {
1475/// #   variant: u8,
1476/// # /*
1477///     ...
1478/// # */
1479/// }
1480/// ```
1481///
1482/// This derive performs a sophisticated, compile-time safety analysis to
1483/// determine whether a type is `Immutable`.
1484///
1485/// # Safety
1486///
1487/// Unsafe code outside of this crate must not make any assumptions about `T`
1488/// based on `T: Immutable`. We reserve the right to relax the requirements for
1489/// `Immutable` in the future, and if unsafe code outside of this crate makes
1490/// assumptions based on `T: Immutable`, future relaxations may cause that code
1491/// to become unsound.
1492///
1493// # Safety (Internal)
1494//
1495// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
1496// `t: &T`, `t` does not permit interior mutation of its referent. Because
1497// [`UnsafeCell`] is the only type which permits interior mutation, it is
1498// sufficient (though not necessary) to guarantee that `T` contains no
1499// `UnsafeCell`s.
1500//
1501// [`UnsafeCell`]: core::cell::UnsafeCell
1502#[cfg_attr(
1503    feature = "derive",
1504    doc = "[derive]: zerocopy_derive::Immutable",
1505    doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
1506)]
1507#[cfg_attr(
1508    not(feature = "derive"),
1509    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
1510    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
1511)]
1512#[cfg_attr(
1513    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1514    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
1515)]
1516pub unsafe trait Immutable {
1517    // The `Self: Sized` bound makes it so that `Immutable` is still object
1518    // safe.
1519    #[doc(hidden)]
1520    fn only_derive_is_allowed_to_implement_this_trait()
1521    where
1522        Self: Sized;
1523}
1524
1525/// Implements [`TryFromBytes`].
1526///
1527/// This derive synthesizes the runtime checks required to determine whether a
1528/// sequence of initialized bytes corresponds to a valid instance of a type.
1529/// This derive can be applied to structs, enums, and unions; e.g.:
1530///
1531/// ```
1532/// # use zerocopy_derive::{TryFromBytes, Immutable};
1533/// #[derive(TryFromBytes)]
1534/// struct MyStruct {
1535/// # /*
1536///     ...
1537/// # */
1538/// }
1539///
1540/// #[derive(TryFromBytes)]
1541/// #[repr(u8)]
1542/// enum MyEnum {
1543/// #   V00,
1544/// # /*
1545///     ...
1546/// # */
1547/// }
1548///
1549/// #[derive(TryFromBytes, Immutable)]
1550/// union MyUnion {
1551/// #   variant: u8,
1552/// # /*
1553///     ...
1554/// # */
1555/// }
1556/// ```
1557///
1558/// # Portability
1559///
1560/// To ensure consistent endianness for enums with multi-byte representations,
1561/// explicitly specify and convert each discriminant using `.to_le()` or
1562/// `.to_be()`; e.g.:
1563///
1564/// ```
1565/// # use zerocopy_derive::TryFromBytes;
1566/// // `DataStoreVersion` is encoded in little-endian.
1567/// #[derive(TryFromBytes)]
1568/// #[repr(u32)]
1569/// pub enum DataStoreVersion {
1570///     /// Version 1 of the data store.
1571///     V1 = 9u32.to_le(),
1572///
1573///     /// Version 2 of the data store.
1574///     V2 = 10u32.to_le(),
1575/// }
1576/// ```
1577///
1578/// [safety conditions]: trait@TryFromBytes#safety
1579#[cfg(any(feature = "derive", test))]
1580#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1581pub use zerocopy_derive::TryFromBytes;
1582
1583/// Types for which some bit patterns are valid.
1584///
1585/// A memory region of the appropriate length which contains initialized bytes
1586/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1587/// bytes corresponds to a [*valid instance*] of that type. For example,
1588/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1589/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1590/// `1`.
1591///
1592/// # Implementation
1593///
1594/// **Do not implement this trait yourself!** Instead, use
1595/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1596///
1597/// ```
1598/// # use zerocopy_derive::{TryFromBytes, Immutable};
1599/// #[derive(TryFromBytes)]
1600/// struct MyStruct {
1601/// # /*
1602///     ...
1603/// # */
1604/// }
1605///
1606/// #[derive(TryFromBytes)]
1607/// #[repr(u8)]
1608/// enum MyEnum {
1609/// #   V00,
1610/// # /*
1611///     ...
1612/// # */
1613/// }
1614///
1615/// #[derive(TryFromBytes, Immutable)]
1616/// union MyUnion {
1617/// #   variant: u8,
1618/// # /*
1619///     ...
1620/// # */
1621/// }
1622/// ```
1623///
1624/// This derive ensures that the runtime check of whether bytes correspond to a
1625/// valid instance is sound. You **must** implement this trait via the derive.
1626///
1627/// # What is a "valid instance"?
1628///
1629/// In Rust, each type has *bit validity*, which refers to the set of bit
1630/// patterns which may appear in an instance of that type. It is impossible for
1631/// safe Rust code to produce values which violate bit validity (i.e., values
1632/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1633/// invalid value, this is considered [undefined behavior].
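///
/// As a brief sketch of what this means in practice, consider [`bool`]: the
/// language documents that only the bytes `0` and `1` are valid, so
/// `TryFromBytes` accepts exactly those bytes and rejects all others.
///
/// ```
/// use zerocopy::TryFromBytes;
///
/// // `0` and `1` are the only valid bit patterns for `bool`.
/// assert!(bool::try_ref_from_bytes(&[1u8][..]).is_ok());
/// assert!(bool::try_ref_from_bytes(&[3u8][..]).is_err());
/// ```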
1634///
1635/// Rust's bit validity rules are currently being decided, which means that some
1636/// types have three classes of bit patterns: those which are definitely valid,
1637/// and whose validity is documented in the language; those which may or may not
1638/// be considered valid at some point in the future; and those which are
1639/// definitely invalid.
1640///
1641/// Zerocopy takes a conservative approach, and only considers a bit pattern to
1642/// be valid if its validity is a documented guarantee provided by the
1643/// language.
1644///
1645/// For most use cases, Rust's current guarantees align with programmers'
1646/// intuitions about what ought to be valid. As a result, zerocopy's
1647/// conservatism should not affect most users.
1648///
1649/// If you are negatively affected by lack of support for a particular type,
1650/// we encourage you to let us know by [filing an issue][github-repo].
1651///
1652/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1653///
1654/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1655/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1656/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1657/// IntoBytes`, there exist values `t: T` such that
1658/// `TryFromBytes::try_ref_from_bytes(t.as_bytes())` returns `Err`. Code should not
1659/// generally assume that values produced by `IntoBytes` will necessarily be
1660/// accepted as valid by `TryFromBytes`.
1661///
1662/// # Safety
1663///
1664/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1665/// or representation of `T`. It merely provides the ability to perform a
1666/// validity check at runtime via methods like [`try_ref_from_bytes`].
1667///
1668/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1669/// Future releases of zerocopy may make backwards-breaking changes to these
1670/// items, including changes that only affect soundness, which may cause code
1671/// which uses those items to silently become unsound.
1672///
1673/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1674/// [github-repo]: https://github.com/google/zerocopy
1675/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1676/// [*valid instance*]: #what-is-a-valid-instance
1677#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1678#[cfg_attr(
1679    not(feature = "derive"),
1680    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1681)]
1682#[cfg_attr(
1683    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1684    diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1685)]
1686pub unsafe trait TryFromBytes {
1687    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
1688    // safe.
1689    #[doc(hidden)]
1690    fn only_derive_is_allowed_to_implement_this_trait()
1691    where
1692        Self: Sized;
1693
1694    /// Does a given memory range contain a valid instance of `Self`?
1695    ///
1696    /// # Safety
1697    ///
1698    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
1699    /// `*candidate` contains a valid `Self`.
1700    ///
1701    /// # Panics
1702    ///
1703    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
1704    /// `unsafe` code remains sound even in the face of `is_bit_valid`
1705    /// panicking. (We support user-defined validation routines; so long as
1706    /// these routines are not required to be `unsafe`, there is no way to
1707    /// ensure that these do not generate panics.)
1708    ///
1709    /// Besides user-defined validation routines panicking, `is_bit_valid` will
1710    /// either panic or fail to compile if called on a pointer with [`Shared`]
1711    /// aliasing when `Self: !Immutable`.
1712    ///
1713    /// [`UnsafeCell`]: core::cell::UnsafeCell
1714    /// [`Shared`]: invariant::Shared
1715    #[doc(hidden)]
1716    fn is_bit_valid(candidate: Maybe<'_, Self>) -> bool;
1717
1718    /// Attempts to interpret the given `source` as a `&Self`.
1719    ///
1720    /// If the bytes of `source` are a valid instance of `Self`, this method
1721    /// returns a reference to those bytes interpreted as a `Self`. If the
1722    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1723    /// `source` is not appropriately aligned, or if `source` is not a valid
1724    /// instance of `Self`, this returns `Err`. If [`Self:
1725    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1726    /// error][ConvertError::from].
1727    ///
1728    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1729    ///
1730    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1731    /// [self-unaligned]: Unaligned
1732    /// [slice-dst]: KnownLayout#dynamically-sized-types
1733    ///
1734    /// # Compile-Time Assertions
1735    ///
1736    /// This method cannot yet be used on unsized types whose dynamically-sized
1737    /// component is zero-sized. Attempting to use this method on such types
1738    /// results in a compile-time assertion error; e.g.:
1739    ///
1740    /// ```compile_fail,E0080
1741    /// use zerocopy::*;
1742    /// # use zerocopy_derive::*;
1743    ///
1744    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1745    /// #[repr(C)]
1746    /// struct ZSTy {
1747    ///     leading_sized: u16,
1748    ///     trailing_dst: [()],
1749    /// }
1750    ///
1751    /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
1752    /// ```
1753    ///
1754    /// # Examples
1755    ///
1756    /// ```
1757    /// use zerocopy::TryFromBytes;
1758    /// # use zerocopy_derive::*;
1759    ///
1760    /// // The only valid value of this type is the byte `0xC0`
1761    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1762    /// #[repr(u8)]
1763    /// enum C0 { xC0 = 0xC0 }
1764    ///
1765    /// // The only valid value of this type is the byte sequence `0xC0C0`.
1766    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1767    /// #[repr(C)]
1768    /// struct C0C0(C0, C0);
1769    ///
1770    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1771    /// #[repr(C)]
1772    /// struct Packet {
1773    ///     magic_number: C0C0,
1774    ///     mug_size: u8,
1775    ///     temperature: u8,
1776    ///     marshmallows: [[u8; 2]],
1777    /// }
1778    ///
1779    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1780    ///
1781    /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
1782    ///
1783    /// assert_eq!(packet.mug_size, 240);
1784    /// assert_eq!(packet.temperature, 77);
1785    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1786    ///
1787    /// // These bytes are not a valid instance of `Packet`.
1788    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1789    /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
1790    /// ```
1791    #[must_use = "has no side effects"]
1792    #[inline]
1793    fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
1794    where
1795        Self: KnownLayout + Immutable,
1796    {
1797        static_assert_dst_is_not_zst!(Self);
1798        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
1799            Ok(source) => {
1800                // This call may panic. If that happens, it doesn't cause any soundness
1801                // issues, as we have not generated any invalid state which we need to
1802                // fix before returning.
1803                match source.try_into_valid() {
1804                    Ok(valid) => Ok(valid.as_ref()),
1805                    Err(e) => {
1806                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
1807                    }
1808                }
1809            }
1810            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
1811        }
1812    }
1813
1814    /// Attempts to interpret the prefix of the given `source` as a `&Self`.
1815    ///
1816    /// This method computes the [largest possible size of `Self`][valid-size]
1817    /// that can fit in the leading bytes of `source`. If that prefix is a valid
1818    /// instance of `Self`, this method returns a reference to those bytes
1819    /// interpreted as `Self`, and a reference to the remaining bytes. If there
1820    /// are insufficient bytes, or if `source` is not appropriately aligned, or
1821    /// if those bytes are not a valid instance of `Self`, this returns `Err`.
1822    /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1823    /// alignment error][ConvertError::from].
1824    ///
1825    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1826    ///
1827    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1828    /// [self-unaligned]: Unaligned
1829    /// [slice-dst]: KnownLayout#dynamically-sized-types
1830    ///
1831    /// # Compile-Time Assertions
1832    ///
1833    /// This method cannot yet be used on unsized types whose dynamically-sized
1834    /// component is zero-sized. Attempting to use this method on such types
1835    /// results in a compile-time assertion error; e.g.:
1836    ///
1837    /// ```compile_fail,E0080
1838    /// use zerocopy::*;
1839    /// # use zerocopy_derive::*;
1840    ///
1841    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1842    /// #[repr(C)]
1843    /// struct ZSTy {
1844    ///     leading_sized: u16,
1845    ///     trailing_dst: [()],
1846    /// }
1847    ///
1848    /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
1849    /// ```
1850    ///
1851    /// # Examples
1852    ///
1853    /// ```
1854    /// use zerocopy::TryFromBytes;
1855    /// # use zerocopy_derive::*;
1856    ///
1857    /// // The only valid value of this type is the byte `0xC0`
1858    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1859    /// #[repr(u8)]
1860    /// enum C0 { xC0 = 0xC0 }
1861    ///
1862    /// // The only valid value of this type is the bytes `0xC0C0`.
1863    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1864    /// #[repr(C)]
1865    /// struct C0C0(C0, C0);
1866    ///
1867    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1868    /// #[repr(C)]
1869    /// struct Packet {
1870    ///     magic_number: C0C0,
1871    ///     mug_size: u8,
1872    ///     temperature: u8,
1873    ///     marshmallows: [[u8; 2]],
1874    /// }
1875    ///
1876    /// // These are more bytes than are needed to encode a `Packet`.
1877    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1878    ///
1879    /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
1880    ///
1881    /// assert_eq!(packet.mug_size, 240);
1882    /// assert_eq!(packet.temperature, 77);
1883    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1884    /// assert_eq!(suffix, &[6u8][..]);
1885    ///
1886    /// // These bytes are not a valid instance of `Packet`.
1887    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1888    /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
1889    /// ```
1890    #[must_use = "has no side effects"]
1891    #[inline]
1892    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
1893    where
1894        Self: KnownLayout + Immutable,
1895    {
1896        static_assert_dst_is_not_zst!(Self);
1897        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
1898    }
1899
1900    /// Attempts to interpret the suffix of the given `source` as a `&Self`.
1901    ///
1902    /// This method computes the [largest possible size of `Self`][valid-size]
1903    /// that can fit in the trailing bytes of `source`. If that suffix is a
1904    /// valid instance of `Self`, this method returns a reference to those bytes
1905    /// interpreted as `Self`, and a reference to the preceding bytes. If there
1906    /// are insufficient bytes, or if the suffix of `source` would not be
1907    /// appropriately aligned, or if the suffix is not a valid instance of
1908    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1909    /// can [infallibly discard the alignment error][ConvertError::from].
1910    ///
1911    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1912    ///
1913    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1914    /// [self-unaligned]: Unaligned
1915    /// [slice-dst]: KnownLayout#dynamically-sized-types
1916    ///
1917    /// # Compile-Time Assertions
1918    ///
1919    /// This method cannot yet be used on unsized types whose dynamically-sized
1920    /// component is zero-sized. Attempting to use this method on such types
1921    /// results in a compile-time assertion error; e.g.:
1922    ///
1923    /// ```compile_fail,E0080
1924    /// use zerocopy::*;
1925    /// # use zerocopy_derive::*;
1926    ///
1927    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1928    /// #[repr(C)]
1929    /// struct ZSTy {
1930    ///     leading_sized: u16,
1931    ///     trailing_dst: [()],
1932    /// }
1933    ///
1934    /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
1935    /// ```
1936    ///
1937    /// # Examples
1938    ///
1939    /// ```
1940    /// use zerocopy::TryFromBytes;
1941    /// # use zerocopy_derive::*;
1942    ///
1943    /// // The only valid value of this type is the byte `0xC0`
1944    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1945    /// #[repr(u8)]
1946    /// enum C0 { xC0 = 0xC0 }
1947    ///
1948    /// // The only valid value of this type is the bytes `0xC0C0`.
1949    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1950    /// #[repr(C)]
1951    /// struct C0C0(C0, C0);
1952    ///
1953    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1954    /// #[repr(C)]
1955    /// struct Packet {
1956    ///     magic_number: C0C0,
1957    ///     mug_size: u8,
1958    ///     temperature: u8,
1959    ///     marshmallows: [[u8; 2]],
1960    /// }
1961    ///
1962    /// // These are more bytes than are needed to encode a `Packet`.
1963    /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1964    ///
1965    /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
1966    ///
1967    /// assert_eq!(packet.mug_size, 240);
1968    /// assert_eq!(packet.temperature, 77);
1969    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1970    /// assert_eq!(prefix, &[0u8][..]);
1971    ///
1972    /// // These bytes are not a valid instance of `Packet`.
1973    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
1974    /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
1975    /// ```
1976    #[must_use = "has no side effects"]
1977    #[inline]
1978    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
1979    where
1980        Self: KnownLayout + Immutable,
1981    {
1982        static_assert_dst_is_not_zst!(Self);
1983        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
1984    }
1985
1986    /// Attempts to interpret the given `source` as a `&mut Self` without
1987    /// copying.
1988    ///
1989    /// If the bytes of `source` are a valid instance of `Self`, this method
1990    /// returns a reference to those bytes interpreted as a `Self`. If the
1991    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1992    /// `source` is not appropriately aligned, or if `source` is not a valid
1993    /// instance of `Self`, this returns `Err`. If [`Self:
1994    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1995    /// error][ConvertError::from].
1996    ///
1997    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1998    ///
1999    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2000    /// [self-unaligned]: Unaligned
2001    /// [slice-dst]: KnownLayout#dynamically-sized-types
2002    ///
2003    /// # Compile-Time Assertions
2004    ///
2005    /// This method cannot yet be used on unsized types whose dynamically-sized
2006    /// component is zero-sized. Attempting to use this method on such types
2007    /// results in a compile-time assertion error; e.g.:
2008    ///
2009    /// ```compile_fail,E0080
2010    /// use zerocopy::*;
2011    /// # use zerocopy_derive::*;
2012    ///
2013    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2014    /// #[repr(C, packed)]
2015    /// struct ZSTy {
2016    ///     leading_sized: [u8; 2],
2017    ///     trailing_dst: [()],
2018    /// }
2019    ///
2020    /// let mut source = [85, 85];
2021    /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
2022    /// ```
2023    ///
2024    /// # Examples
2025    ///
2026    /// ```
2027    /// use zerocopy::TryFromBytes;
2028    /// # use zerocopy_derive::*;
2029    ///
2030    /// // The only valid value of this type is the byte `0xC0`
2031    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2032    /// #[repr(u8)]
2033    /// enum C0 { xC0 = 0xC0 }
2034    ///
2035    /// // The only valid value of this type is the bytes `0xC0C0`.
2036    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2037    /// #[repr(C)]
2038    /// struct C0C0(C0, C0);
2039    ///
2040    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2041    /// #[repr(C, packed)]
2042    /// struct Packet {
2043    ///     magic_number: C0C0,
2044    ///     mug_size: u8,
2045    ///     temperature: u8,
2046    ///     marshmallows: [[u8; 2]],
2047    /// }
2048    ///
2049    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
2050    ///
2051    /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
2052    ///
2053    /// assert_eq!(packet.mug_size, 240);
2054    /// assert_eq!(packet.temperature, 77);
2055    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
2056    ///
2057    /// packet.temperature = 111;
2058    ///
2059    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
2060    ///
2061    /// // These bytes are not a valid instance of `Packet`.
2062    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2063    /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
2064    /// ```
2065    #[must_use = "has no side effects"]
2066    #[inline]
2067    fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2068    where
2069        Self: KnownLayout + IntoBytes,
2070    {
2071        static_assert_dst_is_not_zst!(Self);
2072        match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
2073            Ok(source) => {
2074                // This call may panic. If that happens, it doesn't cause any soundness
2075                // issues, as we have not generated any invalid state which we need to
2076                // fix before returning.
2077                match source.try_into_valid() {
2078                    Ok(source) => Ok(source.as_mut()),
2079                    Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
2080                }
2081            }
2082            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2083        }
2084    }
2085
2086    /// Attempts to interpret the prefix of the given `source` as a `&mut
2087    /// Self`.
2088    ///
2089    /// This method computes the [largest possible size of `Self`][valid-size]
2090    /// that can fit in the leading bytes of `source`. If that prefix is a valid
2091    /// instance of `Self`, this method returns a reference to those bytes
2092    /// interpreted as `Self`, and a reference to the remaining bytes. If there
2093    /// are insufficient bytes, or if `source` is not appropriately aligned, or
2094    /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
2095    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
2096    /// alignment error][ConvertError::from].
2097    ///
2098    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2099    ///
2100    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2101    /// [self-unaligned]: Unaligned
2102    /// [slice-dst]: KnownLayout#dynamically-sized-types
2103    ///
2104    /// # Compile-Time Assertions
2105    ///
2106    /// This method cannot yet be used on unsized types whose dynamically-sized
2107    /// component is zero-sized. Attempting to use this method on such types
2108    /// results in a compile-time assertion error; e.g.:
2109    ///
2110    /// ```compile_fail,E0080
2111    /// use zerocopy::*;
2112    /// # use zerocopy_derive::*;
2113    ///
2114    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2115    /// #[repr(C, packed)]
2116    /// struct ZSTy {
2117    ///     leading_sized: [u8; 2],
2118    ///     trailing_dst: [()],
2119    /// }
2120    ///
2121    /// let mut source = [85, 85];
2122    /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
2123    /// ```
2124    ///
2125    /// # Examples
2126    ///
2127    /// ```
2128    /// use zerocopy::TryFromBytes;
2129    /// # use zerocopy_derive::*;
2130    ///
2131    /// // The only valid value of this type is the byte `0xC0`
2132    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2133    /// #[repr(u8)]
2134    /// enum C0 { xC0 = 0xC0 }
2135    ///
2136    /// // The only valid value of this type is the bytes `0xC0C0`.
2137    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2138    /// #[repr(C)]
2139    /// struct C0C0(C0, C0);
2140    ///
2141    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2142    /// #[repr(C, packed)]
2143    /// struct Packet {
2144    ///     magic_number: C0C0,
2145    ///     mug_size: u8,
2146    ///     temperature: u8,
2147    ///     marshmallows: [[u8; 2]],
2148    /// }
2149    ///
2150    /// // These are more bytes than are needed to encode a `Packet`.
2151    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2152    ///
2153    /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
2154    ///
2155    /// assert_eq!(packet.mug_size, 240);
2156    /// assert_eq!(packet.temperature, 77);
2157    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
2158    /// assert_eq!(suffix, &[6u8][..]);
2159    ///
2160    /// packet.temperature = 111;
2161    /// suffix[0] = 222;
2162    ///
2163    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
2164    ///
2165    /// // These bytes are not a valid instance of `Packet`.
2166    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2167    /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
2168    /// ```
2169    #[must_use = "has no side effects"]
2170    #[inline]
2171    fn try_mut_from_prefix(
2172        source: &mut [u8],
2173    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
2174    where
2175        Self: KnownLayout + IntoBytes,
2176    {
2177        static_assert_dst_is_not_zst!(Self);
2178        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
2179    }
2180
2181    /// Attempts to interpret the suffix of the given `source` as a `&mut
2182    /// Self`.
2183    ///
2184    /// This method computes the [largest possible size of `Self`][valid-size]
2185    /// that can fit in the trailing bytes of `source`. If that suffix is a
2186    /// valid instance of `Self`, this method returns a reference to those bytes
2187    /// interpreted as `Self`, and a reference to the preceding bytes. If there
2188    /// are insufficient bytes, or if the suffix of `source` would not be
2189    /// appropriately aligned, or if the suffix is not a valid instance of
2190    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
2191    /// can [infallibly discard the alignment error][ConvertError::from].
2192    ///
2193    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2194    ///
2195    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2196    /// [self-unaligned]: Unaligned
2197    /// [slice-dst]: KnownLayout#dynamically-sized-types
2198    ///
2199    /// # Compile-Time Assertions
2200    ///
2201    /// This method cannot yet be used on unsized types whose dynamically-sized
2202    /// component is zero-sized. Attempting to use this method on such types
2203    /// results in a compile-time assertion error; e.g.:
2204    ///
2205    /// ```compile_fail,E0080
2206    /// use zerocopy::*;
2207    /// # use zerocopy_derive::*;
2208    ///
2209    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2210    /// #[repr(C, packed)]
2211    /// struct ZSTy {
2212    ///     leading_sized: u16,
2213    ///     trailing_dst: [()],
2214    /// }
2215    ///
2216    /// let mut source = [85, 85];
2217    /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
2218    /// ```
2219    ///
2220    /// # Examples
2221    ///
2222    /// ```
2223    /// use zerocopy::TryFromBytes;
2224    /// # use zerocopy_derive::*;
2225    ///
2226    /// // The only valid value of this type is the byte `0xC0`
2227    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2228    /// #[repr(u8)]
2229    /// enum C0 { xC0 = 0xC0 }
2230    ///
2231    /// // The only valid value of this type is the bytes `0xC0C0`.
2232    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2233    /// #[repr(C)]
2234    /// struct C0C0(C0, C0);
2235    ///
2236    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2237    /// #[repr(C, packed)]
2238    /// struct Packet {
2239    ///     magic_number: C0C0,
2240    ///     mug_size: u8,
2241    ///     temperature: u8,
2242    ///     marshmallows: [[u8; 2]],
2243    /// }
2244    ///
2245    /// // These are more bytes than are needed to encode a `Packet`.
2246    /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2247    ///
2248    /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
2249    ///
2250    /// assert_eq!(packet.mug_size, 240);
2251    /// assert_eq!(packet.temperature, 77);
2252    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2253    /// assert_eq!(prefix, &[0u8][..]);
2254    ///
2255    /// prefix[0] = 111;
2256    /// packet.temperature = 222;
2257    ///
2258    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2259    ///
2260    /// // These bytes are not a valid instance of `Packet`.
2261    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
2262    /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
2263    /// ```
2264    #[must_use = "has no side effects"]
2265    #[inline]
2266    fn try_mut_from_suffix(
2267        source: &mut [u8],
2268    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2269    where
2270        Self: KnownLayout + IntoBytes,
2271    {
2272        static_assert_dst_is_not_zst!(Self);
2273        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
2274    }
2275
2276    /// Attempts to interpret the given `source` as a `&Self` with a DST length
2277    /// equal to `count`.
2278    ///
2279    /// This method attempts to return a reference to `source` interpreted as a
2280    /// `Self` with `count` trailing elements. If the length of `source` is not
2281    /// equal to the size of `Self` with `count` elements, if `source` is not
2282    /// appropriately aligned, or if `source` does not contain a valid instance
2283    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2284    /// you can [infallibly discard the alignment error][ConvertError::from].
2285    ///
2286    /// [self-unaligned]: Unaligned
2287    /// [slice-dst]: KnownLayout#dynamically-sized-types
2288    ///
2289    /// # Examples
2290    ///
2291    /// ```
2292    /// # #![allow(non_camel_case_types)] // For C0::xC0
2293    /// use zerocopy::TryFromBytes;
2294    /// # use zerocopy_derive::*;
2295    ///
2296    /// // The only valid value of this type is the byte `0xC0`
2297    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2298    /// #[repr(u8)]
2299    /// enum C0 { xC0 = 0xC0 }
2300    ///
2301    /// // The only valid value of this type is the bytes `0xC0C0`.
2302    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2303    /// #[repr(C)]
2304    /// struct C0C0(C0, C0);
2305    ///
2306    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2307    /// #[repr(C)]
2308    /// struct Packet {
2309    ///     magic_number: C0C0,
2310    ///     mug_size: u8,
2311    ///     temperature: u8,
2312    ///     marshmallows: [[u8; 2]],
2313    /// }
2314    ///
2315    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2316    ///
2317    /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
2318    ///
2319    /// assert_eq!(packet.mug_size, 240);
2320    /// assert_eq!(packet.temperature, 77);
2321    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2322    ///
2323    /// // These bytes are not a valid instance of `Packet`.
2324    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2325    /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
2326    /// ```
2327    ///
2328    /// Since an explicit `count` is provided, this method supports types with
2329    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
2330    /// which do not take an explicit count do not support such types.
2331    ///
2332    /// ```
2333    /// use core::num::NonZeroU16;
2334    /// use zerocopy::*;
2335    /// # use zerocopy_derive::*;
2336    ///
2337    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2338    /// #[repr(C)]
2339    /// struct ZSTy {
2340    ///     leading_sized: NonZeroU16,
2341    ///     trailing_dst: [()],
2342    /// }
2343    ///
2344    /// let src = 0xCAFEu16.as_bytes();
2345    /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
2346    /// assert_eq!(zsty.trailing_dst.len(), 42);
2347    /// ```
2348    ///
2349    /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
2350    #[must_use = "has no side effects"]
2351    #[inline]
2352    fn try_ref_from_bytes_with_elems(
2353        source: &[u8],
2354        count: usize,
2355    ) -> Result<&Self, TryCastError<&[u8], Self>>
2356    where
2357        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2358    {
2359        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
2360        {
2361            Ok(source) => {
2362                // This call may panic. If that happens, it doesn't cause any soundness
2363                // issues, as we have not generated any invalid state which we need to
2364                // fix before returning.
2365                match source.try_into_valid() {
2366                    Ok(source) => Ok(source.as_ref()),
2367                    Err(e) => {
2368                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
2369                    }
2370                }
2371            }
2372            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2373        }
2374    }
2375
2376    /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2377    /// a DST length equal to `count`.
2378    ///
2379    /// This method attempts to return a reference to the prefix of `source`
2380    /// interpreted as a `Self` with `count` trailing elements, and a reference
2381    /// to the remaining bytes. If the length of `source` is less than the size
2382    /// of `Self` with `count` elements, if `source` is not appropriately
2383    /// aligned, or if the prefix of `source` does not contain a valid instance
2384    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2385    /// you can [infallibly discard the alignment error][ConvertError::from].
2386    ///
2387    /// [self-unaligned]: Unaligned
2388    /// [slice-dst]: KnownLayout#dynamically-sized-types
2389    ///
2390    /// # Examples
2391    ///
2392    /// ```
2393    /// # #![allow(non_camel_case_types)] // For C0::xC0
2394    /// use zerocopy::TryFromBytes;
2395    /// # use zerocopy_derive::*;
2396    ///
2397    /// // The only valid value of this type is the byte `0xC0`
2398    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2399    /// #[repr(u8)]
2400    /// enum C0 { xC0 = 0xC0 }
2401    ///
2402    /// // The only valid value of this type is the bytes `0xC0C0`.
2403    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2404    /// #[repr(C)]
2405    /// struct C0C0(C0, C0);
2406    ///
2407    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2408    /// #[repr(C)]
2409    /// struct Packet {
2410    ///     magic_number: C0C0,
2411    ///     mug_size: u8,
2412    ///     temperature: u8,
2413    ///     marshmallows: [[u8; 2]],
2414    /// }
2415    ///
2416    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2417    ///
2418    /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2419    ///
2420    /// assert_eq!(packet.mug_size, 240);
2421    /// assert_eq!(packet.temperature, 77);
2422    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2423    /// assert_eq!(suffix, &[8u8][..]);
2424    ///
2425    /// // These bytes are not a valid instance of `Packet`.
2426    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2427    /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2428    /// ```
2429    ///
2430    /// Since an explicit `count` is provided, this method supports types with
2431    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2432    /// which do not take an explicit count do not support such types.
2433    ///
2434    /// ```
2435    /// use core::num::NonZeroU16;
2436    /// use zerocopy::*;
2437    /// # use zerocopy_derive::*;
2438    ///
2439    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2440    /// #[repr(C)]
2441    /// struct ZSTy {
2442    ///     leading_sized: NonZeroU16,
2443    ///     trailing_dst: [()],
2444    /// }
2445    ///
2446    /// let src = 0xCAFEu16.as_bytes();
2447    /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2448    /// assert_eq!(zsty.trailing_dst.len(), 42);
2449    /// ```
2450    ///
2451    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2452    #[must_use = "has no side effects"]
2453    #[inline]
2454    fn try_ref_from_prefix_with_elems(
2455        source: &[u8],
2456        count: usize,
2457    ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
2458    where
2459        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2460    {
2461        try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
2462    }
2463
2464    /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2465    /// a DST length equal to `count`.
2466    ///
2467    /// This method attempts to return a reference to the suffix of `source`
2468    /// interpreted as a `Self` with `count` trailing elements, and a reference
2469    /// to the preceding bytes. If the length of `source` is less than the size
2470    /// of `Self` with `count` elements, if the suffix of `source` is not
2471    /// appropriately aligned, or if the suffix of `source` does not contain a
2472    /// valid instance of `Self`, this returns `Err`. If [`Self:
2473    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2474    /// error][ConvertError::from].
2475    ///
2476    /// [self-unaligned]: Unaligned
2477    /// [slice-dst]: KnownLayout#dynamically-sized-types
2478    ///
2479    /// # Examples
2480    ///
2481    /// ```
2482    /// # #![allow(non_camel_case_types)] // For C0::xC0
2483    /// use zerocopy::TryFromBytes;
2484    /// # use zerocopy_derive::*;
2485    ///
2486    /// // The only valid value of this type is the byte `0xC0`
2487    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2488    /// #[repr(u8)]
2489    /// enum C0 { xC0 = 0xC0 }
2490    ///
2491    /// // The only valid value of this type is the bytes `0xC0C0`.
2492    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2493    /// #[repr(C)]
2494    /// struct C0C0(C0, C0);
2495    ///
2496    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2497    /// #[repr(C)]
2498    /// struct Packet {
2499    ///     magic_number: C0C0,
2500    ///     mug_size: u8,
2501    ///     temperature: u8,
2502    ///     marshmallows: [[u8; 2]],
2503    /// }
2504    ///
2505    /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2506    ///
2507    /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2508    ///
2509    /// assert_eq!(packet.mug_size, 240);
2510    /// assert_eq!(packet.temperature, 77);
2511    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2512    /// assert_eq!(prefix, &[123u8][..]);
2513    ///
2514    /// // These bytes are not a valid instance of `Packet`.
2515    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2516    /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2517    /// ```
2518    ///
2519    /// Since an explicit `count` is provided, this method supports types with
2520    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2521    /// which do not take an explicit count do not support such types.
2522    ///
2523    /// ```
2524    /// use core::num::NonZeroU16;
2525    /// use zerocopy::*;
2526    /// # use zerocopy_derive::*;
2527    ///
2528    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2529    /// #[repr(C)]
2530    /// struct ZSTy {
2531    ///     leading_sized: NonZeroU16,
2532    ///     trailing_dst: [()],
2533    /// }
2534    ///
2535    /// let src = 0xCAFEu16.as_bytes();
2536    /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
2537    /// assert_eq!(zsty.trailing_dst.len(), 42);
2538    /// ```
2539    ///
2540    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2541    #[must_use = "has no side effects"]
2542    #[inline]
2543    fn try_ref_from_suffix_with_elems(
2544        source: &[u8],
2545        count: usize,
2546    ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
2547    where
2548        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2549    {
2550        try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2551    }
2552
2553    /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2554    /// length equal to `count`.
2555    ///
2556    /// This method attempts to return a reference to `source` interpreted as a
2557    /// `Self` with `count` trailing elements. If the length of `source` is not
2558    /// equal to the size of `Self` with `count` elements, if `source` is not
2559    /// appropriately aligned, or if `source` does not contain a valid instance
2560    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2561    /// you can [infallibly discard the alignment error][ConvertError::from].
2562    ///
2563    /// [self-unaligned]: Unaligned
2564    /// [slice-dst]: KnownLayout#dynamically-sized-types
2565    ///
2566    /// # Examples
2567    ///
2568    /// ```
2569    /// # #![allow(non_camel_case_types)] // For C0::xC0
2570    /// use zerocopy::TryFromBytes;
2571    /// # use zerocopy_derive::*;
2572    ///
2573    /// // The only valid value of this type is the byte `0xC0`
2574    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2575    /// #[repr(u8)]
2576    /// enum C0 { xC0 = 0xC0 }
2577    ///
2578    /// // The only valid value of this type is the bytes `0xC0C0`.
2579    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2580    /// #[repr(C)]
2581    /// struct C0C0(C0, C0);
2582    ///
2583    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2584    /// #[repr(C, packed)]
2585    /// struct Packet {
2586    ///     magic_number: C0C0,
2587    ///     mug_size: u8,
2588    ///     temperature: u8,
2589    ///     marshmallows: [[u8; 2]],
2590    /// }
2591    ///
2592    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2593    ///
2594    /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2595    ///
2596    /// assert_eq!(packet.mug_size, 240);
2597    /// assert_eq!(packet.temperature, 77);
2598    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2599    ///
2600    /// packet.temperature = 111;
2601    ///
2602    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2603    ///
2604    /// // These bytes are not a valid instance of `Packet`.
2605    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2606    /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2607    /// ```
2608    ///
2609    /// Since an explicit `count` is provided, this method supports types with
2610    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2611    /// which do not take an explicit count do not support such types.
2612    ///
2613    /// ```
2614    /// use core::num::NonZeroU16;
2615    /// use zerocopy::*;
2616    /// # use zerocopy_derive::*;
2617    ///
2618    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2619    /// #[repr(C, packed)]
2620    /// struct ZSTy {
2621    ///     leading_sized: NonZeroU16,
2622    ///     trailing_dst: [()],
2623    /// }
2624    ///
2625    /// let mut src = 0xCAFEu16;
2626    /// let src = src.as_mut_bytes();
2627    /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2628    /// assert_eq!(zsty.trailing_dst.len(), 42);
2629    /// ```
2630    ///
2631    /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
2632    #[must_use = "has no side effects"]
2633    #[inline]
2634    fn try_mut_from_bytes_with_elems(
2635        source: &mut [u8],
2636        count: usize,
2637    ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2638    where
2639        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2640    {
2641        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
2642        {
2643            Ok(source) => {
2644                // This call may panic. If that happens, it doesn't cause any soundness
2645                // issues, as we have not generated any invalid state which we need to
2646                // fix before returning.
2647                match source.try_into_valid() {
2648                    Ok(source) => Ok(source.as_mut()),
2649                    Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
2650                }
2651            }
2652            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2653        }
2654    }
2655
2656    /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2657    /// with a DST length equal to `count`.
2658    ///
2659    /// This method attempts to return a reference to the prefix of `source`
2660    /// interpreted as a `Self` with `count` trailing elements, and a reference
2661    /// to the remaining bytes. If the length of `source` is less than the size
2662    /// of `Self` with `count` elements, if `source` is not appropriately
2663    /// aligned, or if the prefix of `source` does not contain a valid instance
2664    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2665    /// you can [infallibly discard the alignment error][ConvertError::from].
2666    ///
2667    /// [self-unaligned]: Unaligned
2668    /// [slice-dst]: KnownLayout#dynamically-sized-types
2669    ///
2670    /// # Examples
2671    ///
2672    /// ```
2673    /// # #![allow(non_camel_case_types)] // For C0::xC0
2674    /// use zerocopy::TryFromBytes;
2675    /// # use zerocopy_derive::*;
2676    ///
2677    /// // The only valid value of this type is the byte `0xC0`
2678    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2679    /// #[repr(u8)]
2680    /// enum C0 { xC0 = 0xC0 }
2681    ///
2682    /// // The only valid value of this type is the bytes `0xC0C0`.
2683    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2684    /// #[repr(C)]
2685    /// struct C0C0(C0, C0);
2686    ///
2687    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2688    /// #[repr(C, packed)]
2689    /// struct Packet {
2690    ///     magic_number: C0C0,
2691    ///     mug_size: u8,
2692    ///     temperature: u8,
2693    ///     marshmallows: [[u8; 2]],
2694    /// }
2695    ///
2696    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2697    ///
2698    /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2699    ///
2700    /// assert_eq!(packet.mug_size, 240);
2701    /// assert_eq!(packet.temperature, 77);
2702    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2703    /// assert_eq!(suffix, &[8u8][..]);
2704    ///
2705    /// packet.temperature = 111;
2706    /// suffix[0] = 222;
2707    ///
2708    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2709    ///
2710    /// // These bytes are not a valid instance of `Packet`.
2711    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2712    /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2713    /// ```
2714    ///
2715    /// Since an explicit `count` is provided, this method supports types with
2716    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2717    /// which do not take an explicit count do not support such types.
2718    ///
2719    /// ```
2720    /// use core::num::NonZeroU16;
2721    /// use zerocopy::*;
2722    /// # use zerocopy_derive::*;
2723    ///
2724    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2725    /// #[repr(C, packed)]
2726    /// struct ZSTy {
2727    ///     leading_sized: NonZeroU16,
2728    ///     trailing_dst: [()],
2729    /// }
2730    ///
2731    /// let mut src = 0xCAFEu16;
2732    /// let src = src.as_mut_bytes();
2733    /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2734    /// assert_eq!(zsty.trailing_dst.len(), 42);
2735    /// ```
2736    ///
2737    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2738    #[must_use = "has no side effects"]
2739    #[inline]
2740    fn try_mut_from_prefix_with_elems(
2741        source: &mut [u8],
2742        count: usize,
2743    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
2744    where
2745        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2746    {
2747        try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
2748    }
2749
2750    /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2751    /// with a DST length equal to `count`.
2752    ///
2753    /// This method attempts to return a reference to the suffix of `source`
2754    /// interpreted as a `Self` with `count` trailing elements, and a reference
2755    /// to the preceding bytes. If the length of `source` is less than the size
2756    /// of `Self` with `count` elements, if the suffix of `source` is not
2757    /// appropriately aligned, or if the suffix of `source` does not contain a
2758    /// valid instance of `Self`, this returns `Err`. If [`Self:
2759    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2760    /// error][ConvertError::from].
2761    ///
2762    /// [self-unaligned]: Unaligned
2763    /// [slice-dst]: KnownLayout#dynamically-sized-types
2764    ///
2765    /// # Examples
2766    ///
2767    /// ```
2768    /// # #![allow(non_camel_case_types)] // For C0::xC0
2769    /// use zerocopy::TryFromBytes;
2770    /// # use zerocopy_derive::*;
2771    ///
2772    /// // The only valid value of this type is the byte `0xC0`
2773    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2774    /// #[repr(u8)]
2775    /// enum C0 { xC0 = 0xC0 }
2776    ///
2777    /// // The only valid value of this type is the bytes `0xC0C0`.
2778    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2779    /// #[repr(C)]
2780    /// struct C0C0(C0, C0);
2781    ///
2782    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2783    /// #[repr(C, packed)]
2784    /// struct Packet {
2785    ///     magic_number: C0C0,
2786    ///     mug_size: u8,
2787    ///     temperature: u8,
2788    ///     marshmallows: [[u8; 2]],
2789    /// }
2790    ///
2791    /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2792    ///
2793    /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2794    ///
2795    /// assert_eq!(packet.mug_size, 240);
2796    /// assert_eq!(packet.temperature, 77);
2797    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2798    /// assert_eq!(prefix, &[123u8][..]);
2799    ///
2800    /// prefix[0] = 111;
2801    /// packet.temperature = 222;
2802    ///
2803    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2804    ///
2805    /// // These bytes are not a valid instance of `Packet`.
2806    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2807    /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2808    /// ```
2809    ///
2810    /// Since an explicit `count` is provided, this method supports types with
2811    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_suffix`]
2812    /// which do not take an explicit count do not support such types.
2813    ///
2814    /// ```
2815    /// use core::num::NonZeroU16;
2816    /// use zerocopy::*;
2817    /// # use zerocopy_derive::*;
2818    ///
2819    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2820    /// #[repr(C, packed)]
2821    /// struct ZSTy {
2822    ///     leading_sized: NonZeroU16,
2823    ///     trailing_dst: [()],
2824    /// }
2825    ///
2826    /// let mut src = 0xCAFEu16;
2827    /// let src = src.as_mut_bytes();
2828    /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
2829    /// assert_eq!(zsty.trailing_dst.len(), 42);
2830    /// ```
2831    ///
2832    /// [`try_mut_from_suffix`]: TryFromBytes::try_mut_from_suffix
2833    #[must_use = "has no side effects"]
2834    #[inline]
2835    fn try_mut_from_suffix_with_elems(
2836        source: &mut [u8],
2837        count: usize,
2838    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2839    where
2840        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2841    {
2842        try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2843    }
2844
2845    /// Attempts to read the given `source` as a `Self`.
2846    ///
2847    /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
2848    /// instance of `Self`, this returns `Err`.
2849    ///
2850    /// # Examples
2851    ///
2852    /// ```
2853    /// use zerocopy::TryFromBytes;
2854    /// # use zerocopy_derive::*;
2855    ///
2856    /// // The only valid value of this type is the byte `0xC0`
2857    /// #[derive(TryFromBytes)]
2858    /// #[repr(u8)]
2859    /// enum C0 { xC0 = 0xC0 }
2860    ///
2861    /// // The only valid value of this type is the bytes `0xC0C0`.
2862    /// #[derive(TryFromBytes)]
2863    /// #[repr(C)]
2864    /// struct C0C0(C0, C0);
2865    ///
2866    /// #[derive(TryFromBytes)]
2867    /// #[repr(C)]
2868    /// struct Packet {
2869    ///     magic_number: C0C0,
2870    ///     mug_size: u8,
2871    ///     temperature: u8,
2872    /// }
2873    ///
2874    /// let bytes = &[0xC0, 0xC0, 240, 77][..];
2875    ///
2876    /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
2877    ///
2878    /// assert_eq!(packet.mug_size, 240);
2879    /// assert_eq!(packet.temperature, 77);
2880    ///
2881    /// // These bytes are not a valid instance of `Packet`.
2882    /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
2883    /// assert!(Packet::try_read_from_bytes(bytes).is_err());
2884    /// ```
2885    #[must_use = "has no side effects"]
2886    #[inline]
2887    fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
2888    where
2889        Self: Sized,
2890    {
2891        let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
2892            Ok(candidate) => candidate,
2893            Err(e) => {
2894                return Err(TryReadError::Size(e.with_dst()));
2895            }
2896        };
2897        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2898        // its bytes are initialized.
2899        unsafe { try_read_from(source, candidate) }
2900    }
2901
2902    /// Attempts to read a `Self` from the prefix of the given `source`.
2903    ///
2904    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
2905    /// of `source`, returning that `Self` and any remaining bytes. If
2906    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2907    /// of `Self`, it returns `Err`.
2908    ///
2909    /// # Examples
2910    ///
2911    /// ```
2912    /// use zerocopy::TryFromBytes;
2913    /// # use zerocopy_derive::*;
2914    ///
2915    /// // The only valid value of this type is the byte `0xC0`
2916    /// #[derive(TryFromBytes)]
2917    /// #[repr(u8)]
2918    /// enum C0 { xC0 = 0xC0 }
2919    ///
2920    /// // The only valid value of this type is the bytes `0xC0C0`.
2921    /// #[derive(TryFromBytes)]
2922    /// #[repr(C)]
2923    /// struct C0C0(C0, C0);
2924    ///
2925    /// #[derive(TryFromBytes)]
2926    /// #[repr(C)]
2927    /// struct Packet {
2928    ///     magic_number: C0C0,
2929    ///     mug_size: u8,
2930    ///     temperature: u8,
2931    /// }
2932    ///
2933    /// // These are more bytes than are needed to encode a `Packet`.
2934    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2935    ///
2936    /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
2937    ///
2938    /// assert_eq!(packet.mug_size, 240);
2939    /// assert_eq!(packet.temperature, 77);
2940    /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
2941    ///
2942    /// // These bytes are not a valid instance of `Packet`.
2943    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2944    /// assert!(Packet::try_read_from_prefix(bytes).is_err());
2945    /// ```
2946    #[must_use = "has no side effects"]
2947    #[inline]
2948    fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
2949    where
2950        Self: Sized,
2951    {
2952        let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
2953            Ok(candidate) => candidate,
2954            Err(e) => {
2955                return Err(TryReadError::Size(e.with_dst()));
2956            }
2957        };
2958        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2959        // its bytes are initialized.
2960        unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
2961    }
2962
2963    /// Attempts to read a `Self` from the suffix of the given `source`.
2964    ///
2965    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
2966    /// of `source`, returning that `Self` and any preceding bytes. If
2967    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2968    /// of `Self`, it returns `Err`.
2969    ///
2970    /// # Examples
2971    ///
2972    /// ```
2973    /// # #![allow(non_camel_case_types)] // For C0::xC0
2974    /// use zerocopy::TryFromBytes;
2975    /// # use zerocopy_derive::*;
2976    ///
2977    /// // The only valid value of this type is the byte `0xC0`
2978    /// #[derive(TryFromBytes)]
2979    /// #[repr(u8)]
2980    /// enum C0 { xC0 = 0xC0 }
2981    ///
2982    /// // The only valid value of this type is the bytes `0xC0C0`.
2983    /// #[derive(TryFromBytes)]
2984    /// #[repr(C)]
2985    /// struct C0C0(C0, C0);
2986    ///
2987    /// #[derive(TryFromBytes)]
2988    /// #[repr(C)]
2989    /// struct Packet {
2990    ///     magic_number: C0C0,
2991    ///     mug_size: u8,
2992    ///     temperature: u8,
2993    /// }
2994    ///
2995    /// // These are more bytes than are needed to encode a `Packet`.
2996    /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
2997    ///
2998    /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
2999    ///
3000    /// assert_eq!(packet.mug_size, 240);
3001    /// assert_eq!(packet.temperature, 77);
3002    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
3003    ///
3004    /// // These bytes are not a valid instance of `Packet`.
3005    /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
3006    /// assert!(Packet::try_read_from_suffix(bytes).is_err());
3007    /// ```
3008    #[must_use = "has no side effects"]
3009    #[inline]
3010    fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
3011    where
3012        Self: Sized,
3013    {
3014        let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
3015            Ok(candidate) => candidate,
3016            Err(e) => {
3017                return Err(TryReadError::Size(e.with_dst()));
3018            }
3019        };
3020        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
3021        // its bytes are initialized.
3022        unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
3023    }
3024}
3025
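/// Casts the prefix or suffix of `source` (per `cast_type`, with an optional
/// trailing-element count in `meta`) to a `&T`, validates that those bytes are
/// a bit-valid `T`, and returns the reference together with the remaining
/// bytes.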
3026#[inline(always)]
3027fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
3028    source: &[u8],
3029    cast_type: CastType,
3030    meta: Option<T::PointerMetadata>,
3031) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
3032    match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
3033        Ok((source, prefix_suffix)) => {
3034            // This call may panic. If that happens, it doesn't cause any soundness
3035            // issues, as we have not generated any invalid state which we need to
3036            // fix before returning.
3037            match source.try_into_valid() {
3038                Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
3039                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
3040            }
3041        }
3042        Err(e) => Err(e.map_src(Ptr::as_ref).into()),
3043    }
3044}
3045
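/// Mutable counterpart of `try_ref_from_prefix_suffix`: casts the prefix or
/// suffix of `candidate` to a `&mut T`, validates those bytes, and returns the
/// reference together with the remaining bytes.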
3046#[inline(always)]
3047fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
3048    candidate: &mut [u8],
3049    cast_type: CastType,
3050    meta: Option<T::PointerMetadata>,
3051) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
3052    match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
3053        Ok((candidate, prefix_suffix)) => {
3054            // This call may panic. If that happens, it doesn't cause any soundness
3055            // issues, as we have not generated any invalid state which we need to
3056            // fix before returning.
3057            match candidate.try_into_valid() {
3058                Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
3059                Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
3060            }
3061        }
3062        Err(e) => Err(e.map_src(Ptr::as_mut).into()),
3063    }
3064}
3065
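/// Swaps the elements of a 2-tuple. Used to turn `(value, remainder)` results
/// into `(remainder, value)` order for the `*_suffix` methods.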
3066#[inline(always)]
3067fn swap<T, U>((t, u): (T, U)) -> (U, T) {
3068    (u, t)
3069}
3070
3071/// # Safety
3072///
3073/// All bytes of `candidate` must be initialized.
3074#[inline(always)]
3075unsafe fn try_read_from<S, T: TryFromBytes>(
3076    source: S,
3077    mut candidate: CoreMaybeUninit<T>,
3078) -> Result<T, TryReadError<S, T>> {
3079    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
3080    // to add a `T: Immutable` bound.
3081    let c_ptr = Ptr::from_mut(&mut candidate);
3082    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from
3083    // `candidate`, which the caller promises is entirely initialized. Since
3084    // `candidate` is a `MaybeUninit`, it has no validity requirements, and so
3085    // no values written to an `Initialized` `c_ptr` can violate its validity.
3086    // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except
3087    // via `c_ptr` so long as it is live, so we don't need to worry about the
3088    // fact that `c_ptr` may have more restricted validity than `candidate`.
3089    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
3090    let mut c_ptr = c_ptr.cast::<_, crate::pointer::cast::CastSized, _>();
3091
3092    // Since we don't have `T: KnownLayout`, we hack around that by using
3093    // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
3094    //
3095    // This call may panic. If that happens, it doesn't cause any soundness
3096    // issues, as we have not generated any invalid state which we need to fix
3097    // before returning.
3098    if !Wrapping::<T>::is_bit_valid(c_ptr.reborrow_shared().forget_aligned()) {
3099        return Err(ValidityError::new(source).into());
3100    }
3101
3102    fn _assert_same_size_and_validity<T>()
3103    where
3104        Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
3105        T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
3106    {
3107    }
3108
3109    _assert_same_size_and_validity::<T>();
3110
3111    // SAFETY: We just validated that `candidate` contains a valid
3112    // `Wrapping<T>`, which has the same size and bit validity as `T`, as
3113    // guaranteed by the preceding type assertion.
3114    Ok(unsafe { candidate.assume_init() })
3115}
3116
3117/// Types for which a sequence of `0` bytes is a valid instance.
3118///
3119/// Any memory region of the appropriate length which is guaranteed to contain
3120/// only zero bytes can be viewed as any `FromZeros` type with no runtime
3121/// overhead. This is useful whenever memory is known to be in a zeroed state,
3122/// such as memory returned from some allocation routines.
3123///
3124/// # Warning: Padding bytes
3125///
3126/// Note that, when a value is moved or copied, only the non-padding bytes of
3127/// that value are guaranteed to be preserved. It is unsound to assume that
3128/// values written to padding bytes are preserved after a move or copy. For more
3129/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
3130///
3131/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
3132///
3133/// # Implementation
3134///
3135/// **Do not implement this trait yourself!** Instead, use
3136/// [`#[derive(FromZeros)]`][derive]; e.g.:
3137///
3138/// ```
3139/// # use zerocopy_derive::{FromZeros, Immutable};
3140/// #[derive(FromZeros)]
3141/// struct MyStruct {
3142/// # /*
3143///     ...
3144/// # */
3145/// }
3146///
3147/// #[derive(FromZeros)]
3148/// #[repr(u8)]
3149/// enum MyEnum {
3150/// #   Variant0,
3151/// # /*
3152///     ...
3153/// # */
3154/// }
3155///
3156/// #[derive(FromZeros, Immutable)]
3157/// union MyUnion {
3158/// #   variant: u8,
3159/// # /*
3160///     ...
3161/// # */
3162/// }
3163/// ```
3164///
3165/// This derive performs a sophisticated, compile-time safety analysis to
3166/// determine whether a type is `FromZeros`.
3167///
3168/// # Safety
3169///
3170/// *This section describes what is required in order for `T: FromZeros`, and
3171/// what unsafe code may assume of such types. If you don't plan on implementing
3172/// `FromZeros` manually, and you don't plan on writing unsafe code that
3173/// operates on `FromZeros` types, then you don't need to read this section.*
3174///
3175/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
3176/// `T` whose bytes are all initialized to zero. If a type is marked as
3177/// `FromZeros` which violates this contract, it may cause undefined behavior.
3178///
3179/// `#[derive(FromZeros)]` only permits [types which satisfy these
3180/// requirements][derive-analysis].
3181///
3182#[cfg_attr(
3183    feature = "derive",
3184    doc = "[derive]: zerocopy_derive::FromZeros",
3185    doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
3186)]
3187#[cfg_attr(
3188    not(feature = "derive"),
3189    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
3190    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
3191)]
3192#[cfg_attr(
3193    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3194    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
3195)]
3196pub unsafe trait FromZeros: TryFromBytes {
3197    // The `Self: Sized` bound makes it so that `FromZeros` is still object
3198    // safe.
3199    #[doc(hidden)]
3200    fn only_derive_is_allowed_to_implement_this_trait()
3201    where
3202        Self: Sized;
3203
3204    /// Overwrites `self` with zeros.
3205    ///
3206    /// Sets every byte in `self` to 0. While this is similar to doing `*self =
3207    /// Self::new_zeroed()`, it differs in that `zero` does not semantically
3208    /// drop the current value and replace it with a new one — it simply
3209    /// modifies the bytes of the existing value.
3210    ///
3211    /// # Examples
3212    ///
3213    /// ```
3214    /// # use zerocopy::FromZeros;
3215    /// # use zerocopy_derive::*;
3216    /// #
3217    /// #[derive(FromZeros)]
3218    /// #[repr(C)]
3219    /// struct PacketHeader {
3220    ///     src_port: [u8; 2],
3221    ///     dst_port: [u8; 2],
3222    ///     length: [u8; 2],
3223    ///     checksum: [u8; 2],
3224    /// }
3225    ///
3226    /// let mut header = PacketHeader {
3227    ///     src_port: 100u16.to_be_bytes(),
3228    ///     dst_port: 200u16.to_be_bytes(),
3229    ///     length: 300u16.to_be_bytes(),
3230    ///     checksum: 400u16.to_be_bytes(),
3231    /// };
3232    ///
3233    /// header.zero();
3234    ///
3235    /// assert_eq!(header.src_port, [0, 0]);
3236    /// assert_eq!(header.dst_port, [0, 0]);
3237    /// assert_eq!(header.length, [0, 0]);
3238    /// assert_eq!(header.checksum, [0, 0]);
3239    /// ```
3240    #[inline(always)]
3241    fn zero(&mut self) {
3242        let slf: *mut Self = self;
3243        let len = mem::size_of_val(self);
3244        // SAFETY:
3245        // - `self` is guaranteed by the type system to be valid for writes of
3246        //   size `size_of_val(self)`.
3247        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
3248        //   as required by `u8`.
3249        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
3250        //   of `Self`.
3251        //
3252        // FIXME(#429): Add references to docs and quotes.
3253        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
3254    }
3255
3256    /// Creates an instance of `Self` from zeroed bytes.
3257    ///
3258    /// # Examples
3259    ///
3260    /// ```
3261    /// # use zerocopy::FromZeros;
3262    /// # use zerocopy_derive::*;
3263    /// #
3264    /// #[derive(FromZeros)]
3265    /// #[repr(C)]
3266    /// struct PacketHeader {
3267    ///     src_port: [u8; 2],
3268    ///     dst_port: [u8; 2],
3269    ///     length: [u8; 2],
3270    ///     checksum: [u8; 2],
3271    /// }
3272    ///
3273    /// let header: PacketHeader = FromZeros::new_zeroed();
3274    ///
3275    /// assert_eq!(header.src_port, [0, 0]);
3276    /// assert_eq!(header.dst_port, [0, 0]);
3277    /// assert_eq!(header.length, [0, 0]);
3278    /// assert_eq!(header.checksum, [0, 0]);
3279    /// ```
3280    #[must_use = "has no side effects"]
3281    #[inline(always)]
3282    fn new_zeroed() -> Self
3283    where
3284        Self: Sized,
3285    {
3286        // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal.
3287        unsafe { mem::zeroed() }
3288    }
3289
3290    /// Creates a `Box<Self>` from zeroed bytes.
3291    ///
3292    /// This function is useful for allocating large values on the heap and
3293    /// zero-initializing them, without ever creating a temporary instance of
3294    /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
3295    /// will allocate `[u8; 1048576]` directly on the heap; it does not require
3296    /// storing `[u8; 1048576]` in a temporary variable on the stack.
3297    ///
3298    /// On systems that use a heap implementation that supports allocating from
3299    /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
3300    /// have performance benefits.
3301    ///
3302    /// # Errors
3303    ///
3304    /// Returns an error on allocation failure. Allocation failure is guaranteed
3305    /// never to cause a panic or an abort.
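    ///
    /// # Examples
    ///
    /// A minimal example, assuming the `alloc` feature is enabled and using a
    /// fixed-size array as `Self`:
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// // The array is allocated and zero-initialized directly on the heap.
    /// let buf: Box<[u8; 16]> = <[u8; 16]>::new_box_zeroed().unwrap();
    /// assert!(buf.iter().all(|&byte| byte == 0));
    /// ```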
3306    #[must_use = "has no side effects (other than allocation)"]
3307    #[cfg(any(feature = "alloc", test))]
3308    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3309    #[inline]
3310    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
3311    where
3312        Self: Sized,
3313    {
3314        // If `T` is a ZST, then return a proper boxed instance of it. There is
3315        // no allocation, but `Box` does require a correct dangling pointer.
3316        let layout = Layout::new::<Self>();
3317        if layout.size() == 0 {
3318            // Construct the `Box` from a dangling pointer to avoid calling
3319            // `Self::new_zeroed`. This ensures that stack space is never
3320            // allocated for `Self` even on lower opt-levels where this branch
3321            // might not get optimized out.
3322
3323            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
3324            // requirements are that the pointer is non-null and sufficiently
3325            // aligned. Per [2], `NonNull::dangling` produces a pointer which
3326            // is sufficiently aligned. Since the produced pointer is a
3327            // `NonNull`, it is non-null.
3328            //
3329            // [1] Per https://doc.rust-lang.org/1.81.0/std/boxed/index.html#memory-layout:
3330            //
3331            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
3332            //
3333            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
3334            //
3335            //   Creates a new `NonNull` that is dangling, but well-aligned.
3336            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
3337        }
3338
3339        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
3340        #[allow(clippy::undocumented_unsafe_blocks)]
3341        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
3342        if ptr.is_null() {
3343            return Err(AllocError);
3344        }
3345        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
3346        #[allow(clippy::undocumented_unsafe_blocks)]
3347        Ok(unsafe { Box::from_raw(ptr) })
3348    }
3349
3350    /// Creates a `Box<Self>` from zeroed bytes, with a DST length equal to
3351    /// `count`.
3352    ///
3353    /// This function is useful for allocating large values of `Self` on the
3354    /// heap and zero-initializing them, without ever creating a temporary
3355    /// instance on the stack. For example,
3356    /// `<[u8]>::new_box_zeroed_with_elems(1048576)` will allocate the slice
3357    /// directly on the heap; it does not require storing the slice on the stack.
3358    ///
3359    /// On systems that use a heap implementation that supports allocating from
3360    /// pre-zeroed memory, using `new_box_zeroed_with_elems` may have performance
3361    /// benefits.
3362    ///
3363    /// If the trailing slice element of `Self` is a zero-sized type, then this
3364    /// function returns a `Box<Self>` whose trailing slice has the correct length.
3365    /// Such a box cannot contain any actual data, but its `len()` is still correct.
3366    ///
3367    /// # Errors
3368    ///
3369    /// Returns an error on allocation failure. Allocation failure is
3370    /// guaranteed never to cause a panic or an abort.
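    ///
    /// # Examples
    ///
    /// A minimal example, assuming the `alloc` feature is enabled and using a
    /// plain byte slice as `Self`:
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// // Allocates a boxed slice of 16 zeroed bytes directly on the heap.
    /// let bytes: Box<[u8]> = <[u8]>::new_box_zeroed_with_elems(16).unwrap();
    /// assert_eq!(bytes.len(), 16);
    /// assert!(bytes.iter().all(|&byte| byte == 0));
    /// ```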
3371    #[must_use = "has no side effects (other than allocation)"]
3372    #[cfg(feature = "alloc")]
3373    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3374    #[inline]
3375    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
3376    where
3377        Self: KnownLayout<PointerMetadata = usize>,
3378    {
3379        // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
3380        // `new_box`. The referent of the pointer returned by `alloc_zeroed`
3381        // (and, consequently, the `Box` derived from it) is a valid instance of
3382        // `Self`, because `Self` is `FromZeros`.
3383        unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
3384    }
3385
3386    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
3387    #[doc(hidden)]
3388    #[cfg(feature = "alloc")]
3389    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3390    #[must_use = "has no side effects (other than allocation)"]
3391    #[inline(always)]
3392    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
3393    where
3394        Self: Sized,
3395    {
3396        <[Self]>::new_box_zeroed_with_elems(len)
3397    }
3398
3399    /// Creates a `Vec<Self>` from zeroed bytes.
3400    ///
3401    /// This function is useful for allocating large `Vec`s and
3402    /// zero-initializing them, without ever creating a temporary instance of
3403    /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
3404    /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
3405    /// heap; it does not require storing intermediate values on the stack.
3406    ///
3407    /// On systems that use a heap implementation that supports allocating from
3408    /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
3409    ///
3410    /// If `Self` is a zero-sized type, then this function will return a
3411    /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
3412    /// actual information, but its `len()` property will report the correct
3413    /// value.
3414    ///
3415    /// # Errors
3416    ///
3417    /// Returns an error on allocation failure. Allocation failure is
3418    /// guaranteed never to cause a panic or an abort.
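    ///
    /// # Examples
    ///
    /// A minimal example, assuming the `alloc` feature is enabled:
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let v: Vec<u8> = u8::new_vec_zeroed(4).unwrap();
    /// assert_eq!(v, [0, 0, 0, 0]);
    /// ```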
3419    #[must_use = "has no side effects (other than allocation)"]
3420    #[cfg(feature = "alloc")]
3421    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3422    #[inline(always)]
3423    fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
3424    where
3425        Self: Sized,
3426    {
3427        <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
3428    }
3429
3430    /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
3431    /// the vector. The new items are initialized with zeros.
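    ///
    /// # Examples
    ///
    /// A minimal example, assuming the `alloc` feature is enabled:
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let mut v = vec![100u8, 200];
    /// // Append three zeroed elements.
    /// u8::extend_vec_zeroed(&mut v, 3).unwrap();
    /// assert_eq!(v, [100, 200, 0, 0, 0]);
    /// ```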
3432    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
3433    #[cfg(feature = "alloc")]
3434    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3435    #[inline(always)]
3436    fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
3437    where
3438        Self: Sized,
3439    {
3440        // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
3441        // panic condition is not satisfied.
3442        <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
3443    }
3444
3445    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
3446    /// items are initialized with zeros.
3447    ///
3448    /// # Panics
3449    ///
3450    /// Panics if `position > v.len()`.
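    ///
    /// # Examples
    ///
    /// A minimal example, assuming the `alloc` feature is enabled:
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let mut v = vec![1u8, 2, 3];
    /// // Insert two zeroed elements at index 1.
    /// u8::insert_vec_zeroed(&mut v, 1, 2).unwrap();
    /// assert_eq!(v, [1, 0, 0, 2, 3]);
    /// ```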
3451    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
3452    #[cfg(feature = "alloc")]
3453    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3454    #[inline]
3455    fn insert_vec_zeroed(
3456        v: &mut Vec<Self>,
3457        position: usize,
3458        additional: usize,
3459    ) -> Result<(), AllocError>
3460    where
3461        Self: Sized,
3462    {
3463        assert!(position <= v.len());
3464        // We only conditionally compile on versions on which `try_reserve` is
3465        // stable; the Clippy lint is a false positive.
3466        v.try_reserve(additional).map_err(|_| AllocError)?;
3467        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
3468        // * `ptr.add(position)`
3469        // * `position + additional`
3470        // * `v.len() + additional`
3471        //
3472        // `v.len() - position` cannot overflow because we asserted that
3473        // `position <= v.len()`.
3474        #[allow(clippy::multiple_unsafe_ops_per_block)]
3475        unsafe {
3476            // This is a potentially overlapping copy.
3477            let ptr = v.as_mut_ptr();
3478            #[allow(clippy::arithmetic_side_effects)]
3479            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
3480            ptr.add(position).write_bytes(0, additional);
3481            #[allow(clippy::arithmetic_side_effects)]
3482            v.set_len(v.len() + additional);
3483        }
3484
3485        Ok(())
3486    }
3487}
3488
3489/// Analyzes whether a type is [`FromBytes`].
3490///
3491/// This derive analyzes, at compile time, whether the annotated type satisfies
3492/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
3493/// supertraits if it is sound to do so. This derive can be applied to structs,
3494/// enums, and unions;
3495/// e.g.:
3496///
3497/// ```
3498/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3499/// #[derive(FromBytes)]
3500/// struct MyStruct {
3501/// # /*
3502///     ...
3503/// # */
3504/// }
3505///
3506/// #[derive(FromBytes)]
3507/// #[repr(u8)]
3508/// enum MyEnum {
3509/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3510/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3511/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3512/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3513/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3514/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3515/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3516/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3517/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3518/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3519/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3520/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3521/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3522/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3523/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3524/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3525/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3526/// #   VFF,
3527/// # /*
3528///     ...
3529/// # */
3530/// }
3531///
3532/// #[derive(FromBytes, Immutable)]
3533/// union MyUnion {
3534/// #   variant: u8,
3535/// # /*
3536///     ...
3537/// # */
3538/// }
3539/// ```
3540///
3541/// [safety conditions]: trait@FromBytes#safety
3542///
3543/// # Analysis
3544///
3545/// *This section describes, roughly, the analysis performed by this derive to
3546/// determine whether it is sound to implement `FromBytes` for a given type.
3547/// Unless you are modifying the implementation of this derive, or attempting to
3548/// manually implement `FromBytes` for a type yourself, you don't need to read
3549/// this section.*
3550///
3551/// If a type has the following properties, then this derive can implement
3552/// `FromBytes` for that type:
3553///
3554/// - If the type is a struct, all of its fields must be `FromBytes`.
3555/// - If the type is an enum:
3556///   - It must have a defined representation which is one of `u8`, `u16`, `i8`,
3557///     or `i16`.
3558///   - The maximum number of discriminants must be used (so that every possible
3559///     bit pattern is a valid one).
3560///   - Its fields must be `FromBytes`.
3561///
3562/// This analysis is subject to change. Unsafe code may *only* rely on the
3563/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3564/// implementation details of this derive.
3565///
3566/// ## Why isn't an explicit representation required for structs?
3567///
3568/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3569/// that structs are marked with `#[repr(C)]`.
3570///
3571/// Per the [Rust reference][reference],
3572///
3573/// > The representation of a type can change the padding between fields, but
3574/// > does not change the layout of the fields themselves.
3575///
3576/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3577///
3578/// Since the layout of structs only consists of padding bytes and field bytes,
3579/// a struct is soundly `FromBytes` if:
3580/// 1. its padding is soundly `FromBytes`, and
3581/// 2. its fields are soundly `FromBytes`.
3582///
3583/// The answer to the first question is always yes: padding bytes do not have
3584/// any validity constraints. A [discussion] of this question in the Unsafe Code
3585/// Guidelines Working Group concluded that it would be virtually unimaginable
3586/// for future versions of rustc to add validity constraints to padding bytes.
3587///
3588/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3589///
3590/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3591/// its fields are `FromBytes`.
3592#[cfg(any(feature = "derive", test))]
3593#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
3594pub use zerocopy_derive::FromBytes;
3595
3596/// Types for which any bit pattern is valid.
3597///
3598/// Any memory region of the appropriate length which contains initialized bytes
3599/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3600/// useful for efficiently parsing bytes as structured data.
3601///
3602/// # Warning: Padding bytes
3603///
3604/// Note that, when a value is moved or copied, only the non-padding bytes of
3605/// that value are guaranteed to be preserved. It is unsound to assume that
3606/// values written to padding bytes are preserved after a move or copy. For
3607/// example, the following is unsound:
3608///
3609/// ```rust,no_run
3610/// use core::mem::{size_of, transmute};
3611/// use zerocopy::FromZeros;
3612/// # use zerocopy_derive::*;
3613///
3614/// // Assume `Foo` is a type with padding bytes.
3615/// #[derive(FromZeros, Default)]
3616/// struct Foo {
3617/// # /*
3618///     ...
3619/// # */
3620/// }
3621///
3622/// let mut foo: Foo = Foo::default();
3623/// FromZeros::zero(&mut foo);
3624/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3625/// // those writes are not guaranteed to be preserved in padding bytes when
3626/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3627/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3628/// ```
3629///
3630/// # Implementation
3631///
3632/// **Do not implement this trait yourself!** Instead, use
3633/// [`#[derive(FromBytes)]`][derive]; e.g.:
3634///
3635/// ```
3636/// # use zerocopy_derive::{FromBytes, Immutable};
3637/// #[derive(FromBytes)]
3638/// struct MyStruct {
3639/// # /*
3640///     ...
3641/// # */
3642/// }
3643///
3644/// #[derive(FromBytes)]
3645/// #[repr(u8)]
3646/// enum MyEnum {
3647/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3648/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3649/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3650/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3651/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3652/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3653/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3654/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3655/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3656/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3657/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3658/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3659/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3660/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3661/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3662/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3663/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3664/// #   VFF,
3665/// # /*
3666///     ...
3667/// # */
3668/// }
3669///
3670/// #[derive(FromBytes, Immutable)]
3671/// union MyUnion {
3672/// #   variant: u8,
3673/// # /*
3674///     ...
3675/// # */
3676/// }
3677/// ```
3678///
3679/// This derive performs a sophisticated, compile-time safety analysis to
3680/// determine whether a type is `FromBytes`.
3681///
3682/// # Safety
3683///
3684/// *This section describes what is required in order for `T: FromBytes`, and
3685/// what unsafe code may assume of such types. If you don't plan on implementing
3686/// `FromBytes` manually, and you don't plan on writing unsafe code that
3687/// operates on `FromBytes` types, then you don't need to read this section.*
3688///
3689/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3690/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3691/// words, any byte value which is not uninitialized). If a type is marked as
3692/// `FromBytes` which violates this contract, it may cause undefined behavior.
3693///
3694/// `#[derive(FromBytes)]` only permits [types which satisfy these
3695/// requirements][derive-analysis].
3696///
3697#[cfg_attr(
3698    feature = "derive",
3699    doc = "[derive]: zerocopy_derive::FromBytes",
3700    doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3701)]
3702#[cfg_attr(
3703    not(feature = "derive"),
3704    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3705    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3706)]
3707#[cfg_attr(
3708    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3709    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3710)]
3711pub unsafe trait FromBytes: FromZeros {
3712    // The `Self: Sized` bound makes it so that `FromBytes` is still object
3713    // safe.
3714    #[doc(hidden)]
3715    fn only_derive_is_allowed_to_implement_this_trait()
3716    where
3717        Self: Sized;
3718
3719    /// Interprets the given `source` as a `&Self`.
3720    ///
3721    /// This method attempts to return a reference to `source` interpreted as a
3722    /// `Self`. If the length of `source` is not a [valid size of
3723    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3724    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3725    /// [infallibly discard the alignment error][size-error-from].
3726    ///
3727    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3728    ///
3729    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3730    /// [self-unaligned]: Unaligned
3731    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3732    /// [slice-dst]: KnownLayout#dynamically-sized-types
3733    ///
3734    /// # Compile-Time Assertions
3735    ///
3736    /// This method cannot yet be used on unsized types whose dynamically-sized
3737    /// component is zero-sized. Attempting to use this method on such types
3738    /// results in a compile-time assertion error; e.g.:
3739    ///
3740    /// ```compile_fail,E0080
3741    /// use zerocopy::*;
3742    /// # use zerocopy_derive::*;
3743    ///
3744    /// #[derive(FromBytes, Immutable, KnownLayout)]
3745    /// #[repr(C)]
3746    /// struct ZSTy {
3747    ///     leading_sized: u16,
3748    ///     trailing_dst: [()],
3749    /// }
3750    ///
3751    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // âš  Compile Error!
3752    /// ```
3753    ///
3754    /// # Examples
3755    ///
3756    /// ```
3757    /// use zerocopy::FromBytes;
3758    /// # use zerocopy_derive::*;
3759    ///
3760    /// #[derive(FromBytes, KnownLayout, Immutable)]
3761    /// #[repr(C)]
3762    /// struct PacketHeader {
3763    ///     src_port: [u8; 2],
3764    ///     dst_port: [u8; 2],
3765    ///     length: [u8; 2],
3766    ///     checksum: [u8; 2],
3767    /// }
3768    ///
3769    /// #[derive(FromBytes, KnownLayout, Immutable)]
3770    /// #[repr(C)]
3771    /// struct Packet {
3772    ///     header: PacketHeader,
3773    ///     body: [u8],
3774    /// }
3775    ///
3776    /// // These bytes encode a `Packet`.
3777    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
3778    ///
3779    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
3780    ///
3781    /// assert_eq!(packet.header.src_port, [0, 1]);
3782    /// assert_eq!(packet.header.dst_port, [2, 3]);
3783    /// assert_eq!(packet.header.length, [4, 5]);
3784    /// assert_eq!(packet.header.checksum, [6, 7]);
3785    /// assert_eq!(packet.body, [8, 9, 10, 11]);
3786    /// ```
3787    #[must_use = "has no side effects"]
3788    #[inline]
3789    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
3790    where
3791        Self: KnownLayout + Immutable,
3792    {
3793        static_assert_dst_is_not_zst!(Self);
3794        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
3795            Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
3796            Err(err) => Err(err.map_src(|src| src.as_ref())),
3797        }
3798    }
3799
3800    /// Interprets the prefix of the given `source` as a `&Self` without
3801    /// copying.
3802    ///
3803    /// This method computes the [largest possible size of `Self`][valid-size]
3804    /// that can fit in the leading bytes of `source`, then attempts to return
3805    /// both a reference to those bytes interpreted as a `Self`, and a reference
3806    /// to the remaining bytes. If there are insufficient bytes, or if `source`
3807    /// is not appropriately aligned, this returns `Err`. If [`Self:
3808    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3809    /// error][size-error-from].
3810    ///
3811    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3812    ///
3813    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3814    /// [self-unaligned]: Unaligned
3815    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3816    /// [slice-dst]: KnownLayout#dynamically-sized-types
3817    ///
3818    /// # Compile-Time Assertions
3819    ///
3820    /// This method cannot yet be used on unsized types whose dynamically-sized
3821    /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
3822    /// support such types. Attempting to use this method on such types results
3823    /// in a compile-time assertion error; e.g.:
3824    ///
3825    /// ```compile_fail,E0080
3826    /// use zerocopy::*;
3827    /// # use zerocopy_derive::*;
3828    ///
3829    /// #[derive(FromBytes, Immutable, KnownLayout)]
3830    /// #[repr(C)]
3831    /// struct ZSTy {
3832    ///     leading_sized: u16,
3833    ///     trailing_dst: [()],
3834    /// }
3835    ///
3836    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // âš  Compile Error!
3837    /// ```
3838    ///
3839    /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
3840    ///
3841    /// # Examples
3842    ///
3843    /// ```
3844    /// use zerocopy::FromBytes;
3845    /// # use zerocopy_derive::*;
3846    ///
3847    /// #[derive(FromBytes, KnownLayout, Immutable)]
3848    /// #[repr(C)]
3849    /// struct PacketHeader {
3850    ///     src_port: [u8; 2],
3851    ///     dst_port: [u8; 2],
3852    ///     length: [u8; 2],
3853    ///     checksum: [u8; 2],
3854    /// }
3855    ///
3856    /// #[derive(FromBytes, KnownLayout, Immutable)]
3857    /// #[repr(C)]
3858    /// struct Packet {
3859    ///     header: PacketHeader,
3860    ///     body: [[u8; 2]],
3861    /// }
3862    ///
3863    /// // These are more bytes than are needed to encode a `Packet`.
3864    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
3865    ///
3866    /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
3867    ///
3868    /// assert_eq!(packet.header.src_port, [0, 1]);
3869    /// assert_eq!(packet.header.dst_port, [2, 3]);
3870    /// assert_eq!(packet.header.length, [4, 5]);
3871    /// assert_eq!(packet.header.checksum, [6, 7]);
3872    /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
3873    /// assert_eq!(suffix, &[14u8][..]);
3874    /// ```
3875    #[must_use = "has no side effects"]
3876    #[inline]
3877    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
3878    where
3879        Self: KnownLayout + Immutable,
3880    {
3881        static_assert_dst_is_not_zst!(Self);
3882        ref_from_prefix_suffix(source, None, CastType::Prefix)
3883    }
3884
3885    /// Interprets the suffix of the given bytes as a `&Self`.
3886    ///
3887    /// This method computes the [largest possible size of `Self`][valid-size]
3888    /// that can fit in the trailing bytes of `source`, then attempts to return
3889    /// both a reference to those bytes interpreted as a `Self`, and a reference
3890    /// to the preceding bytes. If there are insufficient bytes, or if that
3891    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3892    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3893    /// alignment error][size-error-from].
3894    ///
3895    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3896    ///
3897    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3898    /// [self-unaligned]: Unaligned
3899    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3900    /// [slice-dst]: KnownLayout#dynamically-sized-types
3901    ///
3902    /// # Compile-Time Assertions
3903    ///
3904    /// This method cannot yet be used on unsized types whose dynamically-sized
3905    /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
3906    /// support such types. Attempting to use this method on such types results
3907    /// in a compile-time assertion error; e.g.:
3908    ///
3909    /// ```compile_fail,E0080
3910    /// use zerocopy::*;
3911    /// # use zerocopy_derive::*;
3912    ///
3913    /// #[derive(FromBytes, Immutable, KnownLayout)]
3914    /// #[repr(C)]
3915    /// struct ZSTy {
3916    ///     leading_sized: u16,
3917    ///     trailing_dst: [()],
3918    /// }
3919    ///
3920    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // âš  Compile Error!
3921    /// ```
3922    ///
3923    /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
3924    ///
3925    /// # Examples
3926    ///
3927    /// ```
3928    /// use zerocopy::FromBytes;
3929    /// # use zerocopy_derive::*;
3930    ///
3931    /// #[derive(FromBytes, Immutable, KnownLayout)]
3932    /// #[repr(C)]
3933    /// struct PacketTrailer {
3934    ///     frame_check_sequence: [u8; 4],
3935    /// }
3936    ///
3937    /// // These are more bytes than are needed to encode a `PacketTrailer`.
3938    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3939    ///
3940    /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
3941    ///
3942    /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
3943    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3944    /// ```
3945    #[must_use = "has no side effects"]
3946    #[inline]
3947    fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
3948    where
3949        Self: Immutable + KnownLayout,
3950    {
3951        static_assert_dst_is_not_zst!(Self);
3952        ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
3953    }
3954
3955    /// Interprets the given `source` as a `&mut Self`.
3956    ///
3957    /// This method attempts to return a reference to `source` interpreted as a
3958    /// `Self`. If the length of `source` is not a [valid size of
3959    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3960    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3961    /// [infallibly discard the alignment error][size-error-from].
3962    ///
3963    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3964    ///
3965    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3966    /// [self-unaligned]: Unaligned
3967    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3968    /// [slice-dst]: KnownLayout#dynamically-sized-types
3969    ///
3970    /// # Compile-Time Assertions
3971    ///
3972    /// This method cannot yet be used on unsized types whose dynamically-sized
3973    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
3974    /// support such types. Attempting to use this method on such types results
3975    /// in a compile-time assertion error; e.g.:
3976    ///
3977    /// ```compile_fail,E0080
3978    /// use zerocopy::*;
3979    /// # use zerocopy_derive::*;
3980    ///
3981    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3982    /// #[repr(C, packed)]
3983    /// struct ZSTy {
3984    ///     leading_sized: [u8; 2],
3985    ///     trailing_dst: [()],
3986    /// }
3987    ///
3988    /// let mut source = [85, 85];
3989    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // âš  Compile Error!
3990    /// ```
3991    ///
3992    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
3993    ///
3994    /// # Examples
3995    ///
3996    /// ```
3997    /// use zerocopy::FromBytes;
3998    /// # use zerocopy_derive::*;
3999    ///
4000    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
4001    /// #[repr(C)]
4002    /// struct PacketHeader {
4003    ///     src_port: [u8; 2],
4004    ///     dst_port: [u8; 2],
4005    ///     length: [u8; 2],
4006    ///     checksum: [u8; 2],
4007    /// }
4008    ///
4009    /// // These bytes encode a `PacketHeader`.
4010    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
4011    ///
4012    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
4013    ///
4014    /// assert_eq!(header.src_port, [0, 1]);
4015    /// assert_eq!(header.dst_port, [2, 3]);
4016    /// assert_eq!(header.length, [4, 5]);
4017    /// assert_eq!(header.checksum, [6, 7]);
4018    ///
4019    /// header.checksum = [0, 0];
4020    ///
4021    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
4022    /// ```
4023    #[must_use = "has no side effects"]
4024    #[inline]
4025    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
4026    where
4027        Self: IntoBytes + KnownLayout,
4028    {
4029        static_assert_dst_is_not_zst!(Self);
4030        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
4031            Ok(ptr) => Ok(ptr.recall_validity::<_, (_, (_, _))>().as_mut()),
4032            Err(err) => Err(err.map_src(|src| src.as_mut())),
4033        }
4034    }
4035
4036    /// Interprets the prefix of the given `source` as a `&mut Self` without
4037    /// copying.
4038    ///
4039    /// This method computes the [largest possible size of `Self`][valid-size]
4040    /// that can fit in the leading bytes of `source`, then attempts to return
4041    /// both a reference to those bytes interpreted as a `Self`, and a reference
4042    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4043    /// is not appropriately aligned, this returns `Err`. If [`Self:
4044    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4045    /// error][size-error-from].
4046    ///
4047    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4048    ///
4049    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4050    /// [self-unaligned]: Unaligned
4051    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4052    /// [slice-dst]: KnownLayout#dynamically-sized-types
4053    ///
4054    /// # Compile-Time Assertions
4055    ///
4056    /// This method cannot yet be used on unsized types whose dynamically-sized
4057    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
4058    /// support such types. Attempting to use this method on such types results
4059    /// in a compile-time assertion error; e.g.:
4060    ///
4061    /// ```compile_fail,E0080
4062    /// use zerocopy::*;
4063    /// # use zerocopy_derive::*;
4064    ///
4065    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
4066    /// #[repr(C, packed)]
4067    /// struct ZSTy {
4068    ///     leading_sized: [u8; 2],
4069    ///     trailing_dst: [()],
4070    /// }
4071    ///
4072    /// let mut source = [85, 85];
4073    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // âš  Compile Error!
4074    /// ```
4075    ///
4076    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
4077    ///
4078    /// # Examples
4079    ///
4080    /// ```
4081    /// use zerocopy::FromBytes;
4082    /// # use zerocopy_derive::*;
4083    ///
4084    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
4085    /// #[repr(C)]
4086    /// struct PacketHeader {
4087    ///     src_port: [u8; 2],
4088    ///     dst_port: [u8; 2],
4089    ///     length: [u8; 2],
4090    ///     checksum: [u8; 2],
4091    /// }
4092    ///
4093    /// // These are more bytes than are needed to encode a `PacketHeader`.
4094    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4095    ///
4096    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
4097    ///
4098    /// assert_eq!(header.src_port, [0, 1]);
4099    /// assert_eq!(header.dst_port, [2, 3]);
4100    /// assert_eq!(header.length, [4, 5]);
4101    /// assert_eq!(header.checksum, [6, 7]);
4102    /// assert_eq!(body, &[8, 9][..]);
4103    ///
4104    /// header.checksum = [0, 0];
4105    /// body.fill(1);
4106    ///
4107    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
4108    /// ```
4109    #[must_use = "has no side effects"]
4110    #[inline]
4111    fn mut_from_prefix(
4112        source: &mut [u8],
4113    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
4114    where
4115        Self: IntoBytes + KnownLayout,
4116    {
4117        static_assert_dst_is_not_zst!(Self);
4118        mut_from_prefix_suffix(source, None, CastType::Prefix)
4119    }
4120
4121    /// Interprets the suffix of the given `source` as a `&mut Self` without
4122    /// copying.
4123    ///
4124    /// This method computes the [largest possible size of `Self`][valid-size]
4125    /// that can fit in the trailing bytes of `source`, then attempts to return
4126    /// both a reference to those bytes interpreted as a `Self`, and a reference
4127    /// to the preceding bytes. If there are insufficient bytes, or if that
4128    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4129    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4130    /// alignment error][size-error-from].
4131    ///
4132    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4133    ///
4134    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4135    /// [self-unaligned]: Unaligned
4136    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4137    /// [slice-dst]: KnownLayout#dynamically-sized-types
4138    ///
4139    /// # Compile-Time Assertions
4140    ///
4141    /// This method cannot yet be used on unsized types whose dynamically-sized
4142    /// component is zero-sized. Attempting to use this method on such types
4143    /// results in a compile-time assertion error; e.g.:
4144    ///
4145    /// ```compile_fail,E0080
4146    /// use zerocopy::*;
4147    /// # use zerocopy_derive::*;
4148    ///
4149    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
4150    /// #[repr(C, packed)]
4151    /// struct ZSTy {
4152    ///     leading_sized: [u8; 2],
4153    ///     trailing_dst: [()],
4154    /// }
4155    ///
4156    /// let mut source = [85, 85];
4157    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
4158    /// ```
4159    ///
4160    /// # Examples
4161    ///
4162    /// ```
4163    /// use zerocopy::FromBytes;
4164    /// # use zerocopy_derive::*;
4165    ///
4166    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
4167    /// #[repr(C)]
4168    /// struct PacketTrailer {
4169    ///     frame_check_sequence: [u8; 4],
4170    /// }
4171    ///
4172    /// // These are more bytes than are needed to encode a `PacketTrailer`.
4173    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4174    ///
4175    /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
4176    ///
4177    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
4178    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4179    ///
4180    /// prefix.fill(0);
4181    /// trailer.frame_check_sequence.fill(1);
4182    ///
4183    /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
4184    /// ```
4185    #[must_use = "has no side effects"]
4186    #[inline]
4187    fn mut_from_suffix(
4188        source: &mut [u8],
4189    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
4190    where
4191        Self: IntoBytes + KnownLayout,
4192    {
4193        static_assert_dst_is_not_zst!(Self);
4194        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
4195    }
4196
4197    /// Interprets the given `source` as a `&Self` with a DST length equal to
4198    /// `count`.
4199    ///
4200    /// This method attempts to return a reference to `source` interpreted as a
4201    /// `Self` with `count` trailing elements. If the length of `source` is not
4202    /// equal to the size of `Self` with `count` elements, or if `source` is not
4203    /// appropriately aligned, this returns `Err`. If [`Self:
4204    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4205    /// error][size-error-from].
4206    ///
4207    /// [self-unaligned]: Unaligned
4208    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4209    ///
4210    /// # Examples
4211    ///
4212    /// ```
4213    /// use zerocopy::FromBytes;
4214    /// # use zerocopy_derive::*;
4215    ///
4216    /// # #[derive(Debug, PartialEq, Eq)]
4217    /// #[derive(FromBytes, Immutable)]
4218    /// #[repr(C)]
4219    /// struct Pixel {
4220    ///     r: u8,
4221    ///     g: u8,
4222    ///     b: u8,
4223    ///     a: u8,
4224    /// }
4225    ///
4226    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4227    ///
4228    /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
4229    ///
4230    /// assert_eq!(pixels, &[
4231    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4232    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4233    /// ]);
4234    ///
4235    /// ```
4236    ///
4237    /// Since an explicit `count` is provided, this method supports types with
4238    /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
4239    /// which do not take an explicit count do not support such types.
4240    ///
4241    /// ```
4242    /// use zerocopy::*;
4243    /// # use zerocopy_derive::*;
4244    ///
4245    /// #[derive(FromBytes, Immutable, KnownLayout)]
4246    /// #[repr(C)]
4247    /// struct ZSTy {
4248    ///     leading_sized: [u8; 2],
4249    ///     trailing_dst: [()],
4250    /// }
4251    ///
4252    /// let src = &[85, 85][..];
4253    /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
4254    /// assert_eq!(zsty.trailing_dst.len(), 42);
4255    /// ```
4256    ///
4257    /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
4258    #[must_use = "has no side effects"]
4259    #[inline]
4260    fn ref_from_bytes_with_elems(
4261        source: &[u8],
4262        count: usize,
4263    ) -> Result<&Self, CastError<&[u8], Self>>
4264    where
4265        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4266    {
4267        let source = Ptr::from_ref(source);
4268        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4269        match maybe_slf {
4270            Ok(slf) => Ok(slf.recall_validity().as_ref()),
4271            Err(err) => Err(err.map_src(|s| s.as_ref())),
4272        }
4273    }
4274
4275    /// Interprets the prefix of the given `source` as a DST `&Self` with length
4276    /// equal to `count`.
4277    ///
4278    /// This method attempts to return a reference to the prefix of `source`
4279    /// interpreted as a `Self` with `count` trailing elements, and a reference
4280    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4281    /// is not appropriately aligned, this returns `Err`. If [`Self:
4282    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4283    /// error][size-error-from].
4284    ///
4285    /// [self-unaligned]: Unaligned
4286    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4287    ///
4288    /// # Examples
4289    ///
4290    /// ```
4291    /// use zerocopy::FromBytes;
4292    /// # use zerocopy_derive::*;
4293    ///
4294    /// # #[derive(Debug, PartialEq, Eq)]
4295    /// #[derive(FromBytes, Immutable)]
4296    /// #[repr(C)]
4297    /// struct Pixel {
4298    ///     r: u8,
4299    ///     g: u8,
4300    ///     b: u8,
4301    ///     a: u8,
4302    /// }
4303    ///
4304    /// // These are more bytes than are needed to encode two `Pixel`s.
4305    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4306    ///
4307    /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
4308    ///
4309    /// assert_eq!(pixels, &[
4310    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4311    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4312    /// ]);
4313    ///
4314    /// assert_eq!(suffix, &[8, 9]);
4315    /// ```
4316    ///
4317    /// Since an explicit `count` is provided, this method supports types with
4318    /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
4319    /// which do not take an explicit count do not support such types.
4320    ///
4321    /// ```
4322    /// use zerocopy::*;
4323    /// # use zerocopy_derive::*;
4324    ///
4325    /// #[derive(FromBytes, Immutable, KnownLayout)]
4326    /// #[repr(C)]
4327    /// struct ZSTy {
4328    ///     leading_sized: [u8; 2],
4329    ///     trailing_dst: [()],
4330    /// }
4331    ///
4332    /// let src = &[85, 85][..];
4333    /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
4334    /// assert_eq!(zsty.trailing_dst.len(), 42);
4335    /// ```
4336    ///
4337    /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
4338    #[must_use = "has no side effects"]
4339    #[inline]
4340    fn ref_from_prefix_with_elems(
4341        source: &[u8],
4342        count: usize,
4343    ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
4344    where
4345        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4346    {
4347        ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
4348    }
4349
4350    /// Interprets the suffix of the given `source` as a DST `&Self` with length
4351    /// equal to `count`.
4352    ///
4353    /// This method attempts to return a reference to the suffix of `source`
4354    /// interpreted as a `Self` with `count` trailing elements, and a reference
4355    /// to the preceding bytes. If there are insufficient bytes, or if that
4356    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4357    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4358    /// alignment error][size-error-from].
4359    ///
4360    /// [self-unaligned]: Unaligned
4361    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4362    ///
4363    /// # Examples
4364    ///
4365    /// ```
4366    /// use zerocopy::FromBytes;
4367    /// # use zerocopy_derive::*;
4368    ///
4369    /// # #[derive(Debug, PartialEq, Eq)]
4370    /// #[derive(FromBytes, Immutable)]
4371    /// #[repr(C)]
4372    /// struct Pixel {
4373    ///     r: u8,
4374    ///     g: u8,
4375    ///     b: u8,
4376    ///     a: u8,
4377    /// }
4378    ///
4379    /// // These are more bytes than are needed to encode two `Pixel`s.
4380    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4381    ///
4382    /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
4383    ///
4384    /// assert_eq!(prefix, &[0, 1]);
4385    ///
4386    /// assert_eq!(pixels, &[
4387    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4388    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4389    /// ]);
4390    /// ```
4391    ///
4392    /// Since an explicit `count` is provided, this method supports types with
4393    /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
4394    /// which do not take an explicit count do not support such types.
4395    ///
4396    /// ```
4397    /// use zerocopy::*;
4398    /// # use zerocopy_derive::*;
4399    ///
4400    /// #[derive(FromBytes, Immutable, KnownLayout)]
4401    /// #[repr(C)]
4402    /// struct ZSTy {
4403    ///     leading_sized: [u8; 2],
4404    ///     trailing_dst: [()],
4405    /// }
4406    ///
4407    /// let src = &[85, 85][..];
4408    /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
4409    /// assert_eq!(zsty.trailing_dst.len(), 42);
4410    /// ```
4411    ///
4412    /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
4413    #[must_use = "has no side effects"]
4414    #[inline]
4415    fn ref_from_suffix_with_elems(
4416        source: &[u8],
4417        count: usize,
4418    ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
4419    where
4420        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4421    {
4422        ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4423    }
4424
4425    /// Interprets the given `source` as a `&mut Self` with a DST length equal
4426    /// to `count`.
4427    ///
4428    /// This method attempts to return a reference to `source` interpreted as a
4429    /// `Self` with `count` trailing elements. If the length of `source` is not
4430    /// equal to the size of `Self` with `count` elements, or if `source` is not
4431    /// appropriately aligned, this returns `Err`. If [`Self:
4432    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4433    /// error][size-error-from].
4434    ///
4435    /// [self-unaligned]: Unaligned
4436    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4437    ///
4438    /// # Examples
4439    ///
4440    /// ```
4441    /// use zerocopy::FromBytes;
4442    /// # use zerocopy_derive::*;
4443    ///
4444    /// # #[derive(Debug, PartialEq, Eq)]
4445    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4446    /// #[repr(C)]
4447    /// struct Pixel {
4448    ///     r: u8,
4449    ///     g: u8,
4450    ///     b: u8,
4451    ///     a: u8,
4452    /// }
4453    ///
4454    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
4455    ///
4456    /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
4457    ///
4458    /// assert_eq!(pixels, &[
4459    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4460    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4461    /// ]);
4462    ///
4463    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4464    ///
4465    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
4466    /// ```
4467    ///
4468    /// Since an explicit `count` is provided, this method supports types with
4469    /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
4470    /// which do not take an explicit count do not support such types.
4471    ///
4472    /// ```
4473    /// use zerocopy::*;
4474    /// # use zerocopy_derive::*;
4475    ///
4476    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4477    /// #[repr(C, packed)]
4478    /// struct ZSTy {
4479    ///     leading_sized: [u8; 2],
4480    ///     trailing_dst: [()],
4481    /// }
4482    ///
4483    /// let src = &mut [85, 85][..];
4484    /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
4485    /// assert_eq!(zsty.trailing_dst.len(), 42);
4486    /// ```
4487    ///
4488    /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
4489    #[must_use = "has no side effects"]
4490    #[inline]
4491    fn mut_from_bytes_with_elems(
4492        source: &mut [u8],
4493        count: usize,
4494    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
4495    where
4496        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
4497    {
4498        let source = Ptr::from_mut(source);
4499        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4500        match maybe_slf {
4501            Ok(slf) => Ok(slf.recall_validity::<_, (_, (_, BecauseExclusive))>().as_mut()),
4502            Err(err) => Err(err.map_src(|s| s.as_mut())),
4503        }
4504    }
4505
4506    /// Interprets the prefix of the given `source` as a `&mut Self` with DST
4507    /// length equal to `count`.
4508    ///
4509    /// This method attempts to return a reference to the prefix of `source`
4510    /// interpreted as a `Self` with `count` trailing elements, and a reference
4511    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4512    /// is not appropriately aligned, this returns `Err`. If [`Self:
4513    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4514    /// error][size-error-from].
4515    ///
4516    /// [self-unaligned]: Unaligned
4517    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4518    ///
4519    /// # Examples
4520    ///
4521    /// ```
4522    /// use zerocopy::FromBytes;
4523    /// # use zerocopy_derive::*;
4524    ///
4525    /// # #[derive(Debug, PartialEq, Eq)]
4526    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4527    /// #[repr(C)]
4528    /// struct Pixel {
4529    ///     r: u8,
4530    ///     g: u8,
4531    ///     b: u8,
4532    ///     a: u8,
4533    /// }
4534    ///
4535    /// // These are more bytes than are needed to encode two `Pixel`s.
4536    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4537    ///
4538    /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
4539    ///
4540    /// assert_eq!(pixels, &[
4541    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4542    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4543    /// ]);
4544    ///
4545    /// assert_eq!(suffix, &[8, 9]);
4546    ///
4547    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4548    /// suffix.fill(1);
4549    ///
4550    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
4551    /// ```
4552    ///
4553    /// Since an explicit `count` is provided, this method supports types with
4554    /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
4555    /// which do not take an explicit count do not support such types.
4556    ///
4557    /// ```
4558    /// use zerocopy::*;
4559    /// # use zerocopy_derive::*;
4560    ///
4561    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4562    /// #[repr(C, packed)]
4563    /// struct ZSTy {
4564    ///     leading_sized: [u8; 2],
4565    ///     trailing_dst: [()],
4566    /// }
4567    ///
4568    /// let src = &mut [85, 85][..];
4569    /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
4570    /// assert_eq!(zsty.trailing_dst.len(), 42);
4571    /// ```
4572    ///
4573    /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
4574    #[must_use = "has no side effects"]
4575    #[inline]
4576    fn mut_from_prefix_with_elems(
4577        source: &mut [u8],
4578        count: usize,
4579    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
4580    where
4581        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4582    {
4583        mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
4584    }
4585
4586    /// Interprets the suffix of the given `source` as a `&mut Self` with DST
4587    /// length equal to `count`.
4588    ///
4589    /// This method attempts to return a reference to the suffix of `source`
4590    /// interpreted as a `Self` with `count` trailing elements, and a reference
4591    /// to the preceding bytes. If there are insufficient bytes, or if that
4592    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4593    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4594    /// alignment error][size-error-from].
4595    ///
4596    /// [self-unaligned]: Unaligned
4597    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4598    ///
4599    /// # Examples
4600    ///
4601    /// ```
4602    /// use zerocopy::FromBytes;
4603    /// # use zerocopy_derive::*;
4604    ///
4605    /// # #[derive(Debug, PartialEq, Eq)]
4606    /// #[derive(FromBytes, IntoBytes, Immutable)]
4607    /// #[repr(C)]
4608    /// struct Pixel {
4609    ///     r: u8,
4610    ///     g: u8,
4611    ///     b: u8,
4612    ///     a: u8,
4613    /// }
4614    ///
4615    /// // These are more bytes than are needed to encode two `Pixel`s.
4616    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4617    ///
4618    /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
4619    ///
4620    /// assert_eq!(prefix, &[0, 1]);
4621    ///
4622    /// assert_eq!(pixels, &[
4623    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4624    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4625    /// ]);
4626    ///
4627    /// prefix.fill(9);
4628    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4629    ///
4630    /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
4631    /// ```
4632    ///
4633    /// Since an explicit `count` is provided, this method supports types with
4634    /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
4635    /// which do not take an explicit count do not support such types.
4636    ///
4637    /// ```
4638    /// use zerocopy::*;
4639    /// # use zerocopy_derive::*;
4640    ///
4641    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4642    /// #[repr(C, packed)]
4643    /// struct ZSTy {
4644    ///     leading_sized: [u8; 2],
4645    ///     trailing_dst: [()],
4646    /// }
4647    ///
4648    /// let src = &mut [85, 85][..];
4649    /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
4650    /// assert_eq!(zsty.trailing_dst.len(), 42);
4651    /// ```
4652    ///
4653    /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
4654    #[must_use = "has no side effects"]
4655    #[inline]
4656    fn mut_from_suffix_with_elems(
4657        source: &mut [u8],
4658        count: usize,
4659    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
4660    where
4661        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4662    {
4663        mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4664    }
4665
4666    /// Reads a copy of `Self` from the given `source`.
4667    ///
4668    /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
4669    ///
4670    /// # Examples
4671    ///
4672    /// ```
4673    /// use zerocopy::FromBytes;
4674    /// # use zerocopy_derive::*;
4675    ///
4676    /// #[derive(FromBytes)]
4677    /// #[repr(C)]
4678    /// struct PacketHeader {
4679    ///     src_port: [u8; 2],
4680    ///     dst_port: [u8; 2],
4681    ///     length: [u8; 2],
4682    ///     checksum: [u8; 2],
4683    /// }
4684    ///
4685    /// // These bytes encode a `PacketHeader`.
4686    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4687    ///
4688    /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
4689    ///
4690    /// assert_eq!(header.src_port, [0, 1]);
4691    /// assert_eq!(header.dst_port, [2, 3]);
4692    /// assert_eq!(header.length, [4, 5]);
4693    /// assert_eq!(header.checksum, [6, 7]);
4694    /// ```
4695    #[must_use = "has no side effects"]
4696    #[inline]
4697    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
4698    where
4699        Self: Sized,
4700    {
4701        match Ref::<_, Unalign<Self>>::sized_from(source) {
4702            Ok(r) => Ok(Ref::read(&r).into_inner()),
4703            Err(CastError::Size(e)) => Err(e.with_dst()),
4704            Err(CastError::Alignment(_)) => {
4705                // SAFETY: `Unalign<Self>` is trivially aligned, so
4706                // `Ref::sized_from` cannot fail due to unmet alignment
4707                // requirements.
4708                unsafe { core::hint::unreachable_unchecked() }
4709            }
4710            Err(CastError::Validity(i)) => match i {},
4711        }
4712    }
4713
4714    /// Reads a copy of `Self` from the prefix of the given `source`.
4715    ///
4716    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
4717    /// of `source`, returning that `Self` and any remaining bytes. If
4718    /// `source.len() < size_of::<Self>()`, it returns `Err`.
4719    ///
4720    /// # Examples
4721    ///
4722    /// ```
4723    /// use zerocopy::FromBytes;
4724    /// # use zerocopy_derive::*;
4725    ///
4726    /// #[derive(FromBytes)]
4727    /// #[repr(C)]
4728    /// struct PacketHeader {
4729    ///     src_port: [u8; 2],
4730    ///     dst_port: [u8; 2],
4731    ///     length: [u8; 2],
4732    ///     checksum: [u8; 2],
4733    /// }
4734    ///
4735    /// // These are more bytes than are needed to encode a `PacketHeader`.
4736    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4737    ///
4738    /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
4739    ///
4740    /// assert_eq!(header.src_port, [0, 1]);
4741    /// assert_eq!(header.dst_port, [2, 3]);
4742    /// assert_eq!(header.length, [4, 5]);
4743    /// assert_eq!(header.checksum, [6, 7]);
4744    /// assert_eq!(body, [8, 9]);
4745    /// ```
4746    #[must_use = "has no side effects"]
4747    #[inline]
4748    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
4749    where
4750        Self: Sized,
4751    {
4752        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
4753            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
4754            Err(CastError::Size(e)) => Err(e.with_dst()),
4755            Err(CastError::Alignment(_)) => {
4756                // SAFETY: `Unalign<Self>` is trivially aligned, so
4757                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
4758                // requirements.
4759                unsafe { core::hint::unreachable_unchecked() }
4760            }
4761            Err(CastError::Validity(i)) => match i {},
4762        }
4763    }
4764
4765    /// Reads a copy of `Self` from the suffix of the given `source`.
4766    ///
4767    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
4768    /// of `source`, returning that `Self` and any preceding bytes. If
4769    /// `source.len() < size_of::<Self>()`, it returns `Err`.
4770    ///
4771    /// # Examples
4772    ///
4773    /// ```
4774    /// use zerocopy::FromBytes;
4775    /// # use zerocopy_derive::*;
4776    ///
4777    /// #[derive(FromBytes)]
4778    /// #[repr(C)]
4779    /// struct PacketTrailer {
4780    ///     frame_check_sequence: [u8; 4],
4781    /// }
4782    ///
4783    /// // These are more bytes than are needed to encode a `PacketTrailer`.
4784    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4785    ///
4786    /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
4787    ///
4788    /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
4789    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4790    /// ```
4791    #[must_use = "has no side effects"]
4792    #[inline]
4793    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
4794    where
4795        Self: Sized,
4796    {
4797        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
4798            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
4799            Err(CastError::Size(e)) => Err(e.with_dst()),
4800            Err(CastError::Alignment(_)) => {
4801                // SAFETY: `Unalign<Self>` is trivially aligned, so
4802                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
4803                // requirements.
4804                unsafe { core::hint::unreachable_unchecked() }
4805            }
4806            Err(CastError::Validity(i)) => match i {},
4807        }
4808    }
4809
4810    /// Reads a copy of `Self` from an `io::Read`.
4811    ///
4812    /// This is useful for interfacing with operating system byte sources (files,
4813    /// sockets, etc.).
4814    ///
4815    /// # Examples
4816    ///
4817    /// ```no_run
4818    /// use zerocopy::{byteorder::big_endian::*, FromBytes};
4819    /// use std::fs::File;
4820    /// # use zerocopy_derive::*;
4821    ///
4822    /// #[derive(FromBytes)]
4823    /// #[repr(C)]
4824    /// struct BitmapFileHeader {
4825    ///     signature: [u8; 2],
4826    ///     size: U32,
4827    ///     reserved: U64,
4828    ///     offset: U64,
4829    /// }
4830    ///
4831    /// let mut file = File::open("image.bin").unwrap();
4832    /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
4833    /// ```
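    ///
    /// A self-contained variant of the same idea (a sketch that relies only on
    /// the standard library's `impl io::Read for &[u8]`):
    ///
    /// ```
    /// use zerocopy::FromBytes;
    ///
    /// // Reading from an in-memory byte source; the bytes are interpreted in
    /// // the native in-memory representation of `u32`.
    /// let src = &[0x01u8, 0x02, 0x03, 0x04][..];
    /// let n = u32::read_from_io(src).unwrap();
    /// assert_eq!(n, u32::from_ne_bytes([0x01, 0x02, 0x03, 0x04]));
    /// ```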
4834    #[cfg(feature = "std")]
4835    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
4836    #[inline(always)]
4837    fn read_from_io<R>(mut src: R) -> io::Result<Self>
4838    where
4839        Self: Sized,
4840        R: io::Read,
4841    {
4842        // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
4843        // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
4844        // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
4845        // will not necessarily preserve zeros written to those padding byte
4846        // locations, and so `buf` could contain uninitialized bytes.
4847        let mut buf = CoreMaybeUninit::<Self>::uninit();
4848        buf.zero();
4849
4850        let ptr = Ptr::from_mut(&mut buf);
4851        // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
4852        // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
4853        // cannot be used to write values which will violate `buf`'s bit
4854        // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
4855        // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
4856        // cannot be violated even though `buf` may have more permissive bit
4857        // validity than `ptr`.
4858        let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
4859        let ptr = ptr.as_bytes();
4860        src.read_exact(ptr.as_mut())?;
4861        // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
4862        // `FromBytes`.
4863        Ok(unsafe { buf.assume_init() })
4864    }
4865
4866    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
4867    #[doc(hidden)]
4868    #[must_use = "has no side effects"]
4869    #[inline(always)]
4870    fn ref_from(source: &[u8]) -> Option<&Self>
4871    where
4872        Self: KnownLayout + Immutable,
4873    {
4874        Self::ref_from_bytes(source).ok()
4875    }
4876
4877    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
4878    #[doc(hidden)]
4879    #[must_use = "has no side effects"]
4880    #[inline(always)]
4881    fn mut_from(source: &mut [u8]) -> Option<&mut Self>
4882    where
4883        Self: KnownLayout + IntoBytes,
4884    {
4885        Self::mut_from_bytes(source).ok()
4886    }
4887
4888    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
4889    #[doc(hidden)]
4890    #[must_use = "has no side effects"]
4891    #[inline(always)]
4892    fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
4893    where
4894        Self: Sized + Immutable,
4895    {
4896        <[Self]>::ref_from_prefix_with_elems(source, count).ok()
4897    }
4898
4899    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
4900    #[doc(hidden)]
4901    #[must_use = "has no side effects"]
4902    #[inline(always)]
4903    fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
4904    where
4905        Self: Sized + Immutable,
4906    {
4907        <[Self]>::ref_from_suffix_with_elems(source, count).ok()
4908    }
4909
4910    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
4911    #[doc(hidden)]
4912    #[must_use = "has no side effects"]
4913    #[inline(always)]
4914    fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
4915    where
4916        Self: Sized + IntoBytes,
4917    {
4918        <[Self]>::mut_from_prefix_with_elems(source, count).ok()
4919    }
4920
4921    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
4922    #[doc(hidden)]
4923    #[must_use = "has no side effects"]
4924    #[inline(always)]
4925    fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
4926    where
4927        Self: Sized + IntoBytes,
4928    {
4929        <[Self]>::mut_from_suffix_with_elems(source, count).ok()
4930    }
4931
4932    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
4933    #[doc(hidden)]
4934    #[must_use = "has no side effects"]
4935    #[inline(always)]
4936    fn read_from(source: &[u8]) -> Option<Self>
4937    where
4938        Self: Sized,
4939    {
4940        Self::read_from_bytes(source).ok()
4941    }
4942}
4943
4944/// Interprets the given affix of the given bytes as a `&T`.
4945///
4946/// This function computes the largest possible size of `T` that can fit in the
4947/// prefix or suffix bytes of `source`, then attempts to return both a reference
4948/// to those bytes interpreted as a `T`, and a reference to the excess bytes.
4949/// If there are insufficient bytes, or if that affix of `source` is not
4950/// appropriately aligned, this returns `Err`.
4951#[inline(always)]
4952fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
4953    source: &[u8],
4954    meta: Option<T::PointerMetadata>,
4955    cast_type: CastType,
4956) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
4957    let (slf, prefix_suffix) = Ptr::from_ref(source)
4958        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
4959        .map_err(|err| err.map_src(|s| s.as_ref()))?;
4960    Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
4961}
4962
4963/// Interprets the given affix of the given bytes as a `&mut T` without
4964/// copying.
4965///
4966/// This function computes the largest possible size of `T` that can fit in the
4967/// prefix or suffix bytes of `source`, then attempts to return both a reference
4968/// to those bytes interpreted as a `T`, and a reference to the excess bytes.
4969/// If there are insufficient bytes, or if that affix of `source` is not
4970/// appropriately aligned, this returns `Err`.
4971#[inline(always)]
4972fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
4973    source: &mut [u8],
4974    meta: Option<T::PointerMetadata>,
4975    cast_type: CastType,
4976) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
4977    let (slf, prefix_suffix) = Ptr::from_mut(source)
4978        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
4979        .map_err(|err| err.map_src(|s| s.as_mut()))?;
4980    Ok((slf.recall_validity::<_, (_, (_, _))>().as_mut(), prefix_suffix.as_mut()))
4981}
4982
4983/// Analyzes whether a type is [`IntoBytes`].
4984///
4985/// This derive analyzes, at compile time, whether the annotated type satisfies
4986/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
4987/// sound to do so. This derive can be applied to structs and enums (see below
4988/// for union support); e.g.:
4989///
4990/// ```
4991/// # use zerocopy_derive::{IntoBytes};
4992/// #[derive(IntoBytes)]
4993/// #[repr(C)]
4994/// struct MyStruct {
4995/// # /*
4996///     ...
4997/// # */
4998/// }
4999///
5000/// #[derive(IntoBytes)]
5001/// #[repr(u8)]
5002/// enum MyEnum {
5003/// #   Variant,
5004/// # /*
5005///     ...
5006/// # */
5007/// }
5008/// ```
5009///
5010/// [safety conditions]: trait@IntoBytes#safety
5011///
5012/// # Error Messages
5013///
5014/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
5015/// for `IntoBytes` is implemented, you may get an error like this:
5016///
5017/// ```text
5018/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
5019///   --> lib.rs:23:10
5020///    |
5021///  1 | #[derive(IntoBytes)]
5022///    |          ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
5023///    |
5024///    = help: the following implementations were found:
5025///                   <() as PaddingFree<T, false>>
5026/// ```
5027///
5028/// This error indicates that the type being annotated has padding bytes, which
5029/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
5030/// fields by using types in the [`byteorder`] module, wrapping field types in
5031/// [`Unalign`], adding explicit struct fields where those padding bytes would
5032/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
5033/// layout] for more information about type layout and padding.
5034///
5035/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
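///
/// For instance (a minimal sketch, not drawn from this crate's test suite, with
/// illustrative field names), a `repr(C)` struct containing a `u8` followed by
/// a `u16` has one padding byte and is rejected by the derive; swapping the
/// `u16` for the alignment-1 [`byteorder`] type `U16` removes the padding:
///
/// ```
/// # use zerocopy_derive::IntoBytes;
/// use zerocopy::byteorder::big_endian::U16;
///
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// struct Record {
///     flag: u8,
///     // `U16` has alignment 1, so no padding byte is inserted after `flag`.
///     value: U16,
/// }
/// ```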
5036///
5037/// # Unions
5038///
5039/// Currently, union bit validity is [up in the air][union-validity], and so
5040/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
5041/// However, implementing `IntoBytes` on a union type is likely sound on all
5042/// existing Rust toolchains - it's just that it may become unsound in the
5043/// future. You can opt in to `#[derive(IntoBytes)]` support on unions by
5044/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
5045///
5046/// ```shell
5047/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
5048/// ```
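///
/// With that cfg enabled, the derive can also be applied to unions; e.g. (an
/// illustrative sketch, assuming every field is `IntoBytes` and the union
/// contains no padding bytes):
///
/// ```ignore
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// union Byte {
///     unsigned: u8,
///     signed: i8,
/// }
/// ```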
5049///
5050/// However, it is your responsibility to ensure that this derive is sound on
5051/// the specific versions of the Rust toolchain you are using! We make no
5052/// stability or soundness guarantees regarding this cfg, and may remove it at
5053/// any point.
5054///
5055/// We are actively working with Rust to stabilize the necessary language
5056/// guarantees to support this in a forwards-compatible way, which will enable
5057/// us to remove the cfg gate. As part of this effort, we need to know how much
5058/// demand there is for this feature. If you would like to use `IntoBytes` on
5059/// unions, [please let us know][discussion].
5060///
5061/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
5062/// [discussion]: https://github.com/google/zerocopy/discussions/1802
5063///
5064/// # Analysis
5065///
5066/// *This section describes, roughly, the analysis performed by this derive to
5067/// determine whether it is sound to implement `IntoBytes` for a given type.
5068/// Unless you are modifying the implementation of this derive, or attempting to
5069/// manually implement `IntoBytes` for a type yourself, you don't need to read
5070/// this section.*
5071///
5072/// If a type has the following properties, then this derive can implement
5073/// `IntoBytes` for that type:
5074///
5075/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
5076///     - if the type is `repr(transparent)` or `repr(packed)`, it is
5077///       [`IntoBytes`] if its fields are [`IntoBytes`]; else,
5078///     - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
5079///       if its field is [`IntoBytes`]; else,
5080///     - if the type has no generic parameters, it is [`IntoBytes`] if the type
5081///       is sized and has no padding bytes; else,
5082///     - if the type is `repr(C)`, its fields must be [`Unaligned`].
5083/// - If the type is an enum:
5084///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
5085///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
5086///   - It must have no padding bytes.
5087///   - Its fields must be [`IntoBytes`].
5088///
5089/// This analysis is subject to change. Unsafe code may *only* rely on the
5090/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
5091/// implementation details of this derive.
5092///
5093/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
5094#[cfg(any(feature = "derive", test))]
5095#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5096pub use zerocopy_derive::IntoBytes;
5097
5098/// Types that can be converted to an immutable slice of initialized bytes.
5099///
5100/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
5101/// same size. This is useful for efficiently serializing structured data as raw
5102/// bytes.
5103///
5104/// # Implementation
5105///
5106/// **Do not implement this trait yourself!** Instead, use
5107/// [`#[derive(IntoBytes)]`][derive]; e.g.:
5108///
5109/// ```
5110/// # use zerocopy_derive::IntoBytes;
5111/// #[derive(IntoBytes)]
5112/// #[repr(C)]
5113/// struct MyStruct {
5114/// # /*
5115///     ...
5116/// # */
5117/// }
5118///
5119/// #[derive(IntoBytes)]
5120/// #[repr(u8)]
5121/// enum MyEnum {
5122/// #   Variant0,
5123/// # /*
5124///     ...
5125/// # */
5126/// }
5127/// ```
5128///
5129/// This derive performs a sophisticated, compile-time safety analysis to
5130/// determine whether a type is `IntoBytes`. See the [derive
5131/// documentation][derive] for guidance on how to interpret error messages
5132/// produced by the derive's analysis.
5133///
5134/// # Safety
5135///
5136/// *This section describes what is required in order for `T: IntoBytes`, and
5137/// what unsafe code may assume of such types. If you don't plan on implementing
5138/// `IntoBytes` manually, and you don't plan on writing unsafe code that
5139/// operates on `IntoBytes` types, then you don't need to read this section.*
5140///
5141/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
5142/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
5143/// marked as `IntoBytes` which violates this contract, it may cause undefined
5144/// behavior.
5145///
5146/// `#[derive(IntoBytes)]` only permits [types which satisfy these
5147/// requirements][derive-analysis].
5148///
5149#[cfg_attr(
5150    feature = "derive",
5151    doc = "[derive]: zerocopy_derive::IntoBytes",
5152    doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
5153)]
5154#[cfg_attr(
5155    not(feature = "derive"),
5156    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
5157    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
5158)]
5159#[cfg_attr(
5160    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
5161    diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
5162)]
5163pub unsafe trait IntoBytes {
5164    // The `Self: Sized` bound makes it so that this function doesn't prevent
5165    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
5166    // prevent object safety, but those provide a benefit in exchange for object
5167    // safety. If at some point we remove those methods, change their type
5168    // signatures, or move them out of this trait so that `IntoBytes` is object
5169    // safe again, it's important that this function not prevent object safety.
5170    #[doc(hidden)]
5171    fn only_derive_is_allowed_to_implement_this_trait()
5172    where
5173        Self: Sized;
5174
5175    /// Gets the bytes of this value.
5176    ///
5177    /// # Examples
5178    ///
5179    /// ```
5180    /// use zerocopy::IntoBytes;
5181    /// # use zerocopy_derive::*;
5182    ///
5183    /// #[derive(IntoBytes, Immutable)]
5184    /// #[repr(C)]
5185    /// struct PacketHeader {
5186    ///     src_port: [u8; 2],
5187    ///     dst_port: [u8; 2],
5188    ///     length: [u8; 2],
5189    ///     checksum: [u8; 2],
5190    /// }
5191    ///
5192    /// let header = PacketHeader {
5193    ///     src_port: [0, 1],
5194    ///     dst_port: [2, 3],
5195    ///     length: [4, 5],
5196    ///     checksum: [6, 7],
5197    /// };
5198    ///
5199    /// let bytes = header.as_bytes();
5200    ///
5201    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5202    /// ```
5203    #[must_use = "has no side effects"]
5204    #[inline(always)]
5205    fn as_bytes(&self) -> &[u8]
5206    where
5207        Self: Immutable,
5208    {
5209        // Note that this method does not have a `Self: Sized` bound;
5210        // `size_of_val` works for unsized values too.
5211        let len = mem::size_of_val(self);
5212        let slf: *const Self = self;
5213
5214        // SAFETY:
5215        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
5216        //   many bytes because...
5217        //   - `slf` is the same pointer as `self`, and `self` is a reference
5218        //     which points to an object whose size is `len`. Thus...
5219        //     - The entire region of `len` bytes starting at `slf` is contained
5220        //       within a single allocation.
5221        //     - `slf` is non-null.
5222        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5223        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5224        //   initialized.
5225        // - Since `slf` is derived from `self`, and `self` is an immutable
5226        //   reference, the only other references to this memory region that
5227        //   could exist are other immutable references, which by `Self:
5228        //   Immutable` don't permit mutation.
5229        // - The total size of the resulting slice is no larger than
5230        //   `isize::MAX` because no allocation produced by safe code can be
5231        //   larger than `isize::MAX`.
5232        //
5233        // FIXME(#429): Add references to docs and quotes.
5234        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
5235    }
5236
5237    /// Gets the bytes of this value mutably.
5238    ///
5239    /// # Examples
5240    ///
5241    /// ```
5242    /// use zerocopy::IntoBytes;
5243    /// # use zerocopy_derive::*;
5244    ///
5245    /// # #[derive(Eq, PartialEq, Debug)]
5246    /// #[derive(FromBytes, IntoBytes, Immutable)]
5247    /// #[repr(C)]
5248    /// struct PacketHeader {
5249    ///     src_port: [u8; 2],
5250    ///     dst_port: [u8; 2],
5251    ///     length: [u8; 2],
5252    ///     checksum: [u8; 2],
5253    /// }
5254    ///
5255    /// let mut header = PacketHeader {
5256    ///     src_port: [0, 1],
5257    ///     dst_port: [2, 3],
5258    ///     length: [4, 5],
5259    ///     checksum: [6, 7],
5260    /// };
5261    ///
5262    /// let bytes = header.as_mut_bytes();
5263    ///
5264    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5265    ///
5266    /// bytes.reverse();
5267    ///
5268    /// assert_eq!(header, PacketHeader {
5269    ///     src_port: [7, 6],
5270    ///     dst_port: [5, 4],
5271    ///     length: [3, 2],
5272    ///     checksum: [1, 0],
5273    /// });
5274    /// ```
5275    #[must_use = "has no side effects"]
5276    #[inline(always)]
5277    fn as_mut_bytes(&mut self) -> &mut [u8]
5278    where
5279        Self: FromBytes,
5280    {
5281        // Note that this method does not have a `Self: Sized` bound;
5282        // `size_of_val` works for unsized values too.
5283        let len = mem::size_of_val(self);
5284        let slf: *mut Self = self;
5285
5286        // SAFETY:
5287        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
5288        //   size_of::<u8>()` many bytes because...
5289        //   - `slf` is the same pointer as `self`, and `self` is a reference
5290        //     which points to an object whose size is `len`. Thus...
5291        //     - The entire region of `len` bytes starting at `slf` is contained
5292        //       within a single allocation.
5293        //     - `slf` is non-null.
5294        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5295        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5296        //   initialized.
5297        // - `Self: FromBytes` ensures that no write to this memory region
5298        //   could result in it containing an invalid `Self`.
5299        // - Since `slf` is derived from `self`, and `self` is a mutable
5300        //   reference, no other references to this memory region can exist.
5301        // - The total size of the resulting slice is no larger than
5302        //   `isize::MAX` because no allocation produced by safe code can be
5303        //   larger than `isize::MAX`.
5304        //
5305        // FIXME(#429): Add references to docs and quotes.
5306        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
5307    }
5308
5309    /// Writes a copy of `self` to `dst`.
5310    ///
5311    /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
5312    ///
5313    /// # Examples
5314    ///
5315    /// ```
5316    /// use zerocopy::IntoBytes;
5317    /// # use zerocopy_derive::*;
5318    ///
5319    /// #[derive(IntoBytes, Immutable)]
5320    /// #[repr(C)]
5321    /// struct PacketHeader {
5322    ///     src_port: [u8; 2],
5323    ///     dst_port: [u8; 2],
5324    ///     length: [u8; 2],
5325    ///     checksum: [u8; 2],
5326    /// }
5327    ///
5328    /// let header = PacketHeader {
5329    ///     src_port: [0, 1],
5330    ///     dst_port: [2, 3],
5331    ///     length: [4, 5],
5332    ///     checksum: [6, 7],
5333    /// };
5334    ///
5335    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
5336    ///
5337    /// header.write_to(&mut bytes[..]);
5338    ///
5339    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5340    /// ```
5341    ///
5342    /// If too many or too few target bytes are provided, `write_to` returns
5343    /// `Err` and leaves the target bytes unmodified:
5344    ///
5345    /// ```
5346    /// # use zerocopy::IntoBytes;
5347    /// # let header = u128::MAX;
5348    /// let mut excessive_bytes = &mut [0u8; 128][..];
5349    ///
5350    /// let write_result = header.write_to(excessive_bytes);
5351    ///
5352    /// assert!(write_result.is_err());
5353    /// assert_eq!(excessive_bytes, [0u8; 128]);
5354    /// ```
5355    #[must_use = "callers should check the return value to see if the operation succeeded"]
5356    #[inline]
5357    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5358    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5359    where
5360        Self: Immutable,
5361    {
5362        let src = self.as_bytes();
5363        if dst.len() == src.len() {
5364            // SAFETY: Within this branch of the conditional, we have ensured
5365            // that `dst.len()` is equal to `src.len()`. Neither the size of the
5366            // source nor the size of the destination change between the above
5367            // size check and the invocation of `copy_unchecked`.
5368            unsafe { util::copy_unchecked(src, dst) }
5369            Ok(())
5370        } else {
5371            Err(SizeError::new(self))
5372        }
5373    }
5374
5375    /// Writes a copy of `self` to the prefix of `dst`.
5376    ///
5377    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
5378    /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5379    ///
5380    /// # Examples
5381    ///
5382    /// ```
5383    /// use zerocopy::IntoBytes;
5384    /// # use zerocopy_derive::*;
5385    ///
5386    /// #[derive(IntoBytes, Immutable)]
5387    /// #[repr(C)]
5388    /// struct PacketHeader {
5389    ///     src_port: [u8; 2],
5390    ///     dst_port: [u8; 2],
5391    ///     length: [u8; 2],
5392    ///     checksum: [u8; 2],
5393    /// }
5394    ///
5395    /// let header = PacketHeader {
5396    ///     src_port: [0, 1],
5397    ///     dst_port: [2, 3],
5398    ///     length: [4, 5],
5399    ///     checksum: [6, 7],
5400    /// };
5401    ///
5402    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5403    ///
5404    /// header.write_to_prefix(&mut bytes[..]);
5405    ///
5406    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
5407    /// ```
5408    ///
5409    /// If insufficient target bytes are provided, `write_to_prefix` returns
5410    /// `Err` and leaves the target bytes unmodified:
5411    ///
5412    /// ```
5413    /// # use zerocopy::IntoBytes;
5414    /// # let header = u128::MAX;
5415    /// let mut insufficient_bytes = &mut [0, 0][..];
5416    ///
5417    /// let write_result = header.write_to_prefix(insufficient_bytes);
5418    ///
5419    /// assert!(write_result.is_err());
5420    /// assert_eq!(insufficient_bytes, [0, 0]);
5421    /// ```
5422    #[must_use = "callers should check the return value to see if the operation succeeded"]
5423    #[inline]
5424    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5425    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5426    where
5427        Self: Immutable,
5428    {
5429        let src = self.as_bytes();
5430        match dst.get_mut(..src.len()) {
5431            Some(dst) => {
5432                // SAFETY: Within this branch of the `match`, we have ensured
5433                // through fallible subslicing that `dst.len()` is equal to
5434                // `src.len()`. Neither the size of the source nor the size of
5435                // the destination change between the above subslicing operation
5436                // and the invocation of `copy_unchecked`.
5437                unsafe { util::copy_unchecked(src, dst) }
5438                Ok(())
5439            }
5440            None => Err(SizeError::new(self)),
5441        }
5442    }
5443
5444    /// Writes a copy of `self` to the suffix of `dst`.
5445    ///
5446    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
5447    /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5448    ///
5449    /// # Examples
5450    ///
5451    /// ```
5452    /// use zerocopy::IntoBytes;
5453    /// # use zerocopy_derive::*;
5454    ///
5455    /// #[derive(IntoBytes, Immutable)]
5456    /// #[repr(C)]
5457    /// struct PacketHeader {
5458    ///     src_port: [u8; 2],
5459    ///     dst_port: [u8; 2],
5460    ///     length: [u8; 2],
5461    ///     checksum: [u8; 2],
5462    /// }
5463    ///
5464    /// let header = PacketHeader {
5465    ///     src_port: [0, 1],
5466    ///     dst_port: [2, 3],
5467    ///     length: [4, 5],
5468    ///     checksum: [6, 7],
5469    /// };
5470    ///
5471    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5472    ///
5473    /// header.write_to_suffix(&mut bytes[..]);
5474    ///
5475    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
5476    ///
5477    /// let mut insufficient_bytes = &mut [0, 0][..];
5478    ///
5479    /// let write_result = header.write_to_suffix(insufficient_bytes);
5480    ///
5481    /// assert!(write_result.is_err());
5482    /// assert_eq!(insufficient_bytes, [0, 0]);
5483    /// ```
5484    ///
5485    /// If insufficient target bytes are provided, `write_to_suffix` returns
5486    /// `Err` and leaves the target bytes unmodified:
5487    ///
5488    /// ```
5489    /// # use zerocopy::IntoBytes;
5490    /// # let header = u128::MAX;
5491    /// let mut insufficient_bytes = &mut [0, 0][..];
5492    ///
5493    /// let write_result = header.write_to_suffix(insufficient_bytes);
5494    ///
5495    /// assert!(write_result.is_err());
5496    /// assert_eq!(insufficient_bytes, [0, 0]);
5497    /// ```
5498    #[must_use = "callers should check the return value to see if the operation succeeded"]
5499    #[inline]
5500    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5501    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5502    where
5503        Self: Immutable,
5504    {
5505        let src = self.as_bytes();
5506        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
5507            start
5508        } else {
5509            return Err(SizeError::new(self));
5510        };
5511        let dst = if let Some(dst) = dst.get_mut(start..) {
5512            dst
5513        } else {
5514            // get_mut() should never return None here. We return a `SizeError`
5515            // rather than .unwrap() because in the event the branch is not
5516            // optimized away, returning a value is generally lighter-weight
5517            // than panicking.
5518            return Err(SizeError::new(self));
5519        };
5520        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
5521        // `dst.len()` is equal to `src.len()`. Neither the size of the source
5522        // nor the size of the destination change between the above subslicing
5523        // operation and the invocation of `copy_unchecked`.
5524        unsafe {
5525            util::copy_unchecked(src, dst);
5526        }
5527        Ok(())
5528    }
5529
5530    /// Writes a copy of `self` to an `io::Write`.
5531    ///
5532    /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
5533    /// for interfacing with operating system byte sinks (files, sockets, etc.).
5534    ///
5535    /// # Examples
5536    ///
5537    /// ```no_run
5538    /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
5539    /// use std::fs::File;
5540    /// # use zerocopy_derive::*;
5541    ///
5542    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
5543    /// #[repr(C, packed)]
5544    /// struct GrayscaleImage {
5545    ///     height: U16,
5546    ///     width: U16,
5547    ///     pixels: [U16],
5548    /// }
5549    ///
5550    /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
5551    /// let mut file = File::create("image.bin").unwrap();
5552    /// image.write_to_io(&mut file).unwrap();
5553    /// ```
5554    ///
5555    /// If the write fails, `write_to_io` returns `Err` and a partial write may
5556    /// have occurred; e.g.:
5557    ///
5558    /// ```
5559    /// # use zerocopy::IntoBytes;
5560    ///
5561    /// let src = u128::MAX;
5562    /// let mut dst = [0u8; 2];
5563    ///
5564    /// let write_result = src.write_to_io(&mut dst[..]);
5565    ///
5566    /// assert!(write_result.is_err());
5567    /// assert_eq!(dst, [255, 255]);
5568    /// ```
5569    #[cfg(feature = "std")]
5570    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
5571    #[inline(always)]
5572    fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
5573    where
5574        Self: Immutable,
5575        W: io::Write,
5576    {
5577        dst.write_all(self.as_bytes())
5578    }
5579
5580    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
5581    #[doc(hidden)]
5582    #[inline]
5583    fn as_bytes_mut(&mut self) -> &mut [u8]
5584    where
5585        Self: FromBytes,
5586    {
5587        self.as_mut_bytes()
5588    }
5589}
5590
5591/// Analyzes whether a type is [`Unaligned`].
5592///
5593/// This derive analyzes, at compile time, whether the annotated type satisfies
5594/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
5595/// sound to do so. This derive can be applied to structs, enums, and unions;
5596/// e.g.:
5597///
5598/// ```
5599/// # use zerocopy_derive::Unaligned;
5600/// #[derive(Unaligned)]
5601/// #[repr(C)]
5602/// struct MyStruct {
5603/// # /*
5604///     ...
5605/// # */
5606/// }
5607///
5608/// #[derive(Unaligned)]
5609/// #[repr(u8)]
5610/// enum MyEnum {
5611/// #   Variant0,
5612/// # /*
5613///     ...
5614/// # */
5615/// }
5616///
5617/// #[derive(Unaligned)]
5618/// #[repr(packed)]
5619/// union MyUnion {
5620/// #   variant: u8,
5621/// # /*
5622///     ...
5623/// # */
5624/// }
5625/// ```
5626///
5627/// # Analysis
5628///
5629/// *This section describes, roughly, the analysis performed by this derive to
5630/// determine whether it is sound to implement `Unaligned` for a given type.
5631/// Unless you are modifying the implementation of this derive, or attempting to
5632/// manually implement `Unaligned` for a type yourself, you don't need to read
5633/// this section.*
5634///
5635/// If a type has the following properties, then this derive can implement
5636/// `Unaligned` for that type:
5637///
5638/// - If the type is a struct or union:
5639///   - If `repr(align(N))` is provided, `N` must equal 1.
5640///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
5641///     [`Unaligned`].
5642///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
5643///     `repr(packed)` or `repr(packed(1))`.
5644/// - If the type is an enum:
5645///   - If `repr(align(N))` is provided, `N` must equal 1.
5646///   - It must be a field-less enum (meaning that all variants have no fields).
5647///   - It must be `repr(i8)` or `repr(u8)`.
5648///
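/// For example, under these rules a `#[repr(packed)]` struct may contain
/// fields whose types are not themselves `Unaligned`, while a struct that is
/// only `#[repr(C)]` must contain exclusively `Unaligned` fields; a brief
/// illustrative sketch (not an exhaustive treatment of the analysis):
///
/// ```
/// # use zerocopy_derive::Unaligned;
/// // `u32` is not `Unaligned`, but `repr(packed)` lowers the struct's
/// // alignment (and that of its fields within the struct) to 1.
/// #[derive(Unaligned)]
/// #[repr(packed)]
/// struct Packed {
///     x: u32,
/// }
///
/// // Without `packed`, a `repr(C)` struct is `Unaligned` only if every field
/// // is `Unaligned`.
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct AllFieldsUnaligned {
///     a: u8,
///     b: [u8; 4],
/// }
/// ```
///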
5649/// [safety conditions]: trait@Unaligned#safety
5650#[cfg(any(feature = "derive", test))]
5651#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5652pub use zerocopy_derive::Unaligned;
5653
5654/// Types with no alignment requirement.
5655///
5656/// If `T: Unaligned`, then `align_of::<T>() == 1`.
5657///
5658/// # Implementation
5659///
5660/// **Do not implement this trait yourself!** Instead, use
5661/// [`#[derive(Unaligned)]`][derive]; e.g.:
5662///
5663/// ```
5664/// # use zerocopy_derive::Unaligned;
5665/// #[derive(Unaligned)]
5666/// #[repr(C)]
5667/// struct MyStruct {
5668/// # /*
5669///     ...
5670/// # */
5671/// }
5672///
5673/// #[derive(Unaligned)]
5674/// #[repr(u8)]
5675/// enum MyEnum {
5676/// #   Variant0,
5677/// # /*
5678///     ...
5679/// # */
5680/// }
5681///
5682/// #[derive(Unaligned)]
5683/// #[repr(packed)]
5684/// union MyUnion {
5685/// #   variant: u8,
5686/// # /*
5687///     ...
5688/// # */
5689/// }
5690/// ```
5691///
5692/// This derive performs a sophisticated, compile-time safety analysis to
5693/// determine whether a type is `Unaligned`.
5694///
5695/// # Safety
5696///
5697/// *This section describes what is required in order for `T: Unaligned`, and
5698/// what unsafe code may assume of such types. If you don't plan on implementing
5699/// `Unaligned` manually, and you don't plan on writing unsafe code that
5700/// operates on `Unaligned` types, then you don't need to read this section.*
5701///
5702/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
5703/// reference to `T` at any memory location regardless of alignment. Marking a
5704/// type as `Unaligned` when it violates this contract may cause undefined
5705/// behavior.
5706///
5707/// `#[derive(Unaligned)]` only permits [types which satisfy these
5708/// requirements][derive-analysis].
5709///
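/// One practical consequence of this guarantee is that an `Unaligned` type can
/// be parsed by reference from any offset within a byte buffer, with no
/// possibility of an alignment error. A minimal sketch (the `Header` type
/// below is purely illustrative):
///
/// ```
/// use zerocopy::FromBytes;
/// # use zerocopy_derive::*;
///
/// #[derive(FromBytes, KnownLayout, Immutable, Unaligned)]
/// #[repr(C)]
/// struct Header {
///     src_port: [u8; 2],
///     dst_port: [u8; 2],
/// }
///
/// let bytes = [0u8, 1, 2, 3, 4];
/// // Because `Header: Unaligned`, neither cast can fail due to misalignment,
/// // regardless of where `bytes` happens to live in memory.
/// let first = Header::ref_from_bytes(&bytes[..4]).unwrap();
/// let second = Header::ref_from_bytes(&bytes[1..]).unwrap();
/// assert_eq!(first.src_port, [0, 1]);
/// assert_eq!(second.src_port, [1, 2]);
/// ```
///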
5710#[cfg_attr(
5711    feature = "derive",
5712    doc = "[derive]: zerocopy_derive::Unaligned",
5713    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
5714)]
5715#[cfg_attr(
5716    not(feature = "derive"),
5717    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
5718    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
5719)]
5720#[cfg_attr(
5721    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
5722    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
5723)]
5724pub unsafe trait Unaligned {
5725    // The `Self: Sized` bound makes it so that `Unaligned` is still object
5726    // safe.
5727    #[doc(hidden)]
5728    fn only_derive_is_allowed_to_implement_this_trait()
5729    where
5730        Self: Sized;
5731}
5732
5733/// Derives optimized [`PartialEq`] and [`Eq`] implementations.
5734///
5735/// This derive can be applied to structs and enums implementing both
5736/// [`Immutable`] and [`IntoBytes`]; e.g.:
5737///
5738/// ```
5739/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
5740/// #[derive(ByteEq, Immutable, IntoBytes)]
5741/// #[repr(C)]
5742/// struct MyStruct {
5743/// # /*
5744///     ...
5745/// # */
5746/// }
5747///
5748/// #[derive(ByteEq, Immutable, IntoBytes)]
5749/// #[repr(u8)]
5750/// enum MyEnum {
5751/// #   Variant,
5752/// # /*
5753///     ...
5754/// # */
5755/// }
5756/// ```
5757///
5758/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes
5759/// equality by individually comparing each field. Instead, the implementation
5760/// of [`PartialEq::eq`] emitted by `derive(ByteEq)` converts the entirety of
5761/// `self` and `other` to byte slices and compares those slices for equality.
5762/// This may have performance advantages.
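///
/// Because equality is computed over raw bytes, two values compare equal
/// precisely when their byte representations are identical. A minimal sketch
/// (the `Point` type is purely illustrative):
///
/// ```
/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
/// #[derive(ByteEq, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct Point {
///     x: u32,
///     y: u32,
/// }
///
/// assert!(Point { x: 1, y: 2 } == Point { x: 1, y: 2 });
/// assert!(Point { x: 1, y: 2 } != Point { x: 2, y: 1 });
/// ```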
5763#[cfg(any(feature = "derive", test))]
5764#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5765pub use zerocopy_derive::ByteEq;
5766/// Derives an optimized [`Hash`] implementation.
5767///
5768/// This derive can be applied to structs and enums implementing both
5769/// [`Immutable`] and [`IntoBytes`]; e.g.:
5770///
5771/// ```
5772/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
5773/// #[derive(ByteHash, Immutable, IntoBytes)]
5774/// #[repr(C)]
5775/// struct MyStruct {
5776/// # /*
5777///     ...
5778/// # */
5779/// }
5780///
5781/// #[derive(ByteHash, Immutable, IntoBytes)]
5782/// #[repr(u8)]
5783/// enum MyEnum {
5784/// #   Variant,
5785/// # /*
5786///     ...
5787/// # */
5788/// }
5789/// ```
5790///
5791/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by
5792/// individually hashing each field and combining the results. Instead, the
5793/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by
5794/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hash
5795/// it in a single call to [`Hasher::write()`]. This may have performance
5796/// advantages.
5797///
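/// For example, hashing a value with a standard-library hasher feeds its whole
/// byte representation to the hasher at once. A minimal sketch (the `Point`
/// type is purely illustrative):
///
/// ```
/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
/// use core::hash::{Hash, Hasher};
/// use std::collections::hash_map::DefaultHasher;
///
/// #[derive(ByteHash, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct Point {
///     x: u32,
///     y: u32,
/// }
///
/// // All 8 bytes of `Point` are passed to the hasher in a single
/// // `Hasher::write` call.
/// let mut hasher = DefaultHasher::new();
/// let point = Point { x: 1, y: 2 };
/// point.hash(&mut hasher);
/// let _digest = hasher.finish();
/// ```
///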
5798/// [`Hash`]: core::hash::Hash
5799/// [`Hash::hash()`]: core::hash::Hash::hash()
5800/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
/// [`Hasher::write()`]: core::hash::Hasher::write()
5801#[cfg(any(feature = "derive", test))]
5802#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5803pub use zerocopy_derive::ByteHash;
5804/// Implements [`SplitAt`].
5805///
5806/// This derive can be applied to structs with a trailing slice field; e.g.:
5807///
5808/// ```
5809/// # use zerocopy_derive::{KnownLayout, SplitAt};
5810/// #[derive(SplitAt, KnownLayout)]
5811/// #[repr(C)]
5812/// struct MyStruct {
5813/// # /*
5814///     ...
5815/// # */
/// #   trailing: [u8],
5816/// }
5817/// ```
5818#[cfg(any(feature = "derive", test))]
5819#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5820pub use zerocopy_derive::SplitAt;
5821
5822#[cfg(feature = "alloc")]
5823#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
5824#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
5825mod alloc_support {
5826    use super::*;
5827
5828    /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
5829    /// vector. The new items are initialized with zeros.
5830    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
5831    #[doc(hidden)]
5832    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
5833    #[inline(always)]
5834    pub fn extend_vec_zeroed<T: FromZeros>(
5835        v: &mut Vec<T>,
5836        additional: usize,
5837    ) -> Result<(), AllocError> {
5838        <T as FromZeros>::extend_vec_zeroed(v, additional)
5839    }
5840
5841    /// Inserts `additional` new items into `Vec<T>` at `position`. The new
5842    /// items are initialized with zeros.
5843    ///
5844    /// # Panics
5845    ///
5846    /// Panics if `position > v.len()`.
5847    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
5848    #[doc(hidden)]
5849    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
5850    #[inline(always)]
5851    pub fn insert_vec_zeroed<T: FromZeros>(
5852        v: &mut Vec<T>,
5853        position: usize,
5854        additional: usize,
5855    ) -> Result<(), AllocError> {
5856        <T as FromZeros>::insert_vec_zeroed(v, position, additional)
5857    }
5858}
5859
5860#[cfg(feature = "alloc")]
5861#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
5862#[doc(hidden)]
5863pub use alloc_support::*;
5864
5865#[cfg(test)]
5866#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
5867mod tests {
5868    use static_assertions::assert_impl_all;
5869
5870    use super::*;
5871    use crate::util::testutil::*;
5872
5873    // An unsized type.
5874    //
5875    // This is used to test the custom derives of our traits. The `[u8]` type
5876    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
5877    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
5878    #[repr(transparent)]
5879    struct Unsized([u8]);
5880
5881    impl Unsized {
5882        fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
5883            // SAFETY: This is *probably* sound: since the layouts of `[u8]` and
5884            // `Unsized` are the same, the layouts of `&mut [u8]` and `&mut Unsized`
5885            // are the same too. [1] Even if it turns out that this isn't actually
5886            // guaranteed by the language spec, we can just change this since
5887            // it's in test code.
5888            //
5889            // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
5890            unsafe { mem::transmute(slc) }
5891        }
5892    }
5893
5894    #[test]
5895    fn test_known_layout() {
5896        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
5897        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
5898        // of `$ty`.
5899        macro_rules! test {
5900            ($ty:ty, $expect:expr) => {
5901                let expect = $expect;
5902                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
5903                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
5904                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
5905            };
5906        }
5907
5908        let layout =
5909            |offset, align, trailing_slice_elem_size, statically_shallow_unpadded| DstLayout {
5910                align: NonZeroUsize::new(align).unwrap(),
5911                size_info: match trailing_slice_elem_size {
5912                    None => SizeInfo::Sized { size: offset },
5913                    Some(elem_size) => {
5914                        SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size })
5915                    }
5916                },
5917                statically_shallow_unpadded,
5918            };
5919
5920        test!((), layout(0, 1, None, false));
5921        test!(u8, layout(1, 1, None, false));
5922        // Use `align_of` because `u64` alignment may be smaller than 8 on some
5923        // platforms.
5924        test!(u64, layout(8, mem::align_of::<u64>(), None, false));
5925        test!(AU64, layout(8, 8, None, false));
5926
5927        test!(Option<&'static ()>, usize::LAYOUT);
5928
5929        test!([()], layout(0, 1, Some(0), true));
5930        test!([u8], layout(0, 1, Some(1), true));
5931        test!(str, layout(0, 1, Some(1), true));
5932    }
5933
5934    #[cfg(feature = "derive")]
5935    #[test]
5936    fn test_known_layout_derive() {
5937        // In this and other files (`late_compile_pass.rs`,
5938        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
5939        // modes of `derive(KnownLayout)` for the following combination of
5940        // properties:
5941        //
5942        // +------------+--------------------------------------+-----------+
5943        // |            |      trailing field properties       |           |
5944        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5945        // |------------+----------+----------------+----------+-----------|
5946        // |          N |        N |              N |        N |      KL00 |
5947        // |          N |        N |              N |        Y |      KL01 |
5948        // |          N |        N |              Y |        N |      KL02 |
5949        // |          N |        N |              Y |        Y |      KL03 |
5950        // |          N |        Y |              N |        N |      KL04 |
5951        // |          N |        Y |              N |        Y |      KL05 |
5952        // |          N |        Y |              Y |        N |      KL06 |
5953        // |          N |        Y |              Y |        Y |      KL07 |
5954        // |          Y |        N |              N |        N |      KL08 |
5955        // |          Y |        N |              N |        Y |      KL09 |
5956        // |          Y |        N |              Y |        N |      KL10 |
5957        // |          Y |        N |              Y |        Y |      KL11 |
5958        // |          Y |        Y |              N |        N |      KL12 |
5959        // |          Y |        Y |              N |        Y |      KL13 |
5960        // |          Y |        Y |              Y |        N |      KL14 |
5961        // |          Y |        Y |              Y |        Y |      KL15 |
5962        // +------------+----------+----------------+----------+-----------+
5963
5964        struct NotKnownLayout<T = ()> {
5965            _t: T,
5966        }
5967
5968        #[derive(KnownLayout)]
5969        #[repr(C)]
5970        struct AlignSize<const ALIGN: usize, const SIZE: usize>
5971        where
5972            elain::Align<ALIGN>: elain::Alignment,
5973        {
5974            _align: elain::Align<ALIGN>,
5975            size: [u8; SIZE],
5976        }
5977
5978        type AU16 = AlignSize<2, 2>;
5979        type AU32 = AlignSize<4, 4>;
5980
5981        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
5982
5983        let sized_layout = |align, size| DstLayout {
5984            align: NonZeroUsize::new(align).unwrap(),
5985            size_info: SizeInfo::Sized { size },
5986            statically_shallow_unpadded: false,
5987        };
5988
5989        let unsized_layout = |align, elem_size, offset, statically_shallow_unpadded| DstLayout {
5990            align: NonZeroUsize::new(align).unwrap(),
5991            size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5992            statically_shallow_unpadded,
5993        };
5994
5995        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5996        // |          N |        N |              N |        Y |      KL01 |
5997        #[allow(dead_code)]
5998        #[derive(KnownLayout)]
5999        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6000
6001        let expected = DstLayout::for_type::<KL01>();
6002
6003        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
6004        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
6005
6006        // ...with `align(N)`:
6007        #[allow(dead_code)]
6008        #[derive(KnownLayout)]
6009        #[repr(align(64))]
6010        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6011
6012        let expected = DstLayout::for_type::<KL01Align>();
6013
6014        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
6015        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6016
6017        // ...with `packed`:
6018        #[allow(dead_code)]
6019        #[derive(KnownLayout)]
6020        #[repr(packed)]
6021        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6022
6023        let expected = DstLayout::for_type::<KL01Packed>();
6024
6025        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
6026        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
6027
6028        // ...with `packed(N)`:
6029        #[allow(dead_code)]
6030        #[derive(KnownLayout)]
6031        #[repr(packed(2))]
6032        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6033
6034        assert_impl_all!(KL01PackedN: KnownLayout);
6035
6036        let expected = DstLayout::for_type::<KL01PackedN>();
6037
6038        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
6039        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6040
6041        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6042        // |          N |        N |              Y |        Y |      KL03 |
6043        #[allow(dead_code)]
6044        #[derive(KnownLayout)]
6045        struct KL03(NotKnownLayout, u8);
6046
6047        let expected = DstLayout::for_type::<KL03>();
6048
6049        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
6050        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
6051
6052        // ... with `align(N)`
6053        #[allow(dead_code)]
6054        #[derive(KnownLayout)]
6055        #[repr(align(64))]
6056        struct KL03Align(NotKnownLayout<AU32>, u8);
6057
6058        let expected = DstLayout::for_type::<KL03Align>();
6059
6060        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
6061        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6062
6063        // ... with `packed`:
6064        #[allow(dead_code)]
6065        #[derive(KnownLayout)]
6066        #[repr(packed)]
6067        struct KL03Packed(NotKnownLayout<AU32>, u8);
6068
6069        let expected = DstLayout::for_type::<KL03Packed>();
6070
6071        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
6072        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
6073
6074        // ... with `packed(N)`
6075        #[allow(dead_code)]
6076        #[derive(KnownLayout)]
6077        #[repr(packed(2))]
6078        struct KL03PackedN(NotKnownLayout<AU32>, u8);
6079
6080        assert_impl_all!(KL03PackedN: KnownLayout);
6081
6082        let expected = DstLayout::for_type::<KL03PackedN>();
6083
6084        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
6085        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6086
6087        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6088        // |          N |        Y |              N |        Y |      KL05 |
6089        #[allow(dead_code)]
6090        #[derive(KnownLayout)]
6091        struct KL05<T>(u8, T);
6092
6093        fn _test_kl05<T>(t: T) -> impl KnownLayout {
6094            KL05(0u8, t)
6095        }
6096
6097        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6098        // |          N |        Y |              Y |        Y |      KL07 |
6099        #[allow(dead_code)]
6100        #[derive(KnownLayout)]
6101        struct KL07<T: KnownLayout>(u8, T);
6102
6103        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
6104            let _ = KL07(0u8, t);
6105        }
6106
6107        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6108        // |          Y |        N |              Y |        N |      KL10 |
6109        #[allow(dead_code)]
6110        #[derive(KnownLayout)]
6111        #[repr(C)]
6112        struct KL10(NotKnownLayout<AU32>, [u8]);
6113
6114        let expected = DstLayout::new_zst(None)
6115            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6116            .extend(<[u8] as KnownLayout>::LAYOUT, None)
6117            .pad_to_align();
6118
6119        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
6120        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4, false));
6121
6122        // ...with `align(N)`:
6123        #[allow(dead_code)]
6124        #[derive(KnownLayout)]
6125        #[repr(C, align(64))]
6126        struct KL10Align(NotKnownLayout<AU32>, [u8]);
6127
6128        let repr_align = NonZeroUsize::new(64);
6129
6130        let expected = DstLayout::new_zst(repr_align)
6131            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6132            .extend(<[u8] as KnownLayout>::LAYOUT, None)
6133            .pad_to_align();
6134
6135        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
6136        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4, false));
6137
6138        // ...with `packed`:
6139        #[allow(dead_code)]
6140        #[derive(KnownLayout)]
6141        #[repr(C, packed)]
6142        struct KL10Packed(NotKnownLayout<AU32>, [u8]);
6143
6144        let repr_packed = NonZeroUsize::new(1);
6145
6146        let expected = DstLayout::new_zst(None)
6147            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6148            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6149            .pad_to_align();
6150
6151        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
6152        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4, false));
6153
6154        // ...with `packed(N)`:
6155        #[allow(dead_code)]
6156        #[derive(KnownLayout)]
6157        #[repr(C, packed(2))]
6158        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
6159
6160        let repr_packed = NonZeroUsize::new(2);
6161
6162        let expected = DstLayout::new_zst(None)
6163            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6164            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6165            .pad_to_align();
6166
6167        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
6168        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));
6169
6170        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6171        // |          Y |        N |              Y |        Y |      KL11 |
6172        #[allow(dead_code)]
6173        #[derive(KnownLayout)]
6174        #[repr(C)]
6175        struct KL11(NotKnownLayout<AU64>, u8);
6176
6177        let expected = DstLayout::new_zst(None)
6178            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6179            .extend(<u8 as KnownLayout>::LAYOUT, None)
6180            .pad_to_align();
6181
6182        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
6183        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
6184
6185        // ...with `align(N)`:
6186        #[allow(dead_code)]
6187        #[derive(KnownLayout)]
6188        #[repr(C, align(64))]
6189        struct KL11Align(NotKnownLayout<AU64>, u8);
6190
6191        let repr_align = NonZeroUsize::new(64);
6192
6193        let expected = DstLayout::new_zst(repr_align)
6194            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6195            .extend(<u8 as KnownLayout>::LAYOUT, None)
6196            .pad_to_align();
6197
6198        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
6199        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6200
6201        // ...with `packed`:
6202        #[allow(dead_code)]
6203        #[derive(KnownLayout)]
6204        #[repr(C, packed)]
6205        struct KL11Packed(NotKnownLayout<AU64>, u8);
6206
6207        let repr_packed = NonZeroUsize::new(1);
6208
6209        let expected = DstLayout::new_zst(None)
6210            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6211            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6212            .pad_to_align();
6213
6214        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
6215        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
6216
6217        // ...with `packed(N)`:
6218        #[allow(dead_code)]
6219        #[derive(KnownLayout)]
6220        #[repr(C, packed(2))]
6221        struct KL11PackedN(NotKnownLayout<AU64>, u8);
6222
6223        let repr_packed = NonZeroUsize::new(2);
6224
6225        let expected = DstLayout::new_zst(None)
6226            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6227            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6228            .pad_to_align();
6229
6230        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
6231        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
6232
6233        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6234        // |          Y |        Y |              Y |        N |      KL14 |
6235        #[allow(dead_code)]
6236        #[derive(KnownLayout)]
6237        #[repr(C)]
6238        struct KL14<T: ?Sized + KnownLayout>(u8, T);
6239
6240        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
6241            _assert_kl(kl)
6242        }
6243
6244        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6245        // |          Y |        Y |              Y |        Y |      KL15 |
6246        #[allow(dead_code)]
6247        #[derive(KnownLayout)]
6248        #[repr(C)]
6249        struct KL15<T: KnownLayout>(u8, T);
6250
6251        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
6252            let _ = KL15(0u8, t);
6253        }
6254
6255        // Test a variety of combinations of field types:
6256        //  - ()
6257        //  - u8
6258        //  - AU16
6259        //  - [()]
6260        //  - [u8]
6261        //  - [AU16]
6262
6263        #[allow(clippy::upper_case_acronyms, dead_code)]
6264        #[derive(KnownLayout)]
6265        #[repr(C)]
6266        struct KLTU<T, U: ?Sized>(T, U);
6267
6268        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
6269
6270        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6271
6272        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6273
6274        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0, false));
6275
6276        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, false));
6277
6278        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0, false));
6279
6280        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6281
6282        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
6283
6284        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6285
6286        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1, false));
6287
6288        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));
6289
6290        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));
6291
6292        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6293
6294        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6295
6296        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6297
6298        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2, false));
6299
6300        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2, false));
6301
6302        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));
6303
6304        // Test a variety of field counts.
6305
6306        #[derive(KnownLayout)]
6307        #[repr(C)]
6308        struct KLF0;
6309
6310        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
6311
6312        #[derive(KnownLayout)]
6313        #[repr(C)]
6314        struct KLF1([u8]);
6315
6316        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, true));
6317
6318        #[derive(KnownLayout)]
6319        #[repr(C)]
6320        struct KLF2(NotKnownLayout<u8>, [u8]);
6321
6322        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));
6323
6324        #[derive(KnownLayout)]
6325        #[repr(C)]
6326        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
6327
6328        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));
6329
6330        #[derive(KnownLayout)]
6331        #[repr(C)]
6332        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
6333
6334        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8, false));
6335    }
6336
6337    #[test]
6338    fn test_object_safety() {
6339        fn _takes_immutable(_: &dyn Immutable) {}
6340        fn _takes_unaligned(_: &dyn Unaligned) {}
6341    }
6342
6343    #[test]
6344    fn test_from_zeros_only() {
6345        // Test types that implement `FromZeros` but not `FromBytes`.
6346
6347        assert!(!bool::new_zeroed());
6348        assert_eq!(char::new_zeroed(), '\0');
6349
6350        #[cfg(feature = "alloc")]
6351        {
6352            assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6353            assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6354
6355            assert_eq!(
6356                <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6357                [false, false, false]
6358            );
6359            assert_eq!(
6360                <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6361                ['\0', '\0', '\0']
6362            );
6363
6364            assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6365            assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6366        }
6367
6368        let mut string = "hello".to_string();
6369        let s: &mut str = string.as_mut();
6370        assert_eq!(s, "hello");
6371        s.zero();
6372        assert_eq!(s, "\0\0\0\0\0");
6373    }
6374
6375    #[test]
6376    fn test_zst_count_preserved() {
6377        // Test that, when an explicit count is provided for a type with a
6378        // ZST trailing slice element, that count is preserved. This is
6379        // important since, for such types, all element counts result in objects
6380        // of the same size, and so the correct behavior is ambiguous. However,
6381        // preserving the count as requested by the user is the behavior that we
6382        // document publicly.
6383
6384        // FromZeros methods
6385        #[cfg(feature = "alloc")]
6386        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
6387        #[cfg(feature = "alloc")]
6388        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);
6389
6390        // FromBytes methods
6391        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
6392        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
6393        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
6394        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
6395        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
6396        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
6397    }
6398
6399    #[test]
6400    fn test_read_write() {
6401        const VAL: u64 = 0x12345678;
6402        #[cfg(target_endian = "big")]
6403        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6404        #[cfg(target_endian = "little")]
6405        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6406        const ZEROS: [u8; 8] = [0u8; 8];
6407
6408        // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
6409
6410        assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
6411        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6412        // zeros.
6413        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6414        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
6415        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
6416        // The first 8 bytes are all zeros and the second 8 bytes are from
6417        // `VAL_BYTES`
6418        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6419        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
6420        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));
6421
6422        // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.
6423
6424        let mut bytes = [0u8; 8];
6425        assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
6426        assert_eq!(bytes, VAL_BYTES);
6427        let mut bytes = [0u8; 16];
6428        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
6429        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6430        assert_eq!(bytes, want);
6431        let mut bytes = [0u8; 16];
6432        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
6433        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6434        assert_eq!(bytes, want);
6435    }
6436
6437    #[test]
6438    #[cfg(feature = "std")]
6439    fn test_read_io_with_padding_soundness() {
6440        // This test is designed to exhibit potential UB in
6441        // `FromBytes::read_from_io` (see #2319, #2320).
6442
6443        // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
6444        // will have inter-field padding between `x` and `y`.
6445        #[derive(FromBytes)]
6446        #[repr(C)]
6447        struct WithPadding {
6448            x: u8,
6449            y: u16,
6450        }
6451        struct ReadsInRead;
6452        impl std::io::Read for ReadsInRead {
6453            fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
6454                // This body branches on every byte of `buf`, ensuring that it
6455                // exhibits UB if any byte of `buf` is uninitialized.
6456                if buf.iter().all(|&x| x == 0) {
6457                    Ok(buf.len())
6458                } else {
6459                    buf.iter_mut().for_each(|x| *x = 0);
6460                    Ok(buf.len())
6461                }
6462            }
6463        }
6464        assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
6465    }
6466
6467    #[test]
6468    #[cfg(feature = "std")]
6469    fn test_read_write_io() {
6470        let mut long_buffer = [0, 0, 0, 0];
6471        assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
6472        assert_eq!(long_buffer, [255, 255, 0, 0]);
6473        assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
6474
6475        let mut short_buffer = [0, 0];
6476        assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
6477        assert_eq!(short_buffer, [255, 255]);
6478        assert!(u32::read_from_io(&short_buffer[..]).is_err());
6479    }
6480
6481    #[test]
6482    fn test_try_from_bytes_try_read_from() {
6483        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
6484        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));
6485
6486        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
6487        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));
6488
6489        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
6490        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));
6491
6492        // If we don't pass enough bytes, it fails.
6493        assert!(matches!(
6494            <u8 as TryFromBytes>::try_read_from_bytes(&[]),
6495            Err(TryReadError::Size(_))
6496        ));
6497        assert!(matches!(
6498            <u8 as TryFromBytes>::try_read_from_prefix(&[]),
6499            Err(TryReadError::Size(_))
6500        ));
6501        assert!(matches!(
6502            <u8 as TryFromBytes>::try_read_from_suffix(&[]),
6503            Err(TryReadError::Size(_))
6504        ));
6505
6506        // If we pass too many bytes, it fails.
6507        assert!(matches!(
6508            <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
6509            Err(TryReadError::Size(_))
6510        ));
6511
6512        // If we pass an invalid value, it fails.
6513        assert!(matches!(
6514            <bool as TryFromBytes>::try_read_from_bytes(&[2]),
6515            Err(TryReadError::Validity(_))
6516        ));
6517        assert!(matches!(
6518            <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
6519            Err(TryReadError::Validity(_))
6520        ));
6521        assert!(matches!(
6522            <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
6523            Err(TryReadError::Validity(_))
6524        ));
6525
6526        // Reading from a misaligned buffer should still succeed. Since `AU64`'s
6527        // alignment is 8, and since we read from two adjacent addresses one
6528        // byte apart, it is guaranteed that at least one of them (though
6529        // possibly both) will be misaligned.
6530        let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
6531        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
6532        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));
6533
6534        assert_eq!(
6535            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
6536            Ok((AU64(0), &[][..]))
6537        );
6538        assert_eq!(
6539            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
6540            Ok((AU64(0), &[][..]))
6541        );
6542
6543        assert_eq!(
6544            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
6545            Ok((&[][..], AU64(0)))
6546        );
6547        assert_eq!(
6548            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
6549            Ok((&[][..], AU64(0)))
6550        );
6551    }
6552
6553    #[test]
6554    fn test_ref_from_mut_from_bytes() {
6555        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
6556        // success cases. Exhaustive coverage for these methods is covered by
6557        // the `Ref` tests above, which these helper methods defer to.
6558
6559        let mut buf =
6560            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
6561
6562        assert_eq!(
6563            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
6564            [8, 9, 10, 11, 12, 13, 14, 15]
6565        );
6566        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
6567        suffix.0 = 0x0101010101010101;
6568        // The `[u8; 9]` is deliberately not half the size of the full buffer, which
6569        // would catch `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
6570        assert_eq!(
6571            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
6572            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
6573        );
6574        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
6575        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
6576        suffix.0 = 0x0202020202020202;
6577        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
6578        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
6579        suffix[0] = 42;
6580        assert_eq!(
6581            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
6582            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
6583        );
6584        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
6585        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
6586    }
6587
6588    #[test]
6589    fn test_ref_from_mut_from_bytes_error() {
6590        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
6591        // error cases.
6592
6593        // Fail because the buffer is too large.
6594        let mut buf = Align::<[u8; 16], AU64>::default();
6595        // `buf.t` should be aligned to 8, so only the length check should fail.
6596        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6597        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6598        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6599        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6600
6601        // Fail because the buffer is too small.
6602        let mut buf = Align::<[u8; 4], AU64>::default();
6603        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6604        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6605        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6606        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6607        assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
6608        assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
6609        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6610        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6611        assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
6612        assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
6613        assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
6614        assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
6615
6616        // Fail because the alignment is insufficient.
6617        let mut buf = Align::<[u8; 13], AU64>::default();
6618        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6619        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6620        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6621        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6622        assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
6623        assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
6624        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6625        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6626    }
6627
6628    #[test]
6629    fn test_to_methods() {
6630        /// Run a series of tests by calling `IntoBytes` methods on `t`.
6631        ///
6632        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
6633        /// before `t` has been modified. `post_mutation` is the expected
6634        /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
6635        /// has had its bits flipped (by applying `^= 0xFF`).
6636        ///
6637        /// `N` is the size of `t` in bytes.
6638        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
6639            t: &mut T,
6640            bytes: &[u8],
6641            post_mutation: &T,
6642        ) {
6643            // Test that we can access the underlying bytes, and that we get the
6644            // right bytes and the right number of bytes.
6645            assert_eq!(t.as_bytes(), bytes);
6646
6647            // Test that changes to the underlying byte slices are reflected in
6648            // the original object.
6649            t.as_mut_bytes()[0] ^= 0xFF;
6650            assert_eq!(t, post_mutation);
6651            t.as_mut_bytes()[0] ^= 0xFF;
6652
6653            // `write_to` rejects slices that are too small or too large.
6654            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
6655            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());
6656
6657            // `write_to` works as expected.
6658            let mut bytes = [0; N];
6659            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
6660            assert_eq!(bytes, t.as_bytes());
6661
6662            // `write_to_prefix` rejects slices that are too small.
6663            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());
6664
6665            // `write_to_prefix` works with exact-sized slices.
6666            let mut bytes = [0; N];
6667            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
6668            assert_eq!(bytes, t.as_bytes());
6669
6670            // `write_to_prefix` works with too-large slices, and any bytes past
6671            // the prefix aren't modified.
6672            let mut too_many_bytes = vec![0; N + 1];
6673            too_many_bytes[N] = 123;
6674            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
6675            assert_eq!(&too_many_bytes[..N], t.as_bytes());
6676            assert_eq!(too_many_bytes[N], 123);
6677
6678            // `write_to_suffix` rejects slices that are too small.
6679            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());
6680
6681            // `write_to_suffix` works with exact-sized slices.
6682            let mut bytes = [0; N];
6683            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
6684            assert_eq!(bytes, t.as_bytes());
6685
6686            // `write_to_suffix` works with too-large slices, and any bytes
6687            // before the suffix aren't modified.
6688            let mut too_many_bytes = vec![0; N + 1];
6689            too_many_bytes[0] = 123;
6690            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
6691            assert_eq!(&too_many_bytes[1..], t.as_bytes());
6692            assert_eq!(too_many_bytes[0], 123);
6693        }
6694
6695        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
6696        #[repr(C)]
6697        struct Foo {
6698            a: u32,
6699            b: Wrapping<u32>,
6700            c: Option<NonZeroU32>,
6701        }
6702
6703        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
6704            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
6705        } else {
6706            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
6707        };
6708        let post_mutation_expected_a =
6709            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
6710        test::<_, 12>(
6711            &mut Foo { a: 1, b: Wrapping(2), c: None },
6712            expected_bytes.as_bytes(),
6713            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
6714        );
6715        test::<_, 3>(
6716            Unsized::from_mut_slice(&mut [1, 2, 3]),
6717            &[1, 2, 3],
6718            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
6719        );
6720    }
6721
6722    #[test]
6723    fn test_array() {
6724        #[derive(FromBytes, IntoBytes, Immutable)]
6725        #[repr(C)]
6726        struct Foo {
6727            a: [u16; 33],
6728        }
6729
6730        let foo = Foo { a: [0xFFFF; 33] };
6731        let expected = [0xFFu8; 66];
6732        assert_eq!(foo.as_bytes(), &expected[..]);
6733    }
6734
6735    #[test]
6736    fn test_new_zeroed() {
6737        assert!(!bool::new_zeroed());
6738        assert_eq!(u64::new_zeroed(), 0);
6739        // This test exists in order to exercise unsafe code, especially when
6740        // running under Miri.
6741        #[allow(clippy::unit_cmp)]
6742        {
6743            assert_eq!(<()>::new_zeroed(), ());
6744        }
6745    }
6746
6747    #[test]
6748    fn test_transparent_packed_generic_struct() {
6749        #[derive(IntoBytes, FromBytes, Unaligned)]
6750        #[repr(transparent)]
6751        #[allow(dead_code)] // We never construct this type
6752        struct Foo<T> {
6753            _t: T,
6754            _phantom: PhantomData<()>,
6755        }
6756
6757        assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
6758        assert_impl_all!(Foo<u8>: Unaligned);
6759
6760        #[derive(IntoBytes, FromBytes, Unaligned)]
6761        #[repr(C, packed)]
6762        #[allow(dead_code)] // We never construct this type
6763        struct Bar<T, U> {
6764            _t: T,
6765            _u: U,
6766        }
6767
6768        assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
6769    }
6770
6771    #[cfg(feature = "alloc")]
6772    mod alloc {
6773        use super::*;
6774
6775        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6776        #[test]
6777        fn test_extend_vec_zeroed() {
6778            // Test extending when there is an existing allocation.
6779            let mut v = vec![100u16, 200, 300];
6780            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6781            assert_eq!(v.len(), 6);
6782            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
6783            drop(v);
6784
6785            // Test extending when there is no existing allocation.
6786            let mut v: Vec<u64> = Vec::new();
6787            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6788            assert_eq!(v.len(), 3);
6789            assert_eq!(&*v, &[0, 0, 0]);
6790            drop(v);
6791        }
6792
6793        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6794        #[test]
6795        fn test_extend_vec_zeroed_zst() {
6796            // Test extending when there is an existing (fake) allocation.
6797            let mut v = vec![(), (), ()];
6798            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6799            assert_eq!(v.len(), 6);
6800            assert_eq!(&*v, &[(), (), (), (), (), ()]);
6801            drop(v);
6802
6803            // Test extending when there is no existing (fake) allocation.
6804            let mut v: Vec<()> = Vec::new();
6805            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6806            assert_eq!(&*v, &[(), (), ()]);
6807            drop(v);
6808        }
6809
6810        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6811        #[test]
6812        fn test_insert_vec_zeroed() {
6813            // Insert at start (no existing allocation).
6814            let mut v: Vec<u64> = Vec::new();
6815            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6816            assert_eq!(v.len(), 2);
6817            assert_eq!(&*v, &[0, 0]);
6818            drop(v);
6819
6820            // Insert at start.
6821            let mut v = vec![100u64, 200, 300];
6822            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6823            assert_eq!(v.len(), 5);
6824            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
6825            drop(v);
6826
6827            // Insert at middle.
6828            let mut v = vec![100u64, 200, 300];
6829            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6830            assert_eq!(v.len(), 4);
6831            assert_eq!(&*v, &[100, 0, 200, 300]);
6832            drop(v);
6833
6834            // Insert at end.
6835            let mut v = vec![100u64, 200, 300];
6836            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6837            assert_eq!(v.len(), 4);
6838            assert_eq!(&*v, &[100, 200, 300, 0]);
6839            drop(v);
6840        }
6841
6842        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6843        #[test]
6844        fn test_insert_vec_zeroed_zst() {
6845            // Insert at start (no existing fake allocation).
6846            let mut v: Vec<()> = Vec::new();
6847            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6848            assert_eq!(v.len(), 2);
6849            assert_eq!(&*v, &[(), ()]);
6850            drop(v);
6851
6852            // Insert at start.
6853            let mut v = vec![(), (), ()];
6854            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6855            assert_eq!(v.len(), 5);
6856            assert_eq!(&*v, &[(), (), (), (), ()]);
6857            drop(v);
6858
6859            // Insert at middle.
6860            let mut v = vec![(), (), ()];
6861            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6862            assert_eq!(v.len(), 4);
6863            assert_eq!(&*v, &[(), (), (), ()]);
6864            drop(v);
6865
6866            // Insert at end.
6867            let mut v = vec![(), (), ()];
6868            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6869            assert_eq!(v.len(), 4);
6870            assert_eq!(&*v, &[(), (), (), ()]);
6871            drop(v);
6872        }
6873
6874        #[test]
6875        fn test_new_box_zeroed() {
6876            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
6877        }
6878
6879        #[test]
6880        fn test_new_box_zeroed_array() {
6881            drop(<[u32; 0x1000]>::new_box_zeroed());
6882        }
6883
6884        #[test]
6885        fn test_new_box_zeroed_zst() {
6886            // This test exists in order to exercise unsafe code, especially
6887            // when running under Miri.
6888            #[allow(clippy::unit_cmp)]
6889            {
6890                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
6891            }
6892        }
6893
6894        #[test]
6895        fn test_new_box_zeroed_with_elems() {
6896            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
6897            assert_eq!(s.len(), 3);
6898            assert_eq!(&*s, &[0, 0, 0]);
6899            s[1] = 3;
6900            assert_eq!(&*s, &[0, 3, 0]);
6901        }
6902
6903        #[test]
6904        fn test_new_box_zeroed_with_elems_empty() {
6905            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
6906            assert_eq!(s.len(), 0);
6907        }
6908
6909        #[test]
6910        fn test_new_box_zeroed_with_elems_zst() {
6911            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
6912            assert_eq!(s.len(), 3);
6913            assert!(s.get(10).is_none());
6914            // This test exists in order to exercise unsafe code, especially
6915            // when running under Miri.
6916            #[allow(clippy::unit_cmp)]
6917            {
6918                assert_eq!(s[1], ());
6919            }
6920            s[2] = ();
6921        }
6922
6923        #[test]
6924        fn test_new_box_zeroed_with_elems_zst_empty() {
6925            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
6926            assert_eq!(s.len(), 0);
6927        }
6928
6929        #[test]
6930        fn new_box_zeroed_with_elems_errors() {
6931            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));
6932
6933            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
6934            assert_eq!(
6935                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
6936                Err(AllocError)
6937            );
6938        }
6939    }
6940
6941    #[test]
6942    #[allow(deprecated)]
6943    fn test_deprecated_from_bytes() {
6944        let val = 0u32;
6945        let bytes = val.as_bytes();
6946
6947        assert!(u32::ref_from(bytes).is_some());
6948        // mut_from needs mut bytes
6949        let mut val = 0u32;
6950        let mut_bytes = val.as_mut_bytes();
6951        assert!(u32::mut_from(mut_bytes).is_some());
6952
6953        assert!(u32::read_from(bytes).is_some());
6954
6955        let (slc, rest) = <u32>::slice_from_prefix(bytes, 0).unwrap();
6956        assert!(slc.is_empty());
6957        assert_eq!(rest.len(), 4);
6958
6959        let (rest, slc) = <u32>::slice_from_suffix(bytes, 0).unwrap();
6960        assert!(slc.is_empty());
6961        assert_eq!(rest.len(), 4);
6962
6963        let (slc, rest) = <u32>::mut_slice_from_prefix(mut_bytes, 0).unwrap();
6964        assert!(slc.is_empty());
6965        assert_eq!(rest.len(), 4);
6966
6967        let (rest, slc) = <u32>::mut_slice_from_suffix(mut_bytes, 0).unwrap();
6968        assert!(slc.is_empty());
6969        assert_eq!(rest.len(), 4);
6970    }
6971
6972    #[test]
6973    fn test_try_ref_from_prefix_suffix() {
6974        use crate::util::testutil::Align;
6975        let bytes = &Align::<[u8; 4], u32>::new([0u8; 4]).t[..];
6976        let (r, rest): (&u32, &[u8]) = u32::try_ref_from_prefix(bytes).unwrap();
6977        assert_eq!(*r, 0);
6978        assert_eq!(rest.len(), 0);
6979
6980        let (rest, r): (&[u8], &u32) = u32::try_ref_from_suffix(bytes).unwrap();
6981        assert_eq!(*r, 0);
6982        assert_eq!(rest.len(), 0);
6983    }
6984
6985    #[test]
6986    fn test_raw_dangling() {
6987        use crate::util::AsAddress;
6988        let ptr: NonNull<u32> = u32::raw_dangling();
6989        assert_eq!(AsAddress::addr(ptr), 1);
6990
6991        let ptr: NonNull<[u32]> = <[u32]>::raw_dangling();
6992        assert_eq!(AsAddress::addr(ptr), 1);
6993    }
6994
6995    #[test]
6996    fn test_try_ref_from_prefix_with_elems() {
6997        use crate::util::testutil::Align;
6998        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
6999        let (r, rest): (&[u32], &[u8]) = <[u32]>::try_ref_from_prefix_with_elems(bytes, 2).unwrap();
7000        assert_eq!(r.len(), 2);
7001        assert_eq!(rest.len(), 0);
7002    }
7003
7004    #[test]
7005    fn test_try_ref_from_suffix_with_elems() {
7006        use crate::util::testutil::Align;
7007        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
7008        let (rest, r): (&[u8], &[u32]) = <[u32]>::try_ref_from_suffix_with_elems(bytes, 2).unwrap();
7009        assert_eq!(r.len(), 2);
7010        assert_eq!(rest.len(), 0);
7011    }
7012}