Skip to main content

zerocopy/
lib.rs

1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13//   cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! ***<span style="font-size: 140%">Fast, safe, <span
16//! style="color:red;">compile error</span>. Pick two.</span>***
17//!
18//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
19//! so you don't have to.
20//!
21//! *For an overview of what's changed from zerocopy 0.7, check out our [release
22//! notes][release-notes], which include a step-by-step upgrading guide.*
23//!
24//! *Have questions? Need more out of zerocopy? Submit a [customer request
25//! issue][customer-request-issue] or ask the maintainers on
26//! [GitHub][github-q-a] or [Discord][discord]!*
27//!
28//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
29//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
30//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
31//! [discord]: https://discord.gg/MAvWH2R6zk
32//!
33//! # Overview
34//!
35//! ##### Conversion Traits
36//!
37//! Zerocopy provides four derivable traits for zero-cost conversions:
38//! - [`TryFromBytes`] indicates that a type may safely be converted from
39//!   certain byte sequences (conditional on runtime checks)
40//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
41//!   instance of a type
42//! - [`FromBytes`] indicates that a type may safely be converted from an
43//!   arbitrary byte sequence
44//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
45//!   sequence
46//!
47//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
48//!
49//! [slice-dsts]: KnownLayout#dynamically-sized-types
50//!
51//! ##### Marker Traits
52//!
53//! Zerocopy provides three derivable marker traits that do not provide any
54//! functionality themselves, but are required to call certain methods provided
55//! by the conversion traits:
56//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
57//!   qualities of a type
58//! - [`Immutable`] indicates that a type is free from interior mutability,
59//!   except by ownership or an exclusive (`&mut`) borrow
60//! - [`Unaligned`] indicates that a type's alignment requirement is 1
61//!
62//! You should generally derive these marker traits whenever possible.
63//!
64//! ##### Conversion Macros
65//!
66//! Zerocopy provides six macros for safe casting between types:
67//!
68//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
69//!   one type to a value of another type of the same size
70//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
71//!   mutable reference of one type to a mutable reference of another type of
72//!   the same size
73//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
74//!   mutable or immutable reference of one type to an immutable reference of
75//!   another type of the same size
76//!
77//! These macros perform *compile-time* size and alignment checks, meaning that
78//! unconditional casts have zero cost at runtime. Conditional casts do not need
//! to validate size or alignment at runtime, but do need to validate contents.
80//!
81//! These macros cannot be used in generic contexts. For generic conversions,
82//! use the methods defined by the [conversion traits](#conversion-traits).
83//!
84//! ##### Byteorder-Aware Numerics
85//!
86//! Zerocopy provides byte-order aware integer types that support these
87//! conversions; see the [`byteorder`] module. These types are especially useful
88//! for network parsing.
89//!
90//! # Cargo Features
91//!
92//! - **`alloc`**
93//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
94//!   the `alloc` crate is added as a dependency, and some allocation-related
95//!   functionality is added.
96//!
97//! - **`std`**
98//!   By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
//!   `std` crate is added as a dependency (i.e., `no_std` is disabled), and
100//!   support for some `std` types is added. `std` implies `alloc`.
101//!
102//! - **`derive`**
103//!   Provides derives for the core marker traits via the `zerocopy-derive`
104//!   crate. These derives are re-exported from `zerocopy`, so it is not
105//!   necessary to depend on `zerocopy-derive` directly.
106//!
107//!   However, you may experience better compile times if you instead directly
108//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
109//!   since doing so will allow Rust to compile these crates in parallel. To do
110//!   so, do *not* enable the `derive` feature, and list both dependencies in
111//!   your `Cargo.toml` with the same leading non-zero version number; e.g:
112//!
113//!   ```toml
114//!   [dependencies]
115//!   zerocopy = "0.X"
116//!   zerocopy-derive = "0.X"
117//!   ```
118//!
119//!   To avoid the risk of [duplicate import errors][duplicate-import-errors] if
120//!   one of your dependencies enables zerocopy's `derive` feature, import
121//!   derives as `use zerocopy_derive::*` rather than by name (e.g., `use
122//!   zerocopy_derive::FromBytes`).
123//!
124//! - **`simd`**
125//!   When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
126//!   `IntoBytes` impls are emitted for all stable SIMD types which exist on the
127//!   target platform. Note that the layout of SIMD types is not yet stabilized,
128//!   so these impls may be removed in the future if layout changes make them
129//!   invalid. For more information, see the Unsafe Code Guidelines Reference
130//!   page on the [layout of packed SIMD vectors][simd-layout].
131//!
132//! - **`simd-nightly`**
133//!   Enables the `simd` feature and adds support for SIMD types which are only
134//!   available on nightly. Since these types are unstable, support for any type
135//!   may be removed at any point in the future.
136//!
137//! - **`float-nightly`**
138//!   Adds support for the unstable `f16` and `f128` types. These types are
139//!   not yet fully implemented and may not be supported on all platforms.
140//!
141//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
142//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
143//!
144//! # Security Ethos
145//!
146//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current
148//! memory model, and *any future memory model*. We ensure this by:
149//! - **...not 'guessing' about Rust's semantics.**
150//!   We annotate `unsafe` code with a precise rationale for its soundness that
151//!   cites a relevant section of Rust's official documentation. When Rust's
152//!   documented semantics are unclear, we work with the Rust Operational
153//!   Semantics Team to clarify Rust's documentation.
154//! - **...rigorously testing our implementation.**
155//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
156//!   array of supported target platforms of varying endianness and pointer
157//!   width, and across both current and experimental memory models of Rust.
158//! - **...formally proving the correctness of our implementation.**
159//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
160//!   correctness.
161//!
162//! For more information, see our full [soundness policy].
163//!
164//! [Miri]: https://github.com/rust-lang/miri
165//! [Kani]: https://github.com/model-checking/kani
166//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
167//!
168//! # Relationship to Project Safe Transmute
169//!
170//! [Project Safe Transmute] is an official initiative of the Rust Project to
171//! develop language-level support for safer transmutation. The Project consults
172//! with crates like zerocopy to identify aspects of safer transmutation that
173//! would benefit from compiler support, and has developed an [experimental,
174//! compiler-supported analysis][mcp-transmutability] which determines whether,
175//! for a given type, any value of that type may be soundly transmuted into
176//! another type. Once this functionality is sufficiently mature, zerocopy
177//! intends to replace its internal transmutability analysis (implemented by our
178//! custom derives) with the compiler-supported one. This change will likely be
179//! an implementation detail that is invisible to zerocopy's users.
180//!
181//! Project Safe Transmute will not replace the need for most of zerocopy's
182//! higher-level abstractions. The experimental compiler analysis is a tool for
183//! checking the soundness of `unsafe` code, not a tool to avoid writing
184//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
185//! will still be required in order to provide higher-level abstractions on top
186//! of the building block provided by Project Safe Transmute.
187//!
188//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
189//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
190//!
191//! # MSRV
192//!
193//! See our [MSRV policy].
194//!
195//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
196//!
197//! # Changelog
198//!
199//! Zerocopy uses [GitHub Releases].
200//!
201//! [GitHub Releases]: https://github.com/google/zerocopy/releases
202//!
203//! # Thanks
204//!
205//! Zerocopy is maintained by engineers at Google with help from [many wonderful
206//! contributors][contributors]. Thank you to everyone who has lent a hand in
207//! making Rust a little more secure!
208//!
209//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
210
211// Sometimes we want to use lints which were added after our MSRV.
212// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
213// this attribute, any unknown lint would cause a CI failure when testing with
214// our MSRV.
215#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
216#![deny(renamed_and_removed_lints)]
217#![deny(
218    anonymous_parameters,
219    deprecated_in_future,
220    late_bound_lifetime_arguments,
221    missing_copy_implementations,
222    missing_debug_implementations,
223    missing_docs,
224    path_statements,
225    patterns_in_fns_without_body,
226    rust_2018_idioms,
227    trivial_numeric_casts,
228    unreachable_pub,
229    unsafe_op_in_unsafe_fn,
230    unused_extern_crates,
231    // We intentionally choose not to deny `unused_qualifications`. When items
232    // are added to the prelude (e.g., `core::mem::size_of`), this has the
233    // consequence of making some uses trigger this lint on the latest toolchain
234    // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
235    // does not work on older toolchains.
236    //
237    // We tested a more complicated fix in #1413, but ultimately decided that,
238    // since this lint is just a minor style lint, the complexity isn't worth it
239    // - it's fine to occasionally have unused qualifications slip through,
240    // especially since these do not affect our user-facing API in any way.
241    variant_size_differences
242)]
243#![cfg_attr(
244    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
245    deny(fuzzy_provenance_casts, lossy_provenance_casts)
246)]
247#![deny(
248    clippy::all,
249    clippy::alloc_instead_of_core,
250    clippy::arithmetic_side_effects,
251    clippy::as_underscore,
252    clippy::assertions_on_result_states,
253    clippy::as_conversions,
254    clippy::correctness,
255    clippy::dbg_macro,
256    clippy::decimal_literal_representation,
257    clippy::double_must_use,
258    clippy::get_unwrap,
259    clippy::indexing_slicing,
260    clippy::missing_inline_in_public_items,
261    clippy::missing_safety_doc,
262    clippy::multiple_unsafe_ops_per_block,
263    clippy::must_use_candidate,
264    clippy::must_use_unit,
265    clippy::obfuscated_if_else,
266    clippy::perf,
267    clippy::print_stdout,
268    clippy::return_self_not_must_use,
269    clippy::std_instead_of_core,
270    clippy::style,
271    clippy::suspicious,
272    clippy::todo,
273    clippy::undocumented_unsafe_blocks,
274    clippy::unimplemented,
275    clippy::unnested_or_patterns,
276    clippy::unwrap_used,
277    clippy::use_debug
278)]
279// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes
280// has false positives, and we test on our MSRV in CI, so it doesn't help us
281// anyway.
282#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)]
283#![deny(
284    rustdoc::bare_urls,
285    rustdoc::broken_intra_doc_links,
286    rustdoc::invalid_codeblock_attributes,
287    rustdoc::invalid_html_tags,
288    rustdoc::invalid_rust_codeblocks,
289    rustdoc::missing_crate_level_docs,
290    rustdoc::private_intra_doc_links
291)]
292// In test code, it makes sense to weight more heavily towards concise, readable
293// code over correct or debuggable code.
294#![cfg_attr(any(test, kani), allow(
295    // In tests, you get line numbers and have access to source code, so panic
296    // messages are less important. You also often unwrap a lot, which would
297    // make expect'ing instead very verbose.
298    clippy::unwrap_used,
299    // In tests, there's no harm to "panic risks" - the worst that can happen is
300    // that your test will fail, and you'll fix it. By contrast, panic risks in
    // production code introduce the possibility of code panicking unexpectedly "in
302    // the field".
303    clippy::arithmetic_side_effects,
304    clippy::indexing_slicing,
305))]
306#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
307#![cfg_attr(
308    all(feature = "simd-nightly", target_arch = "arm"),
309    feature(stdarch_arm_neon_intrinsics)
310)]
311#![cfg_attr(
312    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
313    feature(stdarch_powerpc)
314)]
315#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
316#![cfg_attr(doc_cfg, feature(doc_cfg))]
317#![cfg_attr(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, feature(coverage_attribute))]
318#![cfg_attr(
319    any(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, miri),
320    feature(layout_for_ptr)
321)]
322#![cfg_attr(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), feature(test))]
323
324// This is a hack to allow zerocopy-derive derives to work in this crate. They
325// assume that zerocopy is linked as an extern crate, so they access items from
326// it as `zerocopy::Xxx`. This makes that still work.
327#[cfg(any(feature = "derive", test))]
328extern crate self as zerocopy;
329
330#[cfg(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS))]
331extern crate test;
332
333#[doc(hidden)]
334#[macro_use]
335pub mod util;
336
337pub mod byte_slice;
338pub mod byteorder;
339mod deprecated;
340
341#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)]
342pub mod doctests;
343
344// This module is `pub` so that zerocopy's error types and error handling
345// documentation is grouped together in a cohesive module. In practice, we
346// expect most users to use the re-export of `error`'s items to avoid identifier
347// stuttering.
348pub mod error;
349mod impls;
350#[doc(hidden)]
351pub mod layout;
352mod macros;
353#[doc(hidden)]
354pub mod pointer;
355mod r#ref;
356mod split_at;
357// FIXME(#252): If we make this pub, come up with a better name.
358mod wrappers;
359
360use core::{
361    cell::{Cell, UnsafeCell},
362    cmp::Ordering,
363    fmt::{self, Debug, Display, Formatter},
364    hash::Hasher,
365    marker::PhantomData,
366    mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
367    num::{
368        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
369        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
370    },
371    ops::{Deref, DerefMut},
372    ptr::{self, NonNull},
373    slice,
374};
375#[cfg(feature = "std")]
376use std::io;
377
378#[doc(hidden)]
379pub use crate::pointer::invariant::{self, BecauseExclusive};
380#[doc(hidden)]
381pub use crate::pointer::PtrInner;
382pub use crate::{
383    byte_slice::*,
384    byteorder::*,
385    error::*,
386    r#ref::*,
387    split_at::{Split, SplitAt},
388    wrappers::*,
389};
390
391#[cfg(any(feature = "alloc", test, kani))]
392extern crate alloc;
393#[cfg(any(feature = "alloc", test))]
394use alloc::{boxed::Box, vec::Vec};
395#[cfg(any(feature = "alloc", test))]
396use core::alloc::Layout;
397
398use util::MetadataOf;
399
400// Used by `KnownLayout`.
401#[doc(hidden)]
402pub use crate::layout::*;
403// Used by `TryFromBytes::is_bit_valid`.
404#[doc(hidden)]
405pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
406// For each trait polyfill, as soon as the corresponding feature is stable, the
407// polyfill import will be unused because method/function resolution will prefer
408// the inherent method/function over a trait method/function. Thus, we suppress
409// the `unused_imports` warning.
410//
411// See the documentation on `util::polyfills` for more information.
412#[allow(unused_imports)]
413use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
414
// Emit a *warning* (not a hard error) when zerocopy's own test suite is built
// without going through the supported wrapper scripts. The wrappers set
// `__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE`, so this block is only compiled for
// `cargo test` invocations that bypassed them.
#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)))]
const _: () = {
    // The deprecation message doubles as the warning text shown to the
    // developer; `#[allow(unused)]` keeps the item itself from triggering
    // unrelated lints.
    #[deprecated = "Development of zerocopy using cargo is not supported. Please use `cargo.sh` or `win-cargo.bat` instead."]
    #[allow(unused)]
    const WARNING: () = ();
    // Referencing `WARNING` here is what actually fires the `deprecated`
    // lint; `#[warn(deprecated)]` makes sure the lint is set to `warn` at
    // this use site regardless of surrounding lint configuration.
    #[warn(deprecated)]
    WARNING
};
423
424/// Implements [`KnownLayout`].
425///
426/// This derive analyzes various aspects of a type's layout that are needed for
427/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
428/// e.g.:
429///
430/// ```
431/// # use zerocopy_derive::KnownLayout;
432/// #[derive(KnownLayout)]
433/// struct MyStruct {
434/// # /*
435///     ...
436/// # */
437/// }
438///
439/// #[derive(KnownLayout)]
440/// enum MyEnum {
441/// #   V00,
442/// # /*
443///     ...
444/// # */
445/// }
446///
447/// #[derive(KnownLayout)]
448/// union MyUnion {
449/// #   variant: u8,
450/// # /*
451///     ...
452/// # */
453/// }
454/// ```
455///
456/// # Limitations
457///
458/// This derive cannot currently be applied to unsized structs without an
459/// explicit `repr` attribute.
460///
461/// Some invocations of this derive run afoul of a [known bug] in Rust's type
462/// privacy checker. For example, this code:
463///
464/// ```compile_fail,E0446
465/// use zerocopy::*;
466/// # use zerocopy_derive::*;
467///
468/// #[derive(KnownLayout)]
469/// #[repr(C)]
470/// pub struct PublicType {
471///     leading: Foo,
472///     trailing: Bar,
473/// }
474///
475/// #[derive(KnownLayout)]
476/// struct Foo;
477///
478/// #[derive(KnownLayout)]
479/// struct Bar;
480/// ```
481///
482/// ...results in a compilation error:
483///
484/// ```text
485/// error[E0446]: private type `Bar` in public interface
486///  --> examples/bug.rs:3:10
487///    |
488/// 3  | #[derive(KnownLayout)]
489///    |          ^^^^^^^^^^^ can't leak private type
490/// ...
491/// 14 | struct Bar;
492///    | ---------- `Bar` declared as private
493///    |
494///    = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
495/// ```
496///
497/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
498/// structs whose trailing field type is less public than the enclosing struct.
499///
500/// To work around this, mark the trailing field type `pub` and annotate it with
501/// `#[doc(hidden)]`; e.g.:
502///
503/// ```no_run
504/// use zerocopy::*;
505/// # use zerocopy_derive::*;
506///
507/// #[derive(KnownLayout)]
508/// #[repr(C)]
509/// pub struct PublicType {
510///     leading: Foo,
511///     trailing: Bar,
512/// }
513///
514/// #[derive(KnownLayout)]
515/// struct Foo;
516///
517/// #[doc(hidden)]
518/// #[derive(KnownLayout)]
519/// pub struct Bar; // <- `Bar` is now also `pub`
520/// ```
521///
522/// [known bug]: https://github.com/rust-lang/rust/issues/45713
523#[cfg(any(feature = "derive", test))]
524#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
525pub use zerocopy_derive::KnownLayout;
526// These exist so that code which was written against the old names will get
527// less confusing error messages when they upgrade to a more recent version of
528// zerocopy. On our MSRV toolchain, the error messages read, for example:
529//
530//   error[E0603]: trait `FromZeroes` is private
531//       --> examples/deprecated.rs:1:15
532//        |
533//   1    | use zerocopy::FromZeroes;
534//        |               ^^^^^^^^^^ private trait
535//        |
536//   note: the trait `FromZeroes` is defined here
537//       --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
538//        |
539//   1845 | use FromZeros as FromZeroes;
540//        |     ^^^^^^^^^^^^^^^^^^^^^^^
541//
542// The "note" provides enough context to make it easy to figure out how to fix
543// the error.
544#[allow(unused)]
545use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
546
547/// Indicates that zerocopy can reason about certain aspects of a type's layout.
548///
549/// This trait is required by many of zerocopy's APIs. It supports sized types,
550/// slices, and [slice DSTs](#dynamically-sized-types).
551///
552/// # Implementation
553///
554/// **Do not implement this trait yourself!** Instead, use
555/// [`#[derive(KnownLayout)]`][derive]; e.g.:
556///
557/// ```
558/// # use zerocopy_derive::KnownLayout;
559/// #[derive(KnownLayout)]
560/// struct MyStruct {
561/// # /*
562///     ...
563/// # */
564/// }
565///
566/// #[derive(KnownLayout)]
567/// enum MyEnum {
568/// # /*
569///     ...
570/// # */
571/// }
572///
573/// #[derive(KnownLayout)]
574/// union MyUnion {
575/// #   variant: u8,
576/// # /*
577///     ...
578/// # */
579/// }
580/// ```
581///
582/// This derive performs a sophisticated analysis to deduce the layout
583/// characteristics of types. You **must** implement this trait via the derive.
584///
585/// # Dynamically-sized types
586///
587/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
588///
589/// A slice DST is a type whose trailing field is either a slice or another
590/// slice DST, rather than a type with fixed size. For example:
591///
592/// ```
593/// #[repr(C)]
594/// struct PacketHeader {
595/// # /*
596///     ...
597/// # */
598/// }
599///
600/// #[repr(C)]
601/// struct Packet {
602///     header: PacketHeader,
603///     body: [u8],
604/// }
605/// ```
606///
607/// It can be useful to think of slice DSTs as a generalization of slices - in
608/// other words, a normal slice is just the special case of a slice DST with
609/// zero leading fields. In particular:
610/// - Like slices, slice DSTs can have different lengths at runtime
611/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
612///   or via other indirection such as `Box`
613/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
614///   encodes the number of elements in the trailing slice field
615///
616/// ## Slice DST layout
617///
618/// Just like other composite Rust types, the layout of a slice DST is not
619/// well-defined unless it is specified using an explicit `#[repr(...)]`
620/// attribute such as `#[repr(C)]`. [Other representations are
621/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
622/// example.
623///
624/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
625/// types][repr-c-structs], but the presence of a variable-length field
626/// introduces the possibility of *dynamic padding*. In particular, it may be
627/// necessary to add trailing padding *after* the trailing slice field in order
628/// to satisfy the outer type's alignment, and the amount of padding required
629/// may be a function of the length of the trailing slice field. This is just a
630/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
631/// but it can result in surprising behavior. For example, consider the
632/// following type:
633///
634/// ```
635/// #[repr(C)]
636/// struct Foo {
637///     a: u32,
638///     b: u8,
639///     z: [u16],
640/// }
641/// ```
642///
643/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
644/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
645/// `Foo`:
646///
647/// ```text
648/// byte offset | 01234567
649///       field | aaaab---
650///                    ><
651/// ```
652///
653/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
654/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
655/// round up to offset 6. This means that there is one byte of padding between
656/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
657/// then two bytes of padding after `z` in order to satisfy the overall
658/// alignment of `Foo`. The size of this instance is 8 bytes.
659///
660/// What about if `z` has length 1?
661///
662/// ```text
663/// byte offset | 01234567
664///       field | aaaab-zz
665/// ```
666///
667/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
668/// that we no longer need padding after `z` in order to satisfy `Foo`'s
669/// alignment. We've now seen two different values of `Foo` with two different
670/// lengths of `z`, but they both have the same size - 8 bytes.
671///
672/// What about if `z` has length 2?
673///
674/// ```text
675/// byte offset | 012345678901
676///       field | aaaab-zzzz--
677/// ```
678///
679/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
680/// size to 10, and so we now need another 2 bytes of padding after `z` to
681/// satisfy `Foo`'s alignment.
682///
683/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
684/// applied to slice DSTs, but it can be surprising that the amount of trailing
685/// padding becomes a function of the trailing slice field's length, and thus
686/// can only be computed at runtime.
687///
688/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
689/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
690///
691/// ## What is a valid size?
692///
693/// There are two places in zerocopy's API that we refer to "a valid size" of a
694/// type. In normal casts or conversions, where the source is a byte slice, we
695/// need to know whether the source byte slice is a valid size of the
696/// destination type. In prefix or suffix casts, we need to know whether *there
697/// exists* a valid size of the destination type which fits in the source byte
698/// slice and, if so, what the largest such size is.
699///
700/// As outlined above, a slice DST's size is defined by the number of elements
701/// in its trailing slice field. However, there is not necessarily a 1-to-1
702/// mapping between trailing slice field length and overall size. As we saw in
703/// the previous section with the type `Foo`, instances with both 0 and 1
704/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
705///
706/// When we say "x is a valid size of `T`", we mean one of two things:
707/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
708/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
709///   `T` with `len` trailing slice elements has size `x`
710///
711/// When we say "largest possible size of `T` that fits in a byte slice", we
712/// mean one of two things:
713/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
714///   `size_of::<T>()` bytes long
715/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
716///   that the instance of `T` with `len` trailing slice elements fits in the
717///   byte slice, and to choose the largest such `len`, if any
718///
719///
720/// # Safety
721///
722/// This trait does not convey any safety guarantees to code outside this crate.
723///
724/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
725/// releases of zerocopy may make backwards-breaking changes to these items,
726/// including changes that only affect soundness, which may cause code which
727/// uses those items to silently become unsound.
728///
729#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
730#[cfg_attr(
731    not(feature = "derive"),
732    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
733)]
734#[cfg_attr(
735    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
736    diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
737)]
738pub unsafe trait KnownLayout {
739    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
740    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
741    // it likely won't be in the future, but there's no reason not to be
742    // forwards-compatible with object safety.
743    #[doc(hidden)]
744    fn only_derive_is_allowed_to_implement_this_trait()
745    where
746        Self: Sized;
747
748    /// The type of metadata stored in a pointer to `Self`.
749    ///
750    /// This is `()` for sized types and [`usize`] for slice DSTs.
751    type PointerMetadata: PointerMetadata;
752
753    /// A maybe-uninitialized analog of `Self`
754    ///
755    /// # Safety
756    ///
757    /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
758    /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
759    #[doc(hidden)]
760    type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
761
762    /// The layout of `Self`.
763    ///
764    /// # Safety
765    ///
766    /// Callers may assume that `LAYOUT` accurately reflects the layout of
767    /// `Self`. In particular:
768    /// - `LAYOUT.align` is equal to `Self`'s alignment
769    /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
770    ///   where `size == size_of::<Self>()`
771    /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
772    ///   SizeInfo::SliceDst(slice_layout)` where:
773    ///   - The size, `size`, of an instance of `Self` with `elems` trailing
774    ///     slice elements is equal to `slice_layout.offset +
775    ///     slice_layout.elem_size * elems` rounded up to the nearest multiple
776    ///     of `LAYOUT.align`
777    ///   - For such an instance, any bytes in the range `[slice_layout.offset +
778    ///     slice_layout.elem_size * elems, size)` are padding and must not be
779    ///     assumed to be initialized
780    #[doc(hidden)]
781    const LAYOUT: DstLayout;
782
783    /// SAFETY: The returned pointer has the same address and provenance as
784    /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
785    /// elements in its trailing slice.
786    #[doc(hidden)]
787    fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
788
789    /// Extracts the metadata from a pointer to `Self`.
790    ///
791    /// # Safety
792    ///
793    /// `pointer_to_metadata` always returns the correct metadata stored in
794    /// `ptr`.
795    #[doc(hidden)]
796    fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
797
798    /// Computes the length of the byte range addressed by `ptr`.
799    ///
    /// Returns `None` if the resulting length would not fit in a `usize`.
801    ///
802    /// # Safety
803    ///
804    /// Callers may assume that `size_of_val_raw` always returns the correct
805    /// size.
806    ///
    /// Callers may assume that, if `ptr` addresses a byte range whose length
    /// fits in a `usize`, this will return `Some`.
809    #[doc(hidden)]
810    #[must_use]
811    #[inline(always)]
812    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
813        let meta = Self::pointer_to_metadata(ptr.as_ptr());
814        // SAFETY: `size_for_metadata` promises to only return `None` if the
815        // resulting size would not fit in a `usize`.
816        Self::size_for_metadata(meta)
817    }
818
819    #[doc(hidden)]
820    #[must_use]
821    #[inline(always)]
822    fn raw_dangling() -> NonNull<Self> {
823        let meta = Self::PointerMetadata::from_elem_count(0);
824        Self::raw_from_ptr_len(NonNull::dangling(), meta)
825    }
826
827    /// Computes the size of an object of type `Self` with the given pointer
828    /// metadata.
829    ///
830    /// # Safety
831    ///
832    /// `size_for_metadata` promises to return `None` if and only if the
833    /// resulting size would not fit in a [`usize`]. Note that the returned size
834    /// could exceed the actual maximum valid size of an allocated object,
835    /// [`isize::MAX`].
836    ///
837    /// # Examples
838    ///
839    /// ```
840    /// use zerocopy::KnownLayout;
841    ///
842    /// assert_eq!(u8::size_for_metadata(()), Some(1));
843    /// assert_eq!(u16::size_for_metadata(()), Some(2));
844    /// assert_eq!(<[u8]>::size_for_metadata(42), Some(42));
845    /// assert_eq!(<[u16]>::size_for_metadata(42), Some(84));
846    ///
847    /// // This size exceeds the maximum valid object size (`isize::MAX`):
848    /// assert_eq!(<[u8]>::size_for_metadata(usize::MAX), Some(usize::MAX));
849    ///
850    /// // This size, if computed, would exceed `usize::MAX`:
851    /// assert_eq!(<[u16]>::size_for_metadata(usize::MAX), None);
852    /// ```
853    #[inline(always)]
854    fn size_for_metadata(meta: Self::PointerMetadata) -> Option<usize> {
855        meta.size_for_metadata(Self::LAYOUT)
856    }
857}
858
859/// Efficiently produces the [`TrailingSliceLayout`] of `T`.
860#[inline(always)]
861pub(crate) fn trailing_slice_layout<T>() -> TrailingSliceLayout
862where
863    T: ?Sized + KnownLayout<PointerMetadata = usize>,
864{
865    trait LayoutFacts {
866        const SIZE_INFO: TrailingSliceLayout;
867    }
868
869    impl<T: ?Sized> LayoutFacts for T
870    where
871        T: KnownLayout<PointerMetadata = usize>,
872    {
873        const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info {
874            crate::SizeInfo::Sized { .. } => const_panic!("unreachable"),
875            crate::SizeInfo::SliceDst(info) => info,
876        };
877    }
878
879    T::SIZE_INFO
880}
881
/// The metadata associated with a [`KnownLayout`] type.
///
/// This trait is implemented below for `()` (the metadata of sized types) and
/// for `usize` (the metadata — a trailing-slice element count — of slice
/// DSTs).
#[doc(hidden)]
pub trait PointerMetadata: Copy + Eq + Debug {
    /// Constructs a `Self` from an element count.
    ///
    /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
    /// `elems`. No other types are currently supported.
    fn from_elem_count(elems: usize) -> Self;

    /// Converts `self` to an element count.
    ///
    /// If `Self = ()`, this returns `0`. If `Self = usize`, this returns
    /// `self`. No other types are currently supported.
    fn to_elem_count(self) -> usize;

    /// Computes the size of the object with the given layout and pointer
    /// metadata.
    ///
    /// # Panics
    ///
    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
    /// panic.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to only return `None` if the resulting size
    /// would not fit in a `usize`.
    fn size_for_metadata(self, layout: DstLayout) -> Option<usize>;
}
912
913impl PointerMetadata for () {
914    #[inline]
915    #[allow(clippy::unused_unit)]
916    fn from_elem_count(_elems: usize) -> () {}
917
918    #[inline]
919    fn to_elem_count(self) -> usize {
920        0
921    }
922
923    #[inline]
924    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
925        match layout.size_info {
926            SizeInfo::Sized { size } => Some(size),
927            // NOTE: This branch is unreachable, but we return `None` rather
928            // than `unreachable!()` to avoid generating panic paths.
929            SizeInfo::SliceDst(_) => None,
930        }
931    }
932}
933
934impl PointerMetadata for usize {
935    #[inline]
936    fn from_elem_count(elems: usize) -> usize {
937        elems
938    }
939
940    #[inline]
941    fn to_elem_count(self) -> usize {
942        self
943    }
944
945    #[inline]
946    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
947        match layout.size_info {
948            SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
949                let slice_len = elem_size.checked_mul(self)?;
950                let without_padding = offset.checked_add(slice_len)?;
951                without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
952            }
953            // NOTE: This branch is unreachable, but we return `None` rather
954            // than `unreachable!()` to avoid generating panic paths.
955            SizeInfo::Sized { .. } => None,
956        }
957    }
958}
959
// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items, dead_code)]
    #[cfg_attr(
        all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
        coverage(off)
    )]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }

    // A slice's pointer metadata is its element count.
    type PointerMetadata = usize;

    // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
    // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
    // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
    // identical, because they both lack a fixed-sized prefix and because they
    // inherit the alignments of their inner element type (which are identical)
    // [2][3].
    //
    // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
    // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
    // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
    // back-to-back [2][3].
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
    //
    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
    //   `T`
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
    //
    //   Slices have the same layout as the section of the array they slice.
    //
    // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
    //
    //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
    //   alignment of `T`. Arrays are laid out so that the zero-based `nth`
    //   element of the array is offset from the start of the array by `n *
    //   size_of::<T>()` bytes.
    type MaybeUninit = [CoreMaybeUninit<T>];

    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // FIXME(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }

    #[inline(always)]
    fn pointer_to_metadata(ptr: *mut [T]) -> usize {
        // Casting to a slice of zero-sized `()` elements preserves the
        // element count ([1] below) while making it sound to materialize a
        // reference (see the SAFETY comment).
        #[allow(clippy::as_conversions)]
        let slc = ptr as *const [()];

        // SAFETY:
        // - `()` has alignment 1, so `slc` is trivially aligned.
        // - `slc` was derived from a non-null pointer.
        // - The size is 0 regardless of the length, so it is sound to
        //   materialize a reference regardless of location.
        // - By invariant, `self.ptr` has valid provenance.
        let slc = unsafe { &*slc };

        // This is correct because the preceding `as` cast preserves the number
        // of slice elements. [1]
        //
        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
        //
        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
        //   elements in this slice. Casts between these raw pointer types
        //   preserve the number of elements. ... The same holds for `str` and
        //   any compound type whose unsized tail is a slice type, such as
        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
        slc.len()
    }
}
1042
// `KnownLayout` for sized primitive and `NonZero*` types.
#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
// `KnownLayout` for the nightly-only `f16`/`f128` float types.
#[rustfmt::skip]
#[cfg(feature = "float-nightly")]
impl_known_layout!(
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f16,
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f128
);
// `KnownLayout` for generic wrapper and pointer types; the `T: ?Sized`
// entries also support unsized `T`.
#[rustfmt::skip]
impl_known_layout!(
    T         => Option<T>,
    T: ?Sized => PhantomData<T>,
    T         => Wrapping<T>,
    T         => CoreMaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
    T: ?Sized => &'_ T,
    T: ?Sized => &'_ mut T,
);
// `KnownLayout` for arrays of any length.
impl_known_layout!(const N: usize, T => [T; N]);
1071
// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as `T`.
//
// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
//
//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
//   `T`
//
// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
//   `UnsafeCell<T>` has the same in-memory representation as its inner type
//   `T`.
//
// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
//
//   `Cell<T>` has the same in-memory representation as `T`.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
    // Each invocation below is justified by the corresponding representation
    // guarantee cited in the SAFETY comment above.
    unsafe_impl_known_layout!(
        #[repr([u8])]
        str
    );
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
};
1098
// SAFETY:
// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and
//   `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same:
//   - Fixed prefix size
//   - Alignment
//   - (For DSTs) trailing slice element size
// - By consequence of the above, the referents of `T::MaybeUninit` and `T`
//   require the same kind of pointer metadata, and thus it is valid to perform
//   an `as` cast from `*mut T` to `*mut T::MaybeUninit`, and this operation
//   preserves referent size (ie, `size_of_val_raw`).
const _: () = unsafe {
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>)
};
1112
// FIXME(#196, #2856): Eventually, we'll want to support enum variants and
// union fields being treated uniformly since they behave similarly to each
// other in terms of projecting validity – specifically, for a type `T` with
// validity `V`, if `T` is a struct type, then its fields straightforwardly also
// have validity `V`. By contrast, if `T` is an enum or union type, then
// validity is not straightforwardly recursive in this way.

/// Sentinel `VARIANT_ID` identifying a struct field in [`HasField`].
#[doc(hidden)]
pub const STRUCT_VARIANT_ID: i128 = -1;
/// Sentinel `VARIANT_ID` identifying a union field in [`HasField`].
#[doc(hidden)]
pub const UNION_VARIANT_ID: i128 = -2;
/// Sentinel `VARIANT_ID` for fields of `#[repr(C)]` unions (cf. [`HasField`]).
#[doc(hidden)]
pub const REPR_C_UNION_VARIANT_ID: i128 = -3;
1125
/// A type with a projectable tag: its enum tag, or `()` for non-enum types.
///
/// # Safety
///
/// `Self::ProjectToTag` must satisfy its safety invariant.
#[doc(hidden)]
pub unsafe trait HasTag {
    // Per its name, only the derive is expected to implement this method (and
    // thus this trait).
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type's enum tag, or `()` for non-enum types.
    type Tag: Immutable;

    /// A pointer projection from `Self` to its tag.
    ///
    /// # Safety
    ///
    /// It must be the case that, for all `slf: Ptr<'_, Self, I>`, it is sound
    /// to project from `slf` to `Ptr<'_, Self::Tag, I>` using this projection.
    type ProjectToTag: pointer::cast::Project<Self, Self::Tag>;
}
1146
/// Projects a given field from `Self`.
///
/// All implementations of `HasField` for a particular field `f` in `Self`
/// should use the same `Field` type; this ensures that `Field` is inferable
/// given an explicit `VARIANT_ID` and `FIELD_ID`.
///
/// # Safety
///
/// A field `f` is `HasField` for `Self` if and only if:
///
/// - If `Self` has the layout of a struct or union type, then `VARIANT_ID` is
///   `STRUCT_VARIANT_ID` or `UNION_VARIANT_ID` respectively; otherwise, if
///   `Self` has the layout of an enum type, `VARIANT_ID` is the numerical index
///   of the enum variant in which `f` appears. Note that `Self` does not need
///   to actually *be* such a type – it just needs to have the same layout as
///   such a type. For example, a `#[repr(transparent)]` wrapper around an enum
///   has the same layout as that enum.
/// - If `f` has name `n`, `FIELD_ID` is `zerocopy::ident_id!(n)`; otherwise,
///   if `f` is at index `i`, `FIELD_ID` is `zerocopy::ident_id!(i)`.
/// - `Field` is a type with the same visibility as `f`.
/// - `Type` has the same type as `f`.
///
/// The caller must **not** assume that a pointer's referent being aligned
/// implies that calling `project` on that pointer will result in a pointer to
/// an aligned referent. For example, `HasField` may be implemented for
/// `#[repr(packed)]` structs.
///
/// The implementation of `project` must satisfy its safety post-condition.
#[doc(hidden)]
pub unsafe trait HasField<Field, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasTag
{
    // Per its name, only the derive is expected to implement this method (and
    // thus this trait).
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type of the field.
    type Type: ?Sized;

    /// Projects from `slf` to the field.
    ///
    /// Users should generally not call `project` directly, and instead should
    /// use high-level APIs like [`PtrInner::project`] or [`Ptr::project`].
    ///
    /// # Safety
    ///
    /// The returned pointer refers to a non-strict subset of the bytes of
    /// `slf`'s referent, and has the same provenance as `slf`.
    #[must_use]
    fn project(slf: PtrInner<'_, Self>) -> *mut Self::Type;
}
1198
/// Projects a given field from `Self`.
///
/// Implementations of this trait encode the conditions under which a field can
/// be projected from a `Ptr<'_, Self, I>`, and how the invariants of that
/// [`Ptr`] (`I`) determine the invariants of pointers projected from it. In
/// other words, it is a type-level function over invariants; `I` goes in,
/// `Self::Invariants` comes out.
///
/// # Safety
///
/// `T: ProjectField<Field, I, VARIANT_ID, FIELD_ID>` if, for a
/// `ptr: Ptr<'_, T, I>` such that `T::is_projectable(ptr).is_ok()`,
/// `<T as HasField<Field, VARIANT_ID, FIELD_ID>>::project(ptr.as_inner())`
/// conforms to `T::Invariants`.
#[doc(hidden)]
pub unsafe trait ProjectField<Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasField<Field, VARIANT_ID, FIELD_ID>
where
    I: invariant::Invariants,
{
    // Per its name, only the derive is expected to implement this method (and
    // thus this trait).
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The invariants of the projected field pointer, with respect to the
    /// invariants, `I`, of the containing pointer. The aliasing dimension of
    /// the invariants is guaranteed to remain unchanged.
    type Invariants: invariant::Invariants<Aliasing = I::Aliasing>;

    /// The failure mode of projection. `()` if the projection is fallible,
    /// otherwise [`core::convert::Infallible`].
    type Error;

    /// Is the given field projectable from `ptr`?
    ///
    /// If a field with [`Self::Invariants`] is projectable from the referent,
    /// this function produces an `Ok(ptr)` from which the projection can be
    /// made; otherwise `Err`.
    ///
    /// This method must be overridden if the field's projectability depends on
    /// the value of the bytes in `ptr`.
    #[inline(always)]
    fn is_projectable<'a>(_ptr: Ptr<'a, Self::Tag, I>) -> Result<(), Self::Error> {
        // This default implementation is only valid for projections that are
        // infallible regardless of the referent's value. The helper trait
        // below computes, as a `const`, whether that holds for this
        // combination of `VARIANT_ID` and validity invariant; the
        // `const_assert!`s fail const evaluation otherwise, preventing this
        // default from being used where an override is required.
        trait IsInfallible {
            const IS_INFALLIBLE: bool;
        }

        struct Projection<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>(
            PhantomData<(Field, I, T)>,
        )
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants;

        impl<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128> IsInfallible
            for Projection<T, Field, I, VARIANT_ID, FIELD_ID>
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants,
        {
            const IS_INFALLIBLE: bool = {
                let is_infallible = match VARIANT_ID {
                    // For nondestructive projections of struct and union
                    // fields, the projected field's satisfaction of
                    // `Invariants` does not depend on the value of the
                    // referent. This default implementation of `is_projectable`
                    // is non-destructive, as it does not overwrite any part of
                    // the referent.
                    crate::STRUCT_VARIANT_ID | crate::UNION_VARIANT_ID => true,
                    _enum_variant => {
                        use crate::invariant::{Validity, ValidityKind};
                        match I::Validity::KIND {
                            // The `Uninit` and `Initialized` validity
                            // invariants do not depend on the enum's tag. In
                            // particular, we don't actually care about what
                            // variant is present – we can treat *any* range of
                            // uninitialized or initialized memory as containing
                            // an uninitialized or initialized instance of *any*
                            // type – the type itself is irrelevant.
                            ValidityKind::Uninit | ValidityKind::Initialized => true,
                            // The projectability of an enum field from an
                            // `AsInitialized` or `Valid` state is a dynamic
                            // property of its tag.
                            ValidityKind::AsInitialized | ValidityKind::Valid => false,
                        }
                    }
                };
                const_assert!(is_infallible);
                is_infallible
            };
        }

        const_assert!(
            <Projection<Self, Field, I, VARIANT_ID, FIELD_ID> as IsInfallible>::IS_INFALLIBLE
        );

        Ok(())
    }
}
1298
1299/// Analyzes whether a type is [`FromZeros`].
1300///
1301/// This derive analyzes, at compile time, whether the annotated type satisfies
1302/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
1303/// supertraits if it is sound to do so. This derive can be applied to structs,
1304/// enums, and unions; e.g.:
1305///
1306/// ```
1307/// # use zerocopy_derive::{FromZeros, Immutable};
1308/// #[derive(FromZeros)]
1309/// struct MyStruct {
1310/// # /*
1311///     ...
1312/// # */
1313/// }
1314///
1315/// #[derive(FromZeros)]
1316/// #[repr(u8)]
1317/// enum MyEnum {
1318/// #   Variant0,
1319/// # /*
1320///     ...
1321/// # */
1322/// }
1323///
1324/// #[derive(FromZeros, Immutable)]
1325/// union MyUnion {
1326/// #   variant: u8,
1327/// # /*
1328///     ...
1329/// # */
1330/// }
1331/// ```
1332///
1333/// [safety conditions]: trait@FromZeros#safety
1334///
1335/// # Analysis
1336///
1337/// *This section describes, roughly, the analysis performed by this derive to
1338/// determine whether it is sound to implement `FromZeros` for a given type.
1339/// Unless you are modifying the implementation of this derive, or attempting to
1340/// manually implement `FromZeros` for a type yourself, you don't need to read
1341/// this section.*
1342///
1343/// If a type has the following properties, then this derive can implement
1344/// `FromZeros` for that type:
1345///
1346/// - If the type is a struct, all of its fields must be `FromZeros`.
1347/// - If the type is an enum:
1348///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1349///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
1350///   - It must have a variant with a discriminant/tag of `0`, and its fields
///     must be `FromZeros`. See [the reference] for a description of how
///     discriminant values are specified.
1353///   - The fields of that variant must be `FromZeros`.
1354///
1355/// This analysis is subject to change. Unsafe code may *only* rely on the
1356/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
1357/// implementation details of this derive.
1358///
1359/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
1360///
1361/// ## Why isn't an explicit representation required for structs?
1362///
1363/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
1364/// that structs are marked with `#[repr(C)]`.
1365///
/// Per the [Rust reference][reference],
1367///
1368/// > The representation of a type can change the padding between fields, but
1369/// > does not change the layout of the fields themselves.
1370///
1371/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1372///
1373/// Since the layout of structs only consists of padding bytes and field bytes,
1374/// a struct is soundly `FromZeros` if:
1375/// 1. its padding is soundly `FromZeros`, and
1376/// 2. its fields are soundly `FromZeros`.
1377///
1378/// The answer to the first question is always yes: padding bytes do not have
1379/// any validity constraints. A [discussion] of this question in the Unsafe Code
1380/// Guidelines Working Group concluded that it would be virtually unimaginable
1381/// for future versions of rustc to add validity constraints to padding bytes.
1382///
1383/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1384///
1385/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
1386/// its fields are `FromZeros`.
1387// FIXME(#146): Document why we don't require an enum to have an explicit `repr`
1388// attribute.
1389#[cfg(any(feature = "derive", test))]
1390#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1391pub use zerocopy_derive::FromZeros;
1392/// Analyzes whether a type is [`Immutable`].
1393///
1394/// This derive analyzes, at compile time, whether the annotated type satisfies
1395/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
1396/// sound to do so. This derive can be applied to structs, enums, and unions;
1397/// e.g.:
1398///
1399/// ```
1400/// # use zerocopy_derive::Immutable;
1401/// #[derive(Immutable)]
1402/// struct MyStruct {
1403/// # /*
1404///     ...
1405/// # */
1406/// }
1407///
1408/// #[derive(Immutable)]
1409/// enum MyEnum {
1410/// #   Variant0,
1411/// # /*
1412///     ...
1413/// # */
1414/// }
1415///
1416/// #[derive(Immutable)]
1417/// union MyUnion {
1418/// #   variant: u8,
1419/// # /*
1420///     ...
1421/// # */
1422/// }
1423/// ```
1424///
1425/// # Analysis
1426///
1427/// *This section describes, roughly, the analysis performed by this derive to
1428/// determine whether it is sound to implement `Immutable` for a given type.
1429/// Unless you are modifying the implementation of this derive, you don't need
1430/// to read this section.*
1431///
1432/// If a type has the following properties, then this derive can implement
1433/// `Immutable` for that type:
1434///
1435/// - All fields must be `Immutable`.
1436///
1437/// This analysis is subject to change. Unsafe code may *only* rely on the
1438/// documented [safety conditions] of `Immutable`, and must *not* rely on the
1439/// implementation details of this derive.
1440///
1441/// [safety conditions]: trait@Immutable#safety
1442#[cfg(any(feature = "derive", test))]
1443#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1444pub use zerocopy_derive::Immutable;
1445
1446/// Types which are free from interior mutability.
1447///
1448/// `T: Immutable` indicates that `T` does not permit interior mutation, except
1449/// by ownership or an exclusive (`&mut`) borrow.
1450///
1451/// # Implementation
1452///
1453/// **Do not implement this trait yourself!** Instead, use
1454/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
1455/// e.g.:
1456///
1457/// ```
1458/// # use zerocopy_derive::Immutable;
1459/// #[derive(Immutable)]
1460/// struct MyStruct {
1461/// # /*
1462///     ...
1463/// # */
1464/// }
1465///
1466/// #[derive(Immutable)]
1467/// enum MyEnum {
1468/// # /*
1469///     ...
1470/// # */
1471/// }
1472///
1473/// #[derive(Immutable)]
1474/// union MyUnion {
1475/// #   variant: u8,
1476/// # /*
1477///     ...
1478/// # */
1479/// }
1480/// ```
1481///
1482/// This derive performs a sophisticated, compile-time safety analysis to
1483/// determine whether a type is `Immutable`.
1484///
1485/// # Safety
1486///
1487/// Unsafe code outside of this crate must not make any assumptions about `T`
1488/// based on `T: Immutable`. We reserve the right to relax the requirements for
1489/// `Immutable` in the future, and if unsafe code outside of this crate makes
1490/// assumptions based on `T: Immutable`, future relaxations may cause that code
1491/// to become unsound.
1492///
1493// # Safety (Internal)
1494//
1495// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
1496// `t: &T`, `t` does not permit interior mutation of its referent. Because
1497// [`UnsafeCell`] is the only type which permits interior mutation, it is
1498// sufficient (though not necessary) to guarantee that `T` contains no
1499// `UnsafeCell`s.
1500//
1501// [`UnsafeCell`]: core::cell::UnsafeCell
1502#[cfg_attr(
1503    feature = "derive",
1504    doc = "[derive]: zerocopy_derive::Immutable",
1505    doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
1506)]
1507#[cfg_attr(
1508    not(feature = "derive"),
1509    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
1510    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
1511)]
1512#[cfg_attr(
1513    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1514    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
1515)]
pub unsafe trait Immutable {
    // The `Self: Sized` bound makes it so that `Immutable` is still object
    // safe.
    //
    // Per its name, only `#[derive(Immutable)]` is expected to implement this
    // method (and thus this trait); see the trait-level docs above.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
1524
1525/// Implements [`TryFromBytes`].
1526///
1527/// This derive synthesizes the runtime checks required to check whether a
1528/// sequence of initialized bytes corresponds to a valid instance of a type.
1529/// This derive can be applied to structs, enums, and unions; e.g.:
1530///
1531/// ```
1532/// # use zerocopy_derive::{TryFromBytes, Immutable};
1533/// #[derive(TryFromBytes)]
1534/// struct MyStruct {
1535/// # /*
1536///     ...
1537/// # */
1538/// }
1539///
1540/// #[derive(TryFromBytes)]
1541/// #[repr(u8)]
1542/// enum MyEnum {
1543/// #   V00,
1544/// # /*
1545///     ...
1546/// # */
1547/// }
1548///
1549/// #[derive(TryFromBytes, Immutable)]
1550/// union MyUnion {
1551/// #   variant: u8,
1552/// # /*
1553///     ...
1554/// # */
1555/// }
1556/// ```
1557///
1558/// # Portability
1559///
1560/// To ensure consistent endianness for enums with multi-byte representations,
1561/// explicitly specify and convert each discriminant using `.to_le()` or
1562/// `.to_be()`; e.g.:
1563///
1564/// ```
1565/// # use zerocopy_derive::TryFromBytes;
1566/// // `DataStoreVersion` is encoded in little-endian.
1567/// #[derive(TryFromBytes)]
1568/// #[repr(u32)]
1569/// pub enum DataStoreVersion {
1570///     /// Version 1 of the data store.
1571///     V1 = 9u32.to_le(),
1572///
1573///     /// Version 2 of the data store.
1574///     V2 = 10u32.to_le(),
1575/// }
1576/// ```
1577///
1578/// [safety conditions]: trait@TryFromBytes#safety
1579#[cfg(any(feature = "derive", test))]
1580#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1581pub use zerocopy_derive::TryFromBytes;
1582
1583/// Types for which some bit patterns are valid.
1584///
1585/// A memory region of the appropriate length which contains initialized bytes
1586/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1587/// bytes corresponds to a [*valid instance*] of that type. For example,
1588/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1589/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1590/// `1`.
1591///
1592/// # Implementation
1593///
1594/// **Do not implement this trait yourself!** Instead, use
1595/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1596///
1597/// ```
1598/// # use zerocopy_derive::{TryFromBytes, Immutable};
1599/// #[derive(TryFromBytes)]
1600/// struct MyStruct {
1601/// # /*
1602///     ...
1603/// # */
1604/// }
1605///
1606/// #[derive(TryFromBytes)]
1607/// #[repr(u8)]
1608/// enum MyEnum {
1609/// #   V00,
1610/// # /*
1611///     ...
1612/// # */
1613/// }
1614///
1615/// #[derive(TryFromBytes, Immutable)]
1616/// union MyUnion {
1617/// #   variant: u8,
1618/// # /*
1619///     ...
1620/// # */
1621/// }
1622/// ```
1623///
1624/// This derive ensures that the runtime check of whether bytes correspond to a
1625/// valid instance is sound. You **must** implement this trait via the derive.
1626///
1627/// # What is a "valid instance"?
1628///
1629/// In Rust, each type has *bit validity*, which refers to the set of bit
1630/// patterns which may appear in an instance of that type. It is impossible for
/// safe Rust code to produce values which violate bit validity (i.e., values
1632/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1633/// invalid value, this is considered [undefined behavior].
1634///
1635/// Rust's bit validity rules are currently being decided, which means that some
1636/// types have three classes of bit patterns: those which are definitely valid,
1637/// and whose validity is documented in the language; those which may or may not
1638/// be considered valid at some point in the future; and those which are
1639/// definitely invalid.
1640///
1641/// Zerocopy takes a conservative approach, and only considers a bit pattern to
1642/// be valid if its validity is a documented guarantee provided by the
1643/// language.
1644///
1645/// For most use cases, Rust's current guarantees align with programmers'
1646/// intuitions about what ought to be valid. As a result, zerocopy's
1647/// conservatism should not affect most users.
1648///
1649/// If you are negatively affected by lack of support for a particular type,
1650/// we encourage you to let us know by [filing an issue][github-repo].
1651///
1652/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1653///
1654/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1655/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1656/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1657/// IntoBytes`, there exist values of `t: T` such that
1658/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()) == None`. Code should not
1659/// generally assume that values produced by `IntoBytes` will necessarily be
1660/// accepted as valid by `TryFromBytes`.
1661///
1662/// # Safety
1663///
1664/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1665/// or representation of `T`. It merely provides the ability to perform a
1666/// validity check at runtime via methods like [`try_ref_from_bytes`].
1667///
1668/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1669/// Future releases of zerocopy may make backwards-breaking changes to these
1670/// items, including changes that only affect soundness, which may cause code
1671/// which uses those items to silently become unsound.
1672///
1673/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1674/// [github-repo]: https://github.com/google/zerocopy
1675/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1676/// [*valid instance*]: #what-is-a-valid-instance
1677#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1678#[cfg_attr(
1679    not(feature = "derive"),
1680    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1681)]
1682#[cfg_attr(
1683    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1684    diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1685)]
1686pub unsafe trait TryFromBytes {
    // Implemented only by `#[derive(TryFromBytes)]`; its existence is what
    // gates safe implementations of this `unsafe` trait behind the derive.
    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
1693
    /// Does a given memory range contain a valid instance of `Self`?
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
    /// `*candidate` contains a valid `Self`.
    ///
    /// # Panics
    ///
    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
    /// `unsafe` code remains sound even in the face of `is_bit_valid`
    /// panicking. (We support user-defined validation routines; so long as
    /// these routines are not required to be `unsafe`, there is no way to
    /// ensure that these do not generate panics.)
    ///
    /// Besides user-defined validation routines panicking, `is_bit_valid` will
    /// either panic or fail to compile if called on a pointer with [`Shared`]
    /// aliasing when `Self: !Immutable`.
    ///
    /// [`Shared`]: invariant::Shared
    #[doc(hidden)]
    fn is_bit_valid<A>(candidate: Maybe<'_, Self, A>) -> bool
    where
        A: invariant::Alignment;
1719
    /// Attempts to interpret the given `source` as a `&Self`.
    ///
    /// If the bytes of `source` are a valid instance of `Self`, this method
    /// returns a reference to those bytes interpreted as a `Self`. If the
    /// length of `source` is not a [valid size of `Self`][valid-size], or if
    /// `source` is not appropriately aligned, or if `source` is not a valid
    /// instance of `Self`, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the byte sequence `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
    ///
    /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
    /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
    /// ```
    ///
    #[doc = codegen_header!("try_ref_from_bytes")]
    ///
    /// This abstraction for reinterpreting a buffer of bytes as a structured
    /// type is safe and cheap, but does not necessarily have zero runtime cost.
    /// The below code generation benchmark exercises this routine on a
    /// destination type whose complex layout places complex requirements on the
    /// source:
    ///
    /// - the source must begin at an even memory address
    /// - the source has a minimum length of 4 bytes
    /// - the source has a total length divisible by 2
    /// - the source begins with the bytes `0xC0C0`
    ///
    /// These conditions must all be checked at runtime in this example, but the
    /// codegen you experience in practice will depend on optimization level,
    /// the layout of the destination type, and what the compiler can prove
    /// about the source.
    #[doc = codegen_tabs!(format = "coco", bench = "try_ref_from_bytes")]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Size- and alignment-checked cast of the entire slice (no leftover
        // bytes permitted); bit validity is checked separately below.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                match source.try_into_valid() {
                    Ok(valid) => Ok(valid.as_ref()),
                    Err(e) => {
                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
                    }
                }
            }
            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
        }
    }
1834
    /// Attempts to interpret the prefix of the given `source` as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`. If that prefix is a valid
    /// instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the remaining bytes. If there
    /// are insufficient bytes, or if `source` is not appropriately aligned, or
    /// if those bytes are not a valid instance of `Self`, this returns `Err`.
    /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    /// assert_eq!(suffix, &[6u8][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
    /// ```
    ///
    #[doc = codegen_header!("try_ref_from_prefix")]
    ///
    /// This abstraction for reinterpreting a buffer of bytes as a structured
    /// type is safe and cheap, but does not necessarily have zero runtime cost.
    /// The below code generation benchmark exercises this routine on a
    /// destination type whose complex layout places complex requirements on the
    /// source:
    ///
    /// - the source must begin at an even memory address
    /// - the source has a minimum length of 4 bytes
    /// - the source begins with the bytes `0xC0C0`
    ///
    /// These conditions must all be checked at runtime in this example, but the
    /// codegen you experience in practice will depend on optimization level,
    /// the layout of the destination type, and what the compiler can prove
    /// about the source.
    #[doc = codegen_tabs!(format = "coco", bench = "try_ref_from_prefix")]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared prefix/suffix helper; `None` requests the
        // largest element count that fits (see the method docs above).
        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
    }
1938
    /// Attempts to interpret the suffix of the given `source` as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`. If that suffix is a
    /// valid instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the preceding bytes. If there
    /// are insufficient bytes, or if the suffix of `source` would not be
    /// appropriately aligned, or if the suffix is not a valid instance of
    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
    /// can [infallibly discard the alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
    /// assert_eq!(prefix, &[0u8][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
    /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
    /// ```
    ///
    #[doc = codegen_header!("try_ref_from_suffix")]
    ///
    /// This abstraction for reinterpreting a buffer of bytes as a structured
    /// type is safe and cheap, but does not necessarily have zero runtime cost.
    /// The below code generation benchmark exercises this routine on a
    /// destination type whose complex layout places complex requirements on the
    /// source's largest [valid-size] suffix for `Self`, which must:
    ///
    /// - begin at an even memory address
    /// - have a minimum length of 4 bytes
    /// - begin with the bytes `0xC0C0`
    ///
    /// These conditions must all be checked at runtime in this example, but the
    /// codegen you experience in practice will depend on optimization level,
    /// the layout of the destination type, and what the compiler can prove
    /// about the source.
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    #[doc = codegen_tabs!(format = "coco", bench = "try_ref_from_suffix")]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // The helper yields `(&Self, remainder)`; `swap` reorders the pair so
        // the untouched prefix comes first, matching this method's return type.
        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
2044
2045    /// Attempts to interpret the given `source` as a `&mut Self` without
2046    /// copying.
2047    ///
2048    /// If the bytes of `source` are a valid instance of `Self`, this method
2049    /// returns a reference to those bytes interpreted as a `Self`. If the
2050    /// length of `source` is not a [valid size of `Self`][valid-size], or if
2051    /// `source` is not appropriately aligned, or if `source` is not a valid
2052    /// instance of `Self`, this returns `Err`. If [`Self:
2053    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2054    /// error][ConvertError::from].
2055    ///
2056    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2057    ///
2058    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2059    /// [self-unaligned]: Unaligned
2060    /// [slice-dst]: KnownLayout#dynamically-sized-types
2061    ///
2062    /// # Compile-Time Assertions
2063    ///
2064    /// This method cannot yet be used on unsized types whose dynamically-sized
2065    /// component is zero-sized. Attempting to use this method on such types
2066    /// results in a compile-time assertion error; e.g.:
2067    ///
2068    /// ```compile_fail,E0080
2069    /// use zerocopy::*;
2070    /// # use zerocopy_derive::*;
2071    ///
2072    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2073    /// #[repr(C, packed)]
2074    /// struct ZSTy {
2075    ///     leading_sized: [u8; 2],
2076    ///     trailing_dst: [()],
2077    /// }
2078    ///
2079    /// let mut source = [85, 85];
2080    /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // âš  Compile Error!
2081    /// ```
2082    ///
2083    /// # Examples
2084    ///
2085    /// ```
2086    /// use zerocopy::TryFromBytes;
2087    /// # use zerocopy_derive::*;
2088    ///
2089    /// // The only valid value of this type is the byte `0xC0`
2090    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2091    /// #[repr(u8)]
2092    /// enum C0 { xC0 = 0xC0 }
2093    ///
2094    /// // The only valid value of this type is the bytes `0xC0C0`.
2095    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2096    /// #[repr(C)]
2097    /// struct C0C0(C0, C0);
2098    ///
2099    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2100    /// #[repr(C, packed)]
2101    /// struct Packet {
2102    ///     magic_number: C0C0,
2103    ///     mug_size: u8,
2104    ///     temperature: u8,
2105    ///     marshmallows: [[u8; 2]],
2106    /// }
2107    ///
2108    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
2109    ///
2110    /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
2111    ///
2112    /// assert_eq!(packet.mug_size, 240);
2113    /// assert_eq!(packet.temperature, 77);
2114    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
2115    ///
2116    /// packet.temperature = 111;
2117    ///
2118    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
2119    ///
2120    /// // These bytes are not valid instance of `Packet`.
2121    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2122    /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
2123    /// ```
2124    ///
2125    #[doc = codegen_header!("try_mut_from_bytes")]
2126    ///
2127    /// See [`TryFromBytes::try_ref_from_bytes`](#method.try_ref_from_bytes.codegen).
2128    #[must_use = "has no side effects"]
2129    #[inline]
2130    fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2131    where
2132        Self: KnownLayout + IntoBytes,
2133    {
2134        static_assert_dst_is_not_zst!(Self);
2135        match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
2136            Ok(source) => {
2137                // This call may panic. If that happens, it doesn't cause any soundness
2138                // issues, as we have not generated any invalid state which we need to
2139                // fix before returning.
2140                match source.try_into_valid() {
2141                    Ok(source) => Ok(source.as_mut()),
2142                    Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
2143                }
2144            }
2145            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2146        }
2147    }
2148
    /// Attempts to interpret the prefix of the given `source` as a `&mut
    /// Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`. If that prefix is a valid
    /// instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the remaining bytes. If there
    /// are insufficient bytes, or if `source` is not appropriately aligned, or
    /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    /// assert_eq!(suffix, &[6u8][..]);
    ///
    /// packet.temperature = 111;
    /// suffix[0] = 222;
    ///
    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
    /// ```
    ///
    #[doc = codegen_header!("try_mut_from_prefix")]
    ///
    /// See [`TryFromBytes::try_ref_from_prefix`](#method.try_ref_from_prefix.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared prefix/suffix helper; `None` requests the
        // largest element count that fits (see the method docs above).
        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
    }
2247
    /// Attempts to interpret the suffix of the given `source` as a `&mut
    /// Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`. If that suffix is a
    /// valid instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the preceding bytes. If there
    /// are insufficient bytes, or if the suffix of `source` would not be
    /// appropriately aligned, or if the suffix is not a valid instance of
    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
    /// can [infallibly discard the alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
    /// assert_eq!(prefix, &[0u8][..]);
    ///
    /// prefix[0] = 111;
    /// packet.temperature = 222;
    ///
    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
    /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
    /// ```
    ///
    #[doc = codegen_header!("try_mut_from_suffix")]
    ///
    /// See [`TryFromBytes::try_ref_from_suffix`](#method.try_ref_from_suffix.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        static_assert_dst_is_not_zst!(Self);
        // The helper yields `(&mut Self, remainder)`; `swap` reorders the pair
        // so the untouched prefix comes first, matching the return type.
        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
2346
2347    /// Attempts to interpret the given `source` as a `&Self` with a DST length
2348    /// equal to `count`.
2349    ///
2350    /// This method attempts to return a reference to `source` interpreted as a
2351    /// `Self` with `count` trailing elements. If the length of `source` is not
2352    /// equal to the size of `Self` with `count` elements, if `source` is not
2353    /// appropriately aligned, or if `source` does not contain a valid instance
2354    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2355    /// you can [infallibly discard the alignment error][ConvertError::from].
2356    ///
2357    /// [self-unaligned]: Unaligned
2358    /// [slice-dst]: KnownLayout#dynamically-sized-types
2359    ///
2360    /// # Examples
2361    ///
2362    /// ```
2363    /// # #![allow(non_camel_case_types)] // For C0::xC0
2364    /// use zerocopy::TryFromBytes;
2365    /// # use zerocopy_derive::*;
2366    ///
2367    /// // The only valid value of this type is the byte `0xC0`
2368    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2369    /// #[repr(u8)]
2370    /// enum C0 { xC0 = 0xC0 }
2371    ///
2372    /// // The only valid value of this type is the bytes `0xC0C0`.
2373    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2374    /// #[repr(C)]
2375    /// struct C0C0(C0, C0);
2376    ///
2377    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2378    /// #[repr(C)]
2379    /// struct Packet {
2380    ///     magic_number: C0C0,
2381    ///     mug_size: u8,
2382    ///     temperature: u8,
2383    ///     marshmallows: [[u8; 2]],
2384    /// }
2385    ///
2386    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2387    ///
2388    /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
2389    ///
2390    /// assert_eq!(packet.mug_size, 240);
2391    /// assert_eq!(packet.temperature, 77);
2392    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2393    ///
2394    /// // These bytes are not a valid instance of `Packet`.
2395    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2396    /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
2397    /// ```
2398    ///
2399    /// Since an explicit `count` is provided, this method supports types with
2400    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
2401    /// which do not take an explicit count do not support such types.
2402    ///
2403    /// ```
2404    /// use core::num::NonZeroU16;
2405    /// use zerocopy::*;
2406    /// # use zerocopy_derive::*;
2407    ///
2408    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2409    /// #[repr(C)]
2410    /// struct ZSTy {
2411    ///     leading_sized: NonZeroU16,
2412    ///     trailing_dst: [()],
2413    /// }
2414    ///
2415    /// let src = 0xCAFEu16.as_bytes();
2416    /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
2417    /// assert_eq!(zsty.trailing_dst.len(), 42);
2418    /// ```
2419    ///
2420    /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
2421    ///
2422    #[doc = codegen_header!("try_ref_from_bytes_with_elems")]
2423    ///
2424    /// This abstraction for reinterpreting a buffer of bytes as a structured
2425    /// type is safe and cheap, but does not necessarily have zero runtime cost.
2426    /// The below code generation benchmark exercises this routine on a
2427    /// destination type whose complex layout places complex requirements on the
2428    /// source:
2429    ///
2430    /// - the source must begin at an even memory address
2431    /// - the source has a total length that exactly fits a `Self` with a
2432    ///   trailing slice length of `elems`
2433    /// - the source begins with the bytes `0xC0C0`
2434    ///
2435    /// These conditions must all be checked at runtime in this example, but the
2436    /// codegen you experience in practice will depend on optimization level,
2437    /// the layout of the destination type, and what the compiler can prove
2438    /// about the source.
2439    ///
2440    #[doc = codegen_tabs!(format = "coco", bench = "try_ref_from_bytes_with_elems")]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Attempt to reinterpret all of `source` as a `Self` with exactly
        // `count` trailing elements ("no leftover" bytes); size and alignment
        // mismatches are reported here, before any bit-validity checking.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
        {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_ref()),
                    Err(e) => {
                        // Validity failure: map the error's source pointer back
                        // to the original byte slice before surfacing it.
                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
                    }
                }
            }
            // Size/alignment failure: surface the original byte slice in the
            // returned error.
            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
        }
    }
2466
2467    /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2468    /// a DST length equal to `count`.
2469    ///
2470    /// This method attempts to return a reference to the prefix of `source`
2471    /// interpreted as a `Self` with `count` trailing elements, and a reference
2472    /// to the remaining bytes. If the length of `source` is less than the size
2473    /// of `Self` with `count` elements, if `source` is not appropriately
2474    /// aligned, or if the prefix of `source` does not contain a valid instance
2475    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2476    /// you can [infallibly discard the alignment error][ConvertError::from].
2477    ///
2478    /// [self-unaligned]: Unaligned
2479    /// [slice-dst]: KnownLayout#dynamically-sized-types
2480    ///
2481    /// # Examples
2482    ///
2483    /// ```
2484    /// # #![allow(non_camel_case_types)] // For C0::xC0
2485    /// use zerocopy::TryFromBytes;
2486    /// # use zerocopy_derive::*;
2487    ///
2488    /// // The only valid value of this type is the byte `0xC0`
2489    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2490    /// #[repr(u8)]
2491    /// enum C0 { xC0 = 0xC0 }
2492    ///
2493    /// // The only valid value of this type is the bytes `0xC0C0`.
2494    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2495    /// #[repr(C)]
2496    /// struct C0C0(C0, C0);
2497    ///
2498    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2499    /// #[repr(C)]
2500    /// struct Packet {
2501    ///     magic_number: C0C0,
2502    ///     mug_size: u8,
2503    ///     temperature: u8,
2504    ///     marshmallows: [[u8; 2]],
2505    /// }
2506    ///
2507    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2508    ///
2509    /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2510    ///
2511    /// assert_eq!(packet.mug_size, 240);
2512    /// assert_eq!(packet.temperature, 77);
2513    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2514    /// assert_eq!(suffix, &[8u8][..]);
2515    ///
2516    /// // These bytes are not a valid instance of `Packet`.
2517    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2518    /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2519    /// ```
2520    ///
2521    /// Since an explicit `count` is provided, this method supports types with
2522    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2523    /// which do not take an explicit count do not support such types.
2524    ///
2525    /// ```
2526    /// use core::num::NonZeroU16;
2527    /// use zerocopy::*;
2528    /// # use zerocopy_derive::*;
2529    ///
2530    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2531    /// #[repr(C)]
2532    /// struct ZSTy {
2533    ///     leading_sized: NonZeroU16,
2534    ///     trailing_dst: [()],
2535    /// }
2536    ///
2537    /// let src = 0xCAFEu16.as_bytes();
2538    /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2539    /// assert_eq!(zsty.trailing_dst.len(), 42);
2540    /// ```
2541    ///
2542    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2543    ///
2544    #[doc = codegen_header!("try_ref_from_prefix_with_elems")]
2545    ///
2546    /// This abstraction for reinterpreting a buffer of bytes as a structured
2547    /// type is safe and cheap, but does not necessarily have zero runtime cost.
2548    /// The below code generation benchmark exercises this routine on a
2549    /// destination type whose complex layout places complex requirements on the
2550    /// source:
2551    ///
2552    /// - the source must begin at an even memory address
2553    /// - the source has a prefix that fits a `Self` with a trailing slice
2554    ///   length of `count`
2555    /// - the source begins with the bytes `0xC0C0`
2556    ///
2557    /// These conditions must all be checked at runtime in this example, but the
2558    /// codegen you experience in practice will depend on optimization level,
2559    /// the layout of the destination type, and what the compiler can prove
2560    /// about the source.
2561    #[doc = codegen_tabs!(format = "coco", bench = "try_ref_from_prefix_with_elems")]
2562    #[must_use = "has no side effects"]
2563    #[inline]
2564    fn try_ref_from_prefix_with_elems(
2565        source: &[u8],
2566        count: usize,
2567    ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
2568    where
2569        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2570    {
2571        try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
2572    }
2573
2574    /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2575    /// a DST length equal to `count`.
2576    ///
2577    /// This method attempts to return a reference to the suffix of `source`
2578    /// interpreted as a `Self` with `count` trailing elements, and a reference
2579    /// to the preceding bytes. If the length of `source` is less than the size
2580    /// of `Self` with `count` elements, if the suffix of `source` is not
2581    /// appropriately aligned, or if the suffix of `source` does not contain a
2582    /// valid instance of `Self`, this returns `Err`. If [`Self:
2583    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2584    /// error][ConvertError::from].
2585    ///
2586    /// [self-unaligned]: Unaligned
2587    /// [slice-dst]: KnownLayout#dynamically-sized-types
2588    ///
2589    /// # Examples
2590    ///
2591    /// ```
2592    /// # #![allow(non_camel_case_types)] // For C0::xC0
2593    /// use zerocopy::TryFromBytes;
2594    /// # use zerocopy_derive::*;
2595    ///
2596    /// // The only valid value of this type is the byte `0xC0`
2597    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2598    /// #[repr(u8)]
2599    /// enum C0 { xC0 = 0xC0 }
2600    ///
2601    /// // The only valid value of this type is the bytes `0xC0C0`.
2602    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2603    /// #[repr(C)]
2604    /// struct C0C0(C0, C0);
2605    ///
2606    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2607    /// #[repr(C)]
2608    /// struct Packet {
2609    ///     magic_number: C0C0,
2610    ///     mug_size: u8,
2611    ///     temperature: u8,
2612    ///     marshmallows: [[u8; 2]],
2613    /// }
2614    ///
2615    /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2616    ///
2617    /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2618    ///
2619    /// assert_eq!(packet.mug_size, 240);
2620    /// assert_eq!(packet.temperature, 77);
2621    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2622    /// assert_eq!(prefix, &[123u8][..]);
2623    ///
2624    /// // These bytes are not a valid instance of `Packet`.
2625    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2626    /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2627    /// ```
2628    ///
2629    /// Since an explicit `count` is provided, this method supports types with
2630    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_suffix`]
2631    /// which do not take an explicit count do not support such types.
2632    ///
2633    /// ```
2634    /// use core::num::NonZeroU16;
2635    /// use zerocopy::*;
2636    /// # use zerocopy_derive::*;
2637    ///
2638    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2639    /// #[repr(C)]
2640    /// struct ZSTy {
2641    ///     leading_sized: NonZeroU16,
2642    ///     trailing_dst: [()],
2643    /// }
2644    ///
2645    /// let src = 0xCAFEu16.as_bytes();
2646    /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
2647    /// assert_eq!(zsty.trailing_dst.len(), 42);
2648    /// ```
2649    ///
2650    /// [`try_ref_from_suffix`]: TryFromBytes::try_ref_from_suffix
2651    ///
2652    #[doc = codegen_header!("try_ref_from_suffix_with_elems")]
2653    ///
2654    /// This abstraction for reinterpreting a buffer of bytes as a structured
2655    /// type is safe and cheap, but does not necessarily have zero runtime cost.
2656    /// The below code generation benchmark exercises this routine on a
2657    /// destination type whose complex layout places complex requirements on the
2658    /// [valid-size] suffix for a `Self` of trailing slice length `count`, which
2659    /// must:
2660    ///
2661    /// - begin at an even memory address
2662    /// - have a minimum length of 4 bytes
2663    /// - begin with the bytes `0xC0C0`
2664    ///
2665    /// These conditions must all be checked at runtime in this example, but the
2666    /// codegen you experience in practice will depend on optimization level,
2667    /// the layout of the destination type, and what the compiler can prove
2668    /// about the source.
2669    ///
2670    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2671    #[doc = codegen_tabs!(format = "coco", bench = "try_ref_from_suffix_with_elems")]
2672    #[must_use = "has no side effects"]
2673    #[inline]
2674    fn try_ref_from_suffix_with_elems(
2675        source: &[u8],
2676        count: usize,
2677    ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
2678    where
2679        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2680    {
2681        try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2682    }
2683
2684    /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2685    /// length equal to `count`.
2686    ///
2687    /// This method attempts to return a reference to `source` interpreted as a
2688    /// `Self` with `count` trailing elements. If the length of `source` is not
2689    /// equal to the size of `Self` with `count` elements, if `source` is not
2690    /// appropriately aligned, or if `source` does not contain a valid instance
2691    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2692    /// you can [infallibly discard the alignment error][ConvertError::from].
2693    ///
2694    /// [self-unaligned]: Unaligned
2695    /// [slice-dst]: KnownLayout#dynamically-sized-types
2696    ///
2697    /// # Examples
2698    ///
2699    /// ```
2700    /// # #![allow(non_camel_case_types)] // For C0::xC0
2701    /// use zerocopy::TryFromBytes;
2702    /// # use zerocopy_derive::*;
2703    ///
2704    /// // The only valid value of this type is the byte `0xC0`
2705    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2706    /// #[repr(u8)]
2707    /// enum C0 { xC0 = 0xC0 }
2708    ///
2709    /// // The only valid value of this type is the bytes `0xC0C0`.
2710    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2711    /// #[repr(C)]
2712    /// struct C0C0(C0, C0);
2713    ///
2714    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2715    /// #[repr(C, packed)]
2716    /// struct Packet {
2717    ///     magic_number: C0C0,
2718    ///     mug_size: u8,
2719    ///     temperature: u8,
2720    ///     marshmallows: [[u8; 2]],
2721    /// }
2722    ///
2723    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2724    ///
2725    /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2726    ///
2727    /// assert_eq!(packet.mug_size, 240);
2728    /// assert_eq!(packet.temperature, 77);
2729    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2730    ///
2731    /// packet.temperature = 111;
2732    ///
2733    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2734    ///
2735    /// // These bytes are not a valid instance of `Packet`.
2736    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2737    /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2738    /// ```
2739    ///
2740    /// Since an explicit `count` is provided, this method supports types with
2741    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2742    /// which do not take an explicit count do not support such types.
2743    ///
2744    /// ```
2745    /// use core::num::NonZeroU16;
2746    /// use zerocopy::*;
2747    /// # use zerocopy_derive::*;
2748    ///
2749    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2750    /// #[repr(C, packed)]
2751    /// struct ZSTy {
2752    ///     leading_sized: NonZeroU16,
2753    ///     trailing_dst: [()],
2754    /// }
2755    ///
2756    /// let mut src = 0xCAFEu16;
2757    /// let src = src.as_mut_bytes();
2758    /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2759    /// assert_eq!(zsty.trailing_dst.len(), 42);
2760    /// ```
2761    ///
2762    /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
2763    ///  
2764    #[doc = codegen_header!("try_mut_from_bytes_with_elems")]
2765    ///
2766    /// See [`TryFromBytes::try_ref_from_bytes_with_elems`](#method.try_ref_from_bytes_with_elems.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
    {
        // Attempt to reinterpret all of `source` as a `Self` with exactly
        // `count` trailing elements ("no leftover" bytes); size and alignment
        // mismatches are reported here, before any bit-validity checking.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
        {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_mut()),
                    // Validity failure: map the error's source pointer back to
                    // the original byte slice before surfacing it.
                    Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
                }
            }
            // Size/alignment failure: surface the original byte slice in the
            // returned error.
            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
        }
    }
2790
2791    /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2792    /// with a DST length equal to `count`.
2793    ///
2794    /// This method attempts to return a reference to the prefix of `source`
2795    /// interpreted as a `Self` with `count` trailing elements, and a reference
2796    /// to the remaining bytes. If the length of `source` is less than the size
2797    /// of `Self` with `count` elements, if `source` is not appropriately
2798    /// aligned, or if the prefix of `source` does not contain a valid instance
2799    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2800    /// you can [infallibly discard the alignment error][ConvertError::from].
2801    ///
2802    /// [self-unaligned]: Unaligned
2803    /// [slice-dst]: KnownLayout#dynamically-sized-types
2804    ///
2805    /// # Examples
2806    ///
2807    /// ```
2808    /// # #![allow(non_camel_case_types)] // For C0::xC0
2809    /// use zerocopy::TryFromBytes;
2810    /// # use zerocopy_derive::*;
2811    ///
2812    /// // The only valid value of this type is the byte `0xC0`
2813    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2814    /// #[repr(u8)]
2815    /// enum C0 { xC0 = 0xC0 }
2816    ///
2817    /// // The only valid value of this type is the bytes `0xC0C0`.
2818    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2819    /// #[repr(C)]
2820    /// struct C0C0(C0, C0);
2821    ///
2822    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2823    /// #[repr(C, packed)]
2824    /// struct Packet {
2825    ///     magic_number: C0C0,
2826    ///     mug_size: u8,
2827    ///     temperature: u8,
2828    ///     marshmallows: [[u8; 2]],
2829    /// }
2830    ///
2831    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2832    ///
2833    /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2834    ///
2835    /// assert_eq!(packet.mug_size, 240);
2836    /// assert_eq!(packet.temperature, 77);
2837    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2838    /// assert_eq!(suffix, &[8u8][..]);
2839    ///
2840    /// packet.temperature = 111;
2841    /// suffix[0] = 222;
2842    ///
2843    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2844    ///
2845    /// // These bytes are not a valid instance of `Packet`.
2846    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2847    /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2848    /// ```
2849    ///
2850    /// Since an explicit `count` is provided, this method supports types with
2851    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2852    /// which do not take an explicit count do not support such types.
2853    ///
2854    /// ```
2855    /// use core::num::NonZeroU16;
2856    /// use zerocopy::*;
2857    /// # use zerocopy_derive::*;
2858    ///
2859    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2860    /// #[repr(C, packed)]
2861    /// struct ZSTy {
2862    ///     leading_sized: NonZeroU16,
2863    ///     trailing_dst: [()],
2864    /// }
2865    ///
2866    /// let mut src = 0xCAFEu16;
2867    /// let src = src.as_mut_bytes();
2868    /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2869    /// assert_eq!(zsty.trailing_dst.len(), 42);
2870    /// ```
2871    ///
2872    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2873    ///
2874    #[doc = codegen_header!("try_mut_from_prefix_with_elems")]
2875    ///
2876    /// See [`TryFromBytes::try_ref_from_prefix_with_elems`](#method.try_ref_from_prefix_with_elems.codegen).
2877    #[must_use = "has no side effects"]
2878    #[inline]
2879    fn try_mut_from_prefix_with_elems(
2880        source: &mut [u8],
2881        count: usize,
2882    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
2883    where
2884        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2885    {
2886        try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
2887    }
2888
2889    /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2890    /// with a DST length equal to `count`.
2891    ///
2892    /// This method attempts to return a reference to the suffix of `source`
2893    /// interpreted as a `Self` with `count` trailing elements, and a reference
2894    /// to the preceding bytes. If the length of `source` is less than the size
2895    /// of `Self` with `count` elements, if the suffix of `source` is not
2896    /// appropriately aligned, or if the suffix of `source` does not contain a
2897    /// valid instance of `Self`, this returns `Err`. If [`Self:
2898    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2899    /// error][ConvertError::from].
2900    ///
2901    /// [self-unaligned]: Unaligned
2902    /// [slice-dst]: KnownLayout#dynamically-sized-types
2903    ///
2904    /// # Examples
2905    ///
2906    /// ```
2907    /// # #![allow(non_camel_case_types)] // For C0::xC0
2908    /// use zerocopy::TryFromBytes;
2909    /// # use zerocopy_derive::*;
2910    ///
2911    /// // The only valid value of this type is the byte `0xC0`
2912    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2913    /// #[repr(u8)]
2914    /// enum C0 { xC0 = 0xC0 }
2915    ///
2916    /// // The only valid value of this type is the bytes `0xC0C0`.
2917    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2918    /// #[repr(C)]
2919    /// struct C0C0(C0, C0);
2920    ///
2921    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2922    /// #[repr(C, packed)]
2923    /// struct Packet {
2924    ///     magic_number: C0C0,
2925    ///     mug_size: u8,
2926    ///     temperature: u8,
2927    ///     marshmallows: [[u8; 2]],
2928    /// }
2929    ///
2930    /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2931    ///
2932    /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2933    ///
2934    /// assert_eq!(packet.mug_size, 240);
2935    /// assert_eq!(packet.temperature, 77);
2936    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2937    /// assert_eq!(prefix, &[123u8][..]);
2938    ///
2939    /// prefix[0] = 111;
2940    /// packet.temperature = 222;
2941    ///
2942    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2943    ///
2944    /// // These bytes are not a valid instance of `Packet`.
2945    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2946    /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2947    /// ```
2948    ///
2949    /// Since an explicit `count` is provided, this method supports types with
2950    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_suffix`]
2951    /// which do not take an explicit count do not support such types.
2952    ///
2953    /// ```
2954    /// use core::num::NonZeroU16;
2955    /// use zerocopy::*;
2956    /// # use zerocopy_derive::*;
2957    ///
2958    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2959    /// #[repr(C, packed)]
2960    /// struct ZSTy {
2961    ///     leading_sized: NonZeroU16,
2962    ///     trailing_dst: [()],
2963    /// }
2964    ///
2965    /// let mut src = 0xCAFEu16;
2966    /// let src = src.as_mut_bytes();
2967    /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
2968    /// assert_eq!(zsty.trailing_dst.len(), 42);
2969    /// ```
2970    ///
2971    /// [`try_mut_from_suffix`]: TryFromBytes::try_mut_from_suffix
2972    ///
2973    #[doc = codegen_header!("try_mut_from_suffix_with_elems")]
2974    ///
2975    /// See [`TryFromBytes::try_ref_from_suffix_with_elems`](#method.try_ref_from_suffix_with_elems.codegen).
2976    #[must_use = "has no side effects"]
2977    #[inline]
2978    fn try_mut_from_suffix_with_elems(
2979        source: &mut [u8],
2980        count: usize,
2981    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2982    where
2983        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2984    {
2985        try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2986    }
2987
2988    /// Attempts to read the given `source` as a `Self`.
2989    ///
2990    /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
2991    /// instance of `Self`, this returns `Err`.
2992    ///
2993    /// # Examples
2994    ///
2995    /// ```
2996    /// use zerocopy::TryFromBytes;
2997    /// # use zerocopy_derive::*;
2998    ///
2999    /// // The only valid value of this type is the byte `0xC0`
3000    /// #[derive(TryFromBytes)]
3001    /// #[repr(u8)]
3002    /// enum C0 { xC0 = 0xC0 }
3003    ///
3004    /// // The only valid value of this type is the bytes `0xC0C0`.
3005    /// #[derive(TryFromBytes)]
3006    /// #[repr(C)]
3007    /// struct C0C0(C0, C0);
3008    ///
3009    /// #[derive(TryFromBytes)]
3010    /// #[repr(C)]
3011    /// struct Packet {
3012    ///     magic_number: C0C0,
3013    ///     mug_size: u8,
3014    ///     temperature: u8,
3015    /// }
3016    ///
3017    /// let bytes = &[0xC0, 0xC0, 240, 77][..];
3018    ///
3019    /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
3020    ///
3021    /// assert_eq!(packet.mug_size, 240);
3022    /// assert_eq!(packet.temperature, 77);
3023    ///
3024    /// // These bytes are not a valid instance of `Packet`.
3025    /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
3026    /// assert!(Packet::try_read_from_bytes(bytes).is_err());
3027    /// ```
3028    ///
3029    /// # Performance Considerations
3030    ///
3031    /// In this version of zerocopy, this method reads the `source` into a
3032    /// well-aligned stack allocation and *then* validates that the allocation
3033    /// is a valid `Self`. This ensures that validation can be performed using
3034    /// aligned reads (which carry a performance advantage over unaligned reads
3035    /// on many platforms) at the cost of an unconditional copy.
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy `source` into a well-aligned stack allocation first so that
        // validation can use aligned reads (see "Performance Considerations"
        // above). A length mismatch is reported as a size error before any
        // validity checking happens.
        let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
        // its bytes are initialized.
        unsafe { try_read_from(source, candidate) }
    }
3054
3055    /// Attempts to read a `Self` from the prefix of the given `source`.
3056    ///
3057    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
3058    /// of `source`, returning that `Self` and any remaining bytes. If
3059    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
3060    /// of `Self`, it returns `Err`.
3061    ///
3062    /// # Examples
3063    ///
3064    /// ```
3065    /// use zerocopy::TryFromBytes;
3066    /// # use zerocopy_derive::*;
3067    ///
3068    /// // The only valid value of this type is the byte `0xC0`
3069    /// #[derive(TryFromBytes)]
3070    /// #[repr(u8)]
3071    /// enum C0 { xC0 = 0xC0 }
3072    ///
3073    /// // The only valid value of this type is the bytes `0xC0C0`.
3074    /// #[derive(TryFromBytes)]
3075    /// #[repr(C)]
3076    /// struct C0C0(C0, C0);
3077    ///
3078    /// #[derive(TryFromBytes)]
3079    /// #[repr(C)]
3080    /// struct Packet {
3081    ///     magic_number: C0C0,
3082    ///     mug_size: u8,
3083    ///     temperature: u8,
3084    /// }
3085    ///
3086    /// // These are more bytes than are needed to encode a `Packet`.
3087    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
3088    ///
3089    /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
3090    ///
3091    /// assert_eq!(packet.mug_size, 240);
3092    /// assert_eq!(packet.temperature, 77);
3093    /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
3094    ///
3095    /// // These bytes are not a valid instance of `Packet`.
3096    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
3097    /// assert!(Packet::try_read_from_prefix(bytes).is_err());
3098    /// ```
3099    ///
3100    /// # Performance Considerations
3101    ///
3102    /// In this version of zerocopy, this method reads the `source` into a
3103    /// well-aligned stack allocation and *then* validates that the allocation
3104    /// is a valid `Self`. This ensures that validation can be performed using
3105    /// aligned reads (which carry a performance advantage over unaligned reads
3106    /// on many platforms) at the cost of an unconditional copy.
3107    #[must_use = "has no side effects"]
3108    #[inline]
3109    fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
3110    where
3111        Self: Sized,
3112    {
3113        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.
3114
3115        let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
3116            Ok(candidate) => candidate,
3117            Err(e) => {
3118                return Err(TryReadError::Size(e.with_dst()));
3119            }
3120        };
3121        // SAFETY: `candidate` was copied from from `source: &[u8]`, so all of
3122        // its bytes are initialized.
3123        unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
3124    }
3125
    /// Attempts to read a `Self` from the suffix of the given `source`.
    ///
    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any preceding bytes. If
    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
    /// of `Self`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(non_camel_case_types)] // For C0::xC0
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
    ///
    /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
    /// assert!(Packet::try_read_from_suffix(bytes).is_err());
    /// ```
    ///
    /// # Performance Considerations
    ///
    /// In this version of zerocopy, this method reads the `source` into a
    /// well-aligned stack allocation and *then* validates that the allocation
    /// is a valid `Self`. This ensures that validation can be performed using
    /// aligned reads (which carry a performance advantage over unaligned reads
    /// on many platforms) at the cost of an unconditional copy.
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // A size failure here means `source` was shorter than
        // `size_of::<Self>()`; report it as a `Size` error.
        let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
    }
3197}
3198
/// Attempts to interpret the leading or trailing bytes (per `cast_type`) of
/// `source` as a `&T`, returning it together with the remaining bytes.
///
/// `meta` optionally fixes the pointer metadata (e.g. a slice length) to use
/// for the cast; `None` lets the cast infer it. Returns an error if the cast
/// itself fails (presumably a size/alignment problem — see `try_cast_into`) or
/// if the candidate bytes are not a bit-valid `T`. Note that the remaining
/// bytes are always the *second* tuple element; callers performing a suffix
/// cast are expected to reorder the pair themselves.
#[inline(always)]
fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
    source: &[u8],
    cast_type: CastType,
    meta: Option<T::PointerMetadata>,
) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
    match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
        Ok((source, prefix_suffix)) => {
            // This call may panic. If that happens, it doesn't cause any soundness
            // issues, as we have not generated any invalid state which we need to
            // fix before returning.
            match source.try_into_valid() {
                Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
                // Validity failure: hand the caller back the original bytes.
                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
            }
        }
        Err(e) => Err(e.map_src(Ptr::as_ref).into()),
    }
}
3218
/// Attempts to interpret the leading or trailing bytes (per `cast_type`) of
/// `candidate` as a `&mut T`, returning it together with the remaining bytes.
///
/// Mutable counterpart of `try_ref_from_prefix_suffix`. `meta` optionally
/// fixes the pointer metadata (e.g. a slice length) to use for the cast;
/// `None` lets the cast infer it. Returns an error if the cast itself fails or
/// if the candidate bytes are not a bit-valid `T`. The remaining bytes are
/// always the *second* tuple element; suffix-cast callers reorder the pair.
#[inline(always)]
fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
    candidate: &mut [u8],
    cast_type: CastType,
    meta: Option<T::PointerMetadata>,
) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
    match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
        Ok((candidate, prefix_suffix)) => {
            // This call may panic. If that happens, it doesn't cause any soundness
            // issues, as we have not generated any invalid state which we need to
            // fix before returning.
            match candidate.try_into_valid() {
                Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
                // Validity failure: hand the caller back the original bytes.
                Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
            }
        }
        Err(e) => Err(e.map_src(Ptr::as_mut).into()),
    }
}
3238
/// Returns the given pair with its elements in reversed order.
#[inline(always)]
fn swap<T, U>(pair: (T, U)) -> (U, T) {
    let (first, second) = pair;
    (second, first)
}
3243
/// Validates that `candidate` (a byte-for-byte copy of some `source`) is a
/// bit-valid `T`, returning the `T` on success and a `Validity` error wrapping
/// `source` on failure.
///
/// # Safety
///
/// All bytes of `candidate` must be initialized.
#[inline(always)]
unsafe fn try_read_from<S, T: TryFromBytes>(
    source: S,
    mut candidate: CoreMaybeUninit<T>,
) -> Result<T, TryReadError<S, T>> {
    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
    // to add a `T: Immutable` bound.
    let c_ptr = Ptr::from_mut(&mut candidate);
    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it is derived
    // from `candidate`, which the caller promises is entirely initialized.
    // Since `candidate` is a `MaybeUninit`, it has no validity requirements,
    // and so no values written to an `Initialized` `c_ptr` can violate its
    // validity. Since `c_ptr` has `Exclusive` aliasing, no mutations may
    // happen except via `c_ptr` so long as it is live, so we don't need to
    // worry about the fact that `c_ptr` may have more restricted validity than
    // `candidate`.
    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
    let mut c_ptr = c_ptr.cast::<_, crate::pointer::cast::CastSized, _>();

    // Since we don't have `T: KnownLayout`, we hack around that by using
    // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
    //
    // This call may panic. If that happens, it doesn't cause any soundness
    // issues, as we have not generated any invalid state which we need to fix
    // before returning.
    if !Wrapping::<T>::is_bit_valid(c_ptr.reborrow_shared().forget_aligned()) {
        return Err(ValidityError::new(source).into());
    }

    // Compile-time assertion that `T` and `Wrapping<T>` are mutually
    // transmutable — i.e., that they have the same size and bit validity.
    fn _assert_same_size_and_validity<T>()
    where
        Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
        T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
    {
    }

    _assert_same_size_and_validity::<T>();

    // SAFETY: We just validated that `candidate` contains a valid
    // `Wrapping<T>`, which has the same size and bit validity as `T`, as
    // guaranteed by the preceding type assertion.
    Ok(unsafe { candidate.assume_init() })
}
3289
/// Types for which a sequence of `0` bytes is a valid instance.
///
/// Any memory region of the appropriate length which is guaranteed to contain
/// only zero bytes can be viewed as any `FromZeros` type with no runtime
/// overhead. This is useful whenever memory is known to be in a zeroed state,
/// such as memory returned from some allocation routines.
///
/// # Warning: Padding bytes
///
/// Note that, when a value is moved or copied, only the non-padding bytes of
/// that value are guaranteed to be preserved. It is unsound to assume that
/// values written to padding bytes are preserved after a move or copy. For more
/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
///
/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(FromZeros)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::{FromZeros, Immutable};
/// #[derive(FromZeros)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros, Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `FromZeros`.
///
/// # Safety
///
/// *This section describes what is required in order for `T: FromZeros`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `FromZeros` manually, and you don't plan on writing unsafe code that
/// operates on `FromZeros` types, then you don't need to read this section.*
///
/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
/// `T` whose bytes are all initialized to zero. If a type is marked as
/// `FromZeros` which violates this contract, it may cause undefined behavior.
///
/// `#[derive(FromZeros)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::FromZeros",
    doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
)]
pub unsafe trait FromZeros: TryFromBytes {
    // The `Self: Sized` bound makes it so that `FromZeros` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// Overwrites `self` with zeros.
    ///
    /// Sets every byte in `self` to 0. While this is similar to doing `*self =
    /// Self::new_zeroed()`, it differs in that `zero` does not semantically
    /// drop the current value and replace it with a new one — it simply
    /// modifies the bytes of the existing value.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// # use zerocopy_derive::*;
    /// #
    /// #[derive(FromZeros)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let mut header = PacketHeader {
    ///     src_port: 100u16.to_be_bytes(),
    ///     dst_port: 200u16.to_be_bytes(),
    ///     length: 300u16.to_be_bytes(),
    ///     checksum: 400u16.to_be_bytes(),
    /// };
    ///
    /// header.zero();
    ///
    /// assert_eq!(header.src_port, [0, 0]);
    /// assert_eq!(header.dst_port, [0, 0]);
    /// assert_eq!(header.length, [0, 0]);
    /// assert_eq!(header.checksum, [0, 0]);
    /// ```
    #[inline(always)]
    fn zero(&mut self) {
        let slf: *mut Self = self;
        let len = mem::size_of_val(self);
        // SAFETY:
        // - `self` is guaranteed by the type system to be valid for writes of
        //   size `size_of_val(self)`.
        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
        //   as required by `u8`.
        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
        //   of `Self`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
    }

    /// Creates an instance of `Self` from zeroed bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// # use zerocopy_derive::*;
    /// #
    /// #[derive(FromZeros)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header: PacketHeader = FromZeros::new_zeroed();
    ///
    /// assert_eq!(header.src_port, [0, 0]);
    /// assert_eq!(header.dst_port, [0, 0]);
    /// assert_eq!(header.length, [0, 0]);
    /// assert_eq!(header.checksum, [0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn new_zeroed() -> Self
    where
        Self: Sized,
    {
        // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal.
        unsafe { mem::zeroed() }
    }

    /// Creates a `Box<Self>` from zeroed bytes.
    ///
    /// This function is useful for allocating large values on the heap and
    /// zero-initializing them, without ever creating a temporary instance of
    /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
    /// will allocate `[u8; 1048576]` directly on the heap; it does not require
    /// storing `[u8; 1048576]` in a temporary variable on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
    /// have performance benefits.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is guaranteed
    /// never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(any(feature = "alloc", test))]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
    where
        Self: Sized,
    {
        // If `T` is a ZST, then return a proper boxed instance of it. There is
        // no allocation, but `Box` does require a correct dangling pointer.
        let layout = Layout::new::<Self>();
        if layout.size() == 0 {
            // Construct the `Box` from a dangling pointer to avoid calling
            // `Self::new_zeroed`. This ensures that stack space is never
            // allocated for `Self` even on lower opt-levels where this branch
            // might not get optimized out.

            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
            // requirements are that the pointer is non-null and sufficiently
            // aligned. Per [2], `NonNull::dangling` produces a pointer which
            // is sufficiently aligned. Since the produced pointer is a
            // `NonNull`, it is non-null.
            //
            // [1] Per https://doc.rust-lang.org/1.81.0/std/boxed/index.html#memory-layout:
            //
            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
            //
            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
            //
            //   Creates a new `NonNull` that is dangling, but well-aligned.
            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
        }

        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
        // NOTE(review): informally, `layout` is non-zero-sized (checked just
        // above), which is what `alloc_zeroed` requires of its layout.
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
        if ptr.is_null() {
            return Err(AllocError);
        }
        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
        // NOTE(review): informally, `ptr` is non-null (checked just above),
        // was allocated by the global allocator with `Self`'s layout, and
        // points to zeroed memory, which is a valid `Self` because
        // `Self: FromZeros`.
        #[allow(clippy::undocumented_unsafe_blocks)]
        Ok(unsafe { Box::from_raw(ptr) })
    }

    /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `[Self]` on the
    /// heap and zero-initializing them, without ever creating a temporary
    /// instance of `[Self; _]` on the stack. For example,
    /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
    /// the heap; it does not require storing the slice on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
    /// benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
    where
        Self: KnownLayout<PointerMetadata = usize>,
    {
        // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
        // `new_box`. The referent of the pointer returned by `alloc_zeroed`
        // (and, consequently, the `Box` derived from it) is a valid instance of
        // `Self`, because `Self` is `FromZeros`.
        unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
    }

    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
    #[doc(hidden)]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[must_use = "has no side effects (other than allocation)"]
    #[inline(always)]
    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
    where
        Self: Sized,
    {
        <[Self]>::new_box_zeroed_with_elems(len)
    }

    /// Creates a `Vec<Self>` from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `Vec`s and
    /// zero-initializing them, without ever creating a temporary instance of
    /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
    /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
    /// heap; it does not require storing intermediate values on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline(always)]
    fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
    where
        Self: Sized,
    {
        <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
    }

    /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
    /// the vector. The new items are initialized with zeros.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline(always)]
    fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
        // panic condition is not satisfied.
        <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
    }

    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline]
    fn insert_vec_zeroed(
        v: &mut Vec<Self>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        assert!(position <= v.len());
        // We only conditionally compile on versions on which `try_reserve` is
        // stable; the Clippy lint is a false positive.
        v.try_reserve(additional).map_err(|_| AllocError)?;
        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
        // * `ptr.add(position)`
        // * `position + additional`
        // * `v.len() + additional`
        //
        // `v.len() - position` cannot overflow because we asserted that
        // `position <= v.len()`.
        #[allow(clippy::multiple_unsafe_ops_per_block)]
        unsafe {
            // This is a potentially overlapping copy: shift the tail of the
            // `Vec` right by `additional` slots, then zero the gap.
            let ptr = v.as_mut_ptr();
            #[allow(clippy::arithmetic_side_effects)]
            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
            ptr.add(position).write_bytes(0, additional);
            #[allow(clippy::arithmetic_side_effects)]
            v.set_len(v.len() + additional);
        }

        Ok(())
    }
}
3661
3662/// Analyzes whether a type is [`FromBytes`].
3663///
3664/// This derive analyzes, at compile time, whether the annotated type satisfies
3665/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
3666/// supertraits if it is sound to do so. This derive can be applied to structs,
3667/// enums, and unions;
3668/// e.g.:
3669///
3670/// ```
3671/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3672/// #[derive(FromBytes)]
3673/// struct MyStruct {
3674/// # /*
3675///     ...
3676/// # */
3677/// }
3678///
3679/// #[derive(FromBytes)]
3680/// #[repr(u8)]
3681/// enum MyEnum {
3682/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3683/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3684/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3685/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3686/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3687/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3688/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3689/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3690/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3691/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3692/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3693/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3694/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3695/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3696/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3697/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3698/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3699/// #   VFF,
3700/// # /*
3701///     ...
3702/// # */
3703/// }
3704///
3705/// #[derive(FromBytes, Immutable)]
3706/// union MyUnion {
3707/// #   variant: u8,
3708/// # /*
3709///     ...
3710/// # */
3711/// }
3712/// ```
3713///
3714/// [safety conditions]: trait@FromBytes#safety
3715///
3716/// # Analysis
3717///
3718/// *This section describes, roughly, the analysis performed by this derive to
3719/// determine whether it is sound to implement `FromBytes` for a given type.
3720/// Unless you are modifying the implementation of this derive, or attempting to
3721/// manually implement `FromBytes` for a type yourself, you don't need to read
3722/// this section.*
3723///
3724/// If a type has the following properties, then this derive can implement
3725/// `FromBytes` for that type:
3726///
3727/// - If the type is a struct, all of its fields must be `FromBytes`.
3728/// - If the type is an enum:
3729///   - It must have a defined representation which is one of `u8`, `u16`, `i8`,
3730///     or `i16`.
3731///   - The maximum number of discriminants must be used (so that every possible
3732///     bit pattern is a valid one).
3733///   - Its fields must be `FromBytes`.
3734///
3735/// This analysis is subject to change. Unsafe code may *only* rely on the
3736/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3737/// implementation details of this derive.
3738///
3739/// ## Why isn't an explicit representation required for structs?
3740///
3741/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3742/// that structs are marked with `#[repr(C)]`.
3743///
/// Per the [Rust reference][reference],
3745///
3746/// > The representation of a type can change the padding between fields, but
3747/// > does not change the layout of the fields themselves.
3748///
3749/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3750///
3751/// Since the layout of structs only consists of padding bytes and field bytes,
3752/// a struct is soundly `FromBytes` if:
3753/// 1. its padding is soundly `FromBytes`, and
3754/// 2. its fields are soundly `FromBytes`.
3755///
3756/// The answer to the first question is always yes: padding bytes do not have
3757/// any validity constraints. A [discussion] of this question in the Unsafe Code
3758/// Guidelines Working Group concluded that it would be virtually unimaginable
3759/// for future versions of rustc to add validity constraints to padding bytes.
3760///
3761/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3762///
3763/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3764/// its fields are `FromBytes`.
3765#[cfg(any(feature = "derive", test))]
3766#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
3767pub use zerocopy_derive::FromBytes;
3768
3769/// Types for which any bit pattern is valid.
3770///
3771/// Any memory region of the appropriate length which contains initialized bytes
3772/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3773/// useful for efficiently parsing bytes as structured data.
3774///
3775/// # Warning: Padding bytes
3776///
3777/// Note that, when a value is moved or copied, only the non-padding bytes of
3778/// that value are guaranteed to be preserved. It is unsound to assume that
3779/// values written to padding bytes are preserved after a move or copy. For
3780/// example, the following is unsound:
3781///
3782/// ```rust,no_run
3783/// use core::mem::{size_of, transmute};
3784/// use zerocopy::FromZeros;
3785/// # use zerocopy_derive::*;
3786///
3787/// // Assume `Foo` is a type with padding bytes.
3788/// #[derive(FromZeros, Default)]
3789/// struct Foo {
3790/// # /*
3791///     ...
3792/// # */
3793/// }
3794///
3795/// let mut foo: Foo = Foo::default();
3796/// FromZeros::zero(&mut foo);
3797/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3798/// // those writes are not guaranteed to be preserved in padding bytes when
3799/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3800/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3801/// ```
3802///
3803/// # Implementation
3804///
3805/// **Do not implement this trait yourself!** Instead, use
3806/// [`#[derive(FromBytes)]`][derive]; e.g.:
3807///
3808/// ```
3809/// # use zerocopy_derive::{FromBytes, Immutable};
3810/// #[derive(FromBytes)]
3811/// struct MyStruct {
3812/// # /*
3813///     ...
3814/// # */
3815/// }
3816///
3817/// #[derive(FromBytes)]
3818/// #[repr(u8)]
3819/// enum MyEnum {
3820/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3821/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3822/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3823/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3824/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3825/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3826/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3827/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3828/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3829/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3830/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3831/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3832/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3833/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3834/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3835/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3836/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3837/// #   VFF,
3838/// # /*
3839///     ...
3840/// # */
3841/// }
3842///
3843/// #[derive(FromBytes, Immutable)]
3844/// union MyUnion {
3845/// #   variant: u8,
3846/// # /*
3847///     ...
3848/// # */
3849/// }
3850/// ```
3851///
3852/// This derive performs a sophisticated, compile-time safety analysis to
3853/// determine whether a type is `FromBytes`.
3854///
3855/// # Safety
3856///
3857/// *This section describes what is required in order for `T: FromBytes`, and
3858/// what unsafe code may assume of such types. If you don't plan on implementing
3859/// `FromBytes` manually, and you don't plan on writing unsafe code that
3860/// operates on `FromBytes` types, then you don't need to read this section.*
3861///
3862/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3863/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3864/// words, any byte value which is not uninitialized). If a type is marked as
3865/// `FromBytes` which violates this contract, it may cause undefined behavior.
3866///
3867/// `#[derive(FromBytes)]` only permits [types which satisfy these
3868/// requirements][derive-analysis].
3869///
3870#[cfg_attr(
3871    feature = "derive",
3872    doc = "[derive]: zerocopy_derive::FromBytes",
3873    doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3874)]
3875#[cfg_attr(
3876    not(feature = "derive"),
3877    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3878    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3879)]
3880#[cfg_attr(
3881    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3882    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3883)]
3884pub unsafe trait FromBytes: FromZeros {
    // This required method exists solely to gate implementation of the trait:
    // it is `#[doc(hidden)]` and only `#[derive(FromBytes)]` knows to provide
    // it, so downstream code cannot (intentionally) write a manual `impl
    // FromBytes`. The `Self: Sized` bound makes it so that `FromBytes` is
    // still object safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
3891
    /// Interprets the given `source` as a `&Self`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self`. If the length of `source` is not a [valid size of
    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
    /// [infallibly discard the alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`ref_from_bytes_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// [`ref_from_bytes_with_elems`]: FromBytes::ref_from_bytes_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     header: PacketHeader,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
    ///
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.header.src_port, [0, 1]);
    /// assert_eq!(packet.header.dst_port, [2, 3]);
    /// assert_eq!(packet.header.length, [4, 5]);
    /// assert_eq!(packet.header.checksum, [6, 7]);
    /// assert_eq!(packet.body, [8, 9, 10, 11]);
    /// ```
    ///
    #[doc = codegen_header!("ref_from_bytes")]
    ///
    /// This abstraction for reinterpreting a buffer of bytes as a structured
    /// type is safe and cheap, but does not necessarily have zero runtime cost.
    /// The below code generation benchmark exercises this routine on a
    /// destination type whose complex layout places complex requirements on the
    /// source:
    ///
    /// - the source must begin at an even memory address
    /// - the source has a minimum length of 4 bytes
    /// - the source has a total length divisible by 2
    ///
    /// These conditions must all be checked at runtime in this example, but the
    /// codegen you experience in practice will depend on optimization level,
    /// the layout of the destination type, and what the compiler can prove
    /// about the source.
    #[doc = codegen_tabs!(format = "coco", bench = "ref_from_bytes")]
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Attempt a cast that consumes the entire source (`no_leftover`);
        // `None` means the trailing-slice length (if any) is inferred from
        // `source.len()` rather than supplied by the caller.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
            // The cast establishes layout; `recall_validity` upgrades the
            // pointer's validity invariant to fully valid (sound because
            // `Self: FromBytes`), letting us hand out a plain `&Self`.
            Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
            // On failure, convert the error's source pointer back into the
            // original `&[u8]` so the caller gets their slice back.
            Err(err) => Err(err.map_src(|src| src.as_ref())),
        }
    }
3990
    /// Interprets the prefix of the given `source` as a `&Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     header: PacketHeader,
    ///     body: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
    ///
    /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.header.src_port, [0, 1]);
    /// assert_eq!(packet.header.dst_port, [2, 3]);
    /// assert_eq!(packet.header.length, [4, 5]);
    /// assert_eq!(packet.header.checksum, [6, 7]);
    /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
    /// assert_eq!(suffix, &[14u8][..]);
    /// ```
    ///
    #[doc = codegen_header!("ref_from_prefix")]
    ///
    /// This abstraction for reinterpreting a buffer of bytes as a structured
    /// type is safe and cheap, but does not necessarily have zero runtime cost.
    /// The below code generation benchmark exercises this routine on a
    /// destination type whose complex layout places complex requirements on the
    /// source:
    ///
    /// - the source must begin at an even memory address
    /// - the source has a minimum length of 4 bytes
    ///
    /// These conditions must all be checked at runtime in this example, but the
    /// codegen you experience in practice will depend on optimization level,
    /// the layout of the destination type, and what the compiler can prove
    /// about the source.
    #[doc = codegen_tabs!(format = "coco", bench = "ref_from_prefix")]
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared prefix/suffix helper; `None` means "infer
        // the largest trailing-slice length that fits in `source`".
        ref_from_prefix_suffix(source, None, CastType::Prefix)
    }
4092
    /// Interprets the suffix of the given bytes as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    /// ```
    ///
    #[doc = codegen_header!("ref_from_suffix")]
    ///
    /// This abstraction for reinterpreting a buffer of bytes as a structured
    /// type is safe and cheap, but does not necessarily have zero runtime cost.
    /// The below code generation benchmark exercises this routine on a
    /// destination type whose complex layout places complex requirements on the
    /// source's largest [valid-size] suffix for `Self`, which must:
    ///
    /// - begin at an even memory address
    /// - have a minimum length of 4 bytes
    ///
    /// These conditions must all be checked at runtime in this example, but the
    /// codegen you experience in practice will depend on optimization level,
    /// the layout of the destination type, and what the compiler can prove
    /// about the source.
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    #[doc = codegen_tabs!(format = "coco", bench = "ref_from_suffix")]
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
    where
        Self: Immutable + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // The shared helper always yields the cast target first; callers of a
        // suffix cast expect `(prefix bytes, &Self)`, so swap the pair.
        ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
4181
    /// Interprets the given `source` as a `&mut Self`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self`. If the length of `source` is not a [valid size of
    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
    /// [infallibly discard the alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_bytes_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_bytes_with_elems`]: FromBytes::mut_from_bytes_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    ///
    /// header.checksum = [0, 0];
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
    /// ```
    ///
    #[doc = codegen_header!("mut_from_bytes")]
    ///
    /// See [`FromBytes::ref_from_bytes`](#method.ref_from_bytes.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // As in `ref_from_bytes`, cast the entire source with no leftover
        // bytes; here `BecauseExclusive` justifies the cast via the
        // exclusivity of `&mut` rather than via `Self: Immutable`.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
            // NOTE: the explicit turbofish selects the specific
            // validity-recall proof route required here; it is load-bearing
            // for type inference and should not be removed.
            Ok(ptr) => Ok(ptr.recall_validity::<_, (_, (_, _))>().as_mut()),
            // On failure, hand the original `&mut [u8]` back to the caller
            // inside the error.
            Err(err) => Err(err.map_src(|src| src.as_mut())),
        }
    }
4267
    /// Interprets the prefix of the given `source` as a `&mut Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// assert_eq!(body, &[8, 9][..]);
    ///
    /// header.checksum = [0, 0];
    /// body.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
    /// ```
    ///
    #[doc = codegen_header!("mut_from_prefix")]
    ///
    /// See [`FromBytes::ref_from_prefix`](#method.ref_from_prefix.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the mutable prefix/suffix helper; `None` means "infer
        // the largest trailing-slice length that fits in `source`".
        mut_from_prefix_suffix(source, None, CastType::Prefix)
    }
4356
    /// Interprets the suffix of the given `source` as a `&mut Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_suffix_with_elems`]: FromBytes::mut_from_suffix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    ///
    /// prefix.fill(0);
    /// trailer.frame_check_sequence.fill(1);
    ///
    /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
    /// ```
    ///
    #[doc = codegen_header!("mut_from_suffix")]
    ///
    /// See [`FromBytes::ref_from_suffix`](#method.ref_from_suffix.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // The shared helper yields the cast target first; swap so callers get
        // the documented `(prefix bytes, &mut Self)` order.
        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
4436
    /// Interprets the given `source` as a `&Self` with a DST length equal to
    /// `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
    ///
    #[doc = codegen_header!("ref_from_bytes_with_elems")]
    ///
    /// This abstraction for reinterpreting a buffer of bytes as a structured
    /// type is safe and cheap, but does not necessarily have zero runtime cost.
    /// The below code generation benchmark exercises this routine on a
    /// destination type whose complex layout places complex requirements on the
    /// source:
    ///
    /// - the source must begin at an even memory address
    /// - the source has a total length that exactly fits a `Self` with a
    ///   trailing slice length of `count`
    ///
    /// These conditions must all be checked at runtime in this example, but the
    /// codegen you experience in practice will depend on optimization level,
    /// the layout of the destination type, and what the compiler can prove
    /// about the source.
    ///
    #[doc = codegen_tabs!(format = "coco", bench = "ref_from_bytes_with_elems")]
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Unlike `ref_from_bytes`, no ZST assertion is needed: the caller
        // supplies the trailing-element count explicitly (`Some(count)`), so
        // a zero-sized trailing element type is unambiguous.
        let source = Ptr::from_ref(source);
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            // The cast establishes layout; `recall_validity` upgrades the
            // pointer's validity invariant (sound because `Self: FromBytes`).
            Ok(slf) => Ok(slf.recall_validity().as_ref()),
            Err(err) => Err(err.map_src(|s| s.as_ref())),
        }
    }
4533
    /// Interprets the prefix of the given `source` as a DST `&Self` with length
    /// equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
    ///
    #[doc = codegen_header!("ref_from_prefix_with_elems")]
    ///
    /// This abstraction for reinterpreting a buffer of bytes as a structured
    /// type is safe and cheap, but does not necessarily have zero runtime cost.
    /// The below code generation benchmark exercises this routine on a
    /// destination type whose complex layout places complex requirements on the
    /// source:
    ///
    /// - the source must begin at an even memory address
    /// - the source has a prefix that fits a `Self` with a trailing slice
    ///   length of `count`
    ///
    /// These conditions must all be checked at runtime in this example, but the
    /// codegen you experience in practice will depend on optimization level,
    /// the layout of the destination type, and what the compiler can prove
    /// about the source.
    #[doc = codegen_tabs!(format = "coco", bench = "ref_from_prefix_with_elems")]
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // No ZST assertion is needed: the trailing-element count is supplied
        // explicitly (`Some(count)`) rather than inferred from the source
        // length, so zero-sized trailing elements are unambiguous.
        ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4626
4627    /// Interprets the suffix of the given `source` as a DST `&Self` with length
4628    /// equal to `count`.
4629    ///
4630    /// This method attempts to return a reference to the suffix of `source`
4631    /// interpreted as a `Self` with `count` trailing elements, and a reference
4632    /// to the preceding bytes. If there are insufficient bytes, or if that
4633    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4634    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4635    /// alignment error][size-error-from].
4636    ///
4637    /// [self-unaligned]: Unaligned
4638    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4639    ///
4640    /// # Examples
4641    ///
4642    /// ```
4643    /// use zerocopy::FromBytes;
4644    /// # use zerocopy_derive::*;
4645    ///
4646    /// # #[derive(Debug, PartialEq, Eq)]
4647    /// #[derive(FromBytes, Immutable)]
4648    /// #[repr(C)]
4649    /// struct Pixel {
4650    ///     r: u8,
4651    ///     g: u8,
4652    ///     b: u8,
4653    ///     a: u8,
4654    /// }
4655    ///
4656    /// // These are more bytes than are needed to encode two `Pixel`s.
4657    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4658    ///
4659    /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
4660    ///
4661    /// assert_eq!(prefix, &[0, 1]);
4662    ///
4663    /// assert_eq!(pixels, &[
4664    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4665    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4666    /// ]);
4667    /// ```
4668    ///
4669    /// Since an explicit `count` is provided, this method supports types with
4670    /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
4671    /// which do not take an explicit count do not support such types.
4672    ///
4673    /// ```
4674    /// use zerocopy::*;
4675    /// # use zerocopy_derive::*;
4676    ///
4677    /// #[derive(FromBytes, Immutable, KnownLayout)]
4678    /// #[repr(C)]
4679    /// struct ZSTy {
4680    ///     leading_sized: [u8; 2],
4681    ///     trailing_dst: [()],
4682    /// }
4683    ///
4684    /// let src = &[85, 85][..];
4685    /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
4686    /// assert_eq!(zsty.trailing_dst.len(), 42);
4687    /// ```
4688    ///
4689    /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
4690    ///
4691    #[doc = codegen_header!("ref_from_suffix_with_elems")]
4692    ///
4693    /// This abstraction for reinterpreting a buffer of bytes as a structured
4694    /// type is safe and cheap, but does not necessarily have zero runtime cost.
4695    /// The below code generation benchmark exercises this routine on a
4696    /// destination type whose complex layout places complex requirements on the
4697    /// [valid-size] suffix for a `Self` of trailing slice length `count`, which
4698    /// must:
4699    ///
4700    /// - begin at an even memory address
4701    /// - have a minimum length of 4 bytes
4702    ///
4703    /// These conditions must all be checked at runtime in this example, but the
4704    /// codegen you experience in practice will depend on optimization level,
4705    /// the layout of the destination type, and what the compiler can prove
4706    /// about the source.
4707    ///
4708    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4709    #[doc = codegen_tabs!(format = "coco", bench = "ref_from_suffix_with_elems")]
4710    #[must_use = "has no side effects"]
4711    #[inline]
4712    fn ref_from_suffix_with_elems(
4713        source: &[u8],
4714        count: usize,
4715    ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
4716    where
4717        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4718    {
4719        ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4720    }
4721
    /// Interprets the given `source` as a `&mut Self` with a DST length equal
    /// to `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
    ///
    #[doc = codegen_header!("mut_from_bytes_with_elems")]
    ///
    /// See [`TryFromBytes::ref_from_bytes_with_elems`](#method.ref_from_bytes_with_elems.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
    {
        let source = Ptr::from_mut(source);
        // Attempt the cast to a `Self` with exactly `count` trailing elements
        // and no leftover bytes; this fails on size or alignment mismatch.
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            // `recall_validity` re-establishes, at the type level, that the
            // initialized bytes are a valid `Self` (presumably justified by
            // `Self: FromBytes` plus the `BecauseExclusive` aliasing proof —
            // the proof machinery lives in `Ptr`).
            Ok(slf) => Ok(slf.recall_validity::<_, (_, (_, BecauseExclusive))>().as_mut()),
            // On failure, convert the error's carried `Ptr` source back into
            // the caller's `&mut [u8]` so the error type matches the API.
            Err(err) => Err(err.map_src(|s| s.as_mut())),
        }
    }
4806
    /// Interprets the prefix of the given `source` as a `&mut Self` with DST
    /// length equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    /// suffix.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
    ///
    #[doc = codegen_header!("mut_from_prefix_with_elems")]
    ///
    /// See [`TryFromBytes::ref_from_prefix_with_elems`](#method.ref_from_prefix_with_elems.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_prefix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
    {
        // Delegate to the shared mutable affix-cast helper; `Some(count)` pins
        // the trailing-slice length explicitly.
        mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4890
4891    /// Interprets the suffix of the given `source` as a `&mut Self` with DST
4892    /// length equal to `count`.
4893    ///
4894    /// This method attempts to return a reference to the suffix of `source`
4895    /// interpreted as a `Self` with `count` trailing elements, and a reference
4896    /// to the remaining bytes. If there are insufficient bytes, or if that
4897    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4898    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4899    /// alignment error][size-error-from].
4900    ///
4901    /// [self-unaligned]: Unaligned
4902    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4903    ///
4904    /// # Examples
4905    ///
4906    /// ```
4907    /// use zerocopy::FromBytes;
4908    /// # use zerocopy_derive::*;
4909    ///
4910    /// # #[derive(Debug, PartialEq, Eq)]
4911    /// #[derive(FromBytes, IntoBytes, Immutable)]
4912    /// #[repr(C)]
4913    /// struct Pixel {
4914    ///     r: u8,
4915    ///     g: u8,
4916    ///     b: u8,
4917    ///     a: u8,
4918    /// }
4919    ///
4920    /// // These are more bytes than are needed to encode two `Pixel`s.
4921    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4922    ///
4923    /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
4924    ///
4925    /// assert_eq!(prefix, &[0, 1]);
4926    ///
4927    /// assert_eq!(pixels, &[
4928    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4929    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4930    /// ]);
4931    ///
4932    /// prefix.fill(9);
4933    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4934    ///
4935    /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
4936    /// ```
4937    ///
4938    /// Since an explicit `count` is provided, this method supports types with
4939    /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
4940    /// which do not take an explicit count do not support such types.
4941    ///
4942    /// ```
4943    /// use zerocopy::*;
4944    /// # use zerocopy_derive::*;
4945    ///
4946    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4947    /// #[repr(C, packed)]
4948    /// struct ZSTy {
4949    ///     leading_sized: [u8; 2],
4950    ///     trailing_dst: [()],
4951    /// }
4952    ///
4953    /// let src = &mut [85, 85][..];
4954    /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
4955    /// assert_eq!(zsty.trailing_dst.len(), 42);
4956    /// ```
4957    ///
4958    /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
4959    ///
4960    #[doc = codegen_header!("mut_from_suffix_with_elems")]
4961    ///
4962    /// See [`TryFromBytes::ref_from_suffix_with_elems`](#method.ref_from_suffix_with_elems.codegen).
4963    #[must_use = "has no side effects"]
4964    #[inline]
4965    fn mut_from_suffix_with_elems(
4966        source: &mut [u8],
4967        count: usize,
4968    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
4969    where
4970        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4971    {
4972        mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4973    }
4974
4975    /// Reads a copy of `Self` from the given `source`.
4976    ///
4977    /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
4978    ///
4979    /// # Examples
4980    ///
4981    /// ```
4982    /// use zerocopy::FromBytes;
4983    /// # use zerocopy_derive::*;
4984    ///
4985    /// #[derive(FromBytes)]
4986    /// #[repr(C)]
4987    /// struct PacketHeader {
4988    ///     src_port: [u8; 2],
4989    ///     dst_port: [u8; 2],
4990    ///     length: [u8; 2],
4991    ///     checksum: [u8; 2],
4992    /// }
4993    ///
4994    /// // These bytes encode a `PacketHeader`.
4995    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4996    ///
4997    /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
4998    ///
4999    /// assert_eq!(header.src_port, [0, 1]);
5000    /// assert_eq!(header.dst_port, [2, 3]);
5001    /// assert_eq!(header.length, [4, 5]);
5002    /// assert_eq!(header.checksum, [6, 7]);
5003    /// ```
5004    #[must_use = "has no side effects"]
5005    #[inline]
5006    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
5007    where
5008        Self: Sized,
5009    {
5010        match Ref::<_, Unalign<Self>>::sized_from(source) {
5011            Ok(r) => Ok(Ref::read(&r).into_inner()),
5012            Err(CastError::Size(e)) => Err(e.with_dst()),
5013            Err(CastError::Alignment(_)) => {
5014                // SAFETY: `Unalign<Self>` is trivially aligned, so
5015                // `Ref::sized_from` cannot fail due to unmet alignment
5016                // requirements.
5017                unsafe { core::hint::unreachable_unchecked() }
5018            }
5019            Err(CastError::Validity(i)) => match i {},
5020        }
5021    }
5022
5023    /// Reads a copy of `Self` from the prefix of the given `source`.
5024    ///
5025    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
5026    /// of `source`, returning that `Self` and any remaining bytes. If
5027    /// `source.len() < size_of::<Self>()`, it returns `Err`.
5028    ///
5029    /// # Examples
5030    ///
5031    /// ```
5032    /// use zerocopy::FromBytes;
5033    /// # use zerocopy_derive::*;
5034    ///
5035    /// #[derive(FromBytes)]
5036    /// #[repr(C)]
5037    /// struct PacketHeader {
5038    ///     src_port: [u8; 2],
5039    ///     dst_port: [u8; 2],
5040    ///     length: [u8; 2],
5041    ///     checksum: [u8; 2],
5042    /// }
5043    ///
5044    /// // These are more bytes than are needed to encode a `PacketHeader`.
5045    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
5046    ///
5047    /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
5048    ///
5049    /// assert_eq!(header.src_port, [0, 1]);
5050    /// assert_eq!(header.dst_port, [2, 3]);
5051    /// assert_eq!(header.length, [4, 5]);
5052    /// assert_eq!(header.checksum, [6, 7]);
5053    /// assert_eq!(body, [8, 9]);
5054    /// ```
5055    #[must_use = "has no side effects"]
5056    #[inline]
5057    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
5058    where
5059        Self: Sized,
5060    {
5061        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
5062            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
5063            Err(CastError::Size(e)) => Err(e.with_dst()),
5064            Err(CastError::Alignment(_)) => {
5065                // SAFETY: `Unalign<Self>` is trivially aligned, so
5066                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
5067                // requirements.
5068                unsafe { core::hint::unreachable_unchecked() }
5069            }
5070            Err(CastError::Validity(i)) => match i {},
5071        }
5072    }
5073
5074    /// Reads a copy of `Self` from the suffix of the given `source`.
5075    ///
5076    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
5077    /// of `source`, returning that `Self` and any preceding bytes. If
5078    /// `source.len() < size_of::<Self>()`, it returns `Err`.
5079    ///
5080    /// # Examples
5081    ///
5082    /// ```
5083    /// use zerocopy::FromBytes;
5084    /// # use zerocopy_derive::*;
5085    ///
5086    /// #[derive(FromBytes)]
5087    /// #[repr(C)]
5088    /// struct PacketTrailer {
5089    ///     frame_check_sequence: [u8; 4],
5090    /// }
5091    ///
5092    /// // These are more bytes than are needed to encode a `PacketTrailer`.
5093    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
5094    ///
5095    /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
5096    ///
5097    /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
5098    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
5099    /// ```
5100    #[must_use = "has no side effects"]
5101    #[inline]
5102    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
5103    where
5104        Self: Sized,
5105    {
5106        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
5107            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
5108            Err(CastError::Size(e)) => Err(e.with_dst()),
5109            Err(CastError::Alignment(_)) => {
5110                // SAFETY: `Unalign<Self>` is trivially aligned, so
5111                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
5112                // requirements.
5113                unsafe { core::hint::unreachable_unchecked() }
5114            }
5115            Err(CastError::Validity(i)) => match i {},
5116        }
5117    }
5118
    /// Reads a copy of `Self` from an `io::Read`.
    ///
    /// This is useful for interfacing with operating system byte sources
    /// (files, sockets, etc.).
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use zerocopy::{byteorder::big_endian::*, FromBytes};
    /// use std::fs::File;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct BitmapFileHeader {
    ///     signature: [u8; 2],
    ///     size: U32,
    ///     reserved: U64,
    ///     offset: U64,
    /// }
    ///
    /// let mut file = File::open("image.bin").unwrap();
    /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
    /// ```
    #[cfg(feature = "std")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
    #[inline(always)]
    fn read_from_io<R>(mut src: R) -> io::Result<Self>
    where
        Self: Sized,
        R: io::Read,
    {
        // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
        // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
        // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
        // will not necessarily preserve zeros written to those padding byte
        // locations, and so `buf` could contain uninitialized bytes.
        let mut buf = CoreMaybeUninit::<Self>::uninit();
        buf.zero();

        let ptr = Ptr::from_mut(&mut buf);
        // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
        // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
        // cannot be used to write values which will violate `buf`'s bit
        // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
        // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
        // cannot be violated even though `buf` may have more permissive bit
        // validity than `ptr`.
        let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
        let ptr = ptr.as_bytes();
        // Fill the (already-initialized) buffer directly from the reader;
        // errors (including `UnexpectedEof`) propagate to the caller.
        src.read_exact(ptr.as_mut())?;
        // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
        // `FromBytes`.
        Ok(unsafe { buf.assume_init() })
    }
5174
5175    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
5176    #[doc(hidden)]
5177    #[must_use = "has no side effects"]
5178    #[inline(always)]
5179    fn ref_from(source: &[u8]) -> Option<&Self>
5180    where
5181        Self: KnownLayout + Immutable,
5182    {
5183        Self::ref_from_bytes(source).ok()
5184    }
5185
5186    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
5187    #[doc(hidden)]
5188    #[must_use = "has no side effects"]
5189    #[inline(always)]
5190    fn mut_from(source: &mut [u8]) -> Option<&mut Self>
5191    where
5192        Self: KnownLayout + IntoBytes,
5193    {
5194        Self::mut_from_bytes(source).ok()
5195    }
5196
5197    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
5198    #[doc(hidden)]
5199    #[must_use = "has no side effects"]
5200    #[inline(always)]
5201    fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
5202    where
5203        Self: Sized + Immutable,
5204    {
5205        <[Self]>::ref_from_prefix_with_elems(source, count).ok()
5206    }
5207
5208    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
5209    #[doc(hidden)]
5210    #[must_use = "has no side effects"]
5211    #[inline(always)]
5212    fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
5213    where
5214        Self: Sized + Immutable,
5215    {
5216        <[Self]>::ref_from_suffix_with_elems(source, count).ok()
5217    }
5218
5219    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
5220    #[doc(hidden)]
5221    #[must_use = "has no side effects"]
5222    #[inline(always)]
5223    fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
5224    where
5225        Self: Sized + IntoBytes,
5226    {
5227        <[Self]>::mut_from_prefix_with_elems(source, count).ok()
5228    }
5229
5230    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
5231    #[doc(hidden)]
5232    #[must_use = "has no side effects"]
5233    #[inline(always)]
5234    fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
5235    where
5236        Self: Sized + IntoBytes,
5237    {
5238        <[Self]>::mut_from_suffix_with_elems(source, count).ok()
5239    }
5240
5241    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
5242    #[doc(hidden)]
5243    #[must_use = "has no side effects"]
5244    #[inline(always)]
5245    fn read_from(source: &[u8]) -> Option<Self>
5246    where
5247        Self: Sized,
5248    {
5249        Self::read_from_bytes(source).ok()
5250    }
5251}
5252
/// Interprets the given affix of the given bytes as a `&Self`.
///
/// This function computes the largest possible size of `Self` that can fit in
/// the prefix or suffix bytes of `source`, then attempts to return both a
/// reference to those bytes interpreted as a `Self`, and a reference to the
/// excess bytes. If there are insufficient bytes, or if that affix of `source`
/// is not appropriately aligned, this returns `Err`.
#[inline(always)]
fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
    source: &[u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
    // `meta`, when `Some`, pins the trailing-slice element count; when `None`,
    // the cast sizes `T` to consume as much of `source` as possible. On
    // failure, convert the error's carried `Ptr` back into the caller's
    // `&[u8]` so the error type matches the public API.
    let (slf, prefix_suffix) = Ptr::from_ref(source)
        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_ref()))?;
    // `recall_validity` re-establishes, at the type level, that the
    // initialized bytes are a valid `T` (presumably justified by
    // `T: FromBytes` — the proof machinery lives in `Ptr`), after which a
    // plain shared reference can be produced.
    Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
}
5271
/// Interprets the given affix of the given bytes as a `&mut Self` without
/// copying.
///
/// This function computes the largest possible size of `Self` that can fit in
/// the prefix or suffix bytes of `source`, then attempts to return both a
/// reference to those bytes interpreted as a `Self`, and a reference to the
/// excess bytes. If there are insufficient bytes, or if that affix of `source`
/// is not appropriately aligned, this returns `Err`.
#[inline(always)]
fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
    source: &mut [u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
    // `meta`, when `Some`, pins the trailing-slice element count; when `None`,
    // the cast sizes `T` to consume as much of `source` as possible. On
    // failure, convert the error's carried `Ptr` back into the caller's
    // `&mut [u8]` so the error type matches the public API.
    let (slf, prefix_suffix) = Ptr::from_mut(source)
        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_mut()))?;
    // `recall_validity` re-establishes, at the type level, that the
    // initialized bytes are a valid `T` (presumably justified by
    // `T: FromBytes` plus the exclusive-aliasing proof — the machinery lives
    // in `Ptr`), after which an exclusive reference can be produced.
    Ok((slf.recall_validity::<_, (_, (_, _))>().as_mut(), prefix_suffix.as_mut()))
}
5291
5292/// Analyzes whether a type is [`IntoBytes`].
5293///
5294/// This derive analyzes, at compile time, whether the annotated type satisfies
5295/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
5296/// sound to do so. This derive can be applied to structs and enums (see below
5297/// for union support); e.g.:
5298///
5299/// ```
5300/// # use zerocopy_derive::{IntoBytes};
5301/// #[derive(IntoBytes)]
5302/// #[repr(C)]
5303/// struct MyStruct {
5304/// # /*
5305///     ...
5306/// # */
5307/// }
5308///
5309/// #[derive(IntoBytes)]
5310/// #[repr(u8)]
5311/// enum MyEnum {
5312/// #   Variant,
5313/// # /*
5314///     ...
5315/// # */
5316/// }
5317/// ```
5318///
5319/// [safety conditions]: trait@IntoBytes#safety
5320///
5321/// # Error Messages
5322///
5323/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
5324/// for `IntoBytes` is implemented, you may get an error like this:
5325///
5326/// ```text
5327/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
5328///   --> lib.rs:23:10
5329///    |
5330///  1 | #[derive(IntoBytes)]
5331///    |          ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
5332///    |
5333///    = help: the following implementations were found:
5334///                   <() as PaddingFree<T, false>>
5335/// ```
5336///
5337/// This error indicates that the type being annotated has padding bytes, which
5338/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
5339/// fields by using types in the [`byteorder`] module, wrapping field types in
5340/// [`Unalign`], adding explicit struct fields where those padding bytes would
5341/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
5342/// layout] for more information about type layout and padding.
5343///
5344/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
5345///
5346/// # Unions
5347///
5348/// Currently, union bit validity is [up in the air][union-validity], and so
5349/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
5350/// However, implementing `IntoBytes` on a union type is likely sound on all
5351/// existing Rust toolchains - it's just that it may become unsound in the
5352/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
5353/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
5354///
5355/// ```shell
5356/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
5357/// ```
5358///
5359/// However, it is your responsibility to ensure that this derive is sound on
5360/// the specific versions of the Rust toolchain you are using! We make no
5361/// stability or soundness guarantees regarding this cfg, and may remove it at
5362/// any point.
5363///
5364/// We are actively working with Rust to stabilize the necessary language
5365/// guarantees to support this in a forwards-compatible way, which will enable
5366/// us to remove the cfg gate. As part of this effort, we need to know how much
5367/// demand there is for this feature. If you would like to use `IntoBytes` on
5368/// unions, [please let us know][discussion].
5369///
5370/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
5371/// [discussion]: https://github.com/google/zerocopy/discussions/1802
5372///
5373/// # Analysis
5374///
5375/// *This section describes, roughly, the analysis performed by this derive to
5376/// determine whether it is sound to implement `IntoBytes` for a given type.
5377/// Unless you are modifying the implementation of this derive, or attempting to
5378/// manually implement `IntoBytes` for a type yourself, you don't need to read
5379/// this section.*
5380///
5381/// If a type has the following properties, then this derive can implement
5382/// `IntoBytes` for that type:
5383///
5384/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
5385///     - if the type is `repr(transparent)` or `repr(packed)`, it is
5386///       [`IntoBytes`] if its fields are [`IntoBytes`]; else,
5387///     - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
5388///       if its field is [`IntoBytes`]; else,
5389///     - if the type has no generic parameters, it is [`IntoBytes`] if the type
5390///       is sized and has no padding bytes; else,
5391///     - if the type is `repr(C)`, its fields must be [`Unaligned`].
5392/// - If the type is an enum:
5393///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
5394///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
5395///   - It must have no padding bytes.
5396///   - Its fields must be [`IntoBytes`].
5397///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
/// implementation details of this derive.
5401///
5402/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
5403#[cfg(any(feature = "derive", test))]
5404#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5405pub use zerocopy_derive::IntoBytes;
5406
/// Types that can be converted to an immutable slice of initialized bytes.
///
/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
/// same size. This is useful for efficiently serializing structured data as raw
/// bytes.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(IntoBytes)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::IntoBytes;
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(IntoBytes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `IntoBytes`. See the [derive
/// documentation][derive] for guidance on how to interpret error messages
/// produced by the derive's analysis.
///
/// # Safety
///
/// *This section describes what is required in order for `T: IntoBytes`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `IntoBytes` manually, and you don't plan on writing unsafe code that
/// operates on `IntoBytes` types, then you don't need to read this section.*
///
/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
/// marked as `IntoBytes` which violates this contract, it may cause undefined
/// behavior.
///
/// `#[derive(IntoBytes)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::IntoBytes",
    doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
)]
pub unsafe trait IntoBytes {
    // The `Self: Sized` bound makes it so that this function doesn't prevent
    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
    // prevent object safety, but those provide a benefit in exchange for object
    // safety. If at some point we remove those methods, change their type
    // signatures, or move them out of this trait so that `IntoBytes` is object
    // safe again, it's important that this function not prevent object safety.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// Gets the bytes of this value.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let bytes = header.as_bytes();
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_bytes(&self) -> &[u8]
    where
        Self: Immutable,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        let slf: *const Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
        //   many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - Since `slf` is derived from `self`, and `self` is an immutable
        //   reference, the only other references to this memory region that
        //   could exist are other immutable references, which by `Self:
        //   Immutable` don't permit mutation.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
    }

    /// Gets the bytes of this value mutably.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Eq, PartialEq, Debug)]
    /// #[derive(FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let mut header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let bytes = header.as_mut_bytes();
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    ///
    /// bytes.reverse();
    ///
    /// assert_eq!(header, PacketHeader {
    ///     src_port: [7, 6],
    ///     dst_port: [5, 4],
    ///     length: [3, 2],
    ///     checksum: [1, 0],
    /// });
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_mut_bytes(&mut self) -> &mut [u8]
    where
        Self: FromBytes,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        let slf: *mut Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
        //   size_of::<u8>()` many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - `Self: FromBytes` ensures that no write to this memory region
        //   could result in it containing an invalid `Self`.
        // - Since `slf` is derived from `self`, and `self` is a mutable
        //   reference, no other references to this memory region can exist.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
    }

    /// Writes a copy of `self` to `dst`.
    ///
    /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    /// ```
    ///
    /// If too many or too few target bytes are provided, `write_to` returns
    /// `Err` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// # let header = u128::MAX;
    /// let mut excessive_bytes = &mut [0u8; 128][..];
    ///
    /// let write_result = header.write_to(excessive_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(excessive_bytes, [0u8; 128]);
    /// ```
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        if dst.len() == src.len() {
            // SAFETY: Within this branch of the conditional, we have ensured
            // that `dst.len()` is equal to `src.len()`. Neither the size of the
            // source nor the size of the destination change between the above
            // size check and the invocation of `copy_unchecked`.
            unsafe { util::copy_unchecked(src, dst) }
            Ok(())
        } else {
            Err(SizeError::new(self))
        }
    }

    /// Writes a copy of `self` to the prefix of `dst`.
    ///
    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
    /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to_prefix(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
    /// ```
    ///
    /// If insufficient target bytes are provided, `write_to_prefix` returns
    /// `Err` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_prefix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        match dst.get_mut(..src.len()) {
            Some(dst) => {
                // SAFETY: Within this branch of the `match`, we have ensured
                // through fallible subslicing that `dst.len()` is equal to
                // `src.len()`. Neither the size of the source nor the size of
                // the destination change between the above subslicing operation
                // and the invocation of `copy_unchecked`.
                unsafe { util::copy_unchecked(src, dst) }
                Ok(())
            }
            None => Err(SizeError::new(self)),
        }
    }

    /// Writes a copy of `self` to the suffix of `dst`.
    ///
    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
    /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to_suffix(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
    ///
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    ///
    /// If insufficient target bytes are provided, `write_to_suffix` returns
    /// `Err` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
    where
        Self: Immutable,
    {
        let src = self.as_bytes();
        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
            start
        } else {
            return Err(SizeError::new(self));
        };
        let dst = if let Some(dst) = dst.get_mut(start..) {
            dst
        } else {
            // get_mut() should never return None here. We return a `SizeError`
            // rather than .unwrap() because in the event the branch is not
            // optimized away, returning a value is generally lighter-weight
            // than panicking.
            return Err(SizeError::new(self));
        };
        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
        // `dst.len()` is equal to `src.len()`. Neither the size of the source
        // nor the size of the destination change between the above subslicing
        // operation and the invocation of `copy_unchecked`.
        unsafe {
            util::copy_unchecked(src, dst);
        }
        Ok(())
    }

    /// Writes a copy of `self` to an `io::Write`.
    ///
    /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
    /// for interfacing with operating system byte sinks (files, sockets, etc.).
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
    /// use std::fs::File;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct GrayscaleImage {
    ///     height: U16,
    ///     width: U16,
    ///     pixels: [U16],
    /// }
    ///
    /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
    /// let mut file = File::create("image.bin").unwrap();
    /// image.write_to_io(&mut file).unwrap();
    /// ```
    ///
    /// If the write fails, `write_to_io` returns `Err` and a partial write may
    /// have occurred; e.g.:
    ///
    /// ```
    /// # use zerocopy::IntoBytes;
    ///
    /// let src = u128::MAX;
    /// let mut dst = [0u8; 2];
    ///
    /// let write_result = src.write_to_io(&mut dst[..]);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(dst, [255, 255]);
    /// ```
    #[cfg(feature = "std")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
    #[inline(always)]
    fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
    where
        Self: Immutable,
        W: io::Write,
    {
        dst.write_all(self.as_bytes())
    }

    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
    #[doc(hidden)]
    #[inline]
    fn as_bytes_mut(&mut self) -> &mut [u8]
    where
        Self: FromBytes,
    {
        self.as_mut_bytes()
    }
}
5899
5900/// Analyzes whether a type is [`Unaligned`].
5901///
5902/// This derive analyzes, at compile time, whether the annotated type satisfies
5903/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
5904/// sound to do so. This derive can be applied to structs, enums, and unions;
5905/// e.g.:
5906///
5907/// ```
5908/// # use zerocopy_derive::Unaligned;
5909/// #[derive(Unaligned)]
5910/// #[repr(C)]
5911/// struct MyStruct {
5912/// # /*
5913///     ...
5914/// # */
5915/// }
5916///
5917/// #[derive(Unaligned)]
5918/// #[repr(u8)]
5919/// enum MyEnum {
5920/// #   Variant0,
5921/// # /*
5922///     ...
5923/// # */
5924/// }
5925///
5926/// #[derive(Unaligned)]
5927/// #[repr(packed)]
5928/// union MyUnion {
5929/// #   variant: u8,
5930/// # /*
5931///     ...
5932/// # */
5933/// }
5934/// ```
5935///
5936/// # Analysis
5937///
5938/// *This section describes, roughly, the analysis performed by this derive to
5939/// determine whether it is sound to implement `Unaligned` for a given type.
5940/// Unless you are modifying the implementation of this derive, or attempting to
5941/// manually implement `Unaligned` for a type yourself, you don't need to read
5942/// this section.*
5943///
5944/// If a type has the following properties, then this derive can implement
5945/// `Unaligned` for that type:
5946///
5947/// - If the type is a struct or union:
5948///   - If `repr(align(N))` is provided, `N` must equal 1.
5949///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
5950///     [`Unaligned`].
5951///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
5952///     `repr(packed)` or `repr(packed(1))`.
5953/// - If the type is an enum:
5954///   - If `repr(align(N))` is provided, `N` must equal 1.
5955///   - It must be a field-less enum (meaning that all variants have no fields).
5956///   - It must be `repr(i8)` or `repr(u8)`.
5957///
5958/// [safety conditions]: trait@Unaligned#safety
5959#[cfg(any(feature = "derive", test))]
5960#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5961pub use zerocopy_derive::Unaligned;
5962
/// Types with no alignment requirement.
///
/// If `T: Unaligned`, then `align_of::<T>() == 1`.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(Unaligned)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::Unaligned;
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(packed)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `Unaligned`.
///
/// # Safety
///
/// *This section describes what is required in order for `T: Unaligned`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `Unaligned` manually, and you don't plan on writing unsafe code that
/// operates on `Unaligned` types, then you don't need to read this section.*
///
/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
/// reference to `T` at any memory location regardless of alignment. If a type
/// is marked as `Unaligned` which violates this contract, it may cause
/// undefined behavior.
///
/// `#[derive(Unaligned)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::Unaligned",
    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
)]
pub unsafe trait Unaligned {
    // The `Self: Sized` bound makes it so that `Unaligned` is still object
    // safe.
    //
    // This `#[doc(hidden)]` marker method's name signals that manual impls are
    // unsupported; as documented above, use `#[derive(Unaligned)]` instead.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
6041
6042/// Derives optimized [`PartialEq`] and [`Eq`] implementations.
6043///
6044/// This derive can be applied to structs and enums implementing both
6045/// [`Immutable`] and [`IntoBytes`]; e.g.:
6046///
6047/// ```
6048/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
6049/// #[derive(ByteEq, Immutable, IntoBytes)]
6050/// #[repr(C)]
6051/// struct MyStruct {
6052/// # /*
6053///     ...
6054/// # */
6055/// }
6056///
6057/// #[derive(ByteEq, Immutable, IntoBytes)]
6058/// #[repr(u8)]
6059/// enum MyEnum {
6060/// #   Variant,
6061/// # /*
6062///     ...
6063/// # */
6064/// }
6065/// ```
6066///
/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes
/// equality by individually comparing each field. Instead, the implementation
/// of [`PartialEq::eq`] emitted by `derive(ByteEq)` converts the entirety of
/// `self` and `other` to byte slices and compares those slices for equality.
/// This may have performance advantages.
6072#[cfg(any(feature = "derive", test))]
6073#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6074pub use zerocopy_derive::ByteEq;
6075/// Derives an optimized [`Hash`] implementation.
6076///
6077/// This derive can be applied to structs and enums implementing both
6078/// [`Immutable`] and [`IntoBytes`]; e.g.:
6079///
6080/// ```
6081/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
6082/// #[derive(ByteHash, Immutable, IntoBytes)]
6083/// #[repr(C)]
6084/// struct MyStruct {
6085/// # /*
6086///     ...
6087/// # */
6088/// }
6089///
6090/// #[derive(ByteHash, Immutable, IntoBytes)]
6091/// #[repr(u8)]
6092/// enum MyEnum {
6093/// #   Variant,
6094/// # /*
6095///     ...
6096/// # */
6097/// }
6098/// ```
6099///
6100/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by
6101/// individually hashing each field and combining the results. Instead, the
6102/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by
6103/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hashes
6104/// it in a single call to [`Hasher::write()`]. This may have performance
6105/// advantages.
6106///
6107/// [`Hash`]: core::hash::Hash
6108/// [`Hash::hash()`]: core::hash::Hash::hash()
6109/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
6110#[cfg(any(feature = "derive", test))]
6111#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6112pub use zerocopy_derive::ByteHash;
6113/// Implements [`SplitAt`].
6114///
6115/// This derive can be applied to structs; e.g.:
6116///
6117/// ```
/// # use zerocopy_derive::{KnownLayout, SplitAt};
/// #[derive(KnownLayout, SplitAt)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// #   // `SplitAt` operates on the trailing slice field.
/// #   trailing: [u8],
/// }
6126/// ```
6127#[cfg(any(feature = "derive", test))]
6128#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6129pub use zerocopy_derive::SplitAt;
6130
// Deprecated free-function forms of the `FromZeros` `Vec` helpers, kept for
// backwards compatibility; each function simply delegates to the `FromZeros`
// method of the same name.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
mod alloc_support {
    use super::*;

    /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
    /// vector. The new items are initialized with zeros.
    ///
    /// # Errors
    ///
    /// Returns `Err(AllocError)` if the delegated `FromZeros` method reports an
    /// allocation failure.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn extend_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        additional: usize,
    ) -> Result<(), AllocError> {
        <T as FromZeros>::extend_vec_zeroed(v, additional)
    }

    /// Inserts `additional` new items into `Vec<T>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    ///
    /// # Errors
    ///
    /// Returns `Err(AllocError)` if the delegated `FromZeros` method reports an
    /// allocation failure.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn insert_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError> {
        <T as FromZeros>::insert_vec_zeroed(v, position, additional)
    }
}
6168
6169#[cfg(feature = "alloc")]
6170#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6171#[doc(hidden)]
6172pub use alloc_support::*;
6173
6174#[cfg(test)]
6175#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
6176mod tests {
6177    use static_assertions::assert_impl_all;
6178
6179    use super::*;
6180    use crate::util::testutil::*;
6181
    // An unsized type: a `repr(transparent)` wrapper around a bare `[u8]`
    // slice (a DST).
    //
    // This is used to test the custom derives of our traits. The `[u8]` type
    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
    #[repr(transparent)]
    struct Unsized([u8]);
6189
    impl Unsized {
        /// Reinterprets a `&mut [u8]` as a `&mut Unsized` in place (test-only
        /// helper; no bytes are copied).
        fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
            // SAFETY: This *probably* sound - since the layouts of `[u8]` and
            // `Unsized` are the same, so are the layouts of `&mut [u8]` and
            // `&mut Unsized`. [1] Even if it turns out that this isn't actually
            // guaranteed by the language spec, we can just change this since
            // it's in test code.
            //
            // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
            unsafe { mem::transmute(slc) }
        }
    }
6202
6203    #[test]
6204    fn test_known_layout() {
6205        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
6206        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
6207        // of `$ty`.
6208        macro_rules! test {
6209            ($ty:ty, $expect:expr) => {
6210                let expect = $expect;
6211                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
6212                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
6213                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
6214            };
6215        }
6216
6217        let layout =
6218            |offset, align, trailing_slice_elem_size, statically_shallow_unpadded| DstLayout {
6219                align: NonZeroUsize::new(align).unwrap(),
6220                size_info: match trailing_slice_elem_size {
6221                    None => SizeInfo::Sized { size: offset },
6222                    Some(elem_size) => {
6223                        SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size })
6224                    }
6225                },
6226                statically_shallow_unpadded,
6227            };
6228
6229        test!((), layout(0, 1, None, false));
6230        test!(u8, layout(1, 1, None, false));
6231        // Use `align_of` because `u64` alignment may be smaller than 8 on some
6232        // platforms.
6233        test!(u64, layout(8, mem::align_of::<u64>(), None, false));
6234        test!(AU64, layout(8, 8, None, false));
6235
6236        test!(Option<&'static ()>, usize::LAYOUT);
6237
6238        test!([()], layout(0, 1, Some(0), true));
6239        test!([u8], layout(0, 1, Some(1), true));
6240        test!(str, layout(0, 1, Some(1), true));
6241    }
6242
    #[cfg(feature = "derive")]
    #[test]
    fn test_known_layout_derive() {
        // In this and other files (`late_compile_pass.rs`,
        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
        // modes of `derive(KnownLayout)` for the following combination of
        // properties:
        //
        // +------------+--------------------------------------+-----------+
        // |            |      trailing field properties       |           |
        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |------------+----------+----------------+----------+-----------|
        // |          N |        N |              N |        N |      KL00 |
        // |          N |        N |              N |        Y |      KL01 |
        // |          N |        N |              Y |        N |      KL02 |
        // |          N |        N |              Y |        Y |      KL03 |
        // |          N |        Y |              N |        N |      KL04 |
        // |          N |        Y |              N |        Y |      KL05 |
        // |          N |        Y |              Y |        N |      KL06 |
        // |          N |        Y |              Y |        Y |      KL07 |
        // |          Y |        N |              N |        N |      KL08 |
        // |          Y |        N |              N |        Y |      KL09 |
        // |          Y |        N |              Y |        N |      KL10 |
        // |          Y |        N |              Y |        Y |      KL11 |
        // |          Y |        Y |              N |        N |      KL12 |
        // |          Y |        Y |              N |        Y |      KL13 |
        // |          Y |        Y |              Y |        N |      KL14 |
        // |          Y |        Y |              Y |        Y |      KL15 |
        // +------------+----------+----------------+----------+-----------+

        // A field type that deliberately does not implement `KnownLayout`,
        // used to exercise derives whose leading fields need not be
        // `KnownLayout`.
        struct NotKnownLayout<T = ()> {
            _t: T,
        }

        // A helper type with alignment `ALIGN` and size `SIZE`, so tests can
        // pick exact alignments/sizes rather than relying on primitives'
        // platform-dependent alignment.
        #[derive(KnownLayout)]
        #[repr(C)]
        struct AlignSize<const ALIGN: usize, const SIZE: usize>
        where
            elain::Align<ALIGN>: elain::Alignment,
        {
            _align: elain::Align<ALIGN>,
            size: [u8; SIZE],
        }

        type AU16 = AlignSize<2, 2>;
        type AU32 = AlignSize<4, 4>;

        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}

        // Shorthand for the expected `DstLayout` of a sized type.
        let sized_layout = |align, size| DstLayout {
            align: NonZeroUsize::new(align).unwrap(),
            size_info: SizeInfo::Sized { size },
            statically_shallow_unpadded: false,
        };

        // Shorthand for the expected `DstLayout` of a slice DST.
        let unsized_layout = |align, elem_size, offset, statically_shallow_unpadded| DstLayout {
            align: NonZeroUsize::new(align).unwrap(),
            size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
            statically_shallow_unpadded,
        };

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        N |              N |        Y |      KL01 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01>();

        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));

        // ...with `align(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(align(64))]
        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01Align>();

        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ...with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed)]
        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01Packed>();

        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));

        // ...with `packed(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed(2))]
        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        assert_impl_all!(KL01PackedN: KnownLayout);

        let expected = DstLayout::for_type::<KL01PackedN>();

        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        N |              Y |        Y |      KL03 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL03(NotKnownLayout, u8);

        let expected = DstLayout::for_type::<KL03>();

        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));

        // ... with `align(N)`
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(align(64))]
        struct KL03Align(NotKnownLayout<AU32>, u8);

        let expected = DstLayout::for_type::<KL03Align>();

        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ... with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed)]
        struct KL03Packed(NotKnownLayout<AU32>, u8);

        let expected = DstLayout::for_type::<KL03Packed>();

        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));

        // ... with `packed(N)`
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed(2))]
        struct KL03PackedN(NotKnownLayout<AU32>, u8);

        assert_impl_all!(KL03PackedN: KnownLayout);

        let expected = DstLayout::for_type::<KL03PackedN>();

        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        Y |              N |        Y |      KL05 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL05<T>(u8, T);

        fn _test_kl05<T>(t: T) -> impl KnownLayout {
            KL05(0u8, t)
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        Y |              Y |        Y |      KL07 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL07<T: KnownLayout>(u8, T);

        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
            let _ = KL07(0u8, t);
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        N |              Y |        N |      KL10 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL10(NotKnownLayout<AU32>, [u8]);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
            .extend(<[u8] as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4, false));

        // ...with `align(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, align(64))]
        struct KL10Align(NotKnownLayout<AU32>, [u8]);

        let repr_align = NonZeroUsize::new(64);

        let expected = DstLayout::new_zst(repr_align)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
            .extend(<[u8] as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4, false));

        // ...with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed)]
        struct KL10Packed(NotKnownLayout<AU32>, [u8]);

        let repr_packed = NonZeroUsize::new(1);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4, false));

        // ...with `packed(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed(2))]
        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);

        let repr_packed = NonZeroUsize::new(2);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        N |              Y |        Y |      KL11 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL11(NotKnownLayout<AU64>, u8);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
            .extend(<u8 as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));

        // ...with `align(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, align(64))]
        struct KL11Align(NotKnownLayout<AU64>, u8);

        let repr_align = NonZeroUsize::new(64);

        let expected = DstLayout::new_zst(repr_align)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
            .extend(<u8 as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ...with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed)]
        struct KL11Packed(NotKnownLayout<AU64>, u8);

        let repr_packed = NonZeroUsize::new(1);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));

        // ...with `packed(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed(2))]
        struct KL11PackedN(NotKnownLayout<AU64>, u8);

        let repr_packed = NonZeroUsize::new(2);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        Y |              Y |        N |      KL14 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL14<T: ?Sized + KnownLayout>(u8, T);

        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
            _assert_kl(kl)
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        Y |              Y |        Y |      KL15 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL15<T: KnownLayout>(u8, T);

        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
            let _ = KL15(0u8, t);
        }

        // Test a variety of combinations of field types:
        //  - ()
        //  - u8
        //  - AU16
        //  - [()]
        //  - [u8]
        //  - [AU16]

        #[allow(clippy::upper_case_acronyms, dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLTU<T, U: ?Sized>(T, U);

        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));

        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));

        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));

        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0, false));

        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, false));

        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0, false));

        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));

        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));

        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1, false));

        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));

        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));

        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));

        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2, false));

        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2, false));

        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));

        // Test a variety of field counts.

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF0;

        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF1([u8]);

        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, true));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF2(NotKnownLayout<u8>, [u8]);

        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);

        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);

        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8, false));
    }
6645
6646    #[test]
6647    fn test_object_safety() {
6648        fn _takes_immutable(_: &dyn Immutable) {}
6649        fn _takes_unaligned(_: &dyn Unaligned) {}
6650    }
6651
6652    #[test]
6653    fn test_from_zeros_only() {
6654        // Test types that implement `FromZeros` but not `FromBytes`.
6655
6656        assert!(!bool::new_zeroed());
6657        assert_eq!(char::new_zeroed(), '\0');
6658
6659        #[cfg(feature = "alloc")]
6660        {
6661            assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6662            assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6663
6664            assert_eq!(
6665                <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6666                [false, false, false]
6667            );
6668            assert_eq!(
6669                <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6670                ['\0', '\0', '\0']
6671            );
6672
6673            assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6674            assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6675        }
6676
6677        let mut string = "hello".to_string();
6678        let s: &mut str = string.as_mut();
6679        assert_eq!(s, "hello");
6680        s.zero();
6681        assert_eq!(s, "\0\0\0\0\0");
6682    }
6683
    #[test]
    fn test_zst_count_preserved() {
        // Test that, when an explicit count is provided for a type with a
        // ZST trailing slice element, that count is preserved. This is
        // important since, for such types, all element counts result in objects
        // of the same size, and so the correct behavior is ambiguous. However,
        // preserving the count as requested by the user is the behavior that we
        // document publicly.

        // FromZeros methods
        #[cfg(feature = "alloc")]
        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
        #[cfg(feature = "alloc")]
        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);

        // FromBytes methods
        //
        // All conversions below are fed an empty byte slice: since the element
        // type is the ZST `()`, zero bytes must be accepted for any requested
        // element count, and the resulting slice length must equal that count.
        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
    }
6707
6708    #[test]
6709    fn test_read_write() {
6710        const VAL: u64 = 0x12345678;
6711        #[cfg(target_endian = "big")]
6712        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6713        #[cfg(target_endian = "little")]
6714        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6715        const ZEROS: [u8; 8] = [0u8; 8];
6716
6717        // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
6718
6719        assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
6720        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6721        // zeros.
6722        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6723        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
6724        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
6725        // The first 8 bytes are all zeros and the second 8 bytes are from
6726        // `VAL_BYTES`
6727        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6728        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
6729        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));
6730
6731        // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.
6732
6733        let mut bytes = [0u8; 8];
6734        assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
6735        assert_eq!(bytes, VAL_BYTES);
6736        let mut bytes = [0u8; 16];
6737        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
6738        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6739        assert_eq!(bytes, want);
6740        let mut bytes = [0u8; 16];
6741        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
6742        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6743        assert_eq!(bytes, want);
6744    }
6745
    #[test]
    #[cfg(feature = "std")]
    fn test_read_io_with_padding_soundness() {
        // This test is designed to exhibit potential UB in
        // `FromBytes::read_from_io`. (see #2319, #2320).

        // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
        // will have inter-field padding between `x` and `y`.
        #[derive(FromBytes)]
        #[repr(C)]
        struct WithPadding {
            x: u8,
            y: u16,
        }
        // A `Read` impl which *inspects* the contents of `buf` before writing
        // to it. If `read_from_io` were to hand it a buffer with uninitialized
        // bytes (e.g. one overlaying `WithPadding`'s padding), that inspection
        // would be UB — which is exactly what this test (run under Miri) is
        // meant to catch.
        struct ReadsInRead;
        impl std::io::Read for ReadsInRead {
            fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
                // This body branches on every byte of `buf`, ensuring that it
                // exhibits UB if any byte of `buf` is uninitialized.
                if buf.iter().all(|&x| x == 0) {
                    Ok(buf.len())
                } else {
                    buf.iter_mut().for_each(|x| *x = 0);
                    Ok(buf.len())
                }
            }
        }
        // Either branch zero-fills (or finds already-zero) `buf`, so on a
        // sound implementation the result is all-zeros.
        assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
    }
6775
6776    #[test]
6777    #[cfg(feature = "std")]
6778    fn test_read_write_io() {
6779        let mut long_buffer = [0, 0, 0, 0];
6780        assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
6781        assert_eq!(long_buffer, [255, 255, 0, 0]);
6782        assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
6783
6784        let mut short_buffer = [0, 0];
6785        assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
6786        assert_eq!(short_buffer, [255, 255]);
6787        assert!(u32::read_from_io(&short_buffer[..]).is_err());
6788    }
6789
    #[test]
    fn test_try_from_bytes_try_read_from() {
        // Test `TryFromBytes::{try_read_from_bytes, try_read_from_prefix,
        // try_read_from_suffix}`: success, size-error, and validity-error
        // cases, plus reads from misaligned buffers.

        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));

        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));

        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));

        // If we don't pass enough bytes, it fails.
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_bytes(&[]),
            Err(TryReadError::Size(_))
        ));
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_prefix(&[]),
            Err(TryReadError::Size(_))
        ));
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_suffix(&[]),
            Err(TryReadError::Size(_))
        ));

        // If we pass too many bytes, it fails. (Only `try_read_from_bytes`
        // requires an exact-length buffer; the prefix/suffix variants accept
        // extra bytes by design.)
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
            Err(TryReadError::Size(_))
        ));

        // If we pass an invalid value, it fails. (2 is not a valid `bool`
        // byte; only 0 and 1 are.)
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_bytes(&[2]),
            Err(TryReadError::Validity(_))
        ));
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
            Err(TryReadError::Validity(_))
        ));
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
            Err(TryReadError::Validity(_))
        ));

        // Reading from a misaligned buffer should still succeed. Since `AU64`'s
        // alignment is 8, and since we read from two adjacent addresses one
        // byte apart, it is guaranteed that at least one of them (though
        // possibly both) will be misaligned.
        let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));

        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
            Ok((AU64(0), &[][..]))
        );
        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
            Ok((AU64(0), &[][..]))
        );

        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
            Ok((&[][..], AU64(0)))
        );
        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
            Ok((&[][..], AU64(0)))
        );
    }
6861
    #[test]
    fn test_ref_from_mut_from_bytes() {
        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,_suffix}`
        // success cases. Exhaustive coverage for these methods is covered by
        // the `Ref` tests above, which these helper methods defer to.
        //
        // NOTE: statement order below is load-bearing — each mutation through
        // a `mut_from_*` reference is observed by the assertions that follow.

        let mut buf =
            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);

        // `buf` is 8-aligned, so `buf.t[8..]` is too; the conversion succeeds
        // and reproduces bytes 8..16.
        assert_eq!(
            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
            [8, 9, 10, 11, 12, 13, 14, 15]
        );
        // Overwrite bytes 8..16 with 0x01 each (endian-independent since every
        // byte of the value is 0x01).
        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
        suffix.0 = 0x0101010101010101;
        // The `[u8; 9]` is a non-half size of the full buffer, which would catch
        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
        assert_eq!(
            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
        );
        // The suffix `AU64` of `buf.t[1..]` covers `buf.t[8..16]`; overwrite it
        // with 0x02 each. The returned prefix covers `buf.t[1..8]`.
        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
        suffix.0 = 0x0202020202020202;
        // The suffix `[u8; 10]` covers `buf.t[6..16]`; set its first byte
        // (`buf.t[6]`) to 42.
        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
        suffix[0] = 42;
        assert_eq!(
            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
        );
        // Finally, mutate `buf.t[1]` through a prefix conversion and check the
        // cumulative effect of all mutations above.
        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
    }
6896
6897    #[test]
6898    fn test_ref_from_mut_from_bytes_error() {
6899        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
6900        // error cases.
6901
6902        // Fail because the buffer is too large.
6903        let mut buf = Align::<[u8; 16], AU64>::default();
6904        // `buf.t` should be aligned to 8, so only the length check should fail.
6905        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6906        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6907        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6908        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6909
6910        // Fail because the buffer is too small.
6911        let mut buf = Align::<[u8; 4], AU64>::default();
6912        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6913        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6914        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6915        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6916        assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
6917        assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
6918        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6919        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6920        assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
6921        assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
6922        assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
6923        assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
6924
6925        // Fail because the alignment is insufficient.
6926        let mut buf = Align::<[u8; 13], AU64>::default();
6927        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6928        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6929        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6930        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6931        assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
6932        assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
6933        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6934        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6935    }
6936
    #[test]
    fn test_to_methods() {
        /// Run a series of tests by calling `IntoBytes` methods on `t`.
        ///
        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
        /// before `t` has been modified. `post_mutation` is the expected
        /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
        /// has had its bits flipped (by applying `^= 0xFF`).
        ///
        /// `N` is the size of `t` in bytes.
        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
            t: &mut T,
            bytes: &[u8],
            post_mutation: &T,
        ) {
            // Test that we can access the underlying bytes, and that we get the
            // right bytes and the right number of bytes.
            assert_eq!(t.as_bytes(), bytes);

            // Test that changes to the underlying byte slices are reflected in
            // the original object.
            t.as_mut_bytes()[0] ^= 0xFF;
            assert_eq!(t, post_mutation);
            // Flip the byte back so `t` is unmodified for the checks below.
            t.as_mut_bytes()[0] ^= 0xFF;

            // `write_to` rejects slices that are too small or too large.
            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());

            // `write_to` works as expected.
            let mut bytes = [0; N];
            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` rejects slices that are too small.
            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_prefix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` works with too-large slices, and any bytes past
            // the prefix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            // Sentinel byte past the prefix; it must survive the write.
            too_many_bytes[N] = 123;
            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[..N], t.as_bytes());
            assert_eq!(too_many_bytes[N], 123);

            // `write_to_suffix` rejects slices that are too small.
            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_suffix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_suffix` works with too-large slices, and any bytes
            // before the suffix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            // Sentinel byte before the suffix; it must survive the write.
            too_many_bytes[0] = 123;
            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[1..], t.as_bytes());
            assert_eq!(too_many_bytes[0], 123);
        }

        // A 12-byte repr(C) struct mixing a plain integer, a `Wrapping`
        // wrapper, and `Option<NonZeroU32>` (all deriving `IntoBytes`).
        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
        #[repr(C)]
        struct Foo {
            a: u32,
            b: Wrapping<u32>,
            c: Option<NonZeroU32>,
        }

        // Expected serialization depends on the target's endianness.
        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
        } else {
            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
        };
        // Flipping byte 0 hits the least-significant byte of `a` on
        // little-endian targets and the most-significant byte on big-endian.
        let post_mutation_expected_a =
            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
        test::<_, 12>(
            &mut Foo { a: 1, b: Wrapping(2), c: None },
            expected_bytes.as_bytes(),
            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
        );
        // Also exercise the `?Sized` path via a slice-based DST.
        test::<_, 3>(
            Unsized::from_mut_slice(&mut [1, 2, 3]),
            &[1, 2, 3],
            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
        );
    }
7030
7031    #[test]
7032    fn test_array() {
7033        #[derive(FromBytes, IntoBytes, Immutable)]
7034        #[repr(C)]
7035        struct Foo {
7036            a: [u16; 33],
7037        }
7038
7039        let foo = Foo { a: [0xFFFF; 33] };
7040        let expected = [0xFFu8; 66];
7041        assert_eq!(foo.as_bytes(), &expected[..]);
7042    }
7043
7044    #[test]
7045    fn test_new_zeroed() {
7046        assert!(!bool::new_zeroed());
7047        assert_eq!(u64::new_zeroed(), 0);
7048        // This test exists in order to exercise unsafe code, especially when
7049        // running under Miri.
7050        #[allow(clippy::unit_cmp)]
7051        {
7052            assert_eq!(<()>::new_zeroed(), ());
7053        }
7054    }
7055
    #[test]
    fn test_transparent_packed_generic_struct() {
        // The derives should succeed on a generic `#[repr(transparent)]`
        // struct, with the traits conditional on the field types (checked via
        // `assert_impl_all!` below).
        #[derive(IntoBytes, FromBytes, Unaligned)]
        #[repr(transparent)]
        #[allow(dead_code)] // We never construct this type
        struct Foo<T> {
            _t: T,
            // Zero-sized second field, permitted alongside `_t` under
            // `repr(transparent)`.
            _phantom: PhantomData<()>,
        }

        assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
        // `Unaligned` only holds when the wrapped type is unaligned (`u8`).
        assert_impl_all!(Foo<u8>: Unaligned);

        // The same should work for a generic `#[repr(C, packed)]` struct;
        // packing makes it `Unaligned` even with an aligned field like `AU64`.
        #[derive(IntoBytes, FromBytes, Unaligned)]
        #[repr(C, packed)]
        #[allow(dead_code)] // We never construct this type
        struct Bar<T, U> {
            _t: T,
            _u: U,
        }

        assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
    }
7079
    /// Tests of the allocation-dependent APIs: zero-extending/inserting into
    /// `Vec`s and constructing zeroed `Box`es.
    #[cfg(feature = "alloc")]
    mod alloc {
        use super::*;

        // NOTE(review): the `no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0`
        // cfg appears (judging by its name) to be a toolchain-version probe
        // gating `Vec::try_reserve`-based APIs on pre-1.57.0 toolchains —
        // confirm against the build script.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_extend_vec_zeroed() {
            // Test extending when there is an existing allocation.
            let mut v = vec![100u16, 200, 300];
            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 6);
            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
            drop(v);

            // Test extending when there is no existing allocation.
            let mut v: Vec<u64> = Vec::new();
            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 3);
            assert_eq!(&*v, &[0, 0, 0]);
            drop(v);
        }

        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_extend_vec_zeroed_zst() {
            // Test extending when there is an existing (fake) allocation.
            let mut v = vec![(), (), ()];
            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 6);
            assert_eq!(&*v, &[(), (), (), (), (), ()]);
            drop(v);

            // Test extending when there is no existing (fake) allocation.
            let mut v: Vec<()> = Vec::new();
            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(&*v, &[(), (), ()]);
            drop(v);
        }

        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_insert_vec_zeroed() {
            // Insert at start (no existing allocation).
            let mut v: Vec<u64> = Vec::new();
            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 2);
            assert_eq!(&*v, &[0, 0]);
            drop(v);

            // Insert at start.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 5);
            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
            drop(v);

            // Insert at middle.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[100, 0, 200, 300]);
            drop(v);

            // Insert at end.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[100, 200, 300, 0]);
            drop(v);
        }

        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_insert_vec_zeroed_zst() {
            // Insert at start (no existing fake allocation).
            let mut v: Vec<()> = Vec::new();
            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 2);
            assert_eq!(&*v, &[(), ()]);
            drop(v);

            // Insert at start.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 5);
            assert_eq!(&*v, &[(), (), (), (), ()]);
            drop(v);

            // Insert at middle.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[(), (), (), ()]);
            drop(v);

            // Insert at end.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[(), (), (), ()]);
            drop(v);
        }

        #[test]
        fn test_new_box_zeroed() {
            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
        }

        #[test]
        fn test_new_box_zeroed_array() {
            // Only checks that allocating and dropping a large boxed array
            // succeeds; the contents are not inspected.
            drop(<[u32; 0x1000]>::new_box_zeroed());
        }

        #[test]
        fn test_new_box_zeroed_zst() {
            // This test exists in order to exercise unsafe code, especially
            // when running under Miri.
            #[allow(clippy::unit_cmp)]
            {
                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
            }
        }

        #[test]
        fn test_new_box_zeroed_with_elems() {
            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
            assert_eq!(s.len(), 3);
            assert_eq!(&*s, &[0, 0, 0]);
            // The boxed slice must be independently writable.
            s[1] = 3;
            assert_eq!(&*s, &[0, 3, 0]);
        }

        #[test]
        fn test_new_box_zeroed_with_elems_empty() {
            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
            assert_eq!(s.len(), 0);
        }

        #[test]
        fn test_new_box_zeroed_with_elems_zst() {
            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
            assert_eq!(s.len(), 3);
            // Out-of-bounds access on a ZST slice must still be rejected.
            assert!(s.get(10).is_none());
            // This test exists in order to exercise unsafe code, especially
            // when running under Miri.
            #[allow(clippy::unit_cmp)]
            {
                assert_eq!(s[1], ());
            }
            s[2] = ();
        }

        #[test]
        fn test_new_box_zeroed_with_elems_zst_empty() {
            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
            assert_eq!(s.len(), 0);
        }

        #[test]
        fn new_box_zeroed_with_elems_errors() {
            // `usize::MAX` elements of `u16` overflows the allocation-size
            // computation, so this must fail rather than wrap.
            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));

            // A total size just past `isize::MAX` bytes must also be rejected.
            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
            assert_eq!(
                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
                Err(AllocError)
            );
        }
    }
7249
7250    #[test]
7251    #[allow(deprecated)]
7252    fn test_deprecated_from_bytes() {
7253        let val = 0u32;
7254        let bytes = val.as_bytes();
7255
7256        assert!(u32::ref_from(bytes).is_some());
7257        // mut_from needs mut bytes
7258        let mut val = 0u32;
7259        let mut_bytes = val.as_mut_bytes();
7260        assert!(u32::mut_from(mut_bytes).is_some());
7261
7262        assert!(u32::read_from(bytes).is_some());
7263
7264        let (slc, rest) = <u32>::slice_from_prefix(bytes, 0).unwrap();
7265        assert!(slc.is_empty());
7266        assert_eq!(rest.len(), 4);
7267
7268        let (rest, slc) = <u32>::slice_from_suffix(bytes, 0).unwrap();
7269        assert!(slc.is_empty());
7270        assert_eq!(rest.len(), 4);
7271
7272        let (slc, rest) = <u32>::mut_slice_from_prefix(mut_bytes, 0).unwrap();
7273        assert!(slc.is_empty());
7274        assert_eq!(rest.len(), 4);
7275
7276        let (rest, slc) = <u32>::mut_slice_from_suffix(mut_bytes, 0).unwrap();
7277        assert!(slc.is_empty());
7278        assert_eq!(rest.len(), 4);
7279    }
7280
7281    #[test]
7282    fn test_try_ref_from_prefix_suffix() {
7283        use crate::util::testutil::Align;
7284        let bytes = &Align::<[u8; 4], u32>::new([0u8; 4]).t[..];
7285        let (r, rest): (&u32, &[u8]) = u32::try_ref_from_prefix(bytes).unwrap();
7286        assert_eq!(*r, 0);
7287        assert_eq!(rest.len(), 0);
7288
7289        let (rest, r): (&[u8], &u32) = u32::try_ref_from_suffix(bytes).unwrap();
7290        assert_eq!(*r, 0);
7291        assert_eq!(rest.len(), 0);
7292    }
7293
7294    #[test]
7295    fn test_raw_dangling() {
7296        use crate::util::AsAddress;
7297        let ptr: NonNull<u32> = u32::raw_dangling();
7298        assert_eq!(AsAddress::addr(ptr), 1);
7299
7300        let ptr: NonNull<[u32]> = <[u32]>::raw_dangling();
7301        assert_eq!(AsAddress::addr(ptr), 1);
7302    }
7303
7304    #[test]
7305    fn test_try_ref_from_prefix_with_elems() {
7306        use crate::util::testutil::Align;
7307        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
7308        let (r, rest): (&[u32], &[u8]) = <[u32]>::try_ref_from_prefix_with_elems(bytes, 2).unwrap();
7309        assert_eq!(r.len(), 2);
7310        assert_eq!(rest.len(), 0);
7311    }
7312
7313    #[test]
7314    fn test_try_ref_from_suffix_with_elems() {
7315        use crate::util::testutil::Align;
7316        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
7317        let (rest, r): (&[u8], &[u32]) = <[u32]>::try_ref_from_suffix_with_elems(bytes, 2).unwrap();
7318        assert_eq!(r.len(), 2);
7319        assert_eq!(rest.len(), 0);
7320    }
7321}