#![allow(unknown_lints, non_local_definitions, unexpected_cfgs)]
#![deny(renamed_and_removed_lints)]
#![deny(
anonymous_parameters,
deprecated_in_future,
late_bound_lifetime_arguments,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
path_statements,
patterns_in_fns_without_body,
rust_2018_idioms,
trivial_numeric_casts,
unreachable_pub,
unsafe_op_in_unsafe_fn,
unused_extern_crates,
// We intentionally choose not to deny `unused_qualifications`. When items
// are added to the prelude (e.g., `core::mem::size_of`), this has the
// consequence of making some uses trigger this lint on the latest toolchain
// (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
// does not work on older toolchains.
//
// We tested a more complicated fix in #1413, but ultimately decided that,
// since this lint is just a minor style lint, the complexity isn't worth it
// - it's fine to occasionally have unused qualifications slip through,
// especially since these do not affect our user-facing API in any way.
variant_size_differences
)]
#![cfg_attr(
__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
deny(fuzzy_provenance_casts, lossy_provenance_casts)
)]
#![deny(
clippy::all,
clippy::alloc_instead_of_core,
clippy::arithmetic_side_effects,
clippy::as_underscore,
clippy::assertions_on_result_states,
clippy::as_conversions,
clippy::correctness,
clippy::dbg_macro,
clippy::decimal_literal_representation,
clippy::get_unwrap,
clippy::indexing_slicing,
clippy::missing_inline_in_public_items,
clippy::missing_safety_doc,
clippy::obfuscated_if_else,
clippy::perf,
clippy::print_stdout,
clippy::std_instead_of_core,
clippy::style,
clippy::suspicious,
clippy::todo,
clippy::undocumented_unsafe_blocks,
clippy::unimplemented,
clippy::unnested_or_patterns,
clippy::unwrap_used,
clippy::use_debug
)]
#![deny(
rustdoc::bare_urls,
rustdoc::broken_intra_doc_links,
rustdoc::invalid_codeblock_attributes,
rustdoc::invalid_html_tags,
rustdoc::invalid_rust_codeblocks,
rustdoc::missing_crate_level_docs,
rustdoc::private_intra_doc_links
)]
#![cfg_attr(any(test, kani), allow(
// In tests, you get line numbers and have access to source code, so panic
// messages are less important. You also often unwrap a lot, which would
// make expect'ing instead very verbose.
clippy::unwrap_used,
// In tests, there's no harm to "panic risks" - the worst that can happen is
// that your test will fail, and you'll fix it. By contrast, panic risks in
    // production code introduce the possibility of code panicking unexpectedly "in
// the field".
clippy::arithmetic_side_effects,
clippy::indexing_slicing,
))]
#![cfg_attr(not(test), no_std)]
#![cfg_attr(
all(feature = "simd-nightly", any(target_arch = "x86", target_arch = "x86_64")),
feature(stdarch_x86_avx512)
)]
#![cfg_attr(
all(feature = "simd-nightly", target_arch = "arm"),
feature(stdarch_arm_dsp, stdarch_arm_neon_intrinsics)
)]
#![cfg_attr(
all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
feature(stdarch_powerpc)
)]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(
__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
feature(layout_for_ptr, strict_provenance)
)]
#[cfg(any(feature = "derive", test))]
extern crate self as zerocopy;
#[macro_use]
mod macros;
#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub mod byteorder;
#[doc(hidden)]
pub mod macro_util;
mod post_monomorphization_compile_fail_tests;
mod util;
mod wrappers;
#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub use crate::byteorder::*;
pub use crate::wrappers::*;
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::Unaligned;
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
#[doc(hidden)]
pub use zerocopy_derive::KnownLayout;
use core::{
cell::{self, RefMut},
cmp::Ordering,
fmt::{self, Debug, Display, Formatter},
hash::Hasher,
marker::PhantomData,
mem::{self, ManuallyDrop, MaybeUninit},
num::{
NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
},
ops::{Deref, DerefMut},
ptr::{self, NonNull},
slice,
};
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::{boxed::Box, vec::Vec};
#[cfg(any(feature = "alloc", kani))]
use core::alloc::Layout;
#[doc(hidden)]
pub use crate::util::ptr::Ptr;
#[allow(unused_imports)]
use crate::util::polyfills::NonNullExt as _;
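// On nightly toolchains, emit a warning (via the `deprecated` lint, one of
// the few warnings that can be triggered from a `const` context) when tests
// are built without `--cfg __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS`,
// since some tests are silently skipped in that configuration.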
#[rustversion::nightly]
#[cfg(all(test, not(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)))]
const _: () = {
#[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS\""]
const _WARNING: () = ();
#[warn(deprecated)]
_WARNING
};
const POINTER_WIDTH_BITS: usize = mem::size_of::<usize>() * 8;
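// The layout of a type: its alignment, plus either a fixed size (for sized
// types) or the offset and element size of its trailing slice (for slice
// DSTs such as `[T]` or a struct whose final field is a slice).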
#[doc(hidden)]
#[allow(missing_debug_implementations, missing_copy_implementations)]
#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
pub struct DstLayout {
align: NonZeroUsize,
size_info: SizeInfo,
}
#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
enum SizeInfo<E = usize> {
Sized { _size: usize },
SliceDst(TrailingSliceLayout<E>),
}
#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
struct TrailingSliceLayout<E = usize> {
_offset: usize,
_elem_size: E,
}
impl SizeInfo {
#[allow(unused)]
const fn try_to_nonzero_elem_size(&self) -> Option<SizeInfo<NonZeroUsize>> {
Some(match *self {
SizeInfo::Sized { _size } => SizeInfo::Sized { _size },
SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
if let Some(_elem_size) = NonZeroUsize::new(_elem_size) {
SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
} else {
return None;
}
}
})
}
}
#[doc(hidden)]
#[derive(Copy, Clone)]
#[cfg_attr(test, derive(Debug))]
#[allow(missing_debug_implementations)]
pub enum _CastType {
_Prefix,
_Suffix,
}
impl DstLayout {
const MIN_ALIGN: NonZeroUsize = match NonZeroUsize::new(1) {
Some(min_align) => min_align,
None => unreachable!(),
};
const THEORETICAL_MAX_ALIGN: NonZeroUsize =
match NonZeroUsize::new(1 << (POINTER_WIDTH_BITS - 1)) {
Some(max_align) => max_align,
None => unreachable!(),
};
#[cfg(not(kani))]
const CURRENT_MAX_ALIGN: NonZeroUsize = match NonZeroUsize::new(1 << 28) {
Some(max_align) => max_align,
None => unreachable!(),
};
#[doc(hidden)]
#[inline]
pub const fn new_zst(repr_align: Option<NonZeroUsize>) -> DstLayout {
let align = match repr_align {
Some(align) => align,
None => Self::MIN_ALIGN,
};
assert!(align.is_power_of_two());
DstLayout { align, size_info: SizeInfo::Sized { _size: 0 } }
}
#[doc(hidden)]
#[inline]
pub const fn for_type<T>() -> DstLayout {
DstLayout {
align: match NonZeroUsize::new(mem::align_of::<T>()) {
Some(align) => align,
None => unreachable!(),
},
size_info: SizeInfo::Sized { _size: mem::size_of::<T>() },
}
}
const fn for_slice<T>() -> DstLayout {
DstLayout {
align: match NonZeroUsize::new(mem::align_of::<T>()) {
Some(align) => align,
None => unreachable!(),
},
size_info: SizeInfo::SliceDst(TrailingSliceLayout {
_offset: 0,
_elem_size: mem::size_of::<T>(),
}),
}
}
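    // An illustrative worked example for `extend` (not from the original
    // sources): extending a layout of { size: 1, align: 1 } with a `u32`
    // field ({ size: 4, align: 4 }) and no `repr(packed)` cap inserts 3
    // bytes of padding, placing the field at offset 4 and yielding a
    // composite layout of { size: 8, align: 4 }.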
#[doc(hidden)]
#[inline]
pub const fn extend(self, field: DstLayout, repr_packed: Option<NonZeroUsize>) -> Self {
use util::{core_layout::padding_needed_for, max, min};
let max_align = match repr_packed {
Some(max_align) => max_align,
None => Self::THEORETICAL_MAX_ALIGN,
};
assert!(max_align.is_power_of_two());
#[cfg(not(kani))]
{
debug_assert!(self.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
debug_assert!(field.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
if let Some(repr_packed) = repr_packed {
debug_assert!(repr_packed.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
}
}
let field_align = min(field.align, max_align);
let align = max(self.align, field_align);
let size_info = match self.size_info {
SizeInfo::SliceDst(..) => panic!("Cannot extend a DST with additional fields."),
SizeInfo::Sized { _size: preceding_size } => {
let padding = padding_needed_for(preceding_size, field_align);
let offset = match preceding_size.checked_add(padding) {
Some(offset) => offset,
None => panic!("Adding padding to `self`'s size overflows `usize`."),
};
match field.size_info {
SizeInfo::Sized { _size: field_size } => {
let size = match offset.checked_add(field_size) {
Some(size) => size,
None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
};
SizeInfo::Sized { _size: size }
}
SizeInfo::SliceDst(TrailingSliceLayout {
_offset: trailing_offset,
_elem_size,
}) => {
let offset = match offset.checked_add(trailing_offset) {
Some(offset) => offset,
None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
};
SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size })
}
}
}
};
DstLayout { align, size_info }
}
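    // Illustrative example for `pad_to_align`: padding { size: 5, align: 4 }
    // yields { size: 8, align: 4 }. Slice DSTs are returned unchanged, since
    // their trailing padding depends on the dynamic element count.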
#[doc(hidden)]
#[inline]
pub const fn pad_to_align(self) -> Self {
use util::core_layout::padding_needed_for;
let size_info = match self.size_info {
SizeInfo::Sized { _size: unpadded_size } => {
let padding = padding_needed_for(unpadded_size, self.align);
let size = match unpadded_size.checked_add(padding) {
Some(size) => size,
None => panic!("Adding padding caused size to overflow `usize`."),
};
SizeInfo::Sized { _size: size }
}
size_info @ SizeInfo::SliceDst(_) => size_info,
};
DstLayout { align: self.align, size_info }
}
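    // Illustrative worked example: for the sized layout { size: 4, align: 4 },
    // a prefix cast with addr = 8 and bytes_len = 10 succeeds with
    // (elems, split_at) = (0, 4), since 8 is 4-aligned and 4 <= 10. A suffix
    // cast with the same inputs fails the alignment check (addr + bytes_len
    // = 18 is not 4-aligned) and returns `None`.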
#[allow(unused)]
const fn validate_cast_and_convert_metadata(
&self,
addr: usize,
bytes_len: usize,
cast_type: _CastType,
) -> Option<(usize, usize)> {
macro_rules! __debug_assert {
($e:expr $(, $msg:expr)?) => {
debug_assert!({
#[allow(clippy::arithmetic_side_effects)]
let e = $e;
e
} $(, $msg)?);
};
}
let size_info = match self.size_info.try_to_nonzero_elem_size() {
Some(size_info) => size_info,
None => panic!("attempted to cast to slice type with zero-sized element"),
};
__debug_assert!(addr.checked_add(bytes_len).is_some(), "`addr` + `bytes_len` > usize::MAX");
{
let offset = match cast_type {
_CastType::_Prefix => 0,
_CastType::_Suffix => bytes_len,
};
#[allow(clippy::arithmetic_side_effects)]
if (addr + offset) % self.align.get() != 0 {
return None;
}
}
let (elems, self_bytes) = match size_info {
SizeInfo::Sized { _size: size } => {
if size > bytes_len {
return None;
}
(0, size)
}
SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size: elem_size }) => {
let max_total_bytes =
util::round_down_to_next_multiple_of_alignment(bytes_len, self.align);
let max_slice_and_padding_bytes = match max_total_bytes.checked_sub(offset) {
Some(max) => max,
None => return None,
};
#[allow(clippy::arithmetic_side_effects)]
let elems = max_slice_and_padding_bytes / elem_size.get();
#[allow(clippy::arithmetic_side_effects)]
let without_padding = offset + elems * elem_size.get();
#[allow(clippy::arithmetic_side_effects)]
let self_bytes = without_padding
+ util::core_layout::padding_needed_for(without_padding, self.align);
(elems, self_bytes)
}
};
__debug_assert!(self_bytes <= bytes_len);
let split_at = match cast_type {
_CastType::_Prefix => self_bytes,
#[allow(clippy::arithmetic_side_effects)]
_CastType::_Suffix => bytes_len - self_bytes,
};
Some((elems, split_at))
}
}
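// `KnownLayout` makes a type's `DstLayout` available at compile time and,
// for slice DSTs, constructs a raw pointer from a base address and an
// element count, without requiring an existing value of the type.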
#[doc(hidden)]
pub unsafe trait KnownLayout {
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
#[doc(hidden)]
const LAYOUT: DstLayout;
#[doc(hidden)]
fn raw_from_ptr_len(bytes: NonNull<u8>, elems: usize) -> NonNull<Self>;
}
unsafe impl<T: KnownLayout> KnownLayout for [T] {
#[allow(clippy::missing_inline_in_public_items)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized,
{
}
const LAYOUT: DstLayout = DstLayout::for_slice::<T>();
#[inline(always)]
fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
#[allow(unstable_name_collisions)]
NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
}
}
#[rustfmt::skip]
impl_known_layout!(
(),
u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
bool, char,
NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
#[rustfmt::skip]
impl_known_layout!(
T => Option<T>,
T: ?Sized => PhantomData<T>,
T => Wrapping<T>,
T => MaybeUninit<T>,
T: ?Sized => *const T,
T: ?Sized => *mut T,
);
impl_known_layout!(const N: usize, T => [T; N]);
safety_comment! {
unsafe_impl_known_layout!(#[repr([u8])] str);
unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
}
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromZeroes;
#[doc(hidden)]
pub unsafe trait TryFromBytes {
#[doc(hidden)]
unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool;
#[inline]
    #[doc(hidden)]
    fn try_from_ref(bytes: &[u8]) -> Option<&Self>
where
Self: KnownLayout,
{
        let maybe_self = Ptr::from(bytes).try_cast_into_no_leftover::<Self>()?;
        // SAFETY: `try_cast_into_no_leftover` only returns `Some` when
        // `maybe_self` is aligned and spans all of `bytes`, satisfying
        // `is_bit_valid`'s preconditions.
        if unsafe { !Self::is_bit_valid(maybe_self) } {
            return None;
        }
        // SAFETY: `is_bit_valid` just vouched that `maybe_self` points to a
        // bit-valid `Self`, and the resulting reference inherits the
        // lifetime of `bytes`.
        Some(unsafe { maybe_self.as_ref() })
}
}
#[cfg_attr(
feature = "derive",
doc = "[derive]: zerocopy_derive::FromZeroes",
doc = "[derive-analysis]: zerocopy_derive::FromZeroes#analysis"
)]
#[cfg_attr(
not(feature = "derive"),
doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html"),
doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html#analysis"),
)]
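/// Types for which any sequence of zero bytes represents a valid instance.
///
/// A minimal usage sketch (illustrative; the `u64` impl is provided
/// elsewhere in this crate):
///
/// ```ignore
/// use zerocopy::FromZeroes;
///
/// let x: u64 = u64::new_zeroed();
/// assert_eq!(x, 0);
/// ```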
pub unsafe trait FromZeroes {
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
#[inline(always)]
fn zero(&mut self) {
        let slf: *mut Self = self;
        let len = mem::size_of_val(self);
        // SAFETY: `slf` is valid for writes of `len` bytes, and
        // `Self: FromZeroes` guarantees that the all-zeros bit pattern is a
        // valid `Self`.
        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
}
#[inline(always)]
fn new_zeroed() -> Self
where
Self: Sized,
{
        // SAFETY: `Self: FromZeroes` guarantees that the all-zeros bit
        // pattern is a valid `Self`.
        unsafe { mem::zeroed() }
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[inline]
fn new_box_zeroed() -> Box<Self>
where
Self: Sized,
{
let layout = Layout::new::<Self>();
if layout.size() == 0 {
return Box::new(Self::new_zeroed());
}
#[allow(clippy::undocumented_unsafe_blocks)]
let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
Box::from_raw(ptr)
}
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[inline]
fn new_box_slice_zeroed(len: usize) -> Box<[Self]>
where
Self: Sized,
{
let size = mem::size_of::<Self>()
.checked_mul(len)
.expect("mem::size_of::<Self>() * len overflows `usize`");
let align = mem::align_of::<Self>();
#[allow(clippy::as_conversions)]
let max_alloc = (isize::MAX as usize).saturating_sub(align);
assert!(size <= max_alloc);
let layout =
Layout::from_size_align(size, align).expect("total allocation size overflows `isize`");
let ptr = if layout.size() != 0 {
#[allow(clippy::undocumented_unsafe_blocks)]
let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
ptr
} else {
NonNull::<Self>::dangling().as_ptr()
};
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
Box::from_raw(slice::from_raw_parts_mut(ptr, len))
}
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "new_vec_zeroed")))]
#[inline(always)]
fn new_vec_zeroed(len: usize) -> Vec<Self>
where
Self: Sized,
{
Self::new_box_slice_zeroed(len).into()
}
}
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromBytes;
#[cfg_attr(
feature = "derive",
doc = "[derive]: zerocopy_derive::FromBytes",
doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
)]
#[cfg_attr(
not(feature = "derive"),
doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
)]
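/// Types for which any sequence of initialized bytes represents a valid
/// instance.
///
/// A minimal usage sketch (illustrative; the `u32` impl is provided
/// elsewhere in this crate, and the resulting value depends on the
/// platform's endianness):
///
/// ```ignore
/// use zerocopy::FromBytes;
///
/// let bytes = [1u8, 2, 3, 4];
/// let n = u32::read_from(&bytes[..]).expect("`bytes` is 4 bytes long");
/// // On little-endian targets, `n` is 0x0403_0201.
/// ```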
pub unsafe trait FromBytes: FromZeroes {
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
#[inline]
fn ref_from(bytes: &[u8]) -> Option<&Self>
where
Self: Sized,
{
Ref::<&[u8], Self>::new(bytes).map(Ref::into_ref)
}
#[inline]
fn ref_from_prefix(bytes: &[u8]) -> Option<&Self>
where
Self: Sized,
{
Ref::<&[u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_ref())
}
#[inline]
fn ref_from_suffix(bytes: &[u8]) -> Option<&Self>
where
Self: Sized,
{
Ref::<&[u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_ref())
}
#[inline]
fn mut_from(bytes: &mut [u8]) -> Option<&mut Self>
where
Self: Sized + AsBytes,
{
Ref::<&mut [u8], Self>::new(bytes).map(Ref::into_mut)
}
#[inline]
fn mut_from_prefix(bytes: &mut [u8]) -> Option<&mut Self>
where
Self: Sized + AsBytes,
{
Ref::<&mut [u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_mut())
}
#[inline]
fn mut_from_suffix(bytes: &mut [u8]) -> Option<&mut Self>
where
Self: Sized + AsBytes,
{
Ref::<&mut [u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_mut())
}
#[inline]
fn slice_from(bytes: &[u8]) -> Option<&[Self]>
where
Self: Sized,
{
Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_slice())
}
#[inline]
fn slice_from_prefix(bytes: &[u8], count: usize) -> Option<(&[Self], &[u8])>
where
Self: Sized,
{
Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_slice(), b))
}
#[inline]
fn slice_from_suffix(bytes: &[u8], count: usize) -> Option<(&[u8], &[Self])>
where
Self: Sized,
{
Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_slice()))
}
#[inline]
fn mut_slice_from(bytes: &mut [u8]) -> Option<&mut [Self]>
where
Self: Sized + AsBytes,
{
Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_mut_slice())
}
#[inline]
fn mut_slice_from_prefix(bytes: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
where
Self: Sized + AsBytes,
{
Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_mut_slice(), b))
}
#[inline]
fn mut_slice_from_suffix(bytes: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
where
Self: Sized + AsBytes,
{
Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_mut_slice()))
}
#[inline]
fn read_from(bytes: &[u8]) -> Option<Self>
where
Self: Sized,
{
Ref::<_, Unalign<Self>>::new_unaligned(bytes).map(|r| r.read().into_inner())
}
#[inline]
fn read_from_prefix(bytes: &[u8]) -> Option<Self>
where
Self: Sized,
{
Ref::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes)
.map(|(r, _)| r.read().into_inner())
}
#[inline]
fn read_from_suffix(bytes: &[u8]) -> Option<Self>
where
Self: Sized,
{
Ref::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes)
.map(|(_, r)| r.read().into_inner())
}
}
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::AsBytes;
#[cfg_attr(
feature = "derive",
doc = "[derive]: zerocopy_derive::AsBytes",
doc = "[derive-analysis]: zerocopy_derive::AsBytes#analysis"
)]
#[cfg_attr(
not(feature = "derive"),
doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html"),
doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html#analysis"),
)]
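/// Types whose values may be viewed as an immutable slice of initialized
/// bytes.
///
/// A minimal usage sketch (illustrative):
///
/// ```ignore
/// use zerocopy::AsBytes;
///
/// let n: u32 = 0x0102_0304;
/// let bytes: &[u8] = n.as_bytes();
/// assert_eq!(bytes.len(), 4);
/// ```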
pub unsafe trait AsBytes {
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
#[inline(always)]
fn as_bytes(&self) -> &[u8] {
let len = mem::size_of_val(self);
let slf: *const Self = self;
        // SAFETY: `Self: AsBytes` guarantees that `self` has no padding and
        // that all of its bytes are initialized; the returned slice borrows
        // `self`, keeping the pointer valid for `len` bytes.
        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
}
#[inline(always)]
fn as_bytes_mut(&mut self) -> &mut [u8]
where
Self: FromBytes,
{
let len = mem::size_of_val(self);
let slf: *mut Self = self;
        // SAFETY: as in `as_bytes`, plus `Self: FromBytes` guarantees that
        // any bytes written through the returned slice leave `self`
        // bit-valid.
        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
}
#[inline]
fn write_to(&self, bytes: &mut [u8]) -> Option<()> {
if bytes.len() != mem::size_of_val(self) {
return None;
}
bytes.copy_from_slice(self.as_bytes());
Some(())
}
#[inline]
fn write_to_prefix(&self, bytes: &mut [u8]) -> Option<()> {
let size = mem::size_of_val(self);
bytes.get_mut(..size)?.copy_from_slice(self.as_bytes());
Some(())
}
#[inline]
fn write_to_suffix(&self, bytes: &mut [u8]) -> Option<()> {
let start = bytes.len().checked_sub(mem::size_of_val(self))?;
bytes
.get_mut(start..)
.expect("`start` should be in-bounds of `bytes`")
.copy_from_slice(self.as_bytes());
Some(())
}
}
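/// Types with alignment 1, which may therefore be read from any byte offset
/// without alignment checks.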
pub unsafe trait Unaligned {
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
}
safety_comment! {
unsafe_impl!((): TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_unaligned!(());
}
safety_comment! {
unsafe_impl!(u8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
unsafe_impl!(i8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_unaligned!(u8, i8);
unsafe_impl!(u16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(i16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(u32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(i32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(u64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(i64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(u128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(i128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(usize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(isize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(f32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(f64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
}
safety_comment! {
unsafe_impl!(bool: FromZeroes, AsBytes, Unaligned);
assert_unaligned!(bool);
unsafe_impl!(bool: TryFromBytes; |byte: &u8| *byte < 2);
}
safety_comment! {
unsafe_impl!(char: FromZeroes, AsBytes);
unsafe_impl!(char: TryFromBytes; |candidate: &u32| char::from_u32(*candidate).is_some());
}
safety_comment! {
unsafe_impl!(str: FromZeroes, AsBytes, Unaligned);
unsafe_impl!(str: TryFromBytes; |candidate: &[u8]| core::str::from_utf8(candidate).is_ok());
}
safety_comment! {
unsafe_impl!(NonZeroU8: AsBytes, Unaligned);
unsafe_impl!(NonZeroI8: AsBytes, Unaligned);
assert_unaligned!(NonZeroU8, NonZeroI8);
unsafe_impl!(NonZeroU16: AsBytes);
unsafe_impl!(NonZeroI16: AsBytes);
unsafe_impl!(NonZeroU32: AsBytes);
unsafe_impl!(NonZeroI32: AsBytes);
unsafe_impl!(NonZeroU64: AsBytes);
unsafe_impl!(NonZeroI64: AsBytes);
unsafe_impl!(NonZeroU128: AsBytes);
unsafe_impl!(NonZeroI128: AsBytes);
unsafe_impl!(NonZeroUsize: AsBytes);
unsafe_impl!(NonZeroIsize: AsBytes);
unsafe_impl!(NonZeroU8: TryFromBytes; |n: &u8| *n != 0);
unsafe_impl!(NonZeroI8: TryFromBytes; |n: &i8| *n != 0);
unsafe_impl!(NonZeroU16: TryFromBytes; |n: &u16| *n != 0);
unsafe_impl!(NonZeroI16: TryFromBytes; |n: &i16| *n != 0);
unsafe_impl!(NonZeroU32: TryFromBytes; |n: &u32| *n != 0);
unsafe_impl!(NonZeroI32: TryFromBytes; |n: &i32| *n != 0);
unsafe_impl!(NonZeroU64: TryFromBytes; |n: &u64| *n != 0);
unsafe_impl!(NonZeroI64: TryFromBytes; |n: &i64| *n != 0);
unsafe_impl!(NonZeroU128: TryFromBytes; |n: &u128| *n != 0);
unsafe_impl!(NonZeroI128: TryFromBytes; |n: &i128| *n != 0);
unsafe_impl!(NonZeroUsize: TryFromBytes; |n: &usize| *n != 0);
unsafe_impl!(NonZeroIsize: TryFromBytes; |n: &isize| *n != 0);
}
safety_comment! {
unsafe_impl!(Option<NonZeroU8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
unsafe_impl!(Option<NonZeroI8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_unaligned!(Option<NonZeroU8>, Option<NonZeroI8>);
unsafe_impl!(Option<NonZeroU16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroI16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroU32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroI32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroU64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroI64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroU128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroI128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroUsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroIsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
}
safety_comment! {
#[cfg(feature = "alloc")]
unsafe_impl!(
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
T => FromZeroes for Option<Box<T>>
);
unsafe_impl!(T => FromZeroes for Option<&'_ T>);
unsafe_impl!(T => FromZeroes for Option<&'_ mut T>);
unsafe_impl!(T => FromZeroes for Option<NonNull<T>>);
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_fn!(...));
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_extern_c_fn!(...));
}
safety_comment! {
unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData<T>);
unsafe_impl!(T: ?Sized => FromZeroes for PhantomData<T>);
unsafe_impl!(T: ?Sized => FromBytes for PhantomData<T>);
unsafe_impl!(T: ?Sized => AsBytes for PhantomData<T>);
unsafe_impl!(T: ?Sized => Unaligned for PhantomData<T>);
assert_unaligned!(PhantomData<()>, PhantomData<u8>, PhantomData<u64>);
}
safety_comment! {
unsafe_impl!(T: TryFromBytes => TryFromBytes for Wrapping<T>; |candidate: Ptr<T>| {
unsafe { T::is_bit_valid(candidate) }
});
unsafe_impl!(T: FromZeroes => FromZeroes for Wrapping<T>);
unsafe_impl!(T: FromBytes => FromBytes for Wrapping<T>);
unsafe_impl!(T: AsBytes => AsBytes for Wrapping<T>);
unsafe_impl!(T: Unaligned => Unaligned for Wrapping<T>);
assert_unaligned!(Wrapping<()>, Wrapping<u8>);
}
safety_comment! {
unsafe_impl!(T: TryFromBytes => TryFromBytes for MaybeUninit<T>);
unsafe_impl!(T: FromZeroes => FromZeroes for MaybeUninit<T>);
unsafe_impl!(T: FromBytes => FromBytes for MaybeUninit<T>);
unsafe_impl!(T: Unaligned => Unaligned for MaybeUninit<T>);
assert_unaligned!(MaybeUninit<()>, MaybeUninit<u8>);
}
safety_comment! {
unsafe_impl!(T: ?Sized + FromZeroes => FromZeroes for ManuallyDrop<T>);
unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop<T>);
unsafe_impl!(T: ?Sized + AsBytes => AsBytes for ManuallyDrop<T>);
unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop<T>);
assert_unaligned!(ManuallyDrop<()>, ManuallyDrop<u8>);
}
safety_comment! {
unsafe_impl!(const N: usize, T: FromZeroes => FromZeroes for [T; N]);
unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]);
unsafe_impl!(const N: usize, T: AsBytes => AsBytes for [T; N]);
unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]);
assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]);
unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c: Ptr<[T]>| {
c.iter().all(|elem|
unsafe { <T as TryFromBytes>::is_bit_valid(elem) }
)
});
unsafe_impl!(T: FromZeroes => FromZeroes for [T]);
unsafe_impl!(T: FromBytes => FromBytes for [T]);
unsafe_impl!(T: AsBytes => AsBytes for [T]);
unsafe_impl!(T: Unaligned => Unaligned for [T]);
}
safety_comment! {
unsafe_impl!(T => FromZeroes for *const T);
unsafe_impl!(T => FromZeroes for *mut T);
}
#[cfg(feature = "simd")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))]
mod simd {
    #[allow(unused_macros)]
    macro_rules! simd_arch_mod {
(#[cfg $cfg:tt] $arch:ident, $mod:ident, $($typ:ident),*) => {
#[cfg $cfg]
#[cfg_attr(doc_cfg, doc(cfg $cfg))]
mod $mod {
use core::arch::$arch::{$($typ),*};
use crate::*;
impl_known_layout!($($typ),*);
safety_comment! {
$( unsafe_impl!($typ: TryFromBytes, FromZeroes, FromBytes, AsBytes); )*
}
}
};
}
#[rustfmt::skip]
const _: () = {
simd_arch_mod!(
#[cfg(target_arch = "x86")]
x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i
);
simd_arch_mod!(
#[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
x86, x86_nightly, __m512bh, __m512, __m512d, __m512i
);
simd_arch_mod!(
#[cfg(target_arch = "x86_64")]
x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i
);
simd_arch_mod!(
#[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i
);
simd_arch_mod!(
#[cfg(target_arch = "wasm32")]
wasm32, wasm32, v128
);
simd_arch_mod!(
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
);
simd_arch_mod!(
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
);
simd_arch_mod!(
#[cfg(target_arch = "aarch64")]
aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
uint64x1_t, uint64x2_t
);
simd_arch_mod!(
#[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
arm, arm, int8x4_t, uint8x4_t
);
};
}
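/// Safely transmutes a value of one type to a value of another type of the
/// same size, checking at compile time (via the dead `if false` branch
/// below) that the source type is `AsBytes` and the destination type is
/// `FromBytes`.
///
/// A usage sketch (illustrative):
///
/// ```ignore
/// use zerocopy::transmute;
///
/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
/// let two_dimensional: [[u8; 4]; 2] = transmute!(one_dimensional);
/// assert_eq!(two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]);
/// ```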
#[macro_export]
macro_rules! transmute {
($e:expr) => {{
let e = $e;
if false {
struct AssertIsAsBytes<T: $crate::AsBytes>(T);
let _ = AssertIsAsBytes(e);
struct AssertIsFromBytes<U: $crate::FromBytes>(U);
#[allow(unused, unreachable_code)]
let u = AssertIsFromBytes(loop {});
u.0
} else {
unsafe {
#[allow(clippy::useless_transmute, clippy::missing_transmute_annotations)]
$crate::macro_util::core_reexport::mem::transmute(e)
}
}
}}
}
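/// Safely transmutes `&T` to `&U`: the dead branches below check at compile
/// time that `T: AsBytes`, `U: FromBytes`, that the two types have equal
/// sizes, and that `T`'s alignment is at least `U`'s.
///
/// A usage sketch (illustrative):
///
/// ```ignore
/// use zerocopy::transmute_ref;
///
/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
/// let two_dimensional: &[[u8; 4]; 2] = transmute_ref!(&one_dimensional);
/// assert_eq!(*two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]);
/// ```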
#[macro_export]
macro_rules! transmute_ref {
($e:expr) => {{
let e: &_ = $e;
#[allow(unused, clippy::diverging_sub_expression)]
if false {
struct AssertIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
let _ = AssertIsAsBytes(e);
struct AssertIsFromBytes<'a, U: ::core::marker::Sized + $crate::FromBytes>(&'a U);
#[allow(unused, unreachable_code)]
let u = AssertIsFromBytes(loop {});
u.0
} else if false {
let mut t = unreachable!();
e = &t;
let u;
$crate::assert_size_eq!(t, u);
$crate::assert_align_gt_eq!(t, u);
&u
} else {
unsafe { $crate::macro_util::transmute_ref(e) }
}
}}
}
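/// The mutable counterpart of `transmute_ref!`: safely transmutes `&mut T`
/// to `&mut U`, requiring both types to be `FromBytes + AsBytes`, equal in
/// size, with `T`'s alignment at least `U`'s.
///
/// A usage sketch (illustrative):
///
/// ```ignore
/// use zerocopy::transmute_mut;
///
/// let mut one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
/// let two_dimensional: &mut [[u8; 4]; 2] = transmute_mut!(&mut one_dimensional);
/// two_dimensional.reverse();
/// assert_eq!(one_dimensional, [4, 5, 6, 7, 0, 1, 2, 3]);
/// ```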
#[macro_export]
macro_rules! transmute_mut {
($e:expr) => {{
let e: &mut _ = $e;
#[allow(unused, clippy::diverging_sub_expression)]
if false {
struct AssertSrcIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
struct AssertSrcIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
struct AssertDstIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
struct AssertDstIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
if true {
let _ = AssertSrcIsFromBytes(&*e);
} else {
let _ = AssertSrcIsAsBytes(&*e);
}
if true {
#[allow(unused, unreachable_code)]
let u = AssertDstIsFromBytes(loop {});
&mut *u.0
} else {
#[allow(unused, unreachable_code)]
let u = AssertDstIsAsBytes(loop {});
&mut *u.0
}
} else if false {
let mut t = unreachable!();
e = &mut t;
let u;
$crate::assert_size_eq!(t, u);
$crate::assert_align_gt_eq!(t, u);
&mut u
} else {
unsafe { $crate::macro_util::transmute_mut(e) }
}
}}
}
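/// Includes a file as a constant value of any `FromBytes` type whose size
/// matches the file's length, by transmuting the output of
/// `include_bytes!`.
///
/// A usage sketch (the path is hypothetical; the file must be exactly
/// `size_of::<u32>()` bytes long):
///
/// ```ignore
/// use zerocopy::include_value;
///
/// const MAGIC: u32 = include_value!("data/magic.bin");
/// ```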
#[doc(alias("include_bytes", "include_data", "include_type"))]
#[macro_export]
macro_rules! include_value {
($file:expr $(,)?) => {
$crate::transmute!(*::core::include_bytes!($file))
};
}
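/// A length- and alignment-checked reference to a byte slice that can be
/// safely reinterpreted as another type.
///
/// A usage sketch (illustrative; `[u8; 4]` has alignment 1 and gets its
/// `FromBytes` impl from this crate, so both checks pass):
///
/// ```ignore
/// use zerocopy::Ref;
///
/// let bytes = [1u8, 2, 3, 4];
/// let r: Ref<&[u8], [u8; 4]> = Ref::new(&bytes[..]).unwrap();
/// assert_eq!(*r, [1, 2, 3, 4]);
/// ```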
pub struct Ref<B, T: ?Sized>(B, PhantomData<T>);
#[deprecated(since = "0.7.0", note = "LayoutVerified has been renamed to Ref")]
#[doc(hidden)]
pub type LayoutVerified<B, T> = Ref<B, T>;
impl<B, T> Ref<B, T>
where
B: ByteSlice,
{
#[inline]
pub fn new(bytes: B) -> Option<Ref<B, T>> {
if bytes.len() != mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
return None;
}
Some(Ref(bytes, PhantomData))
}
#[inline]
pub fn new_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
if bytes.len() < mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
return None;
}
let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
Some((Ref(bytes, PhantomData), suffix))
}
#[inline]
pub fn new_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
let bytes_len = bytes.len();
let split_at = bytes_len.checked_sub(mem::size_of::<T>())?;
let (prefix, bytes) = bytes.split_at(split_at);
if !util::aligned_to::<_, T>(bytes.deref()) {
return None;
}
Some((prefix, Ref(bytes, PhantomData)))
}
}
impl<B, T> Ref<B, [T]>
where
B: ByteSlice,
{
#[inline]
pub fn new_slice(bytes: B) -> Option<Ref<B, [T]>> {
let remainder = bytes
.len()
.checked_rem(mem::size_of::<T>())
.expect("Ref::new_slice called on a zero-sized type");
if remainder != 0 || !util::aligned_to::<_, T>(bytes.deref()) {
return None;
}
Some(Ref(bytes, PhantomData))
}
#[inline]
pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
let expected_len = match mem::size_of::<T>().checked_mul(count) {
Some(len) => len,
None => return None,
};
if bytes.len() < expected_len {
return None;
}
let (prefix, bytes) = bytes.split_at(expected_len);
Self::new_slice(prefix).map(move |l| (l, bytes))
}
#[inline]
pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
let expected_len = match mem::size_of::<T>().checked_mul(count) {
Some(len) => len,
None => return None,
};
let split_at = bytes.len().checked_sub(expected_len)?;
let (bytes, suffix) = bytes.split_at(split_at);
Self::new_slice(suffix).map(move |l| (bytes, l))
}
}
fn map_zeroed<B: ByteSliceMut, T: ?Sized>(opt: Option<Ref<B, T>>) -> Option<Ref<B, T>> {
match opt {
Some(mut r) => {
r.0.fill(0);
Some(r)
}
None => None,
}
}
fn map_prefix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
opt: Option<(Ref<B, T>, B)>,
) -> Option<(Ref<B, T>, B)> {
match opt {
Some((mut r, rest)) => {
r.0.fill(0);
Some((r, rest))
}
None => None,
}
}
fn map_suffix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
opt: Option<(B, Ref<B, T>)>,
) -> Option<(B, Ref<B, T>)> {
map_prefix_tuple_zeroed(opt.map(|(a, b)| (b, a))).map(|(a, b)| (b, a))
}
impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
{
#[inline(always)]
pub fn new_zeroed(bytes: B) -> Option<Ref<B, T>> {
map_zeroed(Self::new(bytes))
}
#[inline(always)]
pub fn new_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
map_prefix_tuple_zeroed(Self::new_from_prefix(bytes))
}
#[inline(always)]
pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
map_suffix_tuple_zeroed(Self::new_from_suffix(bytes))
}
}
impl<B, T> Ref<B, [T]>
where
B: ByteSliceMut,
{
#[inline(always)]
pub fn new_slice_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
map_zeroed(Self::new_slice(bytes))
}
#[inline(always)]
pub fn new_slice_from_prefix_zeroed(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count))
}
#[inline(always)]
pub fn new_slice_from_suffix_zeroed(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count))
}
}
impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: Unaligned,
{
#[inline(always)]
pub fn new_unaligned(bytes: B) -> Option<Ref<B, T>> {
Ref::new(bytes)
}
#[inline(always)]
pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
Ref::new_from_prefix(bytes)
}
#[inline(always)]
pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
Ref::new_from_suffix(bytes)
}
}
impl<B, T> Ref<B, [T]>
where
B: ByteSlice,
T: Unaligned,
{
#[inline(always)]
pub fn new_slice_unaligned(bytes: B) -> Option<Ref<B, [T]>> {
Ref::new_slice(bytes)
}
#[inline(always)]
pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
Ref::new_slice_from_prefix(bytes, count)
}
#[inline(always)]
pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
Ref::new_slice_from_suffix(bytes, count)
}
}
impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
T: Unaligned,
{
#[inline(always)]
pub fn new_unaligned_zeroed(bytes: B) -> Option<Ref<B, T>> {
map_zeroed(Self::new_unaligned(bytes))
}
#[inline(always)]
pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes))
}
#[inline(always)]
pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes))
}
}
impl<B, T> Ref<B, [T]>
where
B: ByteSliceMut,
T: Unaligned,
{
#[inline(always)]
pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
map_zeroed(Self::new_slice_unaligned(bytes))
}
#[inline(always)]
pub fn new_slice_unaligned_from_prefix_zeroed(
bytes: B,
count: usize,
) -> Option<(Ref<B, [T]>, B)> {
map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count))
}
#[inline(always)]
pub fn new_slice_unaligned_from_suffix_zeroed(
bytes: B,
count: usize,
) -> Option<(B, Ref<B, [T]>)> {
map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count))
}
}
impl<'a, B, T> Ref<B, T>
where
B: 'a + ByteSlice,
T: FromBytes,
{
#[inline(always)]
pub fn into_ref(self) -> &'a T {
assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
        // SAFETY: the assert above rejects byte slice types (such as
        // `cell::Ref`) for which returning a `'a`-lived reference would be
        // unsound; for the remaining impls, `B` borrows the bytes for all
        // of `'a`.
        unsafe { self.deref_helper() }
}
}
impl<'a, B, T> Ref<B, T>
where
B: 'a + ByteSliceMut,
T: FromBytes + AsBytes,
{
#[inline(always)]
pub fn into_mut(mut self) -> &'a mut T {
assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
        // SAFETY: as in `into_ref` above.
        unsafe { self.deref_mut_helper() }
}
}
impl<'a, B, T> Ref<B, [T]>
where
B: 'a + ByteSlice,
T: FromBytes,
{
#[inline(always)]
pub fn into_slice(self) -> &'a [T] {
assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
        // SAFETY: as in `into_ref` above.
        unsafe { self.deref_slice_helper() }
}
}
impl<'a, B, T> Ref<B, [T]>
where
B: 'a + ByteSliceMut,
T: FromBytes + AsBytes,
{
#[inline(always)]
pub fn into_mut_slice(mut self) -> &'a mut [T] {
assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
        // SAFETY: as in `into_ref` above.
        unsafe { self.deref_mut_slice_helper() }
}
}
impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: FromBytes,
{
unsafe fn deref_helper<'a>(&self) -> &'a T {
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
&*self.0.as_ptr().cast::<T>()
}
}
}
impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T {
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
&mut *self.0.as_mut_ptr().cast::<T>()
}
}
}
impl<B, T> Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes,
{
unsafe fn deref_slice_helper<'a>(&self) -> &'a [T] {
let len = self.0.len();
let elem_size = mem::size_of::<T>();
debug_assert_ne!(elem_size, 0);
#[allow(clippy::arithmetic_side_effects)]
let elems = {
debug_assert_eq!(len % elem_size, 0);
len / elem_size
};
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
slice::from_raw_parts(self.0.as_ptr().cast::<T>(), elems)
}
}
}
impl<B, T> Ref<B, [T]>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
unsafe fn deref_mut_slice_helper<'a>(&mut self) -> &'a mut [T] {
let len = self.0.len();
let elem_size = mem::size_of::<T>();
debug_assert_ne!(elem_size, 0);
#[allow(clippy::arithmetic_side_effects)]
let elems = {
debug_assert_eq!(len % elem_size, 0);
len / elem_size
};
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
slice::from_raw_parts_mut(self.0.as_mut_ptr().cast::<T>(), elems)
}
}
}
impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: ?Sized,
{
#[inline]
pub fn bytes(&self) -> &[u8] {
&self.0
}
}
impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
T: ?Sized,
{
#[inline]
pub fn bytes_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: FromBytes,
{
#[inline]
pub fn read(&self) -> T {
        // SAFETY: `Ref`'s constructors guarantee that `self.0` is aligned
        // to `T` and exactly `size_of::<T>()` bytes long, and `T: FromBytes`
        // makes any byte content a valid `T`.
        unsafe { ptr::read(self.0.as_ptr().cast::<T>()) }
}
}
impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
T: AsBytes,
{
#[inline]
pub fn write(&mut self, t: T) {
        // SAFETY: `Ref`'s constructors guarantee that `self.0` is aligned
        // to `T` and large enough to hold a `T`, so writing `t` overwrites
        // exactly the referenced bytes.
        unsafe { ptr::write(self.0.as_mut_ptr().cast::<T>(), t) }
}
}
impl<B, T> Deref for Ref<B, T>
where
B: ByteSlice,
T: FromBytes,
{
type Target = T;
#[inline]
fn deref(&self) -> &T {
        // SAFETY: `Ref`'s constructors validated `self.0`'s size and
        // alignment for `T`, and the returned reference is reborrowed at
        // the call site to `&self`'s lifetime.
        unsafe { self.deref_helper() }
}
}
impl<B, T> DerefMut for Ref<B, T>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
#[inline]
fn deref_mut(&mut self) -> &mut T {
        // SAFETY: as in `deref`, with `T: AsBytes` additionally permitting
        // mutation through the returned reference.
        unsafe { self.deref_mut_helper() }
}
}
impl<B, T> Deref for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes,
{
type Target = [T];
#[inline]
fn deref(&self) -> &[T] {
        // SAFETY: `Ref`'s slice constructors validated `self.0`'s alignment
        // and that its length is a multiple of `size_of::<T>()`.
        unsafe { self.deref_slice_helper() }
}
}
impl<B, T> DerefMut for Ref<B, [T]>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
#[inline]
fn deref_mut(&mut self) -> &mut [T] {
        // SAFETY: as in the `Deref` impl, with `T: AsBytes` additionally
        // permitting mutation.
        unsafe { self.deref_mut_slice_helper() }
}
}
impl<T, B> Display for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + Display,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &T = self;
inner.fmt(fmt)
}
}
impl<T, B> Display for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes,
[T]: Display,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &[T] = self;
inner.fmt(fmt)
}
}
impl<T, B> Debug for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + Debug,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &T = self;
fmt.debug_tuple("Ref").field(&inner).finish()
}
}
impl<T, B> Debug for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + Debug,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &[T] = self;
fmt.debug_tuple("Ref").field(&inner).finish()
}
}
impl<T, B> Eq for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + Eq,
{
}
impl<T, B> Eq for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + Eq,
{
}
impl<T, B> PartialEq for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + PartialEq,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
self.deref().eq(other.deref())
}
}
impl<T, B> PartialEq for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + PartialEq,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
self.deref().eq(other.deref())
}
}
impl<T, B> Ord for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + Ord,
{
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
let inner: &T = self;
let other_inner: &T = other;
inner.cmp(other_inner)
}
}
impl<T, B> Ord for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + Ord,
{
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
let inner: &[T] = self;
let other_inner: &[T] = other;
inner.cmp(other_inner)
}
}
impl<T, B> PartialOrd for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + PartialOrd,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let inner: &T = self;
let other_inner: &T = other;
inner.partial_cmp(other_inner)
}
}
impl<T, B> PartialOrd for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + PartialOrd,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let inner: &[T] = self;
let other_inner: &[T] = other;
inner.partial_cmp(other_inner)
}
}
mod sealed {
pub trait ByteSliceSealed {}
}
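/// A mutable or immutable reference to a byte slice: `Deref<Target = [u8]>`
/// plus the ability to split at an index. Implemented below for `&[u8]`,
/// `&mut [u8]`, `cell::Ref<'_, [u8]>`, and `cell::RefMut<'_, [u8]>`.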
#[allow(clippy::missing_safety_doc)]
#[cfg_attr(feature = "alloc", doc = "[`Vec<u8>`]: alloc::vec::Vec")]
#[cfg_attr(
not(feature = "alloc"),
doc = "[`Vec<u8>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html"
)]
pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized + sealed::ByteSliceSealed {
#[doc(hidden)]
const INTO_REF_INTO_MUT_ARE_SOUND: bool;
#[inline]
fn as_ptr(&self) -> *const u8 {
<[u8]>::as_ptr(self)
}
fn split_at(self, mid: usize) -> (Self, Self);
}
#[allow(clippy::missing_safety_doc)] pub unsafe trait ByteSliceMut: ByteSlice + DerefMut {
#[inline]
fn as_mut_ptr(&mut self) -> *mut u8 {
<[u8]>::as_mut_ptr(self)
}
}
impl<'a> sealed::ByteSliceSealed for &'a [u8] {}
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for &'a [u8] {
const INTO_REF_INTO_MUT_ARE_SOUND: bool = true;
#[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
<[u8]>::split_at(self, mid)
}
}
impl<'a> sealed::ByteSliceSealed for &'a mut [u8] {}
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for &'a mut [u8] {
const INTO_REF_INTO_MUT_ARE_SOUND: bool = true;
#[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
<[u8]>::split_at_mut(self, mid)
}
}
impl<'a> sealed::ByteSliceSealed for cell::Ref<'a, [u8]> {}
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for cell::Ref<'a, [u8]> {
const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) {
panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::Ref; see https://github.com/google/zerocopy/issues/716")
} else {
false
};
#[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
cell::Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
}
}
impl<'a> sealed::ByteSliceSealed for RefMut<'a, [u8]> {}
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> {
const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) {
panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::RefMut; see https://github.com/google/zerocopy/issues/716")
} else {
false
};
#[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid))
}
}
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSliceMut for &'a mut [u8] {}
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
mod alloc_support {
use alloc::vec::Vec;
use super::*;
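    /// Appends `additional` zeroed `T`s to the end of `v`, reallocating if
    /// necessary.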
#[inline(always)]
pub fn extend_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, additional: usize) {
insert_vec_zeroed(v, v.len(), additional);
}
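    /// Inserts `additional` zeroed `T`s into `v` at `position`, shifting the
    /// elements at and after `position` toward the end.
    ///
    /// Panics if `position > v.len()`.
    ///
    /// A usage sketch (illustrative):
    ///
    /// ```ignore
    /// let mut v = vec![1u8, 2, 3];
    /// zerocopy::insert_vec_zeroed(&mut v, 1, 2);
    /// assert_eq!(&*v, &[1, 0, 0, 2, 3]);
    /// ```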
#[inline]
pub fn insert_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, position: usize, additional: usize) {
assert!(position <= v.len());
v.reserve(additional);
        // SAFETY: `reserve` guarantees capacity for at least `additional`
        // more elements; `copy_to` shifts the existing tail within that
        // allocation, `write_bytes` zeroes the gap (valid because
        // `T: FromZeroes`), and `set_len` covers only initialized elements.
        unsafe {
let ptr = v.as_mut_ptr();
#[allow(clippy::arithmetic_side_effects)]
ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
ptr.add(position).write_bytes(0, additional);
#[allow(clippy::arithmetic_side_effects)]
v.set_len(v.len() + additional);
}
}
#[cfg(test)]
mod tests {
use core::convert::TryFrom as _;
use super::*;
#[test]
fn test_extend_vec_zeroed() {
let mut v = vec![100u64, 200, 300];
extend_vec_zeroed(&mut v, 3);
assert_eq!(v.len(), 6);
assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
drop(v);
let mut v: Vec<u64> = Vec::new();
extend_vec_zeroed(&mut v, 3);
assert_eq!(v.len(), 3);
assert_eq!(&*v, &[0, 0, 0]);
drop(v);
}
#[test]
fn test_extend_vec_zeroed_zst() {
let mut v = vec![(), (), ()];
extend_vec_zeroed(&mut v, 3);
assert_eq!(v.len(), 6);
assert_eq!(&*v, &[(), (), (), (), (), ()]);
drop(v);
let mut v: Vec<()> = Vec::new();
extend_vec_zeroed(&mut v, 3);
assert_eq!(&*v, &[(), (), ()]);
drop(v);
}
#[test]
fn test_insert_vec_zeroed() {
let mut v: Vec<u64> = Vec::new();
insert_vec_zeroed(&mut v, 0, 2);
assert_eq!(v.len(), 2);
assert_eq!(&*v, &[0, 0]);
drop(v);
let mut v = vec![100u64, 200, 300];
insert_vec_zeroed(&mut v, 0, 2);
assert_eq!(v.len(), 5);
assert_eq!(&*v, &[0, 0, 100, 200, 300]);
drop(v);
let mut v = vec![100u64, 200, 300];
insert_vec_zeroed(&mut v, 1, 1);
assert_eq!(v.len(), 4);
assert_eq!(&*v, &[100, 0, 200, 300]);
drop(v);
let mut v = vec![100u64, 200, 300];
insert_vec_zeroed(&mut v, 3, 1);
assert_eq!(v.len(), 4);
assert_eq!(&*v, &[100, 200, 300, 0]);
drop(v);
}
#[test]
fn test_insert_vec_zeroed_zst() {
let mut v: Vec<()> = Vec::new();
insert_vec_zeroed(&mut v, 0, 2);
assert_eq!(v.len(), 2);
assert_eq!(&*v, &[(), ()]);
drop(v);
let mut v = vec![(), (), ()];
insert_vec_zeroed(&mut v, 0, 2);
assert_eq!(v.len(), 5);
assert_eq!(&*v, &[(), (), (), (), ()]);
drop(v);
let mut v = vec![(), (), ()];
insert_vec_zeroed(&mut v, 1, 1);
assert_eq!(v.len(), 4);
assert_eq!(&*v, &[(), (), (), ()]);
drop(v);
let mut v = vec![(), (), ()];
insert_vec_zeroed(&mut v, 3, 1);
assert_eq!(v.len(), 4);
assert_eq!(&*v, &[(), (), (), ()]);
drop(v);
}
#[test]
fn test_new_box_zeroed() {
assert_eq!(*u64::new_box_zeroed(), 0);
}
#[test]
fn test_new_box_zeroed_array() {
drop(<[u32; 0x1000]>::new_box_zeroed());
}
#[test]
fn test_new_box_zeroed_zst() {
#[allow(clippy::unit_cmp)]
{
assert_eq!(*<()>::new_box_zeroed(), ());
}
}
#[test]
fn test_new_box_slice_zeroed() {
let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3);
assert_eq!(s.len(), 3);
assert_eq!(&*s, &[0, 0, 0]);
s[1] = 3;
assert_eq!(&*s, &[0, 3, 0]);
}
#[test]
fn test_new_box_slice_zeroed_empty() {
let s: Box<[u64]> = u64::new_box_slice_zeroed(0);
assert_eq!(s.len(), 0);
}
#[test]
fn test_new_box_slice_zeroed_zst() {
let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3);
assert_eq!(s.len(), 3);
assert!(s.get(10).is_none());
#[allow(clippy::unit_cmp)]
{
assert_eq!(s[1], ());
}
s[2] = ();
}
#[test]
fn test_new_box_slice_zeroed_zst_empty() {
let s: Box<[()]> = <()>::new_box_slice_zeroed(0);
assert_eq!(s.len(), 0);
}
#[test]
#[should_panic(expected = "mem::size_of::<Self>() * len overflows `usize`")]
fn test_new_box_slice_zeroed_panics_mul_overflow() {
let _ = u16::new_box_slice_zeroed(usize::MAX);
}
#[test]
#[should_panic(expected = "assertion failed: size <= max_alloc")]
fn test_new_box_slice_zeroed_panics_isize_overflow() {
let max = usize::try_from(isize::MAX).unwrap();
let _ = u16::new_box_slice_zeroed((max / mem::size_of::<u16>()) + 1);
}
}
}
#[cfg(feature = "alloc")]
#[doc(inline)]
pub use alloc_support::*;
#[cfg(test)]
mod tests {
#![allow(clippy::unreadable_literal)]
use core::{cell::UnsafeCell, convert::TryInto as _, ops::Deref};
use static_assertions::assert_impl_all;
use super::*;
use crate::util::testutil::*;
#[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes, Unaligned)]
#[repr(transparent)]
struct Unsized([u8]);
impl Unsized {
fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
            // SAFETY: `Unsized` is `repr(transparent)` over `[u8]`, so
            // `&mut [u8]` and `&mut Unsized` have identical layouts.
            unsafe { mem::transmute(slc) }
}
}
#[allow(clippy::decimal_literal_representation)]
#[test]
fn test_dst_layout_extend_sized_with_sized() {
macro_rules! test_align_is_size {
($n:expr) => {
let base = DstLayout::for_type::<u8>();
let trailing_field = DstLayout::for_type::<elain::Align<$n>>();
let packs =
core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p))));
for pack in packs {
let composite = base.extend(trailing_field, pack);
let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN);
let align = $n.min(max_align.get());
assert_eq!(
composite,
DstLayout {
align: NonZeroUsize::new(align).unwrap(),
size_info: SizeInfo::Sized { _size: align }
}
)
}
};
}
test_align_is_size!(1);
test_align_is_size!(2);
test_align_is_size!(4);
test_align_is_size!(8);
test_align_is_size!(16);
test_align_is_size!(32);
test_align_is_size!(64);
test_align_is_size!(128);
test_align_is_size!(256);
test_align_is_size!(512);
test_align_is_size!(1024);
test_align_is_size!(2048);
test_align_is_size!(4096);
test_align_is_size!(8192);
test_align_is_size!(16384);
test_align_is_size!(32768);
test_align_is_size!(65536);
test_align_is_size!(131072);
test_align_is_size!(262144);
test_align_is_size!(524288);
test_align_is_size!(1048576);
test_align_is_size!(2097152);
test_align_is_size!(4194304);
test_align_is_size!(8388608);
test_align_is_size!(16777216);
test_align_is_size!(33554432);
test_align_is_size!(67108864);
test_align_is_size!(134217728);
test_align_is_size!(268435456);
}
#[test]
fn test_dst_layout_extend_sized_with_dst() {
let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap());
let packs = core::iter::once(None).chain(aligns.clone().map(Some));
for align in aligns {
for pack in packs.clone() {
let base = DstLayout::for_type::<u8>();
let elem_size = 42;
let trailing_field_offset = 11;
let trailing_field = DstLayout {
align,
size_info: SizeInfo::SliceDst(TrailingSliceLayout {
_elem_size: elem_size,
                        _offset: trailing_field_offset,
}),
};
let composite = base.extend(trailing_field, pack);
let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get();
let align = align.get().min(max_align);
assert_eq!(
composite,
DstLayout {
align: NonZeroUsize::new(align).unwrap(),
size_info: SizeInfo::SliceDst(TrailingSliceLayout {
_elem_size: elem_size,
_offset: align + trailing_field_offset,
}),
}
)
}
}
}
#[test]
fn test_dst_layout_pad_to_align_with_sized() {
for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
let layout = DstLayout { align, size_info: SizeInfo::Sized { _size: 1 } };
assert_eq!(
layout.pad_to_align(),
DstLayout { align, size_info: SizeInfo::Sized { _size: align.get() } }
);
}
macro_rules! test {
(unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr }
=> padded { size: $padded_size:expr, align: $padded_align:expr }) => {
let unpadded = DstLayout {
align: NonZeroUsize::new($unpadded_align).unwrap(),
size_info: SizeInfo::Sized { _size: $unpadded_size },
};
let padded = unpadded.pad_to_align();
assert_eq!(
padded,
DstLayout {
align: NonZeroUsize::new($padded_align).unwrap(),
size_info: SizeInfo::Sized { _size: $padded_size },
}
);
};
}
test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 });
test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 });
test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 });
test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 });
test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 });
test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 });
test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 });
test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 });
test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 });
let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get();
test!(unpadded { size: 1, align: current_max_align }
=> padded { size: current_max_align, align: current_max_align });
test!(unpadded { size: current_max_align + 1, align: current_max_align }
=> padded { size: current_max_align * 2, align: current_max_align });
}
#[test]
fn test_dst_layout_pad_to_align_with_dst() {
for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
for offset in 0..10 {
for elem_size in 0..10 {
let layout = DstLayout {
align,
size_info: SizeInfo::SliceDst(TrailingSliceLayout {
_offset: offset,
_elem_size: elem_size,
}),
};
assert_eq!(layout.pad_to_align(), layout);
}
}
}
}
#[test]
#[cfg_attr(miri, ignore)]
    fn test_validate_cast_and_convert_metadata() {
impl From<usize> for SizeInfo {
fn from(_size: usize) -> SizeInfo {
SizeInfo::Sized { _size }
}
}
impl From<(usize, usize)> for SizeInfo {
fn from((_offset, _elem_size): (usize, usize)) -> SizeInfo {
SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
}
}
fn layout<S: Into<SizeInfo>>(s: S, align: usize) -> DstLayout {
DstLayout { size_info: s.into(), align: NonZeroUsize::new(align).unwrap() }
}
macro_rules! test {
($(:$sizes:expr =>)?
layout($size:tt, $align:tt)
.validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)?
) => {
itertools::iproduct!(
test!(@generate_size $size),
test!(@generate_align $align),
test!(@generate_usize $addr),
test!(@generate_usize $bytes_len),
test!(@generate_cast_type $cast_type)
).for_each(|(size_info, align, addr, bytes_len, cast_type)| {
let previous_hook = std::panic::take_hook();
std::panic::set_hook(Box::new(|_| {}));
let actual = std::panic::catch_unwind(|| {
layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
}).map_err(|d| {
*d.downcast::<&'static str>().expect("expected string panic message").as_ref()
});
std::panic::set_hook(previous_hook);
assert_matches::assert_matches!(
actual, $expect,
"layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?})",
);
});
};
(@generate_usize _) => { 0..8 };
(@generate_size _) => {
test!(@generate_size (_)).chain(test!(@generate_size (_, _)))
};
(@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => {
test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes))
};
(@generate_size (_)) => { test!(@generate_size (0..8)) };
(@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::<SizeInfo>::into) };
(@generate_size ($min_sizes:tt, $elem_sizes:tt)) => {
itertools::iproduct!(
test!(@generate_min_size $min_sizes),
test!(@generate_elem_size $elem_sizes)
).map(Into::<SizeInfo>::into)
};
(@generate_fixed_size _) => { (0..8).into_iter().map(Into::<SizeInfo>::into) };
(@generate_min_size _) => { 0..8 };
(@generate_elem_size _) => { 1..8 };
(@generate_align _) => { [1, 2, 4, 8, 16] };
(@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some).into_iter()) };
(@generate_cast_type _) => { [_CastType::_Prefix, _CastType::_Suffix] };
(@generate_cast_type $variant:ident) => { [_CastType::$variant] };
(@$_:ident ($vals:expr)) => { $vals };
(@$_:ident $vals:expr) => { $vals };
}
const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14];
const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15];
test!(layout(((1..8) | ((1..8), (1..8))), _).validate(_, [0], _), Ok(None));
test!(layout(((2..8) | ((2..8), (2..8))), _).validate(_, [1], _), Ok(None));
test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None));
test!(layout(_, [2]).validate(EVENS, ODDS, _Suffix), Ok(None));
mod msgs {
pub(super) const TRAILING: &str =
"attempted to cast to slice type with zero-sized element";
pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX";
}
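// These inputs violate the method's documented preconditions, so it is
// required to panic; the caught panic message is matched via `Err(..)`.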
test!(layout((_, [0]), _).validate(_, _, _), Err(msgs::TRAILING),);
test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(msgs::OVERFLOW));
test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(msgs::OVERFLOW));
test!(
layout(_, _).validate(
[usize::MAX / 2 + 1, usize::MAX],
[usize::MAX / 2 + 1, usize::MAX],
_
),
Err(msgs::OVERFLOW)
);
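// Checks that `validate_cast_and_convert_metadata` satisfies its
// postconditions on a single input: on success, the resulting size fits in
// `bytes_len`, the element count is maximal, and `split_at` is positioned
// and aligned correctly for the cast type; on failure, the buffer was
// either too small or misaligned.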
fn validate_behavior(
(layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, _CastType),
) {
if let Some((elems, split_at)) =
layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
{
let (size_info, align) = (layout.size_info, layout.align);
let debug_str = format!(
"layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?}) => ({elems}, {split_at})",
);
let sized = matches!(layout.size_info, SizeInfo::Sized { .. });
assert!(!(sized && elems != 0), "{}", debug_str);
let resulting_size = match layout.size_info {
SizeInfo::Sized { _size } => _size,
SizeInfo::SliceDst(TrailingSliceLayout {
_offset: offset,
_elem_size: elem_size,
}) => {
let padded_size = |elems| {
let without_padding = offset + elems * elem_size;
without_padding
+ util::core_layout::padding_needed_for(without_padding, align)
};
let resulting_size = padded_size(elems);
assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str);
resulting_size
}
};
assert!(resulting_size <= bytes_len, "{}", debug_str);
match cast_type {
_CastType::_Prefix => {
assert_eq!(addr % align, 0, "{}", debug_str);
assert_eq!(resulting_size, split_at, "{}", debug_str);
}
_CastType::_Suffix => {
assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str);
assert_eq!((addr + split_at) % align, 0, "{}", debug_str);
}
}
} else {
let min_size = match layout.size_info {
SizeInfo::Sized { _size } => _size,
SizeInfo::SliceDst(TrailingSliceLayout { _offset, .. }) => {
_offset + util::core_layout::padding_needed_for(_offset, layout.align)
}
};
let insufficient_bytes = bytes_len < min_size;
let base = match cast_type {
_CastType::_Prefix => 0,
_CastType::_Suffix => bytes_len,
};
let misaligned = (base + addr) % layout.align != 0;
assert!(insufficient_bytes || misaligned);
}
}
let sizes = 0..8;
let elem_sizes = 1..8;
let size_infos = sizes
.clone()
.map(Into::<SizeInfo>::into)
.chain(itertools::iproduct!(sizes, elem_sizes).map(Into::<SizeInfo>::into));
let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32])
.filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { _size } if _size % align != 0))
.map(|(size_info, align)| layout(size_info, align));
itertools::iproduct!(layouts, 0..8, 0..8, [_CastType::_Prefix, _CastType::_Suffix])
.for_each(validate_behavior);
}
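// Validates `DstLayout` against the layouts Rust itself computes for
// equivalent `#[repr(C)]` types, using the nightly-only
// `size_of_val_raw`/`align_of_val_raw` functions as the source of truth.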
#[test]
#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
fn test_validate_rust_layout() {
use core::ptr::NonNull;
#[derive(Debug)]
struct MacroArgs {
offset: usize,
align: NonZeroUsize,
elem_size: Option<usize>,
}
fn test<T: ?Sized, W: Fn(usize) -> NonNull<T>>(
args: MacroArgs,
with_elems: W,
addr_of_slice_field: Option<fn(NonNull<T>) -> NonNull<u8>>,
) {
let dst = args.elem_size.is_some();
let layout = {
let size_info = match args.elem_size {
Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
_offset: args.offset,
_elem_size: elem_size,
}),
None => SizeInfo::Sized {
_size: args.offset
+ util::core_layout::padding_needed_for(args.offset, args.align),
},
};
DstLayout { size_info, align: args.align }
};
for elems in 0..128 {
let ptr = with_elems(elems);
if let Some(addr_of_slice_field) = addr_of_slice_field {
let slc_field_ptr = addr_of_slice_field(ptr).as_ptr();
let offset: usize =
unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() };
assert_eq!(offset, args.offset);
}
let (size, align) = unsafe {
(mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr()))
};
let assert_msg = if !cfg!(miri) {
format!("\n{args:?}\nsize:{size}, align:{align}")
} else {
String::new()
};
let without_padding =
args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0);
assert!(size >= without_padding, "{}", assert_msg);
assert_eq!(align, args.align.get(), "{}", assert_msg);
let expected_size = without_padding
+ util::core_layout::padding_needed_for(without_padding, args.align);
assert_eq!(expected_size, size, "{}", assert_msg);
if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) {
let addr = ptr.addr().get();
let (got_elems, got_split_at) = layout
.validate_cast_and_convert_metadata(addr, size, _CastType::_Prefix)
.unwrap();
let assert_msg = if !cfg!(miri) {
format!(
"{}\nvalidate_cast_and_convert_metadata({addr}, {size})",
assert_msg
)
} else {
String::new()
};
assert_eq!(got_split_at, size, "{}", assert_msg);
if dst {
assert!(got_elems >= elems, "{}", assert_msg);
if got_elems != elems {
let got_ptr = with_elems(got_elems);
let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) };
assert_eq!(size_of_got_ptr, size, "{}", assert_msg);
}
} else {
assert_eq!(got_elems, 0, "{}", assert_msg)
}
}
}
}
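// Generates a `#[repr(C, align($align))]` type with the given leading
// size and (optionally) trailing slice element size, and runs `test`
// against it. For example, `validate_against_rust!(1, 4, 2)` generates
//
//   #[repr(C, align(4))]
//   struct Foo([u8; 1], [[u8; 2]]);
//
// a DST whose trailing slice lives at offset 1 with 2-byte elements.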
macro_rules! validate_against_rust {
($offset:literal, $align:literal $(, $elem_size:literal)?) => {{
#[repr(C, align($align))]
struct Foo([u8; $offset]$(, [[u8; $elem_size]])?);
let args = MacroArgs {
offset: $offset,
align: $align.try_into().unwrap(),
elem_size: {
#[allow(unused)]
let ret = None::<usize>;
$(let ret = Some($elem_size);)?
ret
}
};
#[repr(C, align($align))]
struct FooAlign;
let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]);
let with_elems = |elems| {
let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems);
#[allow(clippy::as_conversions)]
NonNull::new(slc.as_ptr() as *mut Foo).unwrap()
};
let addr_of_slice_field = {
#[allow(unused)]
let f = None::<fn(NonNull<Foo>) -> NonNull<u8>>;
$(
let f: Option<fn(NonNull<Foo>) -> NonNull<u8>> = Some(|ptr: NonNull<Foo>| unsafe {
NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::<u8>()
});
let _ = $elem_size;
)?
f
};
test::<Foo, _>(args, with_elems, addr_of_slice_field);
}};
}
validate_against_rust!(0, 1);
validate_against_rust!(0, 1, 0);
validate_against_rust!(0, 1, 1);
validate_against_rust!(0, 1, 2);
validate_against_rust!(0, 1, 3);
validate_against_rust!(0, 1, 4);
validate_against_rust!(0, 2);
validate_against_rust!(0, 2, 0);
validate_against_rust!(0, 2, 1);
validate_against_rust!(0, 2, 2);
validate_against_rust!(0, 2, 3);
validate_against_rust!(0, 2, 4);
validate_against_rust!(0, 4);
validate_against_rust!(0, 4, 0);
validate_against_rust!(0, 4, 1);
validate_against_rust!(0, 4, 2);
validate_against_rust!(0, 4, 3);
validate_against_rust!(0, 4, 4);
validate_against_rust!(0, 8);
validate_against_rust!(0, 8, 0);
validate_against_rust!(0, 8, 1);
validate_against_rust!(0, 8, 2);
validate_against_rust!(0, 8, 3);
validate_against_rust!(0, 8, 4);
validate_against_rust!(0, 16);
validate_against_rust!(0, 16, 0);
validate_against_rust!(0, 16, 1);
validate_against_rust!(0, 16, 2);
validate_against_rust!(0, 16, 3);
validate_against_rust!(0, 16, 4);
validate_against_rust!(1, 1);
validate_against_rust!(1, 1, 0);
validate_against_rust!(1, 1, 1);
validate_against_rust!(1, 1, 2);
validate_against_rust!(1, 1, 3);
validate_against_rust!(1, 1, 4);
validate_against_rust!(1, 2);
validate_against_rust!(1, 2, 0);
validate_against_rust!(1, 2, 1);
validate_against_rust!(1, 2, 2);
validate_against_rust!(1, 2, 3);
validate_against_rust!(1, 2, 4);
validate_against_rust!(1, 4);
validate_against_rust!(1, 4, 0);
validate_against_rust!(1, 4, 1);
validate_against_rust!(1, 4, 2);
validate_against_rust!(1, 4, 3);
validate_against_rust!(1, 4, 4);
validate_against_rust!(1, 8);
validate_against_rust!(1, 8, 0);
validate_against_rust!(1, 8, 1);
validate_against_rust!(1, 8, 2);
validate_against_rust!(1, 8, 3);
validate_against_rust!(1, 8, 4);
validate_against_rust!(1, 16);
validate_against_rust!(1, 16, 0);
validate_against_rust!(1, 16, 1);
validate_against_rust!(1, 16, 2);
validate_against_rust!(1, 16, 3);
validate_against_rust!(1, 16, 4);
validate_against_rust!(2, 1);
validate_against_rust!(2, 1, 0);
validate_against_rust!(2, 1, 1);
validate_against_rust!(2, 1, 2);
validate_against_rust!(2, 1, 3);
validate_against_rust!(2, 1, 4);
validate_against_rust!(2, 2);
validate_against_rust!(2, 2, 0);
validate_against_rust!(2, 2, 1);
validate_against_rust!(2, 2, 2);
validate_against_rust!(2, 2, 3);
validate_against_rust!(2, 2, 4);
validate_against_rust!(2, 4);
validate_against_rust!(2, 4, 0);
validate_against_rust!(2, 4, 1);
validate_against_rust!(2, 4, 2);
validate_against_rust!(2, 4, 3);
validate_against_rust!(2, 4, 4);
validate_against_rust!(2, 8);
validate_against_rust!(2, 8, 0);
validate_against_rust!(2, 8, 1);
validate_against_rust!(2, 8, 2);
validate_against_rust!(2, 8, 3);
validate_against_rust!(2, 8, 4);
validate_against_rust!(2, 16);
validate_against_rust!(2, 16, 0);
validate_against_rust!(2, 16, 1);
validate_against_rust!(2, 16, 2);
validate_against_rust!(2, 16, 3);
validate_against_rust!(2, 16, 4);
validate_against_rust!(3, 1);
validate_against_rust!(3, 1, 0);
validate_against_rust!(3, 1, 1);
validate_against_rust!(3, 1, 2);
validate_against_rust!(3, 1, 3);
validate_against_rust!(3, 1, 4);
validate_against_rust!(3, 2);
validate_against_rust!(3, 2, 0);
validate_against_rust!(3, 2, 1);
validate_against_rust!(3, 2, 2);
validate_against_rust!(3, 2, 3);
validate_against_rust!(3, 2, 4);
validate_against_rust!(3, 4);
validate_against_rust!(3, 4, 0);
validate_against_rust!(3, 4, 1);
validate_against_rust!(3, 4, 2);
validate_against_rust!(3, 4, 3);
validate_against_rust!(3, 4, 4);
validate_against_rust!(3, 8);
validate_against_rust!(3, 8, 0);
validate_against_rust!(3, 8, 1);
validate_against_rust!(3, 8, 2);
validate_against_rust!(3, 8, 3);
validate_against_rust!(3, 8, 4);
validate_against_rust!(3, 16);
validate_against_rust!(3, 16, 0);
validate_against_rust!(3, 16, 1);
validate_against_rust!(3, 16, 2);
validate_against_rust!(3, 16, 3);
validate_against_rust!(3, 16, 4);
validate_against_rust!(4, 1);
validate_against_rust!(4, 1, 0);
validate_against_rust!(4, 1, 1);
validate_against_rust!(4, 1, 2);
validate_against_rust!(4, 1, 3);
validate_against_rust!(4, 1, 4);
validate_against_rust!(4, 2);
validate_against_rust!(4, 2, 0);
validate_against_rust!(4, 2, 1);
validate_against_rust!(4, 2, 2);
validate_against_rust!(4, 2, 3);
validate_against_rust!(4, 2, 4);
validate_against_rust!(4, 4);
validate_against_rust!(4, 4, 0);
validate_against_rust!(4, 4, 1);
validate_against_rust!(4, 4, 2);
validate_against_rust!(4, 4, 3);
validate_against_rust!(4, 4, 4);
validate_against_rust!(4, 8);
validate_against_rust!(4, 8, 0);
validate_against_rust!(4, 8, 1);
validate_against_rust!(4, 8, 2);
validate_against_rust!(4, 8, 3);
validate_against_rust!(4, 8, 4);
validate_against_rust!(4, 16);
validate_against_rust!(4, 16, 0);
validate_against_rust!(4, 16, 1);
validate_against_rust!(4, 16, 2);
validate_against_rust!(4, 16, 3);
validate_against_rust!(4, 16, 4);
}
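// Spot-checks `KnownLayout::LAYOUT` for primitives, slices, and `str`,
// including that `ManuallyDrop<T>` preserves `T`'s layout and that
// `PhantomData<T>` has the layout of `()`.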
#[test]
fn test_known_layout() {
macro_rules! test {
($ty:ty, $expect:expr) => {
let expect = $expect;
assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
};
}
let layout = |offset, align, _trailing_slice_elem_size| DstLayout {
align: NonZeroUsize::new(align).unwrap(),
size_info: match _trailing_slice_elem_size {
None => SizeInfo::Sized { _size: offset },
Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
_offset: offset,
_elem_size: elem_size,
}),
},
};
test!((), layout(0, 1, None));
test!(u8, layout(1, 1, None));
test!(u64, layout(8, mem::align_of::<u64>(), None));
test!(AU64, layout(8, 8, None));
test!(Option<&'static ()>, usize::LAYOUT);
test!([()], layout(0, 1, Some(0)));
test!([u8], layout(0, 1, Some(1)));
test!(str, layout(0, 1, Some(1)));
}
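// Exercises the `KnownLayout` derive across combinations of `repr`
// modifiers (`align(N)`, `packed`, `packed(N)`), generic and non-generic
// types, and sized vs. trailing-slice fields. Each derived layout is
// checked twice: once against `DstLayout`'s own composition API
// (`for_type`/`new_zst`/`extend`/`pad_to_align`) and once against a
// hard-coded expected value.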
#[cfg(feature = "derive")]
#[test]
fn test_known_layout_derive() {
struct NotKnownLayout<T = ()> {
_t: T,
}
#[derive(KnownLayout)]
#[repr(C)]
struct AlignSize<const ALIGN: usize, const SIZE: usize>
where
elain::Align<ALIGN>: elain::Alignment,
{
_align: elain::Align<ALIGN>,
_size: [u8; SIZE],
}
type AU16 = AlignSize<2, 2>;
type AU32 = AlignSize<4, 4>;
fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
let sized_layout = |align, size| DstLayout {
align: NonZeroUsize::new(align).unwrap(),
size_info: SizeInfo::Sized { _size: size },
};
let unsized_layout = |align, elem_size, offset| DstLayout {
align: NonZeroUsize::new(align).unwrap(),
size_info: SizeInfo::SliceDst(TrailingSliceLayout {
_offset: offset,
_elem_size: elem_size,
}),
};
#[derive(KnownLayout)]
#[allow(dead_code)]
struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
let expected = DstLayout::for_type::<KL01>();
assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
#[derive(KnownLayout)]
#[repr(align(64))]
#[allow(dead_code)]
struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
let expected = DstLayout::for_type::<KL01Align>();
assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
#[derive(KnownLayout)]
#[repr(packed)]
#[allow(dead_code)]
struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
let expected = DstLayout::for_type::<KL01Packed>();
assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
#[derive(KnownLayout)]
#[repr(packed(2))]
#[allow(dead_code)]
struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
assert_impl_all!(KL01PackedN: KnownLayout);
let expected = DstLayout::for_type::<KL01PackedN>();
assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
#[derive(KnownLayout)]
#[allow(dead_code)]
struct KL03(NotKnownLayout, u8);
let expected = DstLayout::for_type::<KL03>();
assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
#[derive(KnownLayout)]
#[repr(align(64))]
#[allow(dead_code)]
struct KL03Align(NotKnownLayout<AU32>, u8);
let expected = DstLayout::for_type::<KL03Align>();
assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
#[derive(KnownLayout)]
#[repr(packed)]
#[allow(dead_code)]
struct KL03Packed(NotKnownLayout<AU32>, u8);
let expected = DstLayout::for_type::<KL03Packed>();
assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
#[derive(KnownLayout)]
#[repr(packed(2))]
#[allow(dead_code)]
struct KL03PackedN(NotKnownLayout<AU32>, u8);
assert_impl_all!(KL03PackedN: KnownLayout);
let expected = DstLayout::for_type::<KL03PackedN>();
assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
#[derive(KnownLayout)]
#[allow(dead_code)]
struct KL05<T>(u8, T);
fn _test_kl05<T>(t: T) -> impl KnownLayout {
KL05(0u8, t)
}
#[derive(KnownLayout)]
#[allow(dead_code)]
struct KL07<T: KnownLayout>(u8, T);
fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
let _ = KL07(0u8, t);
}
#[derive(KnownLayout)]
#[repr(C)]
struct KL10(NotKnownLayout<AU32>, [u8]);
let expected = DstLayout::new_zst(None)
.extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
.extend(<[u8] as KnownLayout>::LAYOUT, None)
.pad_to_align();
assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4));
#[derive(KnownLayout)]
#[repr(C, align(64))]
struct KL10Align(NotKnownLayout<AU32>, [u8]);
let repr_align = NonZeroUsize::new(64);
let expected = DstLayout::new_zst(repr_align)
.extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
.extend(<[u8] as KnownLayout>::LAYOUT, None)
.pad_to_align();
assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4));
#[derive(KnownLayout)]
#[repr(C, packed)]
struct KL10Packed(NotKnownLayout<AU32>, [u8]);
let repr_packed = NonZeroUsize::new(1);
let expected = DstLayout::new_zst(None)
.extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
.extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
.pad_to_align();
assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4));
#[derive(KnownLayout)]
#[repr(C, packed(2))]
struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
let repr_packed = NonZeroUsize::new(2);
let expected = DstLayout::new_zst(None)
.extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
.extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
.pad_to_align();
assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
#[derive(KnownLayout)]
#[repr(C)]
struct KL11(NotKnownLayout<AU64>, u8);
let expected = DstLayout::new_zst(None)
.extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
.extend(<u8 as KnownLayout>::LAYOUT, None)
.pad_to_align();
assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
#[derive(KnownLayout)]
#[repr(C, align(64))]
struct KL11Align(NotKnownLayout<AU64>, u8);
let repr_align = NonZeroUsize::new(64);
let expected = DstLayout::new_zst(repr_align)
.extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
.extend(<u8 as KnownLayout>::LAYOUT, None)
.pad_to_align();
assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
#[derive(KnownLayout)]
#[repr(C, packed)]
struct KL11Packed(NotKnownLayout<AU64>, u8);
let repr_packed = NonZeroUsize::new(1);
let expected = DstLayout::new_zst(None)
.extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
.extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
.pad_to_align();
assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
#[derive(KnownLayout)]
#[repr(C, packed(2))]
struct KL11PackedN(NotKnownLayout<AU64>, u8);
let repr_packed = NonZeroUsize::new(2);
let expected = DstLayout::new_zst(None)
.extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
.extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
.pad_to_align();
assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
#[derive(KnownLayout)]
#[repr(C)]
struct KL14<T: ?Sized + KnownLayout>(u8, T);
fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
_assert_kl(kl)
}
#[derive(KnownLayout)]
#[repr(C)]
struct KL15<T: KnownLayout>(u8, T);
fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
let _ = KL15(0u8, t);
}
#[allow(clippy::upper_case_acronyms)]
#[derive(KnownLayout)]
#[repr(C)]
struct KLTU<T, U: ?Sized>(T, U);
assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0));
assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0));
assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1));
assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2));
assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2));
assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
#[derive(KnownLayout)]
#[repr(C)]
struct KLF0;
assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
#[derive(KnownLayout)]
#[repr(C)]
struct KLF1([u8]);
assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
#[derive(KnownLayout)]
#[repr(C)]
struct KLF2(NotKnownLayout<u8>, [u8]);
assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
#[derive(KnownLayout)]
#[repr(C)]
struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
#[derive(KnownLayout)]
#[repr(C)]
struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8));
}
#[test]
fn test_object_safety() {
fn _takes_from_zeroes(_: &dyn FromZeroes) {}
fn _takes_from_bytes(_: &dyn FromBytes) {}
fn _takes_unaligned(_: &dyn Unaligned) {}
}
#[test]
fn test_from_zeroes_only() {
assert!(!bool::new_zeroed());
assert_eq!(char::new_zeroed(), '\0');
#[cfg(feature = "alloc")]
{
assert_eq!(bool::new_box_zeroed(), Box::new(false));
assert_eq!(char::new_box_zeroed(), Box::new('\0'));
assert_eq!(bool::new_box_slice_zeroed(3).as_ref(), [false, false, false]);
assert_eq!(char::new_box_slice_zeroed(3).as_ref(), ['\0', '\0', '\0']);
assert_eq!(bool::new_vec_zeroed(3).as_ref(), [false, false, false]);
assert_eq!(char::new_vec_zeroed(3).as_ref(), ['\0', '\0', '\0']);
}
let mut string = "hello".to_string();
let s: &mut str = string.as_mut();
assert_eq!(s, "hello");
s.zero();
assert_eq!(s, "\0\0\0\0\0");
}
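// Round-trips a `u64` in native byte order through `read_from`,
// `read_from_prefix`/`read_from_suffix`, and the corresponding `write_to*`
// methods, checking that the untouched half of each buffer is preserved.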
#[test]
fn test_read_write() {
const VAL: u64 = 0x12345678;
#[cfg(target_endian = "big")]
const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
#[cfg(target_endian = "little")]
const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL));
let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Some(VAL));
assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Some(0));
let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0));
assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL));
let mut bytes = [0u8; 8];
assert_eq!(VAL.write_to(&mut bytes[..]), Some(()));
assert_eq!(bytes, VAL_BYTES);
let mut bytes = [0u8; 16];
assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Some(()));
let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
assert_eq!(bytes, want);
let mut bytes = [0u8; 16];
assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Some(()));
let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
assert_eq!(bytes, want);
}
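// Tests `transmute!` in both expression and const contexts, and verifies
// that it does not drop its argument: if it did, `PanicOnDrop` below
// would cause the test to fail.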
#[test]
fn test_transmute() {
let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
let x: [[u8; 2]; 4] = transmute!(array_of_u8s);
assert_eq!(x, array_of_arrays);
let x: [u8; 8] = transmute!(array_of_arrays);
assert_eq!(x, array_of_u8s);
#[derive(AsBytes)]
#[repr(transparent)]
struct PanicOnDrop(());
impl Drop for PanicOnDrop {
fn drop(&mut self) {
panic!("PanicOnDrop::drop");
}
}
#[allow(clippy::let_unit_value)]
let _: () = transmute!(PanicOnDrop(()));
const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S);
assert_eq!(X, ARRAY_OF_ARRAYS);
}
#[test]
fn test_transmute_ref() {
let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
let x: &[[u8; 2]; 4] = transmute_ref!(&array_of_u8s);
assert_eq!(*x, array_of_arrays);
let x: &[u8; 8] = transmute_ref!(&array_of_arrays);
assert_eq!(*x, array_of_u8s);
const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
#[allow(clippy::redundant_static_lifetimes)]
const X: &'static [[u8; 2]; 4] = transmute_ref!(&ARRAY_OF_U8S);
assert_eq!(*X, ARRAY_OF_ARRAYS);
let x: &[u8; 8] = transmute_ref!(X);
assert_eq!(*x, ARRAY_OF_U8S);
let u = AU64(0);
let array = [0, 0, 0, 0, 0, 0, 0, 0];
let x: &[u8; 8] = transmute_ref!(&u);
assert_eq!(*x, array);
let mut x = 0u8;
#[allow(clippy::useless_transmute)]
let y: &u8 = transmute_ref!(&mut x);
assert_eq!(*y, 0);
}
#[test]
fn test_transmute_mut() {
let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
let x: &mut [[u8; 2]; 4] = transmute_mut!(&mut array_of_u8s);
assert_eq!(*x, array_of_arrays);
let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
assert_eq!(*x, array_of_u8s);
{
let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
assert_eq!(*x, array_of_u8s);
}
let mut u = AU64(0);
let array = [0, 0, 0, 0, 0, 0, 0, 0];
let x: &[u8; 8] = transmute_mut!(&mut u);
assert_eq!(*x, array);
let mut x = 0u8;
#[allow(clippy::useless_transmute)]
let y: &u8 = transmute_mut!(&mut x);
assert_eq!(*y, 0);
}
#[test]
fn test_macros_evaluate_args_once() {
let mut ctr = 0;
let _: usize = transmute!({
ctr += 1;
0usize
});
assert_eq!(ctr, 1);
let mut ctr = 0;
let _: &usize = transmute_ref!({
ctr += 1;
&0usize
});
assert_eq!(ctr, 1);
}
#[test]
fn test_include_value() {
const AS_U32: u32 = include_value!("../testdata/include_value/data");
assert_eq!(AS_U32, u32::from_ne_bytes([b'a', b'b', b'c', b'd']));
const AS_I32: i32 = include_value!("../testdata/include_value/data");
assert_eq!(AS_I32, i32::from_ne_bytes([b'a', b'b', b'c', b'd']));
}
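// `Ref::deref` must return a reference with the same address as the
// underlying buffer.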
#[test]
fn test_address() {
let buf = [0];
let r = Ref::<_, u8>::new(&buf[..]).unwrap();
let buf_ptr = buf.as_ptr();
let deref_ptr: *const u8 = r.deref();
assert_eq!(buf_ptr, deref_ptr);
let buf = [0];
let r = Ref::<_, [u8]>::new_slice(&buf[..]).unwrap();
let buf_ptr = buf.as_ptr();
let deref_ptr = r.deref().as_ptr();
assert_eq!(buf_ptr, deref_ptr);
}
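// Verifies that a freshly-constructed `Ref` is zeroed and that reads and
// writes through `Deref`/`DerefMut`, `read`/`write`, and
// `bytes`/`bytes_mut` all agree.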
fn test_new_helper(mut r: Ref<&mut [u8], AU64>) {
assert_eq!(*r, AU64(0));
assert_eq!(r.read(), AU64(0));
const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
*r = VAL1;
assert_eq!(r.bytes(), &VAL1.to_bytes());
*r = AU64(0);
r.write(VAL1);
assert_eq!(r.bytes(), &VAL1.to_bytes());
const VAL2: AU64 = AU64(!VAL1.0);
r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]);
assert_eq!(*r, VAL2);
assert_eq!(r.read(), VAL2);
}
fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) {
assert_eq!(&*r, vec![AU64(0); typed_len].as_slice());
let untyped_len = typed_len * 8;
assert_eq!(r.bytes().len(), untyped_len);
assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::<u8>());
const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
for typed in &mut *r {
*typed = VAL1;
}
assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice());
const VAL2: AU64 = AU64(!VAL1.0);
r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len));
assert!(r.iter().copied().all(|x| x == VAL2));
}
fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) {
assert_eq!(*r, [0; 8]);
assert_eq!(r.read(), [0; 8]);
const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00];
*r = VAL1;
assert_eq!(r.bytes(), &VAL1);
*r = [0; 8];
r.write(VAL1);
assert_eq!(r.bytes(), &VAL1);
const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF];
r.bytes_mut().copy_from_slice(&VAL2[..]);
assert_eq!(*r, VAL2);
assert_eq!(r.read(), VAL2);
}
fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) {
assert_eq!(&*r, vec![0u8; len].as_slice());
assert_eq!(r.bytes().len(), len);
assert_eq!(r.bytes().as_ptr(), r.as_ptr());
let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
r.copy_from_slice(&expected_bytes);
assert_eq!(r.bytes(), expected_bytes.as_slice());
for byte in &mut expected_bytes {
*byte = !*byte;
}
r.bytes_mut().copy_from_slice(&expected_bytes);
assert_eq!(&*r, expected_bytes.as_slice());
}
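// Constructs `Ref`s over properly-aligned buffers via every sized and
// slice constructor, exercising the prefix/suffix and `_zeroed` variants.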
#[test]
fn test_new_aligned_sized() {
let mut buf = Align::<[u8; 8], AU64>::default();
test_new_helper(Ref::<_, AU64>::new(&mut buf.t[..]).unwrap());
let ascending: [u8; 8] = (0..8).collect::<Vec<_>>().try_into().unwrap();
buf.t = ascending;
test_new_helper(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).unwrap());
{
buf.set_default();
let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
assert!(suffix.is_empty());
test_new_helper(r);
}
{
buf.t = ascending;
let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
assert!(suffix.is_empty());
test_new_helper(r);
}
{
buf.set_default();
let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
assert!(prefix.is_empty());
test_new_helper(r);
}
{
buf.t = ascending;
let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
assert!(prefix.is_empty());
test_new_helper(r);
}
let mut buf = Align::<[u8; 24], AU64>::default();
test_new_helper_slice(Ref::<_, [AU64]>::new_slice(&mut buf.t[..]).unwrap(), 3);
let ascending: [u8; 24] = (0..24).collect::<Vec<_>>().try_into().unwrap();
let mut ascending_prefix = ascending;
ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
let mut ascending_suffix = ascending;
ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
test_new_helper_slice(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).unwrap(), 3);
{
buf.t = ascending_suffix;
let (r, suffix) = Ref::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap();
assert_eq!(suffix, &ascending[8..]);
test_new_helper_slice(r, 1);
}
{
buf.t = ascending_suffix;
let (r, suffix) =
Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 1).unwrap();
assert_eq!(suffix, &ascending[8..]);
test_new_helper_slice(r, 1);
}
{
buf.t = ascending_prefix;
let (prefix, r) = Ref::<_, [AU64]>::new_slice_from_suffix(&mut buf.t[..], 1).unwrap();
assert_eq!(prefix, &ascending[..16]);
test_new_helper_slice(r, 1);
}
{
buf.t = ascending_prefix;
let (prefix, r) =
Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 1).unwrap();
assert_eq!(prefix, &ascending[..16]);
test_new_helper_slice(r, 1);
}
}
#[test]
fn test_new_unaligned_sized() {
let mut buf = [0u8; 8];
test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap());
buf = [0xFFu8; 8];
test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap());
{
buf = [0u8; 8];
let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
assert!(suffix.is_empty());
test_new_helper_unaligned(r);
}
{
buf = [0xFFu8; 8];
let (r, suffix) =
Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
assert!(suffix.is_empty());
test_new_helper_unaligned(r);
}
{
buf = [0u8; 8];
let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
assert!(prefix.is_empty());
test_new_helper_unaligned(r);
}
{
buf = [0xFFu8; 8];
let (prefix, r) =
Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
assert!(prefix.is_empty());
test_new_helper_unaligned(r);
}
let mut buf = [0u8; 16];
test_new_helper_slice_unaligned(
Ref::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(),
16,
);
buf = [0xFFu8; 16];
test_new_helper_slice_unaligned(
Ref::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(),
16,
);
{
buf = [0u8; 16];
let (r, suffix) =
Ref::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8).unwrap();
assert_eq!(suffix, [0; 8]);
test_new_helper_slice_unaligned(r, 8);
}
{
buf = [0xFFu8; 16];
let (r, suffix) =
Ref::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8).unwrap();
assert_eq!(suffix, [0xFF; 8]);
test_new_helper_slice_unaligned(r, 8);
}
{
buf = [0u8; 16];
let (prefix, r) =
Ref::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8).unwrap();
assert_eq!(prefix, [0; 8]);
test_new_helper_slice_unaligned(r, 8);
}
{
buf = [0xFFu8; 16];
let (prefix, r) =
Ref::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8).unwrap();
assert_eq!(prefix, [0xFF; 8]);
test_new_helper_slice_unaligned(r, 8);
}
}
#[test]
fn test_new_oversized() {
let mut buf = Align::<[u8; 16], AU64>::default();
{
let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
assert_eq!(suffix.len(), 8);
test_new_helper(r);
}
{
buf.t = [0xFFu8; 16];
let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
assert_eq!(suffix, &[0xFFu8; 8]);
test_new_helper(r);
}
{
buf.set_default();
let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
assert_eq!(prefix.len(), 8);
test_new_helper(r);
}
{
buf.t = [0xFFu8; 16];
let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
assert_eq!(prefix, &[0xFFu8; 8]);
test_new_helper(r);
}
}
#[test]
fn test_new_unaligned_oversized() {
let mut buf = [0u8; 16];
{
let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
assert_eq!(suffix.len(), 8);
test_new_helper_unaligned(r);
}
{
buf = [0xFFu8; 16];
let (r, suffix) =
Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
assert_eq!(suffix, &[0xFF; 8]);
test_new_helper_unaligned(r);
}
{
buf = [0u8; 16];
let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
assert_eq!(prefix.len(), 8);
test_new_helper_unaligned(r);
}
{
buf = [0xFFu8; 16];
let (prefix, r) =
Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
assert_eq!(prefix, &[0xFF; 8]);
test_new_helper_unaligned(r);
}
}
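// Interleaves reads and writes through the `FromBytes` prefix/suffix
// methods, checking the buffer's contents after each mutation.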
#[test]
fn test_ref_from_mut_from() {
let mut buf =
Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
assert_eq!(
AU64::ref_from(&buf.t[8..]).unwrap().0.to_ne_bytes(),
[8, 9, 10, 11, 12, 13, 14, 15]
);
let suffix = AU64::mut_from(&mut buf.t[8..]).unwrap();
suffix.0 = 0x0101010101010101;
assert_eq!(<[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]);
let suffix = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
suffix.0 = 0x0202020202020202;
<[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap()[0] = 42;
assert_eq!(<[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), &[0, 1, 2, 3, 4, 5, 42, 7, 2]);
<[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap()[1] = 30;
assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
}
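// Each conversion below must fail because the byte slice has the wrong
// size or is insufficiently aligned for the target type.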
#[test]
fn test_ref_from_mut_from_error() {
let mut buf = Align::<[u8; 16], AU64>::default();
assert!(AU64::ref_from(&buf.t[..]).is_none());
assert!(AU64::mut_from(&mut buf.t[..]).is_none());
assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
let mut buf = Align::<[u8; 4], AU64>::default();
assert!(AU64::ref_from(&buf.t[..]).is_none());
assert!(AU64::mut_from(&mut buf.t[..]).is_none());
assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
assert!(AU64::ref_from_prefix(&buf.t[..]).is_none());
assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_none());
assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_none());
assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_none());
assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_none());
assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_none());
let mut buf = Align::<[u8; 13], AU64>::default();
assert!(AU64::ref_from(&buf.t[1..]).is_none());
assert!(AU64::mut_from(&mut buf.t[1..]).is_none());
assert!(AU64::ref_from_prefix(&buf.t[1..]).is_none());
assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_none());
assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
}
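// `Ref` constructors must return `None` rather than panicking when the
// buffer has the wrong size or alignment, or when the requested element
// count would overflow `usize`.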
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_new_error() {
let mut buf = Align::<[u8; 16], AU64>::default();
assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
let mut buf = Align::<[u8; 4], AU64>::default();
assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[..]).is_none());
assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).is_none());
assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.t[..]).is_none());
assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.t[..]).is_none());
assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.t[..]).is_none());
assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.t[..]).is_none());
let mut buf = Align::<[u8; 12], AU64>::default();
assert!(Ref::<_, [AU64]>::new_slice(&buf.t[..]).is_none());
assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned(&buf.t[..]).is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.t[..]).is_none());
let mut buf = Align::<[u8; 12], AU64>::default();
assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], 2).is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 2).is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], 2).is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 2).is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], 2).is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(&mut buf.t[..], 2)
.is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], 2).is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(&mut buf.t[..], 2)
.is_none());
let mut buf = Align::<[u8; 13], AU64>::default();
assert!(Ref::<_, AU64>::new(&buf.t[1..]).is_none());
assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[1..]).is_none());
assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[1..]).is_none());
assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[1..]).is_none());
assert!(Ref::<_, [AU64]>::new_slice(&buf.t[1..]).is_none());
assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[1..]).is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[1..], 1).is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[1..], 1).is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[1..], 1).is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[1..], 1).is_none());
assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
let mut buf = Align::<[u8; 16], AU64>::default();
let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1;
assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], unreasonable_len).is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], unreasonable_len)
.is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], unreasonable_len).is_none());
assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], unreasonable_len)
.is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], unreasonable_len)
.is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
&mut buf.t[..],
unreasonable_len
)
.is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], unreasonable_len)
.is_none());
assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
&mut buf.t[..],
unreasonable_len
)
.is_none());
}
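// The slice-based `Ref` constructors are documented to panic when the
// element type is zero-sized; each case also asserts that the panic
// message names the constructor.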
mod test_zst_panics {
macro_rules! zst_test {
($name:ident($($tt:tt)*), $constructor_in_panic_msg:tt) => {
#[test]
#[should_panic = concat!("Ref::", $constructor_in_panic_msg, " called on a zero-sized type")]
fn $name() {
let mut buffer = [0u8];
let r = $crate::Ref::<_, [()]>::$name(&mut buffer[..], $($tt)*);
unreachable!("should have panicked, got {:?}", r);
}
}
}
zst_test!(new_slice(), "new_slice");
zst_test!(new_slice_zeroed(), "new_slice");
zst_test!(new_slice_from_prefix(1), "new_slice");
zst_test!(new_slice_from_prefix_zeroed(1), "new_slice");
zst_test!(new_slice_from_suffix(1), "new_slice");
zst_test!(new_slice_from_suffix_zeroed(1), "new_slice");
zst_test!(new_slice_unaligned(), "new_slice_unaligned");
zst_test!(new_slice_unaligned_zeroed(), "new_slice_unaligned");
zst_test!(new_slice_unaligned_from_prefix(1), "new_slice_unaligned");
zst_test!(new_slice_unaligned_from_prefix_zeroed(1), "new_slice_unaligned");
zst_test!(new_slice_unaligned_from_suffix(1), "new_slice_unaligned");
zst_test!(new_slice_unaligned_from_suffix_zeroed(1), "new_slice_unaligned");
}
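// Exercises the `AsBytes` methods: `as_bytes`/`as_bytes_mut` expose the
// value's bytes in place, `write_to` succeeds only for exact-length
// destinations, and `write_to_prefix`/`write_to_suffix` leave the
// remaining bytes untouched.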
#[test]
fn test_as_bytes_methods() {
fn test<T: FromBytes + AsBytes + Debug + Eq + ?Sized, const N: usize>(
t: &mut T,
bytes: &[u8],
post_mutation: &T,
) {
assert_eq!(t.as_bytes(), bytes);
t.as_bytes_mut()[0] ^= 0xFF;
assert_eq!(t, post_mutation);
t.as_bytes_mut()[0] ^= 0xFF;
assert_eq!(t.write_to(&mut vec![0; N - 1][..]), None);
assert_eq!(t.write_to(&mut vec![0; N + 1][..]), None);
let mut bytes = [0; N];
assert_eq!(t.write_to(&mut bytes[..]), Some(()));
assert_eq!(bytes, t.as_bytes());
assert_eq!(t.write_to_prefix(&mut vec![0; N - 1][..]), None);
let mut bytes = [0; N];
assert_eq!(t.write_to_prefix(&mut bytes[..]), Some(()));
assert_eq!(bytes, t.as_bytes());
let mut too_many_bytes = vec![0; N + 1];
too_many_bytes[N] = 123;
assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Some(()));
assert_eq!(&too_many_bytes[..N], t.as_bytes());
assert_eq!(too_many_bytes[N], 123);
assert_eq!(t.write_to_suffix(&mut vec![0; N - 1][..]), None);
let mut bytes = [0; N];
assert_eq!(t.write_to_suffix(&mut bytes[..]), Some(()));
assert_eq!(bytes, t.as_bytes());
let mut too_many_bytes = vec![0; N + 1];
too_many_bytes[0] = 123;
assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Some(()));
assert_eq!(&too_many_bytes[1..], t.as_bytes());
assert_eq!(too_many_bytes[0], 123);
}
#[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes)]
#[repr(C)]
struct Foo {
a: u32,
b: Wrapping<u32>,
c: Option<NonZeroU32>,
}
let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
} else {
vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
};
let post_mutation_expected_a =
if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
test::<_, 12>(
&mut Foo { a: 1, b: Wrapping(2), c: None },
expected_bytes.as_bytes(),
&Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
);
test::<_, 3>(
Unsized::from_mut_slice(&mut [1, 2, 3]),
&[1, 2, 3],
Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
);
}
#[test]
fn test_array() {
#[derive(FromZeroes, FromBytes, AsBytes)]
#[repr(C)]
struct Foo {
a: [u16; 33],
}
let foo = Foo { a: [0xFFFF; 33] };
let expected = [0xFFu8; 66];
assert_eq!(foo.as_bytes(), &expected[..]);
}
#[test]
fn test_display_debug() {
let buf = Align::<[u8; 8], u64>::default();
let r = Ref::<_, u64>::new(&buf.t[..]).unwrap();
assert_eq!(format!("{}", r), "0");
assert_eq!(format!("{:?}", r), "Ref(0)");
let buf = Align::<[u8; 8], u64>::default();
let r = Ref::<_, [u64]>::new_slice(&buf.t[..]).unwrap();
assert_eq!(format!("{:?}", r), "Ref([0])");
}
#[test]
fn test_eq() {
let buf1 = 0_u64;
let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
let buf2 = 0_u64;
let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
assert_eq!(r1, r2);
}
#[test]
fn test_ne() {
let buf1 = 0_u64;
let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
let buf2 = 1_u64;
let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
assert_ne!(r1, r2);
}
#[test]
fn test_ord() {
let buf1 = 0_u64;
let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
let buf2 = 1_u64;
let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
assert!(r1 < r2);
}
#[test]
fn test_new_zeroed() {
assert!(!bool::new_zeroed());
assert_eq!(u64::new_zeroed(), 0);
#[allow(clippy::unit_cmp)]
{
assert_eq!(<()>::new_zeroed(), ());
}
}
#[test]
fn test_transparent_packed_generic_struct() {
#[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
#[repr(transparent)]
#[allow(dead_code)]
struct Foo<T> {
_t: T,
_phantom: PhantomData<()>,
}
assert_impl_all!(Foo<u32>: FromZeroes, FromBytes, AsBytes);
assert_impl_all!(Foo<u8>: Unaligned);
#[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
#[repr(packed)]
#[allow(dead_code)]
struct Bar<T, U> {
_t: T,
_u: U,
}
assert_impl_all!(Bar<u8, AU64>: FromZeroes, FromBytes, AsBytes, Unaligned);
}
#[test]
fn test_impls() {
use core::borrow::Borrow;
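// Provides test cases for `TryFromBytes::is_bit_valid`: values whose bit
// patterns are valid instances of `Self`, and byte sequences which are
// not. The blanket impl covers `T: FromBytes`, for which every bit
// pattern (here, all zeroes and all 0xFF) is valid and no failing cases
// exist.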
trait TryFromBytesTestable {
fn with_passing_test_cases<F: Fn(&Self)>(f: F);
fn with_failing_test_cases<F: Fn(&[u8])>(f: F);
}
impl<T: FromBytes> TryFromBytesTestable for T {
fn with_passing_test_cases<F: Fn(&Self)>(f: F) {
f(&Self::new_zeroed());
let ffs = {
let mut t = Self::new_zeroed();
let ptr: *mut T = &mut t;
unsafe { ptr::write_bytes(ptr.cast::<u8>(), 0xFF, mem::size_of::<T>()) };
t
};
f(&ffs);
}
fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {}
}
macro_rules! impl_try_from_bytes_testable {
(=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {};
($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
impl TryFromBytesTestable for $ty {
impl_try_from_bytes_testable!(
@methods @success $($success_case),*
$(, @failure $($failure_case),*)?
);
}
impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?);
};
($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => {
$(
impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*);
)*
};
(@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
fn with_passing_test_cases<F: Fn(&Self)>(_f: F) {
$(
_f($success_case.borrow());
)*
}
fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {
$($(
let case = $failure_case.as_bytes();
_f(case.as_bytes());
)*)?
}
};
}
impl_try_from_bytes_testable!(
bool => @success true, false,
@failure 2u8, 3u8, 0xFFu8;
char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}',
@failure 0xD800u32, 0xDFFFu32, 0x110000u32;
str => @success "", "hello", "❤️🧡💛💚💙💜",
@failure [0, 159, 146, 150];
[u8] => @success [], [0, 1, 2];
NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32,
NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128,
NonZeroUsize, NonZeroIsize
=> @success Self::new(1).unwrap(),
@failure Option::<Self>::None;
[bool]
=> @success [true, false], [false, true],
@failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8];
);
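// Statically asserts that a type does (or, prefixed with `!`, does not)
// implement a set of traits; for `TryFromBytes`, it also dynamically runs
// the `TryFromBytesTestable` cases through `is_bit_valid` and
// `try_from_ref`. For example, `assert_impls!(bool: TryFromBytes,
// !FromBytes)` asserts that `bool` implements `TryFromBytes` but not
// `FromBytes`.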
macro_rules! assert_impls {
($ty:ty: TryFromBytes) => {
<$ty as TryFromBytesTestable>::with_passing_test_cases(|val| {
let c = Ptr::from(val);
let res = unsafe { <$ty as TryFromBytes>::is_bit_valid(c) };
assert!(res, "{}::is_bit_valid({:?}): got false, expected true", stringify!($ty), val);
});
#[allow(clippy::as_conversions)]
<$ty as TryFromBytesTestable>::with_failing_test_cases(|c| {
let res = <$ty as TryFromBytes>::try_from_ref(c);
assert!(res.is_none(), "{}::is_bit_valid({:?}): got true, expected false", stringify!($ty), c);
});
#[allow(dead_code)]
const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); };
};
($ty:ty: $trait:ident) => {
#[allow(dead_code)]
const _: () = { static_assertions::assert_impl_all!($ty: $trait); };
};
($ty:ty: !$trait:ident) => {
#[allow(dead_code)]
const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); };
};
($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => {
$(
assert_impls!($ty: $trait);
)*
$(
assert_impls!($ty: !$negative_trait);
)*
};
}
assert_impls!((): KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_impls!(u8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_impls!(i8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_impls!(u16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(i16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(u32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(i32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(u64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(i64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(u128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(i128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(usize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(isize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(f32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(f64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(bool: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
assert_impls!(char: KnownLayout, TryFromBytes, FromZeroes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(str: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
assert_impls!(NonZeroU8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
assert_impls!(NonZeroI8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
assert_impls!(NonZeroU16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(NonZeroI16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(NonZeroU32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(NonZeroI32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(NonZeroU64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(NonZeroI64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(NonZeroU128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(NonZeroI128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(NonZeroUsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(NonZeroIsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
assert_impls!(Option<NonZeroU8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_impls!(Option<NonZeroI8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_impls!(Option<NonZeroU16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(Option<NonZeroI16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(Option<NonZeroU32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(Option<NonZeroI32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(Option<NonZeroU64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(Option<NonZeroI64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(Option<NonZeroU128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(Option<NonZeroI128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(Option<NonZeroUsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
assert_impls!(Option<NonZeroIsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
struct NotZerocopy;
#[rustfmt::skip]
type FnManyArgs = fn(
NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
) -> (NotZerocopy, NotZerocopy);
#[allow(improper_ctypes_definitions)]
#[rustfmt::skip]
type ECFnManyArgs = extern "C" fn(
NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
) -> (NotZerocopy, NotZerocopy);
#[cfg(feature = "alloc")]
assert_impls!(Option<Box<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<Box<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<&'static UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<&'static [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<&'static mut UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<&'static mut [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<NonNull<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<NonNull<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<FnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<extern "C" fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Option<ECFnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(PhantomData<NotZerocopy>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_impls!(PhantomData<[u8]>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_impls!(ManuallyDrop<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
assert_impls!(ManuallyDrop<[u8]>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
assert_impls!(ManuallyDrop<NotZerocopy>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(ManuallyDrop<[NotZerocopy]>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(MaybeUninit<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, Unaligned, !AsBytes);
assert_impls!(MaybeUninit<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Wrapping<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_impls!(Wrapping<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(Unalign<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
assert_impls!(Unalign<NotZerocopy>: Unaligned, !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes);
assert_impls!([u8]: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
assert_impls!([bool]: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
assert_impls!([NotZerocopy]: !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!([u8; 0]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
assert_impls!([NotZerocopy; 0]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!([u8; 1]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
assert_impls!([NotZerocopy; 1]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(*const NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(*mut NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(*const [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(*mut [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(*const dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
assert_impls!(*mut dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
#[cfg(feature = "simd")]
{
#[allow(unused_macros)]
macro_rules! test_simd_arch_mod {
($arch:ident, $($typ:ident),*) => {
{
use core::arch::$arch::{$($typ),*};
use crate::*;
$( assert_impls!($typ: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); )*
}
};
}
#[cfg(target_arch = "x86")]
test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
#[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i);
#[cfg(target_arch = "x86_64")]
test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
#[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i);
#[cfg(target_arch = "wasm32")]
test_simd_arch_mod!(wasm32, v128);
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
test_simd_arch_mod!(
powerpc,
vector_bool_long,
vector_double,
vector_signed_long,
vector_unsigned_long
);
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
test_simd_arch_mod!(
powerpc64,
vector_bool_long,
vector_double,
vector_signed_long,
vector_unsigned_long
);
#[cfg(target_arch = "aarch64")]
#[rustfmt::skip]
test_simd_arch_mod!(
aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
uint64x1_t, uint64x2_t
);
#[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
#[rustfmt::skip]
test_simd_arch_mod!(arm, int8x4_t, uint8x4_t);
}
}
}
#[cfg(kani)]
mod proofs {
use super::*;
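// These proofs check that `DstLayout`'s layout arithmetic agrees with
// `core::alloc::Layout` wherever the two overlap.
//
// The `kani::Arbitrary` impls below generate well-formed inputs:
// alignments are powers of two, and sizes stay within `isize::MAX`.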
impl kani::Arbitrary for DstLayout {
fn any() -> Self {
let align: NonZeroUsize = kani::any();
let size_info: SizeInfo = kani::any();
kani::assume(align.is_power_of_two());
kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN);
kani::assume(
match size_info {
SizeInfo::Sized { _size } => Layout::from_size_align(_size, align.get()),
SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
Layout::from_size_align(_offset, align.get())
}
}
.is_ok(),
);
Self { align, size_info }
}
}
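// Generates either an arbitrary sized layout (with size at most
// `isize::MAX`) or an arbitrary slice-DST layout.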
impl kani::Arbitrary for SizeInfo {
fn any() -> Self {
let is_sized: bool = kani::any();
if is_sized {
let size: usize = kani::any();
kani::assume(size <= isize::MAX as usize);
SizeInfo::Sized { _size: size }
} else {
SizeInfo::SliceDst(kani::any())
}
}
}
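// Generates an arbitrary trailing-slice layout whose offset and element
// size each stay below `isize::MAX`.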
impl kani::Arbitrary for TrailingSliceLayout {
fn any() -> Self {
let elem_size: usize = kani::any();
let offset: usize = kani::any();
kani::assume(elem_size < isize::MAX as usize);
kani::assume(offset < isize::MAX as usize);
TrailingSliceLayout { _elem_size: elem_size, _offset: offset }
}
}
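// Prove that `DstLayout::extend` computes the same alignment, field
// offset, and size as `core::alloc::Layout::extend` whenever the latter
// succeeds.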
#[kani::proof]
fn prove_dst_layout_extend() {
use crate::util::{core_layout::padding_needed_for, max, min};
let base: DstLayout = kani::any();
let field: DstLayout = kani::any();
let packed: Option<NonZeroUsize> = kani::any();
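// `packed` models `repr(packed(N))`: `N` must be a power of two, and the
// base layout is assumed to already respect the alignment cap.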
if let Some(max_align) = packed {
kani::assume(max_align.is_power_of_two());
kani::assume(base.align <= max_align);
}
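// `extend` panics when the base is a slice DST (proved below in
// `prove_dst_layout_extend_dst_panics`), so restrict the base to sized
// layouts here.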
kani::assume(matches!(base.size_info, SizeInfo::Sized { .. }));
let base_size = if let SizeInfo::Sized { _size: size } = base.size_info {
size
} else {
unreachable!();
};
let composite = base.extend(field, packed);
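// Compute the expected values by hand: the field's effective alignment
// is capped by `packed`, and the field is placed at the first suitably
// aligned offset past the base.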
let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN));
assert_eq!(composite.align, max(base.align, field_align));
let padding = padding_needed_for(base_size, field_align);
let offset = base_size + padding;
let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap();
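// Compare against `core::alloc::Layout`, which performs the same
// arithmetic for sized layouts.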
match field.size_info {
SizeInfo::Sized { _size: field_size } => {
if let SizeInfo::Sized { _size: composite_size } = composite.size_info {
assert_eq!(composite_size, offset + field_size);
let field_analog =
Layout::from_size_align(field_size, field_align.get()).unwrap();
if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
{
assert_eq!(actual_offset, offset);
assert_eq!(actual_composite.size(), composite_size);
assert_eq!(actual_composite.align(), composite.align.get());
} else {
// `Layout::extend` errors when the padded size would overflow
// `isize::MAX`; in that case there is no std analog to compare against.
}
} else {
panic!("The composite of two sized layouts must be sized.")
}
}
SizeInfo::SliceDst(TrailingSliceLayout {
_offset: field_offset,
_elem_size: field_elem_size,
}) => {
if let SizeInfo::SliceDst(TrailingSliceLayout {
_offset: composite_offset,
_elem_size: composite_elem_size,
}) = composite.size_info
{
assert_eq!(composite_offset, offset + field_offset);
assert_eq!(composite_elem_size, field_elem_size);
let field_analog =
Layout::from_size_align(field_offset, field_align.get()).unwrap();
if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
{
assert_eq!(actual_offset, offset);
assert_eq!(actual_composite.size(), composite_offset);
assert_eq!(actual_composite.align(), composite.align.get());
} else {
// As above, `Layout::extend` may fail on overflow; there is nothing to
// compare against in that case.
}
} else {
panic!("The extension of a layout with a DST must result in a DST.")
}
}
}
}
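// Prove that extending a base layout that ends in a trailing slice
// always panics: no field may be placed after a slice DST.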
#[kani::proof]
#[kani::should_panic]
fn prove_dst_layout_extend_dst_panics() {
let base: DstLayout = kani::any();
let field: DstLayout = kani::any();
let packed: Option<NonZeroUsize> = kani::any();
if let Some(max_align) = packed {
kani::assume(max_align.is_power_of_two());
kani::assume(base.align <= max_align);
}
kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..)));
let _ = base.extend(field, packed);
}
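// Prove that `DstLayout::pad_to_align` preserves alignment, agrees with
// `Layout::pad_to_align` for sized layouts, and leaves slice-DST layouts
// unchanged (their size is only knowable at runtime).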
#[kani::proof]
fn prove_dst_layout_pad_to_align() {
use crate::util::core_layout::padding_needed_for;
let layout: DstLayout = kani::any();
let padded: DstLayout = layout.pad_to_align();
assert_eq!(padded.align, layout.align);
if let SizeInfo::Sized { _size: unpadded_size } = layout.size_info {
if let SizeInfo::Sized { _size: padded_size } = padded.size_info {
let padding = padding_needed_for(unpadded_size, layout.align);
assert_eq!(padded_size, unpadded_size + padding);
let layout_analog =
Layout::from_size_align(unpadded_size, layout.align.get()).unwrap();
let padded_analog = layout_analog.pad_to_align();
assert_eq!(padded_analog.align(), layout.align.get());
assert_eq!(padded_analog.size(), padded_size);
} else {
panic!("The padding of a sized layout must result in a sized layout.")
}
} else {
assert_eq!(padded.size_info, layout.size_info);
}
}
}