#![allow(unknown_lints)]
#![deny(renamed_and_removed_lints)]
#![deny(
anonymous_parameters,
deprecated_in_future,
illegal_floating_point_literal_pattern,
late_bound_lifetime_arguments,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
path_statements,
patterns_in_fns_without_body,
rust_2018_idioms,
trivial_numeric_casts,
unreachable_pub,
unsafe_op_in_unsafe_fn,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
#![deny(
clippy::all,
clippy::alloc_instead_of_core,
clippy::arithmetic_side_effects,
clippy::as_underscore,
clippy::assertions_on_result_states,
clippy::as_conversions,
clippy::correctness,
clippy::dbg_macro,
clippy::decimal_literal_representation,
clippy::get_unwrap,
clippy::indexing_slicing,
clippy::missing_safety_doc,
clippy::obfuscated_if_else,
clippy::perf,
clippy::print_stdout,
clippy::std_instead_of_core,
clippy::style,
clippy::suspicious,
clippy::todo,
clippy::undocumented_unsafe_blocks,
clippy::unimplemented,
clippy::unnested_or_patterns,
clippy::unwrap_used,
clippy::use_debug
)]
#![allow(clippy::vec_init_then_push)]
#![deny(
rustdoc::bare_urls,
rustdoc::broken_intra_doc_links,
rustdoc::invalid_codeblock_attributes,
rustdoc::invalid_html_tags,
rustdoc::invalid_rust_codeblocks,
rustdoc::missing_crate_level_docs,
rustdoc::private_intra_doc_links
)]
#![cfg_attr(test, allow(
// In tests, you get line numbers and have access to source code, so panic
// messages are less important. You also often unwrap a lot, which would
// make expect'ing instead very verbose.
clippy::unwrap_used,
// In tests, there's no harm to "panic risks" - the worst that can happen is
// that your test will fail, and you'll fix it. By contrast, panic risks in
// production code introduce the possibly of code panicking unexpectedly "in
// the field".
clippy::arithmetic_side_effects,
clippy::indexing_slicing,
))]
#![cfg_attr(not(test), no_std)]
#![cfg_attr(feature = "simd-nightly", feature(stdsimd))]
pub mod byteorder;
#[doc(hidden)]
pub mod derive_util;
pub use crate::byteorder::*;
pub use zerocopy_derive::*;
use core::{
cell::{Ref, RefMut},
cmp::Ordering,
fmt::{self, Debug, Display, Formatter},
hash::{Hash, Hasher},
marker::PhantomData,
mem::{self, ManuallyDrop, MaybeUninit},
num::{
NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
},
ops::{Deref, DerefMut},
ptr, slice,
};
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use {
alloc::boxed::Box,
alloc::vec::Vec,
core::{alloc::Layout, ptr::NonNull},
};
// Re-export the crate under the name `zerocopy` so that the paths emitted by
// `zerocopy_derive` (which reference `zerocopy::...`) also resolve when the
// derives are used inside this crate itself.
mod zerocopy {
pub(crate) use crate::*;
}
/// Types for which any sequence of `size_of::<Self>()` initialized bytes is a
/// valid value.
///
/// # Safety
///
/// An `unsafe impl` promises that *every* byte pattern is a valid `Self`; the
/// default methods below rely on that promise (`mem::zeroed`, zeroed heap
/// allocations). Only the custom derive should implement this trait.
pub unsafe trait FromBytes {
// "Seal" method: only `#[derive(FromBytes)]` can provide it, so only the
// derive can implement the trait.
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
/// Reads a `Self` from `bytes`; `None` unless `bytes.len()` is exactly
/// `size_of::<Self>()`.
fn read_from<B: ByteSlice>(bytes: B) -> Option<Self>
where
Self: Sized,
{
// `Unalign<Self>` removes the alignment requirement, so only the length
// is validated here.
let lv = LayoutVerified::<_, Unalign<Self>>::new_unaligned(bytes)?;
Some(lv.read().into_inner())
}
/// Reads a `Self` from the first `size_of::<Self>()` bytes of `bytes`.
fn read_from_prefix<B: ByteSlice>(bytes: B) -> Option<Self>
where
Self: Sized,
{
let (lv, _suffix) = LayoutVerified::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes)?;
Some(lv.read().into_inner())
}
/// Reads a `Self` from the last `size_of::<Self>()` bytes of `bytes`.
fn read_from_suffix<B: ByteSlice>(bytes: B) -> Option<Self>
where
Self: Sized,
{
let (_prefix, lv) = LayoutVerified::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes)?;
Some(lv.read().into_inner())
}
/// Returns an all-zeroes value of `Self`.
fn new_zeroed() -> Self
where
Self: Sized,
{
// SAFETY: `Self: FromBytes` guarantees the all-zeroes bit pattern is a
// valid `Self`.
unsafe { mem::zeroed() }
}
/// Returns a heap-allocated, all-zeroes `Self`.
///
/// # Panics
///
/// Aborts via `handle_alloc_error` if allocation fails.
#[cfg(feature = "alloc")]
fn new_box_zeroed() -> Box<Self>
where
Self: Sized,
{
// Zero-sized layouts must not be passed to the allocator; build the box
// from a plain value instead.
let layout = Layout::new::<Self>();
if layout.size() == 0 {
return Box::new(Self::new_zeroed());
}
// SAFETY: `layout` is non-zero-sized here. `alloc_zeroed` returns either
// null (handled below) or `size_of::<Self>()` zeroed bytes, which
// `Self: FromBytes` makes a valid `Self` suitable for `Box::from_raw`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
let ptr = alloc::alloc::alloc_zeroed(layout).cast::<Self>();
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
Box::from_raw(ptr)
}
}
/// Returns a heap-allocated boxed slice of `len` all-zeroes `Self`s.
///
/// # Panics
///
/// Panics if `size_of::<Self>() * len` overflows `usize` or `isize`; aborts
/// via `handle_alloc_error` if allocation fails.
#[cfg(feature = "alloc")]
fn new_box_slice_zeroed(len: usize) -> Box<[Self]>
where
Self: Sized,
{
let layout = Layout::from_size_align(
mem::size_of::<Self>()
.checked_mul(len)
.expect("mem::size_of::<Self>() * len overflows `usize`"),
mem::align_of::<Self>(),
)
.expect("total allocation size overflows `isize`");
// SAFETY: for a non-zero size, `alloc_zeroed` yields `len *
// size_of::<Self>()` zeroed bytes — a valid `[Self]` since `Self:
// FromBytes`. For a zero size (ZST element or `len == 0`) the allocator
// must not be called; a dangling, well-aligned pointer is the valid
// representation of an empty/ZST slice allocation.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
if layout.size() != 0 {
let ptr = alloc::alloc::alloc_zeroed(layout).cast::<Self>();
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
Box::from_raw(slice::from_raw_parts_mut(ptr, len))
} else {
Box::from_raw(slice::from_raw_parts_mut(NonNull::<Self>::dangling().as_ptr(), len))
}
}
}
/// Returns a `Vec` of `len` all-zeroes `Self`s.
#[cfg(feature = "alloc")]
fn new_vec_zeroed(len: usize) -> Vec<Self>
where
Self: Sized,
{
Self::new_box_slice_zeroed(len).into()
}
}
/// Types that can be viewed as raw bytes.
///
/// # Safety
///
/// An `unsafe impl` promises that `Self` contains no padding and no
/// uninitialized bytes, so all `size_of_val(self)` bytes may be exposed as a
/// `[u8]`. Only the custom derive should implement this trait.
pub unsafe trait AsBytes {
// "Seal" method: restricts implementation to the custom derive.
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
/// Views `self` as an immutable byte slice.
fn as_bytes(&self) -> &[u8] {
// SAFETY: `Self: AsBytes` guarantees all `size_of_val(self)` bytes are
// initialized with no padding, and the borrow of `self` keeps that
// memory alive and un-aliased for the returned lifetime.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
let len = mem::size_of_val(self);
let slf: *const Self = self;
slice::from_raw_parts(slf.cast::<u8>(), len)
}
}
/// Views `self` as a mutable byte slice. The extra `Self: FromBytes` bound
/// makes mutation sound: whatever bytes the caller writes still form a
/// valid `Self`.
fn as_bytes_mut(&mut self) -> &mut [u8]
where
Self: FromBytes,
{
// SAFETY: as in `as_bytes`; writes are sound because `Self: FromBytes`
// accepts every byte pattern.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
let len = mem::size_of_val(self);
let slf: *mut Self = self;
slice::from_raw_parts_mut(slf.cast::<u8>(), len)
}
}
/// Copies `self` into `bytes`; `None` if the lengths differ.
fn write_to<B: ByteSliceMut>(&self, mut bytes: B) -> Option<()> {
if bytes.len() != mem::size_of_val(self) {
return None;
}
bytes.copy_from_slice(self.as_bytes());
Some(())
}
/// Copies `self` into the front of `bytes`; `None` if `bytes` is too short.
fn write_to_prefix<B: ByteSliceMut>(&self, mut bytes: B) -> Option<()> {
let size = mem::size_of_val(self);
bytes.get_mut(..size)?.copy_from_slice(self.as_bytes());
Some(())
}
/// Copies `self` into the back of `bytes`; `None` if `bytes` is too short.
fn write_to_suffix<B: ByteSliceMut>(&self, mut bytes: B) -> Option<()> {
// `checked_sub` doubles as the length check.
let start = bytes.len().checked_sub(mem::size_of_val(self))?;
bytes
.get_mut(start..)
.expect("`start` should be in-bounds of `bytes`")
.copy_from_slice(self.as_bytes());
Some(())
}
}
/// Types with no alignment requirement (`align_of::<Self>() == 1`), so a
/// reference to `Self` may point at any address.
///
/// # Safety
///
/// An `unsafe impl` promises `align_of::<Self>() == 1`; the `new_unaligned`
/// constructors and `Unalign`'s `Deref` rely on it. Only the custom derive
/// should implement this trait.
pub unsafe trait Unaligned {
// "Seal" method: only the derive can provide it.
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
}
// Wraps `unsafe_impl!`/`assert_unaligned!` invocations in `const _: () = ...`
// and *requires* the call site to begin with a `/// SAFETY:` doc comment
// (which expands to `#[doc = r" SAFETY:"]`). A written safety argument is
// therefore mandatory at every call site, which is why blanket-allowing
// `clippy::undocumented_unsafe_blocks` inside the expansion is acceptable.
macro_rules! safety_comment {
(#[doc = r" SAFETY:"] $(#[doc = $_doc:literal])* $($macro:ident!$args:tt;)*) => {
#[allow(clippy::undocumented_unsafe_blocks)]
const _: () = { $($macro!$args;)* };
}
}
// Emits an `unsafe impl` of a zerocopy marker trait, supplying the hidden
// "seal" method. Arms cover: a concrete type with one trait; a concrete type
// with several traits (recursing on the first arm); and generic impls with
// the various bound shapes used below (no bound, `?Sized`, one bound,
// `?Sized + bound`, and a type-plus-const-parameter pair for arrays).
macro_rules! unsafe_impl {
($ty:ty: $trait:ty) => {
unsafe impl $trait for $ty { fn only_derive_is_allowed_to_implement_this_trait() {} }
};
($ty:ty: $($traits:ty),*) => {
$( unsafe_impl!($ty: $traits); )*
};
($tyvar:ident => $trait:ident for $ty:ty) => {
unsafe impl<$tyvar> $trait for $ty { fn only_derive_is_allowed_to_implement_this_trait() {} }
};
($tyvar:ident: ?Sized => $trait:ident for $ty:ty) => {
unsafe impl<$tyvar: ?Sized> $trait for $ty { fn only_derive_is_allowed_to_implement_this_trait() {} }
};
($tyvar:ident: $bound:path => $trait:ident for $ty:ty) => {
unsafe impl<$tyvar: $bound> $trait for $ty { fn only_derive_is_allowed_to_implement_this_trait() {} }
};
($tyvar:ident: ?Sized + $bound:path => $trait:ident for $ty:ty) => {
unsafe impl<$tyvar: ?Sized + $bound> $trait for $ty { fn only_derive_is_allowed_to_implement_this_trait() {} }
};
($tyvar:ident: $bound:path, const $constvar:ident: $constty:ty => $trait:ident for $ty:ty) => {
unsafe impl<$tyvar: $bound, const $constvar: $constty> $trait for $ty {
fn only_derive_is_allowed_to_implement_this_trait() {}
}
};
}
// In test builds, statically asserts `align_of::<$ty>() == 1`, backing the
// `Unaligned` impls made with `unsafe_impl!`.
macro_rules! assert_unaligned {
($ty:ty) => {
#[cfg(test)]
static_assertions::const_assert_eq!(core::mem::align_of::<$ty>(), 1);
};
($($ty:ty),*) => {
$(assert_unaligned!($ty);)*
};
}
safety_comment! {
/// SAFETY:
/// `()` is a zero-sized type: it has no bytes that could be invalid or
/// uninitialized, and `align_of::<()>() == 1`.
unsafe_impl!((): FromBytes, AsBytes, Unaligned);
assert_unaligned!(());
}
safety_comment! {
/// SAFETY:
/// The integer primitives have no padding and admit every bit pattern, so
/// they are `FromBytes` and `AsBytes`. `u8`/`i8` additionally have
/// alignment 1, making them `Unaligned` as well.
unsafe_impl!(u8: FromBytes, AsBytes, Unaligned);
unsafe_impl!(i8: FromBytes, AsBytes, Unaligned);
assert_unaligned!(u8, i8);
unsafe_impl!(u16: FromBytes, AsBytes);
unsafe_impl!(i16: FromBytes, AsBytes);
unsafe_impl!(u32: FromBytes, AsBytes);
unsafe_impl!(i32: FromBytes, AsBytes);
unsafe_impl!(u64: FromBytes, AsBytes);
unsafe_impl!(i64: FromBytes, AsBytes);
unsafe_impl!(u128: FromBytes, AsBytes);
unsafe_impl!(i128: FromBytes, AsBytes);
unsafe_impl!(usize: FromBytes, AsBytes);
unsafe_impl!(isize: FromBytes, AsBytes);
}
safety_comment! {
/// SAFETY:
/// IEEE-754 floats have no padding and every bit pattern is a valid float
/// (including the NaN encodings), so both traits hold.
unsafe_impl!(f32: FromBytes, AsBytes);
unsafe_impl!(f64: FromBytes, AsBytes);
}
safety_comment! {
/// SAFETY:
/// A `bool` is a single initialized byte, so `AsBytes` holds; it is *not*
/// `FromBytes` because only 0 and 1 are valid. Size and alignment are 1,
/// so `Unaligned` holds.
unsafe_impl!(bool: AsBytes, Unaligned);
assert_unaligned!(bool);
}
safety_comment! {
/// SAFETY:
/// A `char` is a fully-initialized `u32`, so `AsBytes` holds; it is not
/// `FromBytes` because only Unicode scalar values are valid.
unsafe_impl!(char: AsBytes);
}
safety_comment! {
/// SAFETY:
/// `str` has the same layout as `[u8]`, so `AsBytes` and `Unaligned`
/// hold; it is not `FromBytes` because the bytes must be valid UTF-8.
unsafe_impl!(str: AsBytes, Unaligned);
}
safety_comment! {
/// SAFETY:
/// The `NonZero` types have the same layout as their underlying integers
/// with no padding, so `AsBytes` holds; none of them is `FromBytes`
/// because the all-zeroes pattern is invalid. The 8-bit variants have
/// alignment 1, so they are also `Unaligned`.
unsafe_impl!(NonZeroU8: AsBytes, Unaligned);
unsafe_impl!(NonZeroI8: AsBytes, Unaligned);
assert_unaligned!(NonZeroU8, NonZeroI8);
unsafe_impl!(NonZeroU16: AsBytes);
unsafe_impl!(NonZeroI16: AsBytes);
unsafe_impl!(NonZeroU32: AsBytes);
unsafe_impl!(NonZeroI32: AsBytes);
unsafe_impl!(NonZeroU64: AsBytes);
unsafe_impl!(NonZeroI64: AsBytes);
unsafe_impl!(NonZeroU128: AsBytes);
unsafe_impl!(NonZeroI128: AsBytes);
unsafe_impl!(NonZeroUsize: AsBytes);
unsafe_impl!(NonZeroIsize: AsBytes);
}
safety_comment! {
/// SAFETY:
/// The niche optimization guarantees `Option<NonZeroXxx>` has the same
/// size and alignment as the underlying integer, with `None` represented
/// as all-zeroes, so every bit pattern is valid and both `FromBytes` and
/// `AsBytes` hold.
unsafe_impl!(Option<NonZeroU8>: FromBytes, AsBytes, Unaligned);
unsafe_impl!(Option<NonZeroI8>: FromBytes, AsBytes, Unaligned);
assert_unaligned!(Option<NonZeroU8>, Option<NonZeroI8>);
unsafe_impl!(Option<NonZeroU16>: FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroI16>: FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroU32>: FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroI32>: FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroU64>: FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroI64>: FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroU128>: FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroI128>: FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroUsize>: FromBytes, AsBytes);
unsafe_impl!(Option<NonZeroIsize>: FromBytes, AsBytes);
}
safety_comment! {
/// SAFETY:
/// `PhantomData<T>` is a zero-sized type with alignment 1 regardless of
/// `T`, so all three traits hold trivially.
unsafe_impl!(T: ?Sized => FromBytes for PhantomData<T>);
unsafe_impl!(T: ?Sized => AsBytes for PhantomData<T>);
unsafe_impl!(T: ?Sized => Unaligned for PhantomData<T>);
assert_unaligned!(PhantomData<()>, PhantomData<u8>, PhantomData<u64>);
}
safety_comment! {
/// SAFETY:
/// `Wrapping<T>` is a `#[repr(transparent)]` wrapper around `T`, so it
/// inherits each property from `T`.
unsafe_impl!(T: FromBytes => FromBytes for Wrapping<T>);
unsafe_impl!(T: AsBytes => AsBytes for Wrapping<T>);
unsafe_impl!(T: Unaligned => Unaligned for Wrapping<T>);
assert_unaligned!(Wrapping<()>, Wrapping<u8>);
}
safety_comment! {
/// SAFETY:
/// Any byte pattern — even uninitialized — is a valid `MaybeUninit<T>`,
/// so `FromBytes` holds unconditionally; `AsBytes` does *not* (the bytes
/// may be uninitialized). `MaybeUninit<T>` has `T`'s alignment, so
/// `Unaligned` is inherited.
unsafe_impl!(T => FromBytes for MaybeUninit<T>);
unsafe_impl!(T: Unaligned => Unaligned for MaybeUninit<T>);
assert_unaligned!(MaybeUninit<()>, MaybeUninit<u8>);
}
safety_comment! {
/// SAFETY:
/// `ManuallyDrop<T>` is a `#[repr(transparent)]` wrapper around `T`, so
/// it inherits each property from `T`.
unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop<T>);
unsafe_impl!(T: ?Sized + AsBytes => AsBytes for ManuallyDrop<T>);
unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop<T>);
assert_unaligned!(ManuallyDrop<()>, ManuallyDrop<u8>);
}
safety_comment! {
/// SAFETY:
/// Arrays and slices are contiguous runs of their element type with no
/// extra padding, so each property holds element-wise.
unsafe_impl!(T: FromBytes, const N: usize => FromBytes for [T; N]);
unsafe_impl!(T: AsBytes, const N: usize => AsBytes for [T; N]);
unsafe_impl!(T: Unaligned, const N: usize => Unaligned for [T; N]);
assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]);
unsafe_impl!(T: FromBytes => FromBytes for [T]);
unsafe_impl!(T: AsBytes => AsBytes for [T]);
unsafe_impl!(T: Unaligned => Unaligned for [T]);
}
// SIMD vector support, gated on the "simd" feature (plus "simd-nightly" for
// architectures whose vector types are not yet stable).
#[cfg(feature = "simd")]
mod simd {
// Generates one module per `$arch` implementing `FromBytes` and `AsBytes`
// for each listed vector type from `core::arch::$arch`.
#[allow(unused_macros)] macro_rules! simd_arch_mod {
($arch:ident, $($typ:ident),*) => {
mod $arch {
use core::arch::$arch::{$($typ),*};
use crate::*;
safety_comment! {
/// SAFETY:
/// The listed vector types are bags of integer/float lanes with no
/// invalid bit patterns and no padding, so `FromBytes` and
/// `AsBytes` hold -- NOTE(review): confirm against each
/// architecture reference when adding new types.
$( unsafe_impl!($typ: FromBytes, AsBytes); )*
}
}
};
}
#[cfg(target_arch = "x86")]
simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
#[cfg(target_arch = "x86_64")]
simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
#[cfg(target_arch = "wasm32")]
simd_arch_mod!(wasm32, v128);
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
simd_arch_mod!(
powerpc,
vector_bool_long,
vector_double,
vector_signed_long,
vector_unsigned_long
);
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
simd_arch_mod!(
powerpc64,
vector_bool_long,
vector_double,
vector_signed_long,
vector_unsigned_long
);
#[cfg(target_arch = "aarch64")]
#[rustfmt::skip]
simd_arch_mod!(
aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
uint64x1_t, uint64x2_t
);
#[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
#[rustfmt::skip]
simd_arch_mod!(arm, int8x4_t, uint8x4_t);
}
/// A wrapper that erases `T`'s alignment requirement: `repr(packed)` gives
/// `Unalign<T>` alignment 1, so it can live at any address.
#[allow(missing_debug_implementations)]
#[derive(FromBytes, Unaligned, Default, Copy)]
#[repr(C, packed)]
pub struct Unalign<T>(T);
// Manual `Clone` rather than `derive(Clone)`: copying `*self` wholesale never
// takes a (possibly unaligned) reference to the packed field.
impl<T: Copy> Clone for Unalign<T> {
fn clone(&self) -> Unalign<T> {
*self
}
}
impl<T> Unalign<T> {
/// Wraps `val`, erasing its alignment requirement.
pub const fn new(val: T) -> Unalign<T> {
Unalign(val)
}
/// Consumes `self`, returning the wrapped `T` (the move itself restores
/// alignment).
pub const fn into_inner(self) -> T {
// A union reinterpret is used instead of `mem::transmute` so this can be
// a `const fn` over a generic `T` -- NOTE(review): transmuting generic
// types in const context is rejected by the compiler; confirm this is
// why the union is needed.
#[repr(C)]
union Transmute<T> {
u: ManuallyDrop<Unalign<T>>,
t: ManuallyDrop<T>,
}
// SAFETY: `Unalign<T>` is `repr(C, packed)` over a single `T`, so it has
// the same size as `T` and holds a valid `T`; reading the `t` view of
// the union yields the wrapped value exactly once (no double drop:
// both fields are `ManuallyDrop`).
unsafe { ManuallyDrop::into_inner(Transmute { u: ManuallyDrop::new(self) }.t) }
}
/// Returns a reference to the wrapped `T` if `self` happens to be aligned
/// for `T`; `None` otherwise.
pub fn try_deref(&self) -> Option<&T> {
if !aligned_to::<_, T>(self) {
return None;
}
// SAFETY: the runtime check above confirmed that `self` (and hence its
// sole field at offset 0) is aligned for `T`.
unsafe { Some(self.deref_unchecked()) }
}
/// Mutable counterpart of [`try_deref`](Self::try_deref).
pub fn try_deref_mut(&mut self) -> Option<&mut T> {
if !aligned_to::<_, T>(&*self) {
return None;
}
// SAFETY: alignment was just checked, as in `try_deref`.
unsafe { Some(self.deref_mut_unchecked()) }
}
/// Returns a reference to the wrapped `T` without checking alignment.
///
/// # Safety
///
/// The caller must guarantee that `self` is aligned for `T`.
pub const unsafe fn deref_unchecked(&self) -> &T {
// SAFETY: the pointer comes from `self`, so it is valid and non-null;
// alignment is the caller's obligation per this method's contract.
unsafe { &*self.get_ptr() }
}
/// Mutable counterpart of [`deref_unchecked`](Self::deref_unchecked).
///
/// # Safety
///
/// The caller must guarantee that `self` is aligned for `T`.
pub unsafe fn deref_mut_unchecked(&mut self) -> &mut T {
// SAFETY: as in `deref_unchecked`; uniqueness comes from `&mut self`.
unsafe { &mut *self.get_mut_ptr() }
}
/// Returns a (possibly unaligned) raw pointer to the wrapped `T`.
/// `addr_of!` is used because taking `&self.0` on a packed struct would
/// create an unaligned reference, which is undefined behavior.
pub const fn get_ptr(&self) -> *const T {
ptr::addr_of!(self.0)
}
/// Mutable counterpart of [`get_ptr`](Self::get_ptr).
pub fn get_mut_ptr(&mut self) -> *mut T {
ptr::addr_of_mut!(self.0)
}
/// Replaces the wrapped value.
pub fn set(&mut self, t: T) {
*self = Unalign::new(t);
}
/// Runs `f` on an aligned copy of the wrapped value, then writes the
/// (possibly modified) copy back into `self`.
pub fn update<O, F: FnOnce(&mut T) -> O>(&mut self, f: F) -> O {
// Drop guard: writes the copy back into `self` even if `f` panics, so
// safe code can never observe `self` holding a duplicated/moved-out
// value after an unwind.
struct WriteBackOnDrop<T> {
copy: ManuallyDrop<T>,
slf: *mut Unalign<T>,
}
impl<T> Drop for WriteBackOnDrop<T> {
fn drop(&mut self) {
// SAFETY: `slf` points at the live `Unalign<T>` borrowed by
// `update` for the guard's whole lifetime, and `copy` is taken
// exactly once, here.
unsafe {
let copy = ManuallyDrop::take(&mut self.copy);
ptr::write(self.slf, Unalign::new(copy));
}
}
}
// SAFETY: `ptr::read` duplicates the wrapped value bytewise; the
// duplicate is handed to the guard, which writes it back (overwriting,
// not dropping, the stale original) before `update` returns or unwinds.
let copy = unsafe { ptr::read(self) }.into_inner();
let mut write_back = WriteBackOnDrop { copy: ManuallyDrop::new(copy), slf: self };
let ret = f(&mut write_back.copy);
drop(write_back);
ret
}
}
impl<T: Copy> Unalign<T> {
pub fn get(&self) -> T {
let Unalign(val) = *self;
val
}
}
safety_comment! {
/// SAFETY:
/// `Unalign<T>` is `repr(C, packed)` over a single `T`: same size as `T`
/// and no added padding, so `AsBytes` is inherited from `T`.
unsafe_impl!(T: AsBytes => AsBytes for Unalign<T>);
}
// When `T: Unaligned`, `T` has alignment 1, so every `Unalign<T>` is
// trivially aligned and plain `Deref`/`DerefMut` can be offered safely.
impl<T: Unaligned> Deref for Unalign<T> {
type Target = T;
fn deref(&self) -> &T {
// SAFETY: `T: Unaligned` guarantees `align_of::<T>() == 1`, so the
// alignment precondition of `deref_unchecked` holds at any address.
unsafe { self.deref_unchecked() }
}
}
impl<T: Unaligned> DerefMut for Unalign<T> {
fn deref_mut(&mut self) -> &mut T {
// SAFETY: as in `deref`; uniqueness comes from `&mut self`.
unsafe { self.deref_mut_unchecked() }
}
}
// Comparison, hashing, and formatting all defer to the aligned view of the
// wrapped value obtained through `Deref` (available because `T: Unaligned`).
impl<T: Unaligned + PartialOrd> PartialOrd<Unalign<T>> for Unalign<T> {
    fn partial_cmp(&self, other: &Unalign<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }
}
impl<T: Unaligned + Ord> Ord for Unalign<T> {
    fn cmp(&self, other: &Unalign<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}
impl<T: Unaligned + PartialEq> PartialEq<Unalign<T>> for Unalign<T> {
    fn eq(&self, other: &Unalign<T>) -> bool {
        **self == **other
    }
}
impl<T: Unaligned + Eq> Eq for Unalign<T> {}
impl<T: Unaligned + Hash> Hash for Unalign<T> {
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        (**self).hash(state);
    }
}
impl<T: Unaligned + Debug> Debug for Unalign<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}
impl<T: Unaligned + Display> Display for Unalign<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}
#[doc(hidden)]
pub use core::mem::transmute as __real_transmute;
/// A checked `transmute!($e)`: usable only when the source type is `AsBytes`
/// and the destination type is `FromBytes` (and, as always with transmute,
/// the sizes match).
// The never-executed `if false` branch funnels `$e` through a dummy generic
// function whose bounds enforce `T: AsBytes, U: FromBytes`; type inference
// unifies that `U` with the `U` produced by the real transmute in the `else`
// branch, so the bounds are checked at compile time while only
// `__real_transmute` ever runs.
#[macro_export]
macro_rules! transmute {
($e:expr) => {{
let e = $e;
if false {
const fn transmute<T: $crate::AsBytes, U: $crate::FromBytes>(_t: T) -> U {
unreachable!()
}
transmute(e)
} else {
// SAFETY: the dead branch above proved `T: AsBytes` and
// `U: FromBytes`, and `core::mem::transmute` itself enforces equal
// sizes, so the reinterpretation yields a valid `U`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe { $crate::__real_transmute(e) }
}
}}
}
/// A length- and alignment-verified view of byte slice `B` as a `T` (or
/// `[T]`). All layout checks happen at construction, so later dereferences
/// and reads need no re-validation.
pub struct LayoutVerified<B, T: ?Sized>(B, PhantomData<T>);
impl<B, T> LayoutVerified<B, T>
where
B: ByteSlice,
{
/// Verifies that `bytes` is exactly `size_of::<T>()` long and aligned for
/// `T`; `None` otherwise.
#[inline]
pub fn new(bytes: B) -> Option<LayoutVerified<B, T>> {
if bytes.len() != mem::size_of::<T>() || !aligned_to::<_, T>(bytes.deref()) {
return None;
}
Some(LayoutVerified(bytes, PhantomData))
}
/// Splits a `T`-sized, aligned prefix off `bytes`, returning it together
/// with the remaining suffix; `None` if too short or misaligned.
#[inline]
pub fn new_from_prefix(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
if bytes.len() < mem::size_of::<T>() || !aligned_to::<_, T>(bytes.deref()) {
return None;
}
let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
Some((LayoutVerified(bytes, PhantomData), suffix))
}
/// Splits a `T`-sized suffix off `bytes`, returning the preceding prefix
/// together with it; `None` if too short or the suffix is misaligned.
#[inline]
pub fn new_from_suffix(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
let bytes_len = bytes.len();
// `checked_sub` doubles as the length check: `None` if too short.
let split_at = bytes_len.checked_sub(mem::size_of::<T>())?;
let (prefix, bytes) = bytes.split_at(split_at);
if !aligned_to::<_, T>(bytes.deref()) {
return None;
}
Some((prefix, LayoutVerified(bytes, PhantomData)))
}
}
impl<B, T> LayoutVerified<B, [T]>
where
    B: ByteSlice,
{
    /// Verifies that `bytes.len()` is a multiple of `size_of::<T>()` and that
    /// `bytes` is aligned for `T`; `None` otherwise.
    ///
    /// # Panics
    ///
    /// Panics if `T` is a zero-sized type.
    #[inline]
    pub fn new_slice(bytes: B) -> Option<LayoutVerified<B, [T]>> {
        let remainder = bytes
            .len()
            .checked_rem(mem::size_of::<T>())
            .expect("LayoutVerified::new_slice called on a zero-sized type");
        if remainder != 0 || !aligned_to::<_, T>(bytes.deref()) {
            return None;
        }
        Some(LayoutVerified(bytes, PhantomData))
    }
    /// Splits the first `count` elements off `bytes`, returning them with the
    /// remaining suffix; `None` if `bytes` is too short, misaligned, or
    /// `size_of::<T>() * count` overflows.
    #[inline]
    pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(LayoutVerified<B, [T]>, B)> {
        // `?` on `checked_mul` replaces the manual `match`; same behavior.
        let expected_len = mem::size_of::<T>().checked_mul(count)?;
        if bytes.len() < expected_len {
            return None;
        }
        let (prefix, bytes) = bytes.split_at(expected_len);
        Self::new_slice(prefix).map(move |l| (l, bytes))
    }
    /// Splits the last `count` elements off `bytes`, returning the preceding
    /// prefix with them; `None` on the same conditions as
    /// [`new_slice_from_prefix`](Self::new_slice_from_prefix).
    #[inline]
    pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, LayoutVerified<B, [T]>)> {
        let expected_len = mem::size_of::<T>().checked_mul(count)?;
        // BUG FIX: split at `len - expected_len` so the *last* `expected_len`
        // bytes become the verified slice. The previous code split at
        // `expected_len` from the front, verifying the wrong region (and the
        // wrong element count) whenever `bytes.len() != 2 * expected_len`.
        // `checked_sub` also doubles as the "too short" check.
        let split_at = bytes.len().checked_sub(expected_len)?;
        let (bytes, suffix) = bytes.split_at(split_at);
        Self::new_slice(suffix).map(move |l| (bytes, l))
    }
}
// Post-processing helpers for the `*_zeroed` constructors: on success, zero
// the bytes covered by the newly-created `LayoutVerified` (field 0 is the
// byte carrier), leaving any prefix/suffix remainder untouched.
fn map_zeroed<B: ByteSliceMut, T: ?Sized>(
    opt: Option<LayoutVerified<B, T>>,
) -> Option<LayoutVerified<B, T>> {
    opt.map(|mut lv| {
        lv.0.fill(0);
        lv
    })
}
fn map_prefix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
    opt: Option<(LayoutVerified<B, T>, B)>,
) -> Option<(LayoutVerified<B, T>, B)> {
    opt.map(|(mut lv, rest)| {
        lv.0.fill(0);
        (lv, rest)
    })
}
fn map_suffix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
    opt: Option<(B, LayoutVerified<B, T>)>,
) -> Option<(B, LayoutVerified<B, T>)> {
    opt.map(|(rest, mut lv)| {
        lv.0.fill(0);
        (rest, lv)
    })
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSliceMut,
{
/// Like [`new`](Self::new), but zeroes the consumed bytes on success.
#[inline]
pub fn new_zeroed(bytes: B) -> Option<LayoutVerified<B, T>> {
map_zeroed(Self::new(bytes))
}
/// Like [`new_from_prefix`](Self::new_from_prefix), but zeroes the
/// consumed prefix on success.
#[inline]
pub fn new_from_prefix_zeroed(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
map_prefix_tuple_zeroed(Self::new_from_prefix(bytes))
}
/// Like [`new_from_suffix`](Self::new_from_suffix), but zeroes the
/// consumed suffix on success.
#[inline]
pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
map_suffix_tuple_zeroed(Self::new_from_suffix(bytes))
}
}
impl<B, T> LayoutVerified<B, [T]>
where
B: ByteSliceMut,
{
/// Like [`new_slice`](Self::new_slice), but zeroes the bytes on success.
#[inline]
pub fn new_slice_zeroed(bytes: B) -> Option<LayoutVerified<B, [T]>> {
map_zeroed(Self::new_slice(bytes))
}
/// Like [`new_slice_from_prefix`](Self::new_slice_from_prefix), but zeroes
/// the consumed prefix on success.
#[inline]
pub fn new_slice_from_prefix_zeroed(
bytes: B,
count: usize,
) -> Option<(LayoutVerified<B, [T]>, B)> {
map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count))
}
/// Like [`new_slice_from_suffix`](Self::new_slice_from_suffix), but zeroes
/// the consumed suffix on success.
#[inline]
pub fn new_slice_from_suffix_zeroed(
bytes: B,
count: usize,
) -> Option<(B, LayoutVerified<B, [T]>)> {
map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count))
}
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSlice,
T: Unaligned,
{
/// Like [`new`](Self::new), but `T: Unaligned` makes the alignment check
/// unnecessary: only the length is validated.
#[inline]
pub fn new_unaligned(bytes: B) -> Option<LayoutVerified<B, T>> {
if bytes.len() != mem::size_of::<T>() {
return None;
}
Some(LayoutVerified(bytes, PhantomData))
}
/// Like [`new_from_prefix`](Self::new_from_prefix), without the alignment
/// check.
#[inline]
pub fn new_unaligned_from_prefix(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
if bytes.len() < mem::size_of::<T>() {
return None;
}
let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
Some((LayoutVerified(bytes, PhantomData), suffix))
}
/// Like [`new_from_suffix`](Self::new_from_suffix), without the alignment
/// check.
#[inline]
pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
let bytes_len = bytes.len();
// `checked_sub` doubles as the length check: `None` if too short.
let split_at = bytes_len.checked_sub(mem::size_of::<T>())?;
let (prefix, bytes) = bytes.split_at(split_at);
Some((prefix, LayoutVerified(bytes, PhantomData)))
}
}
impl<B, T> LayoutVerified<B, [T]>
where
    B: ByteSlice,
    T: Unaligned,
{
    /// Like [`new_slice`](Self::new_slice), but `T: Unaligned` makes the
    /// alignment check unnecessary.
    ///
    /// # Panics
    ///
    /// Panics if `T` is a zero-sized type.
    #[inline]
    pub fn new_slice_unaligned(bytes: B) -> Option<LayoutVerified<B, [T]>> {
        let remainder = bytes
            .len()
            .checked_rem(mem::size_of::<T>())
            .expect("LayoutVerified::new_slice_unaligned called on a zero-sized type");
        if remainder != 0 {
            return None;
        }
        Some(LayoutVerified(bytes, PhantomData))
    }
    /// Like [`new_slice_from_prefix`](Self::new_slice_from_prefix), without
    /// the alignment check.
    #[inline]
    pub fn new_slice_unaligned_from_prefix(
        bytes: B,
        count: usize,
    ) -> Option<(LayoutVerified<B, [T]>, B)> {
        // `?` on `checked_mul` replaces the manual `match`; same behavior.
        let expected_len = mem::size_of::<T>().checked_mul(count)?;
        if bytes.len() < expected_len {
            return None;
        }
        let (prefix, bytes) = bytes.split_at(expected_len);
        Self::new_slice_unaligned(prefix).map(move |l| (l, bytes))
    }
    /// Like [`new_slice_from_suffix`](Self::new_slice_from_suffix), without
    /// the alignment check.
    #[inline]
    pub fn new_slice_unaligned_from_suffix(
        bytes: B,
        count: usize,
    ) -> Option<(B, LayoutVerified<B, [T]>)> {
        let expected_len = mem::size_of::<T>().checked_mul(count)?;
        // BUG FIX: split at `len - expected_len` so the *last* `expected_len`
        // bytes become the verified slice. The previous code split at
        // `expected_len` from the front, verifying the wrong region (and the
        // wrong element count) whenever `bytes.len() != 2 * expected_len`.
        // `checked_sub` also doubles as the "too short" check.
        let split_at = bytes.len().checked_sub(expected_len)?;
        let (bytes, suffix) = bytes.split_at(split_at);
        Self::new_slice_unaligned(suffix).map(move |l| (bytes, l))
    }
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSliceMut,
T: Unaligned,
{
/// Like [`new_unaligned`](Self::new_unaligned), but zeroes the bytes on
/// success.
#[inline]
pub fn new_unaligned_zeroed(bytes: B) -> Option<LayoutVerified<B, T>> {
map_zeroed(Self::new_unaligned(bytes))
}
/// Like [`new_unaligned_from_prefix`](Self::new_unaligned_from_prefix),
/// but zeroes the consumed prefix on success.
#[inline]
pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes))
}
/// Like [`new_unaligned_from_suffix`](Self::new_unaligned_from_suffix),
/// but zeroes the consumed suffix on success.
#[inline]
pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes))
}
}
impl<B, T> LayoutVerified<B, [T]>
where
B: ByteSliceMut,
T: Unaligned,
{
/// Like [`new_slice_unaligned`](Self::new_slice_unaligned), but zeroes the
/// bytes on success.
#[inline]
pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<LayoutVerified<B, [T]>> {
map_zeroed(Self::new_slice_unaligned(bytes))
}
/// Prefix variant of the zeroing, non-aligning slice constructor.
#[inline]
pub fn new_slice_unaligned_from_prefix_zeroed(
bytes: B,
count: usize,
) -> Option<(LayoutVerified<B, [T]>, B)> {
map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count))
}
/// Suffix variant of the zeroing, non-aligning slice constructor.
#[inline]
pub fn new_slice_unaligned_from_suffix_zeroed(
bytes: B,
count: usize,
) -> Option<(B, LayoutVerified<B, [T]>)> {
map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count))
}
}
impl<'a, B, T> LayoutVerified<B, T>
where
B: 'a + ByteSlice,
T: FromBytes,
{
/// Consumes the `LayoutVerified`, returning a `&'a T` into the underlying
/// bytes.
pub fn into_ref(self) -> &'a T {
// SAFETY: size and alignment were verified at construction, and
// `T: FromBytes` makes any byte content a valid `T`.
// NOTE(review): `self.0` (the `B` carrier) is dropped when this function
// returns while the `&'a T` lives on; for `B = Ref<'a, [u8]>` that
// releases the `RefCell` borrow early -- confirm this is sound.
unsafe { self.deref_helper() }
}
}
impl<'a, B, T> LayoutVerified<B, T>
where
B: 'a + ByteSliceMut,
T: FromBytes + AsBytes,
{
/// Consumes the `LayoutVerified`, returning a `&'a mut T` into the
/// underlying bytes.
pub fn into_mut(mut self) -> &'a mut T {
// SAFETY: as in `into_ref`; uniqueness comes from owning `self` and the
// mutable carrier `B`. Same carrier-drop caveat as `into_ref`.
unsafe { self.deref_mut_helper() }
}
}
impl<'a, B, T> LayoutVerified<B, [T]>
where
B: 'a + ByteSlice,
T: FromBytes,
{
/// Consumes the `LayoutVerified`, returning a `&'a [T]` into the
/// underlying bytes.
pub fn into_slice(self) -> &'a [T] {
// SAFETY: construction verified length divisibility and alignment; see
// also the carrier-drop caveat on `into_ref`.
unsafe { self.deref_slice_helper() }
}
}
impl<'a, B, T> LayoutVerified<B, [T]>
where
B: 'a + ByteSliceMut,
T: FromBytes + AsBytes,
{
/// Consumes the `LayoutVerified`, returning a `&'a mut [T]` into the
/// underlying bytes.
pub fn into_mut_slice(mut self) -> &'a mut [T] {
// SAFETY: as in `into_slice`, with uniqueness from the mutable carrier.
unsafe { self.deref_mut_slice_helper() }
}
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSlice,
T: FromBytes,
{
/// # Safety
///
/// The caller must ensure the underlying bytes live for the whole
/// (freely chosen) lifetime `'a`.
unsafe fn deref_helper<'a>(&self) -> &'a T {
// SAFETY: the constructors verified that `self.0` holds exactly
// `size_of::<T>()` bytes at an address aligned for `T`, and
// `T: FromBytes` makes any contents a valid `T`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
&*self.0.as_ptr().cast::<T>()
}
}
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
/// # Safety
///
/// As for `deref_helper`; the caller must also guarantee the returned
/// `&mut` is the only live reference into these bytes for `'a`.
unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T {
// SAFETY: as in `deref_helper`; `T: AsBytes` additionally ensures any
// value written through the reference is pure bytes.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
&mut *self.0.as_mut_ptr().cast::<T>()
}
}
}
impl<B, T> LayoutVerified<B, [T]>
where
B: ByteSlice,
T: FromBytes,
{
/// # Safety
///
/// The caller must ensure the underlying bytes live for `'a`.
unsafe fn deref_slice_helper<'a>(&self) -> &'a [T] {
let len = self.0.len();
let elem_size = mem::size_of::<T>();
// Slice constructors panic on zero-sized `T`, so dividing is safe.
debug_assert_ne!(elem_size, 0);
#[allow(clippy::arithmetic_side_effects)]
let elems = {
// Constructors guarantee the length is an exact multiple.
debug_assert_eq!(len % elem_size, 0);
len / elem_size
};
// SAFETY: the constructors verified alignment and that `len` is an
// exact multiple of `elem_size`, so `elems` elements of valid `T`
// (`T: FromBytes`) start at the (aligned) base pointer.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
slice::from_raw_parts(self.0.as_ptr().cast::<T>(), elems)
}
}
}
impl<B, T> LayoutVerified<B, [T]>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
/// # Safety
///
/// As for `deref_slice_helper`, plus exclusivity of the returned `&mut`.
unsafe fn deref_mut_slice_helper<'a>(&mut self) -> &'a mut [T] {
let len = self.0.len();
let elem_size = mem::size_of::<T>();
debug_assert_ne!(elem_size, 0);
#[allow(clippy::arithmetic_side_effects)]
let elems = {
debug_assert_eq!(len % elem_size, 0);
len / elem_size
};
// SAFETY: as in `deref_slice_helper`, with uniqueness from `&mut self`
// and the mutable carrier `B`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
slice::from_raw_parts_mut(self.0.as_mut_ptr().cast::<T>(), elems)
}
}
}
/// Something with a memory address: references and raw pointers, including
/// fat (unsized) ones, whose data address can be extracted as a `usize`.
trait AsAddress {
    fn addr(self) -> usize;
}
// The reference impls delegate to the raw-pointer impls below, so the
// address extraction logic lives in exactly one place per mutability.
impl<'a, T: ?Sized> AsAddress for &'a T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}
impl<'a, T: ?Sized> AsAddress for &'a mut T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *mut T = self;
        AsAddress::addr(ptr)
    }
}
impl<T: ?Sized> AsAddress for *const T {
    #[inline(always)]
    fn addr(self) -> usize {
        // Casting to `*const ()` first discards any fat-pointer metadata so
        // only the data address is converted.
        #[allow(clippy::as_conversions)]
        let addr = self.cast::<()>() as usize;
        addr
    }
}
impl<T: ?Sized> AsAddress for *mut T {
    #[inline(always)]
    fn addr(self) -> usize {
        #[allow(clippy::as_conversions)]
        let addr = self.cast::<()>() as usize;
        addr
    }
}
/// Returns whether `t`'s address is a multiple of `align_of::<U>()`.
#[inline(always)]
fn aligned_to<T: AsAddress, U>(t: T) -> bool {
    // `align_of` is always a nonzero power of two, so `%` cannot divide by
    // zero here.
    #[allow(clippy::arithmetic_side_effects)]
    let remainder = t.addr() % mem::align_of::<U>();
    remainder == 0
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSlice,
T: ?Sized,
{
/// Returns the raw bytes backing this `LayoutVerified`.
#[inline]
pub fn bytes(&self) -> &[u8] {
&self.0
}
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSliceMut,
T: ?Sized,
{
/// Returns the raw bytes backing this `LayoutVerified`, mutably.
#[inline]
pub fn bytes_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSlice,
T: FromBytes,
{
/// Reads a copy of `T` out of the verified bytes.
#[inline]
pub fn read(&self) -> T {
// SAFETY: construction verified size and alignment, and `T: FromBytes`
// accepts any byte content.
unsafe { ptr::read(self.0.as_ptr().cast::<T>()) }
}
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSliceMut,
T: AsBytes,
{
/// Overwrites the verified bytes with `t`.
#[inline]
pub fn write(&mut self, t: T) {
// SAFETY: construction verified size and alignment; `T: AsBytes` means
// writing `t` leaves no padding or uninitialized bytes behind. The old
// contents are plain bytes, so skipping their drop is fine.
unsafe { ptr::write(self.0.as_mut_ptr().cast::<T>(), t) }
}
}
impl<B, T> Deref for LayoutVerified<B, T>
where
B: ByteSlice,
T: FromBytes,
{
type Target = T;
#[inline]
fn deref(&self) -> &T {
// SAFETY: layout was verified at construction, and the returned
// reference reborrows `self`, so it cannot outlive the bytes.
unsafe { self.deref_helper() }
}
}
impl<B, T> DerefMut for LayoutVerified<B, T>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
#[inline]
fn deref_mut(&mut self) -> &mut T {
// SAFETY: as in `deref`; exclusivity comes from `&mut self`.
unsafe { self.deref_mut_helper() }
}
}
impl<B, T> Deref for LayoutVerified<B, [T]>
where
B: ByteSlice,
T: FromBytes,
{
type Target = [T];
#[inline]
fn deref(&self) -> &[T] {
// SAFETY: as in the sized `Deref` impl above, for the slice helper.
unsafe { self.deref_slice_helper() }
}
}
impl<B, T> DerefMut for LayoutVerified<B, [T]>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
#[inline]
fn deref_mut(&mut self) -> &mut [T] {
// SAFETY: as in `deref` for slices; exclusivity from `&mut self`.
unsafe { self.deref_mut_slice_helper() }
}
}
impl<T, B> Display for LayoutVerified<B, T>
where
B: ByteSlice,
T: FromBytes + Display,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &T = self;
inner.fmt(fmt)
}
}
impl<T, B> Display for LayoutVerified<B, [T]>
where
B: ByteSlice,
T: FromBytes,
[T]: Display,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &[T] = self;
inner.fmt(fmt)
}
}
impl<T, B> Debug for LayoutVerified<B, T>
where
B: ByteSlice,
T: FromBytes + Debug,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &T = self;
fmt.debug_tuple("LayoutVerified").field(&inner).finish()
}
}
impl<T, B> Debug for LayoutVerified<B, [T]>
where
B: ByteSlice,
T: FromBytes + Debug,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &[T] = self;
fmt.debug_tuple("LayoutVerified").field(&inner).finish()
}
}
// Equality delegates to the verified inner value / slice.
impl<T, B> Eq for LayoutVerified<B, T>
where
    B: ByteSlice,
    T: FromBytes + Eq,
{
}
impl<T, B> Eq for LayoutVerified<B, [T]>
where
    B: ByteSlice,
    T: FromBytes + Eq,
{
}
impl<T, B> PartialEq for LayoutVerified<B, T>
where
    B: ByteSlice,
    T: FromBytes + PartialEq,
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(self.deref(), other.deref())
    }
}
impl<T, B> PartialEq for LayoutVerified<B, [T]>
where
    B: ByteSlice,
    T: FromBytes + PartialEq,
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(self.deref(), other.deref())
    }
}
impl<T, B> Ord for LayoutVerified<B, T>
where
B: ByteSlice,
T: FromBytes + Ord,
{
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
let inner: &T = self;
let other_inner: &T = other;
inner.cmp(other_inner)
}
}
impl<T, B> Ord for LayoutVerified<B, [T]>
where
B: ByteSlice,
T: FromBytes + Ord,
{
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
let inner: &[T] = self;
let other_inner: &[T] = other;
inner.cmp(other_inner)
}
}
impl<T, B> PartialOrd for LayoutVerified<B, T>
where
B: ByteSlice,
T: FromBytes + PartialOrd,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let inner: &T = self;
let other_inner: &T = other;
inner.partial_cmp(other_inner)
}
}
impl<T, B> PartialOrd for LayoutVerified<B, [T]>
where
B: ByteSlice,
T: FromBytes + PartialOrd,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let inner: &[T] = self;
let other_inner: &[T] = other;
inner.partial_cmp(other_inner)
}
}
// The sealed-trait pattern: `ByteSlice`/`ByteSliceMut` require this private
// trait, so no type outside this crate can implement them. The four impls
// enumerate the only supported byte-slice carriers.
mod sealed {
use core::cell::{Ref, RefMut};
pub trait Sealed {}
impl<'a> Sealed for &'a [u8] {}
impl<'a> Sealed for &'a mut [u8] {}
impl<'a> Sealed for Ref<'a, [u8]> {}
impl<'a> Sealed for RefMut<'a, [u8]> {}
}
/// A `[u8]`-dereferencing carrier that can be split at an index. Sealed: the
/// only implementors are `&[u8]`, `&mut [u8]`, `Ref<[u8]>`, and
/// `RefMut<[u8]>`.
///
/// # Safety
///
/// The unsafe code in this crate assumes `deref`/`as_ptr` are stable,
/// reporting the same address and length every time for a given value --
/// NOTE(review): confirm the exact intended contract; sealing means only the
/// impls below need to satisfy it.
#[allow(clippy::missing_safety_doc)] pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized + self::sealed::Sealed {
/// Returns a raw pointer to the first byte.
#[inline]
fn as_ptr(&self) -> *const u8 {
<[u8]>::as_ptr(self)
}
/// Splits `self` into the first `mid` bytes and the rest.
fn split_at(self, mid: usize) -> (Self, Self);
}
/// Mutable counterpart of [`ByteSlice`].
///
/// # Safety
///
/// Same stability assumptions as [`ByteSlice`], for the mutable view.
#[allow(clippy::missing_safety_doc)] pub unsafe trait ByteSliceMut: ByteSlice + DerefMut {
/// Returns a raw mutable pointer to the first byte.
#[inline]
fn as_mut_ptr(&mut self) -> *mut u8 {
<[u8]>::as_mut_ptr(self)
}
}
// SAFETY: a shared slice reference always derefs to the same address/length.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for &'a [u8] {
#[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
<[u8]>::split_at(self, mid)
}
}
// SAFETY: a unique slice reference always derefs to the same address/length.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for &'a mut [u8] {
#[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
<[u8]>::split_at_mut(self, mid)
}
}
// SAFETY: a `Ref` guard derefs to the same slice for its whole lifetime;
// `map_split` divides the `RefCell` borrow itself, so both halves keep the
// cell borrowed.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for Ref<'a, [u8]> {
#[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
}
}
// SAFETY: as for `Ref`, with the mutable guard and `split_at_mut`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> {
#[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid))
}
}
// SAFETY: the two mutable carriers above also satisfy `ByteSliceMut`'s
// stability assumption for their mutable views.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSliceMut for &'a mut [u8] {}
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {}
#[cfg(feature = "alloc")]
mod alloc_support {
use alloc::vec::Vec;
use super::*;
/// Appends `additional` zeroed `T`s to `v`, growing the capacity as needed.
pub fn extend_vec_zeroed<T: FromBytes>(v: &mut Vec<T>, additional: usize) {
insert_vec_zeroed(v, v.len(), additional);
}
/// Inserts `additional` zeroed `T`s into `v` at index `position`, shifting
/// the existing tail up.
///
/// # Panics
///
/// Panics if `position > v.len()` or if the required capacity overflows.
pub fn insert_vec_zeroed<T: FromBytes>(v: &mut Vec<T>, position: usize, additional: usize) {
assert!(position <= v.len());
// Ensure capacity before touching raw memory.
v.reserve(additional);
// SAFETY: after `reserve`, the buffer can hold `len + additional`
// elements. The tail `[position, len)` is shifted up by `additional`
// slots (`copy_to` has `memmove` semantics, so the overlap is fine), the
// gap is zero-filled -- valid `T`s because `T: FromBytes` -- and only then
// is the length extended over fully-initialized elements.
unsafe {
let ptr = v.as_mut_ptr();
#[allow(clippy::arithmetic_side_effects)]
ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
ptr.add(position).write_bytes(0, additional);
#[allow(clippy::arithmetic_side_effects)]
v.set_len(v.len() + additional);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_extend_vec_zeroed() {
let mut v: Vec<u64> = Vec::with_capacity(3);
v.push(100);
v.push(200);
v.push(300);
extend_vec_zeroed(&mut v, 3);
assert_eq!(v.len(), 6);
assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
drop(v);
let mut v: Vec<u64> = Vec::new();
extend_vec_zeroed(&mut v, 3);
assert_eq!(v.len(), 3);
assert_eq!(&*v, &[0, 0, 0]);
drop(v);
}
#[test]
fn test_extend_vec_zeroed_zst() {
    // Extending a non-empty vector of zero-sized values.
    let mut v: Vec<()> = Vec::with_capacity(3);
    v.push(());
    v.push(());
    v.push(());
    extend_vec_zeroed(&mut v, 3);
    assert_eq!(v.len(), 6);
    assert_eq!(&*v, &[(), (), (), (), (), ()]);
    drop(v);
    // Extending an empty vector of zero-sized values. Assert the length
    // explicitly as well, matching `test_extend_vec_zeroed`.
    let mut v: Vec<()> = Vec::new();
    extend_vec_zeroed(&mut v, 3);
    assert_eq!(v.len(), 3);
    assert_eq!(&*v, &[(), (), ()]);
    drop(v);
}
#[test]
fn test_insert_vec_zeroed() {
    // Inserting into an empty vector.
    let mut v: Vec<u64> = Vec::new();
    insert_vec_zeroed(&mut v, 0, 2);
    assert_eq!(v.len(), 2);
    assert_eq!(&*v, &[0, 0]);
    drop(v);
    // Inserting at the front shifts everything back.
    let mut v: Vec<u64> = vec![100, 200, 300];
    insert_vec_zeroed(&mut v, 0, 2);
    assert_eq!(v.len(), 5);
    assert_eq!(&*v, &[0, 0, 100, 200, 300]);
    drop(v);
    // Inserting in the middle splits the existing elements.
    let mut v: Vec<u64> = vec![100, 200, 300];
    insert_vec_zeroed(&mut v, 1, 1);
    assert_eq!(v.len(), 4);
    assert_eq!(&*v, &[100, 0, 200, 300]);
    drop(v);
    // Inserting at the end behaves like an extend.
    let mut v: Vec<u64> = vec![100, 200, 300];
    insert_vec_zeroed(&mut v, 3, 1);
    assert_eq!(v.len(), 4);
    assert_eq!(&*v, &[100, 200, 300, 0]);
    drop(v);
}
#[test]
fn test_insert_vec_zeroed_zst() {
    // Same cases as `test_insert_vec_zeroed`, but with a zero-sized element
    // type (no actual bytes are moved or written).
    let mut v: Vec<()> = Vec::new();
    insert_vec_zeroed(&mut v, 0, 2);
    assert_eq!(v.len(), 2);
    assert_eq!(&*v, &[(), ()]);
    drop(v);
    // Insert at the front.
    let mut v: Vec<()> = vec![(); 3];
    insert_vec_zeroed(&mut v, 0, 2);
    assert_eq!(v.len(), 5);
    assert_eq!(&*v, &[(), (), (), (), ()]);
    drop(v);
    // Insert in the middle.
    let mut v: Vec<()> = vec![(); 3];
    insert_vec_zeroed(&mut v, 1, 1);
    assert_eq!(v.len(), 4);
    assert_eq!(&*v, &[(), (), (), ()]);
    drop(v);
    // Insert at the end.
    let mut v: Vec<()> = vec![(); 3];
    insert_vec_zeroed(&mut v, 3, 1);
    assert_eq!(v.len(), 4);
    assert_eq!(&*v, &[(), (), (), ()]);
    drop(v);
}
#[test]
fn test_new_box_zeroed() {
    // A freshly allocated box holds the all-zeroes value.
    let zeroed = u64::new_box_zeroed();
    assert_eq!(*zeroed, 0);
}
#[test]
fn test_new_box_zeroed_array() {
    // A large fixed-size array; we only care that allocation and
    // deallocation both succeed.
    let boxed = <[u32; 0x1000]>::new_box_zeroed();
    drop(boxed);
}
#[test]
fn test_new_box_zeroed_zst() {
    // Zero-sized payloads allocate nothing but must still round-trip.
    let boxed = <()>::new_box_zeroed();
    #[allow(clippy::unit_cmp)]
    {
        assert_eq!(*boxed, ());
    }
}
#[test]
fn test_new_box_slice_zeroed() {
    let mut slice: Box<[u64]> = u64::new_box_slice_zeroed(3);
    assert_eq!(slice.len(), 3);
    assert_eq!(&*slice, [0u64, 0, 0].as_ref());
    // The boxed slice is writable in place.
    slice[1] = 3;
    assert_eq!(&*slice, [0u64, 3, 0].as_ref());
}
#[test]
fn test_new_box_slice_zeroed_empty() {
    // A zero-length request yields an empty (but valid) boxed slice.
    let empty: Box<[u64]> = u64::new_box_slice_zeroed(0);
    assert!(empty.is_empty());
}
#[test]
fn test_new_box_slice_zeroed_zst() {
    // A boxed slice of ZSTs still tracks its length and bounds-checks.
    let mut slice: Box<[()]> = <()>::new_box_slice_zeroed(3);
    assert_eq!(slice.len(), 3);
    assert!(slice.get(10).is_none());
    #[allow(clippy::unit_cmp)]
    {
        assert_eq!(slice[1], ());
    }
    slice[2] = ();
}
#[test]
fn test_new_box_slice_zeroed_zst_empty() {
    // Zero ZSTs: the degenerate corner of the degenerate corner.
    let empty: Box<[()]> = <()>::new_box_slice_zeroed(0);
    assert!(empty.is_empty());
}
// `size_of::<u16>() * usize::MAX` overflows `usize`; the constructor is
// expected to panic with this exact message rather than allocate.
#[test]
#[should_panic(expected = "mem::size_of::<Self>() * len overflows `usize`")]
fn test_new_box_slice_zeroed_panics_mul_overflow() {
    let _ = u16::new_box_slice_zeroed(usize::MAX);
}
// A total size that fits in `usize` but exceeds `isize::MAX` must also panic.
// Gated to 1.65+ because the expected message embeds `LayoutError`'s output,
// whose text presumably differs on older toolchains — hence the gate.
#[rustversion::since(1.65.0)]
#[test]
#[should_panic(expected = "total allocation size overflows `isize`: LayoutError")]
fn test_new_box_slice_zeroed_panics_isize_overflow() {
    use core::convert::TryFrom as _;
    // The largest len whose byte size still fits in `isize`, plus one.
    let max = usize::try_from(isize::MAX).unwrap();
    let _ = u16::new_box_slice_zeroed((max / mem::size_of::<u16>()) + 1);
}
}
}
#[cfg(feature = "alloc")]
#[doc(inline)]
pub use alloc_support::*;
#[cfg(test)]
mod tests {
#![allow(clippy::unreadable_literal)]
use core::{ops::Deref, panic::AssertUnwindSafe};
use static_assertions::assert_impl_all;
use super::*;
// A `T` stored with at least the alignment of `A`: the zero-length `[A; 0]`
// field contributes `A`'s alignment to the struct but occupies no space.
#[derive(Default)]
struct Align<T, A> {
    t: T,
    _a: [A; 0],
}
impl<T: Default, A> Align<T, A> {
    // Resets the payload to `T::default()` in place.
    fn set_default(&mut self) {
        self.t = T::default();
    }
}
impl<T, A> Align<T, A> {
    const fn new(t: T) -> Align<T, A> {
        Align { t, _a: [] }
    }
}
// Forces `t` to be misaligned for `A`: `repr(C)` lays out the leading `u8`
// first, so an align-1 `T` (e.g. `Unalign<_>`) lands at offset 1 while the
// `[A; 0]` field raises the struct's alignment to `A`'s — guaranteeing the
// payload is NOT `A`-aligned.
#[repr(C)]
struct ForceUnalign<T, A> {
    _u: u8,
    t: T,
    _a: [A; 0],
}
impl<T, A> ForceUnalign<T, A> {
    const fn new(t: T) -> ForceUnalign<T, A> {
        ForceUnalign { _u: 0, t, _a: [] }
    }
}
// A `u64` with 8-byte alignment guaranteed on every target (plain `u64` may
// have smaller alignment on some 32-bit platforms), used throughout these
// tests wherever alignment behavior matters.
#[derive(FromBytes, AsBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone)]
#[repr(C, align(8))]
struct AU64(u64);
impl Display for AU64 {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Display::fmt(&self.0, f)
    }
}
// Converts an `AU64` into its 8 native-endian bytes via `transmute!`.
fn au64_to_bytes(u: AU64) -> [u8; 8] {
    transmute!(u)
}
// An unsized wrapper around `[u8]`, used to exercise the traits' unsized
// (`?Sized`) code paths.
#[derive(Debug, Eq, PartialEq, FromBytes, AsBytes, Unaligned)]
#[repr(transparent)]
struct Unsized([u8]);
impl Unsized {
    fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
        // SAFETY (review note): `Unsized` is `repr(transparent)` over `[u8]`,
        // so `&mut [u8]` and `&mut Unsized` have identical layout and pointer
        // metadata; the transmute only reinterprets the reference type.
        unsafe { mem::transmute(slc) }
    }
}
#[test]
fn test_object_safety() {
    // These signatures compile only if the traits are object-safe; the
    // functions are intentionally never called.
    fn _accepts_dyn_from_bytes(_arg: &dyn FromBytes) {}
    fn _accepts_dyn_unaligned(_arg: &dyn Unaligned) {}
}
#[test]
fn test_unalign() {
    // Methods that don't depend on the wrapped value's alignment.
    let mut u = Unalign::new(AU64(123));
    assert_eq!(u.get(), AU64(123));
    assert_eq!(u.into_inner(), AU64(123));
    assert_eq!(u.get_ptr(), <*const _>::cast::<AU64>(&u));
    assert_eq!(u.get_mut_ptr(), <*mut _>::cast::<AU64>(&mut u));
    u.set(AU64(321));
    assert_eq!(u.get(), AU64(321));
    // Alignment-dependent methods when alignment IS satisfied (via `Align`).
    let mut u: Align<_, AU64> = Align::new(Unalign::new(AU64(123)));
    assert_eq!(u.t.try_deref(), Some(&AU64(123)));
    assert_eq!(u.t.try_deref_mut(), Some(&mut AU64(123)));
    // SAFETY (review note): `Align` guarantees the inner `Unalign` sits at an
    // `AU64`-aligned address, so the unchecked derefs should be sound here —
    // confirm against `Unalign`'s safety docs.
    assert_eq!(unsafe { u.t.deref_unchecked() }, &AU64(123));
    assert_eq!(unsafe { u.t.deref_mut_unchecked() }, &mut AU64(123));
    *u.t.try_deref_mut().unwrap() = AU64(321);
    assert_eq!(u.t.get(), AU64(321));
    // Alignment-dependent methods when alignment is NOT satisfied: both
    // checked derefs must refuse.
    let mut u: ForceUnalign<_, AU64> = ForceUnalign::new(Unalign::new(AU64(123)));
    assert_eq!(u.t.try_deref(), None);
    assert_eq!(u.t.try_deref_mut(), None);
    // For an align-1 payload (`u8`), deref always succeeds.
    let mut u = Unalign::new(123u8);
    assert_eq!(u.try_deref(), Some(&123));
    assert_eq!(u.try_deref_mut(), Some(&mut 123));
    assert_eq!(u.deref(), &123);
    assert_eq!(u.deref_mut(), &mut 123);
    *u = 21;
    assert_eq!(u.get(), 21);
    // The `const fn` APIs must work in const contexts too.
    const _UNALIGN: Unalign<u64> = Unalign::new(0);
    const _UNALIGN_PTR: *const u64 = _UNALIGN.get_ptr();
    const _U64: u64 = _UNALIGN.into_inner();
    #[allow(dead_code)]
    const _: () = {
        let x: Align<_, AU64> = Align::new(Unalign::new(AU64(123)));
        // SAFETY (review note): same alignment argument as above, evaluated
        // at compile time.
        let au64 = unsafe { x.t.deref_unchecked() };
        match au64 {
            AU64(123) => {}
            _ => unreachable!(),
        }
    };
}
#[test]
fn test_unalign_update() {
    let mut u = Unalign::new(AU64(123));
    u.update(|a| a.0 += 1);
    assert_eq!(u.get(), AU64(124));
    // Panic safety: the callback mutates the value and then panics. After
    // unwinding, the `Unalign` must still hold the mutated value (no
    // uninitialized or duplicated `Box`) — the final assert checks that the
    // pre-panic increment survived.
    let mut u = Unalign::new(Box::new(AU64(123)));
    let res = std::panic::catch_unwind(AssertUnwindSafe(|| {
        u.update(|a| {
            a.0 += 1;
            panic!();
        })
    }));
    #[allow(clippy::assertions_on_result_states)]
    {
        assert!(res.is_err());
    }
    assert_eq!(u.into_inner(), Box::new(AU64(124)));
}
#[test]
fn test_read_write() {
    const VAL: u64 = 0x12345678;
    // Native-endian byte image of `VAL`, selected per target endianness so
    // the comparisons below are endian-agnostic.
    #[cfg(target_endian = "big")]
    const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
    #[cfg(target_endian = "little")]
    const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
    // `read_from` on an exactly-sized buffer.
    assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL));
    // `read_from_prefix`/`read_from_suffix` each consume their half of a
    // 16-byte buffer; the other half is zero.
    let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
    assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Some(VAL));
    assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Some(0));
    let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
    assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0));
    assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL));
    // `write_to` on an exactly-sized buffer.
    let mut bytes = [0u8; 8];
    assert_eq!(VAL.write_to(&mut bytes[..]), Some(()));
    assert_eq!(bytes, VAL_BYTES);
    // `write_to_prefix`/`write_to_suffix` leave the untouched half zeroed.
    let mut bytes = [0u8; 16];
    assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Some(()));
    let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
    assert_eq!(bytes, want);
    let mut bytes = [0u8; 16];
    assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Some(()));
    let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
    assert_eq!(bytes, want);
}
#[test]
fn test_transmute() {
    // `transmute!` between same-size, same-alignment array shapes.
    let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
    let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
    let x: [[u8; 2]; 4] = transmute!(array_of_u8s);
    assert_eq!(x, array_of_arrays);
    let x: [u8; 8] = transmute!(array_of_arrays);
    assert_eq!(x, array_of_u8s);
    // `transmute!` must consume its input WITHOUT running its destructor:
    // if the source value were dropped, this test would panic.
    #[derive(AsBytes)]
    #[repr(transparent)]
    struct PanicOnDrop(());
    impl Drop for PanicOnDrop {
        fn drop(&mut self) {
            panic!("PanicOnDrop::drop");
        }
    }
    let _: () = transmute!(PanicOnDrop(()));
    // `transmute!` also works in const contexts.
    const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
    const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
    const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S);
    assert_eq!(X, ARRAY_OF_ARRAYS);
}
#[test]
fn test_address() {
    // `Deref` on a sized `LayoutVerified` must expose the exact address of
    // the backing buffer.
    let backing = [0];
    let verified = LayoutVerified::<_, u8>::new(&backing[..]).unwrap();
    let observed: *const u8 = verified.deref();
    assert_eq!(backing.as_ptr(), observed);
    // Same property for the slice flavor.
    let backing = [0];
    let verified = LayoutVerified::<_, [u8]>::new_slice(&backing[..]).unwrap();
    let observed = verified.deref().as_ptr();
    assert_eq!(backing.as_ptr(), observed);
}
// Shared body for the sized `LayoutVerified<_, AU64>` constructor tests.
// Callers pass a view over an 8-byte buffer that starts zeroed.
fn test_new_helper(mut lv: LayoutVerified<&mut [u8], AU64>) {
    // Initial state is all-zeroes.
    assert_eq!(*lv, AU64(0));
    assert_eq!(lv.read(), AU64(0));
    // A write through `DerefMut` is visible via `bytes()`.
    const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
    *lv = VAL1;
    assert_eq!(lv.bytes(), &au64_to_bytes(VAL1));
    *lv = AU64(0);
    // `write` has the same effect as `DerefMut` assignment.
    lv.write(VAL1);
    assert_eq!(lv.bytes(), &au64_to_bytes(VAL1));
    // Conversely, mutating the raw bytes is visible via `Deref` and `read`.
    const VAL2: AU64 = AU64(!VAL1.0);
    lv.bytes_mut().copy_from_slice(&au64_to_bytes(VAL2)[..]);
    assert_eq!(*lv, VAL2);
    assert_eq!(lv.read(), VAL2);
}
fn test_new_helper_slice(mut lv: LayoutVerified<&mut [u8], [AU64]>, typed_len: usize) {
assert_eq!(&*lv, vec![AU64(0); typed_len].as_slice());
let untyped_len = typed_len * 8;
assert_eq!(lv.bytes().len(), untyped_len);
assert_eq!(lv.bytes().as_ptr(), lv.as_ptr().cast::<u8>());
const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
for typed in &mut *lv {
*typed = VAL1;
}
assert_eq!(lv.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice());
const VAL2: AU64 = AU64(!VAL1.0); lv.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len));
assert!(lv.iter().copied().all(|x| x == VAL2));
}
// Shared body for the unaligned (`[u8; 8]`-typed) constructor tests; mirrors
// `test_new_helper` but needs no alignment from the caller's buffer.
fn test_new_helper_unaligned(mut lv: LayoutVerified<&mut [u8], [u8; 8]>) {
    // Initial state is all-zeroes.
    assert_eq!(*lv, [0; 8]);
    assert_eq!(lv.read(), [0; 8]);
    // A write through `DerefMut` is visible via `bytes()`.
    const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00];
    *lv = VAL1;
    assert_eq!(lv.bytes(), &VAL1);
    *lv = [0; 8];
    // `write` has the same effect as `DerefMut` assignment.
    lv.write(VAL1);
    assert_eq!(lv.bytes(), &VAL1);
    // Conversely, raw-byte writes are visible via `Deref` and `read`.
    const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF];
    lv.bytes_mut().copy_from_slice(&VAL2[..]);
    assert_eq!(*lv, VAL2);
    assert_eq!(lv.read(), VAL2);
}
// Shared body for the `[u8]`-typed slice constructor tests; `lv` views a
// zeroed buffer of `len` bytes.
fn test_new_helper_slice_unaligned(mut lv: LayoutVerified<&mut [u8], [u8]>, len: usize) {
    assert_eq!(&*lv, vec![0u8; len].as_slice());
    assert_eq!(lv.bytes().len(), len);
    assert_eq!(lv.bytes().as_ptr(), lv.as_ptr());
    // Writes through the typed view (alternating 0xFF/0x00 pattern) are
    // visible via `bytes()`.
    let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
    lv.copy_from_slice(&expected_bytes);
    assert_eq!(lv.bytes(), expected_bytes.as_slice());
    // Conversely, raw-byte writes (the inverted pattern) are visible via
    // `Deref`.
    for byte in &mut expected_bytes {
        *byte = !*byte;
    }
    lv.bytes_mut().copy_from_slice(&expected_bytes);
    assert_eq!(&*lv, expected_bytes.as_slice());
}
// Exercises every aligned constructor against exactly-sized buffers. Buffers
// are re-filled with 0xFF before each `_zeroed` variant to prove zeroing
// actually happens.
#[test]
fn test_new_aligned_sized() {
    // Sized `AU64` views over an exactly 8-byte, 8-aligned buffer.
    let mut buf = Align::<[u8; 8], AU64>::default();
    test_new_helper(LayoutVerified::<_, AU64>::new(&mut buf.t[..]).unwrap());
    buf.t = [0xFFu8; 8];
    test_new_helper(LayoutVerified::<_, AU64>::new_zeroed(&mut buf.t[..]).unwrap());
    {
        buf.set_default();
        let (lv, suffix) = LayoutVerified::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
        assert!(suffix.is_empty());
        test_new_helper(lv);
    }
    {
        buf.t = [0xFFu8; 8];
        let (lv, suffix) =
            LayoutVerified::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
        assert!(suffix.is_empty());
        test_new_helper(lv);
    }
    {
        buf.set_default();
        let (prefix, lv) = LayoutVerified::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
        assert!(prefix.is_empty());
        test_new_helper(lv);
    }
    {
        buf.t = [0xFFu8; 8];
        let (prefix, lv) =
            LayoutVerified::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
        assert!(prefix.is_empty());
        test_new_helper(lv);
    }
    // Slice `[AU64]` views over a 16-byte buffer (two elements).
    let mut buf = Align::<[u8; 16], AU64>::default();
    test_new_helper_slice(LayoutVerified::<_, [AU64]>::new_slice(&mut buf.t[..]).unwrap(), 2);
    buf.t = [0xFFu8; 16];
    test_new_helper_slice(
        LayoutVerified::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).unwrap(),
        2,
    );
    // One-element prefix/suffix views: only the covered half may be zeroed,
    // and the leftover half keeps its previous contents.
    {
        buf.set_default();
        let (lv, suffix) =
            LayoutVerified::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap();
        assert_eq!(suffix, [0; 8]);
        test_new_helper_slice(lv, 1);
    }
    {
        buf.t = [0xFFu8; 16];
        let (lv, suffix) =
            LayoutVerified::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 1)
                .unwrap();
        assert_eq!(suffix, [0xFF; 8]);
        test_new_helper_slice(lv, 1);
    }
    {
        buf.set_default();
        let (prefix, lv) =
            LayoutVerified::<_, [AU64]>::new_slice_from_suffix(&mut buf.t[..], 1).unwrap();
        assert_eq!(prefix, [0; 8]);
        test_new_helper_slice(lv, 1);
    }
    {
        buf.t = [0xFFu8; 16];
        let (prefix, lv) =
            LayoutVerified::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 1)
                .unwrap();
        assert_eq!(prefix, [0xFF; 8]);
        test_new_helper_slice(lv, 1);
    }
}
// Mirror of `test_new_aligned_sized` for the `_unaligned` constructors, which
// require no alignment from the backing buffer.
#[test]
fn test_new_unaligned_sized() {
    // Sized `[u8; 8]` views over an exactly 8-byte buffer.
    let mut buf = [0u8; 8];
    test_new_helper_unaligned(
        LayoutVerified::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap(),
    );
    buf = [0xFFu8; 8];
    test_new_helper_unaligned(
        LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap(),
    );
    {
        buf = [0u8; 8];
        let (lv, suffix) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
        assert!(suffix.is_empty());
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0xFFu8; 8];
        let (lv, suffix) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..])
                .unwrap();
        assert!(suffix.is_empty());
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0u8; 8];
        let (prefix, lv) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
        assert!(prefix.is_empty());
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0xFFu8; 8];
        let (prefix, lv) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..])
                .unwrap();
        assert!(prefix.is_empty());
        test_new_helper_unaligned(lv);
    }
    // Slice `[u8]` views over a 16-byte buffer.
    let mut buf = [0u8; 16];
    test_new_helper_slice_unaligned(
        LayoutVerified::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(),
        16,
    );
    buf = [0xFFu8; 16];
    test_new_helper_slice_unaligned(
        LayoutVerified::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(),
        16,
    );
    // 8-byte prefix/suffix views: only the covered half may be zeroed.
    {
        buf = [0u8; 16];
        let (lv, suffix) =
            LayoutVerified::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8)
                .unwrap();
        assert_eq!(suffix, [0; 8]);
        test_new_helper_slice_unaligned(lv, 8);
    }
    {
        buf = [0xFFu8; 16];
        let (lv, suffix) =
            LayoutVerified::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8)
                .unwrap();
        assert_eq!(suffix, [0xFF; 8]);
        test_new_helper_slice_unaligned(lv, 8);
    }
    {
        buf = [0u8; 16];
        let (prefix, lv) =
            LayoutVerified::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8)
                .unwrap();
        assert_eq!(prefix, [0; 8]);
        test_new_helper_slice_unaligned(lv, 8);
    }
    {
        buf = [0xFFu8; 16];
        let (prefix, lv) =
            LayoutVerified::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8)
                .unwrap();
        assert_eq!(prefix, [0xFF; 8]);
        test_new_helper_slice_unaligned(lv, 8);
    }
}
// Prefix/suffix constructors on a buffer LARGER than the target type: the
// leftover half must be returned intact (and, for `_zeroed`, untouched).
#[test]
fn test_new_oversized() {
    let mut buf = Align::<[u8; 16], AU64>::default();
    {
        // An 8-byte `AU64` from a 16-byte buffer leaves an 8-byte suffix.
        let (lv, suffix) = LayoutVerified::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
        assert_eq!(suffix.len(), 8);
        test_new_helper(lv);
    }
    {
        buf.t = [0xFFu8; 16];
        // The suffix must NOT be zeroed by the `_zeroed` constructor.
        let (lv, suffix) =
            LayoutVerified::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
        assert_eq!(suffix, &[0xFFu8; 8]);
        test_new_helper(lv);
    }
    {
        buf.set_default();
        let (prefix, lv) = LayoutVerified::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
        assert_eq!(prefix.len(), 8);
        test_new_helper(lv);
    }
    {
        buf.t = [0xFFu8; 16];
        let (prefix, lv) =
            LayoutVerified::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
        assert_eq!(prefix, &[0xFFu8; 8]);
        test_new_helper(lv);
    }
}
// Mirror of `test_new_oversized` for the `_unaligned` constructors.
#[test]
fn test_new_unaligned_oversized() {
    let mut buf = [0u8; 16];
    {
        let (lv, suffix) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
        assert_eq!(suffix.len(), 8);
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0xFFu8; 16];
        // The leftover suffix must survive the `_zeroed` constructor.
        let (lv, suffix) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..])
                .unwrap();
        assert_eq!(suffix, &[0xFF; 8]);
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0u8; 16];
        let (prefix, lv) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
        assert_eq!(prefix.len(), 8);
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0xFFu8; 16];
        let (prefix, lv) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..])
                .unwrap();
        assert_eq!(prefix, &[0xFF; 8]);
        test_new_helper_unaligned(lv);
    }
}
// Every constructor must return `None` on a bad buffer: wrong size,
// misalignment, or a requested slice length whose byte size overflows.
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_new_error() {
    // 16 bytes for an 8-byte type: exact-size constructors fail.
    let mut buf = Align::<[u8; 16], AU64>::default();
    assert!(LayoutVerified::<_, AU64>::new(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
    // 4 bytes for an 8-byte type: everything fails, including prefix/suffix.
    let mut buf = Align::<[u8; 4], AU64>::default();
    assert!(LayoutVerified::<_, AU64>::new(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_from_prefix(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.t[..])
        .is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.t[..])
        .is_none());
    // 12 bytes is not a multiple of 8: whole-buffer slice constructors fail.
    let mut buf = Align::<[u8; 12], AU64>::default();
    assert!(LayoutVerified::<_, [AU64]>::new_slice(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).is_none());
    assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned(&buf.t[..]).is_none());
    assert!(
        LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.t[..]).is_none()
    );
    // Requesting 2 elements (16 bytes) from 12 bytes fails.
    let mut buf = Align::<[u8; 12], AU64>::default();
    assert!(LayoutVerified::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], 2).is_none());
    assert!(
        LayoutVerified::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 2).is_none()
    );
    assert!(LayoutVerified::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], 2).is_none());
    assert!(
        LayoutVerified::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 2).is_none()
    );
    assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], 2)
        .is_none());
    assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
        &mut buf.t[..],
        2
    )
    .is_none());
    assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], 2)
        .is_none());
    assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
        &mut buf.t[..],
        2
    )
    .is_none());
    // Misalignment: skipping the first byte of an aligned buffer makes the
    // remainder unaligned for `AU64`, so the aligned constructors fail. The
    // trailing suffix checks use `[..]` (still 13 bytes, aligned start) —
    // there the failure is the 13-byte/8-byte size mismatch.
    let mut buf = Align::<[u8; 13], AU64>::default();
    assert!(LayoutVerified::<_, AU64>::new(&buf.t[1..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_zeroed(&mut buf.t[1..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_from_prefix(&buf.t[1..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[1..]).is_none());
    assert!(LayoutVerified::<_, [AU64]>::new_slice(&buf.t[1..]).is_none());
    assert!(LayoutVerified::<_, [AU64]>::new_slice_zeroed(&mut buf.t[1..]).is_none());
    assert!(LayoutVerified::<_, [AU64]>::new_slice_from_prefix(&buf.t[1..], 1).is_none());
    assert!(
        LayoutVerified::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[1..], 1).is_none()
    );
    assert!(LayoutVerified::<_, [AU64]>::new_slice_from_suffix(&buf.t[1..], 1).is_none());
    assert!(
        LayoutVerified::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[1..], 1).is_none()
    );
    assert!(LayoutVerified::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
    assert!(LayoutVerified::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
    // An element count whose byte size overflows `usize` must be rejected,
    // not wrapped.
    let mut buf = Align::<[u8; 16], AU64>::default();
    let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1;
    assert!(LayoutVerified::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], unreasonable_len)
        .is_none());
    assert!(LayoutVerified::<_, [AU64]>::new_slice_from_prefix_zeroed(
        &mut buf.t[..],
        unreasonable_len
    )
    .is_none());
    assert!(LayoutVerified::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], unreasonable_len)
        .is_none());
    assert!(LayoutVerified::<_, [AU64]>::new_slice_from_suffix_zeroed(
        &mut buf.t[..],
        unreasonable_len
    )
    .is_none());
    assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(
        &buf.t[..],
        unreasonable_len
    )
    .is_none());
    assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
        &mut buf.t[..],
        unreasonable_len
    )
    .is_none());
    assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(
        &buf.t[..],
        unreasonable_len
    )
    .is_none());
    assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
        &mut buf.t[..],
        unreasonable_len
    )
    .is_none());
}
// Every slice constructor is expected to panic when the element type is
// zero-sized, since an element count cannot be derived from a byte length.
mod test_zst_panics {
    // Generates one `#[should_panic]` test per constructor. The second
    // argument is the constructor name the panic message is expected to
    // mention — the `_zeroed`/`_from_prefix`/`_from_suffix` variants report
    // their base constructor's name.
    macro_rules! zst_test {
        ($name:ident($($tt:tt)*), $constructor_in_panic_msg:tt) => {
            #[test]
            #[should_panic = concat!("LayoutVerified::", $constructor_in_panic_msg, " called on a zero-sized type")]
            fn $name() {
                let mut buffer = [0u8];
                let lv = $crate::LayoutVerified::<_, [()]>::$name(&mut buffer[..], $($tt)*);
                unreachable!("should have panicked, got {:?}", lv);
            }
        }
    }
    zst_test!(new_slice(), "new_slice");
    zst_test!(new_slice_zeroed(), "new_slice");
    zst_test!(new_slice_from_prefix(1), "new_slice");
    zst_test!(new_slice_from_prefix_zeroed(1), "new_slice");
    zst_test!(new_slice_from_suffix(1), "new_slice");
    zst_test!(new_slice_from_suffix_zeroed(1), "new_slice");
    zst_test!(new_slice_unaligned(), "new_slice_unaligned");
    zst_test!(new_slice_unaligned_zeroed(), "new_slice_unaligned");
    zst_test!(new_slice_unaligned_from_prefix(1), "new_slice_unaligned");
    zst_test!(new_slice_unaligned_from_prefix_zeroed(1), "new_slice_unaligned");
    zst_test!(new_slice_unaligned_from_suffix(1), "new_slice_unaligned");
    zst_test!(new_slice_unaligned_from_suffix_zeroed(1), "new_slice_unaligned");
}
#[test]
fn test_as_bytes_methods() {
    /// Test a value, its `as_bytes` representation, and the expected value
    /// after XOR-flipping its first byte.
    ///
    /// `N` is the byte size of `t`; it parameterizes the too-small/too-large
    /// buffer checks below.
    fn test<const N: usize, T: FromBytes + AsBytes + Debug + Eq + ?Sized>(
        t: &mut T,
        bytes: &[u8],
        post_mutation: &T,
    ) {
        assert_eq!(t.as_bytes(), bytes);
        // Flip the first byte via `as_bytes_mut`, check, then flip it back.
        t.as_bytes_mut()[0] ^= 0xFF;
        assert_eq!(t, post_mutation);
        t.as_bytes_mut()[0] ^= 0xFF;
        // `write_to` requires an exactly-sized buffer: both smaller and
        // larger buffers are rejected.
        assert_eq!(t.write_to(&mut vec![0; N - 1][..]), None);
        assert_eq!(t.write_to(&mut vec![0; N + 1][..]), None);
        let mut bytes = [0; N];
        assert_eq!(t.write_to(&mut bytes[..]), Some(()));
        assert_eq!(bytes, t.as_bytes());
        // `write_to_prefix` needs at least `N` bytes and must leave any
        // trailing bytes untouched.
        assert_eq!(t.write_to_prefix(&mut vec![0; N - 1][..]), None);
        let mut bytes = [0; N];
        assert_eq!(t.write_to_prefix(&mut bytes[..]), Some(()));
        assert_eq!(bytes, t.as_bytes());
        let mut too_many_bytes = vec![0; N + 1];
        too_many_bytes[N] = 123;
        assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Some(()));
        assert_eq!(&too_many_bytes[..N], t.as_bytes());
        assert_eq!(too_many_bytes[N], 123);
        // `write_to_suffix` mirrors the prefix behavior at the other end.
        assert_eq!(t.write_to_suffix(&mut vec![0; N - 1][..]), None);
        let mut bytes = [0; N];
        assert_eq!(t.write_to_suffix(&mut bytes[..]), Some(()));
        assert_eq!(bytes, t.as_bytes());
        let mut too_many_bytes = vec![0; N + 1];
        too_many_bytes[0] = 123;
        assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Some(()));
        assert_eq!(&too_many_bytes[1..], t.as_bytes());
        assert_eq!(too_many_bytes[0], 123);
    }
    #[derive(Debug, Eq, PartialEq, FromBytes, AsBytes)]
    #[repr(C)]
    struct Foo {
        a: u32,
        b: Wrapping<u32>,
        c: Option<NonZeroU32>,
    }
    // Expected byte image of `Foo { a: 1, b: 2, c: None }`, per endianness.
    let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
        vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
    } else {
        vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
    };
    // Flipping the first byte flips a different part of `a` depending on
    // endianness.
    let post_mutation_expected_a =
        if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
    test::<12, _>(
        &mut Foo { a: 1, b: Wrapping(2), c: None },
        expected_bytes.as_bytes(),
        &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
    );
    // Also exercise the unsized (`[u8]`-backed) code path.
    test::<3, _>(
        Unsized::from_mut_slice(&mut [1, 2, 3]),
        &[1, 2, 3],
        Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
    );
}
#[test]
fn test_array() {
    #[derive(FromBytes, AsBytes)]
    #[repr(C)]
    struct Foo {
        a: [u16; 33],
    }
    // 33 `u16`s of 0xFFFF serialize to 66 bytes of 0xFF regardless of
    // endianness.
    let value = Foo { a: [0xFFFF; 33] };
    let want = [0xFFu8; 66];
    assert_eq!(value.as_bytes(), &want[..]);
}
#[test]
fn test_display_debug() {
    // Sized view: `Display` delegates to the inner value, `Debug` wraps it.
    let buf = Align::<[u8; 8], u64>::default();
    let verified = LayoutVerified::<_, u64>::new(&buf.t[..]).unwrap();
    assert_eq!(verified.to_string(), "0");
    assert_eq!(format!("{:?}", verified), "LayoutVerified(0)");
    // Slice view: `Debug` formats the elements as a list.
    let buf = Align::<[u8; 8], u64>::default();
    let verified = LayoutVerified::<_, [u64]>::new_slice(&buf.t[..]).unwrap();
    assert_eq!(format!("{:?}", verified), "LayoutVerified([0])");
}
#[test]
fn test_eq() {
    // Two views over equal byte buffers compare equal.
    let lhs_buf = 0_u64;
    let rhs_buf = 0_u64;
    let lhs = LayoutVerified::<_, u64>::new(lhs_buf.as_bytes()).unwrap();
    let rhs = LayoutVerified::<_, u64>::new(rhs_buf.as_bytes()).unwrap();
    assert_eq!(lhs, rhs);
}
#[test]
fn test_ne() {
    // Views over differing byte buffers compare unequal.
    let lhs_buf = 0_u64;
    let rhs_buf = 1_u64;
    let lhs = LayoutVerified::<_, u64>::new(lhs_buf.as_bytes()).unwrap();
    let rhs = LayoutVerified::<_, u64>::new(rhs_buf.as_bytes()).unwrap();
    assert_ne!(lhs, rhs);
}
#[test]
fn test_ord() {
    // Ordering delegates to the underlying typed values.
    let smaller_buf = 0_u64;
    let larger_buf = 1_u64;
    let smaller = LayoutVerified::<_, u64>::new(smaller_buf.as_bytes()).unwrap();
    let larger = LayoutVerified::<_, u64>::new(larger_buf.as_bytes()).unwrap();
    assert!(smaller < larger);
}
#[test]
fn test_new_zeroed() {
    // `new_zeroed` produces the all-zeroes value, for sized and zero-sized
    // types alike.
    let zero: u64 = u64::new_zeroed();
    assert_eq!(zero, 0);
    #[allow(clippy::unit_cmp)]
    {
        assert_eq!(<()>::new_zeroed(), ());
    }
}
// The derives must work on generic structs whose `repr(transparent)` or
// `repr(packed)` layout depends on the type parameters.
#[test]
fn test_transparent_packed_generic_struct() {
    #[derive(AsBytes, FromBytes, Unaligned)]
    #[repr(transparent)]
    struct Foo<T> {
        _t: T,
        _phantom: PhantomData<()>,
    }
    // Each trait holds exactly when the wrapped type provides it.
    assert_impl_all!(Foo<u32>: FromBytes);
    assert_impl_all!(Foo<f32>: AsBytes);
    assert_impl_all!(Foo<u8>: Unaligned);
    #[derive(AsBytes, FromBytes, Unaligned)]
    #[repr(packed)]
    struct Bar<T, U> {
        _t: T,
        _u: U,
    }
    // `repr(packed)` removes padding, so even an `AU64` field is fine.
    assert_impl_all!(Bar<u8, AU64>: FromBytes, AsBytes, Unaligned);
}
// Compile-time matrix of which primitive/stdlib types implement (and, just as
// importantly, do NOT implement) each zerocopy trait.
#[test]
fn test_impls() {
    // `assert_impls!(T: A, B, !C)` asserts positive impls `A`, `B` and the
    // absence of impl `C`, via static_assertions.
    macro_rules! assert_impls {
        ($ty:ty: $trait:ident) => {
            #[allow(dead_code)]
            const _: () = { static_assertions::assert_impl_all!($ty: $trait); };
        };
        ($ty:ty: !$trait:ident) => {
            #[allow(dead_code)]
            const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); };
        };
        ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => {
            $(
                assert_impls!($ty: $trait);
            )*
            $(
                assert_impls!($ty: !$negative_trait);
            )*
        };
    }
    // Primitives: all are FromBytes + AsBytes; only single-byte types are
    // Unaligned.
    assert_impls!((): FromBytes, AsBytes, Unaligned);
    assert_impls!(u8: FromBytes, AsBytes, Unaligned);
    assert_impls!(i8: FromBytes, AsBytes, Unaligned);
    assert_impls!(u16: FromBytes, AsBytes, !Unaligned);
    assert_impls!(i16: FromBytes, AsBytes, !Unaligned);
    assert_impls!(u32: FromBytes, AsBytes, !Unaligned);
    assert_impls!(i32: FromBytes, AsBytes, !Unaligned);
    assert_impls!(u64: FromBytes, AsBytes, !Unaligned);
    assert_impls!(i64: FromBytes, AsBytes, !Unaligned);
    assert_impls!(u128: FromBytes, AsBytes, !Unaligned);
    assert_impls!(i128: FromBytes, AsBytes, !Unaligned);
    assert_impls!(usize: FromBytes, AsBytes, !Unaligned);
    assert_impls!(isize: FromBytes, AsBytes, !Unaligned);
    assert_impls!(f32: FromBytes, AsBytes, !Unaligned);
    assert_impls!(f64: FromBytes, AsBytes, !Unaligned);
    // Types with invalid bit patterns are not FromBytes.
    assert_impls!(bool: AsBytes, Unaligned, !FromBytes);
    assert_impls!(char: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(str: AsBytes, Unaligned, !FromBytes);
    // `NonZero*`: zero is invalid, so not FromBytes...
    assert_impls!(NonZeroU8: AsBytes, Unaligned, !FromBytes);
    assert_impls!(NonZeroI8: AsBytes, Unaligned, !FromBytes);
    assert_impls!(NonZeroU16: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(NonZeroI16: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(NonZeroU32: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(NonZeroI32: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(NonZeroU64: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(NonZeroI64: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(NonZeroU128: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(NonZeroI128: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(NonZeroUsize: AsBytes, !FromBytes, !Unaligned);
    assert_impls!(NonZeroIsize: AsBytes, !FromBytes, !Unaligned);
    // ...but `Option<NonZero*>` uses the zero niche, so every bit pattern is
    // valid and FromBytes holds.
    assert_impls!(Option<NonZeroU8>: FromBytes, AsBytes, Unaligned);
    assert_impls!(Option<NonZeroI8>: FromBytes, AsBytes, Unaligned);
    assert_impls!(Option<NonZeroU16>: FromBytes, AsBytes, !Unaligned);
    assert_impls!(Option<NonZeroI16>: FromBytes, AsBytes, !Unaligned);
    assert_impls!(Option<NonZeroU32>: FromBytes, AsBytes, !Unaligned);
    assert_impls!(Option<NonZeroI32>: FromBytes, AsBytes, !Unaligned);
    assert_impls!(Option<NonZeroU64>: FromBytes, AsBytes, !Unaligned);
    assert_impls!(Option<NonZeroI64>: FromBytes, AsBytes, !Unaligned);
    assert_impls!(Option<NonZeroU128>: FromBytes, AsBytes, !Unaligned);
    assert_impls!(Option<NonZeroI128>: FromBytes, AsBytes, !Unaligned);
    assert_impls!(Option<NonZeroUsize>: FromBytes, AsBytes, !Unaligned);
    assert_impls!(Option<NonZeroIsize>: FromBytes, AsBytes, !Unaligned);
    // Wrappers: `PhantomData` always qualifies; `ManuallyDrop`/`Wrapping`/
    // `Unalign` forward their inner type's impls; `MaybeUninit` is FromBytes
    // (any bytes OK) but never AsBytes (may be uninitialized).
    struct NotZerocopy;
    assert_impls!(PhantomData<NotZerocopy>: FromBytes, AsBytes, Unaligned);
    assert_impls!(PhantomData<[u8]>: FromBytes, AsBytes, Unaligned);
    assert_impls!(ManuallyDrop<u8>: FromBytes, AsBytes, Unaligned);
    assert_impls!(ManuallyDrop<[u8]>: FromBytes, AsBytes, Unaligned);
    assert_impls!(ManuallyDrop<NotZerocopy>: !FromBytes, !AsBytes, !Unaligned);
    assert_impls!(ManuallyDrop<[NotZerocopy]>: !FromBytes, !AsBytes, !Unaligned);
    assert_impls!(MaybeUninit<u8>: FromBytes, Unaligned, !AsBytes);
    assert_impls!(MaybeUninit<NotZerocopy>: FromBytes, !AsBytes, !Unaligned);
    assert_impls!(Wrapping<u8>: FromBytes, AsBytes, Unaligned);
    assert_impls!(Wrapping<NotZerocopy>: !FromBytes, !AsBytes, !Unaligned);
    assert_impls!(Unalign<u8>: FromBytes, AsBytes, Unaligned);
    assert_impls!(Unalign<NotZerocopy>: Unaligned, !FromBytes, !AsBytes);
    // Slices and arrays forward their element type's impls.
    assert_impls!([u8]: FromBytes, AsBytes, Unaligned);
    assert_impls!([NotZerocopy]: !FromBytes, !AsBytes, !Unaligned);
    assert_impls!([u8; 0]: FromBytes, AsBytes, Unaligned);
    assert_impls!([NotZerocopy; 0]: !FromBytes, !AsBytes, !Unaligned);
    assert_impls!([u8; 1]: FromBytes, AsBytes, Unaligned);
    assert_impls!([NotZerocopy; 1]: !FromBytes, !AsBytes, !Unaligned);
}
}