use core::{
cell::{Cell, UnsafeCell},
mem::MaybeUninit as CoreMaybeUninit,
ptr::NonNull,
};
use super::*;
use crate::pointer::cast::{CastSizedExact, CastUnsized};
// SAFETY: `()` is a zero-sized type: it occupies no bytes, so it trivially
// contains no `UnsafeCell`s (`Immutable`), its (empty) byte representation is
// always valid and fully initialized (`TryFromBytes`, `FromZeros`,
// `FromBytes`, `IntoBytes`), and its alignment is 1 (`Unaligned`, checked
// below by `assert_unaligned!`).
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!((): Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
assert_unaligned!(());
};
// SAFETY: Per the Rust Reference, the primitive integer and floating-point
// types have no interior mutability and every bit pattern of the appropriate
// size is a valid instance, so all of the from-bytes/into-bytes traits hold.
// Only the 1-byte types (`u8`, `i8`) additionally have alignment 1 and are
// therefore `Unaligned` (checked via `assert_unaligned!`).
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!(u8: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
unsafe_impl!(i8: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
assert_unaligned!(u8, i8);
unsafe_impl!(u16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(i16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(u32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(i32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(u64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(i64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(u128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(i128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(usize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(isize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(f32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(f64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
// `f16` and `f128` are unstable; only implemented behind the opt-in
// `float-nightly` feature.
#[cfg(feature = "float-nightly")]
unsafe_impl!(#[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))] f16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
#[cfg(feature = "float-nightly")]
unsafe_impl!(#[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))] f128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes);
};
// SAFETY: `bool` is a single byte with no interior mutability; all-zeros
// (`false`) is a valid instance, and every instance's byte is initialized.
// Size-1 types have alignment 1, hence `Unaligned` (checked below).
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe { unsafe_impl!(bool: Immutable, FromZeros, IntoBytes, Unaligned) };
assert_unaligned!(bool);
// SAFETY: Per the Rust Reference, the only valid `bool` bit patterns are
// 0x00 and 0x01. The validity closure reads the candidate byte as a `u8`
// (same size, always valid) and accepts exactly the values `< 2`.
const _: () = unsafe {
unsafe_impl!(=> TryFromBytes for bool; |byte| {
let byte = byte.transmute_with::<u8, invariant::Valid, CastSizedExact, BecauseImmutable>();
*byte.unaligned_as_ref() < 2
})
};
// SAFETY: `char` has no interior mutability, `'\0'` (all-zeros) is a valid
// instance, and every instance's bytes are initialized.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe { unsafe_impl!(char: Immutable, FromZeros, IntoBytes) };
// SAFETY: A `char` is valid exactly when its `u32` representation is a valid
// Unicode scalar value. The closure reads the candidate's bytes as an
// unaligned `u32` (which is valid for every bit pattern) and delegates the
// scalar-value check to `char::from_u32`.
const _: () = unsafe {
unsafe_impl!(=> TryFromBytes for char; |c| {
let c = c.transmute_with::<Unalign<u32>, invariant::Valid, CastSizedExact, BecauseImmutable>();
let c = c.read().into_inner();
char::from_u32(c).is_some()
});
};
// SAFETY: `str` has the same representation as `[u8]`: no interior
// mutability, the empty/all-zeros byte sequence is valid UTF-8, all bytes
// are initialized, and the element type `u8` has alignment 1 (`Unaligned`).
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe { unsafe_impl!(str: Immutable, FromZeros, IntoBytes, Unaligned) };
// SAFETY: A byte sequence is a valid `str` exactly when it is valid UTF-8.
// The closure views the candidate as `[u8]` (always valid) and delegates the
// UTF-8 check to `core::str::from_utf8`.
const _: () = unsafe {
unsafe_impl!(=> TryFromBytes for str; |c| {
let c = c.transmute_with::<[u8], invariant::Valid, CastUnsized, BecauseImmutable>();
let c = c.unaligned_as_ref();
core::str::from_utf8(c).is_ok()
})
};
// Emits a `TryFromBytes` impl for each `NonZero*` type: the candidate's
// bytes are read as the unaligned primitive (every bit pattern of which is
// valid), and the value is accepted iff `NonZero*::new` succeeds — i.e. iff
// it is non-zero, which is exactly the type's validity constraint.
macro_rules! unsafe_impl_try_from_bytes_for_nonzero {
($($nonzero:ident[$prim:ty]),*) => {
$(
unsafe_impl!(=> TryFromBytes for $nonzero; |n| {
let n = n.transmute_with::<Unalign<$prim>, invariant::Valid, CastSizedExact, BecauseImmutable>();
$nonzero::new(n.read().into_inner()).is_some()
});
)*
}
}
// SAFETY: The `NonZero*` types have no interior mutability (`Immutable`) and
// all of their bytes are always initialized (`IntoBytes`). They are *not*
// `FromZeros`/`FromBytes`, since the all-zeros pattern is invalid. The
// 1-byte variants additionally have alignment 1 (`Unaligned`).
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!(NonZeroU8: Immutable, IntoBytes, Unaligned);
unsafe_impl!(NonZeroI8: Immutable, IntoBytes, Unaligned);
assert_unaligned!(NonZeroU8, NonZeroI8);
unsafe_impl!(NonZeroU16: Immutable, IntoBytes);
unsafe_impl!(NonZeroI16: Immutable, IntoBytes);
unsafe_impl!(NonZeroU32: Immutable, IntoBytes);
unsafe_impl!(NonZeroI32: Immutable, IntoBytes);
unsafe_impl!(NonZeroU64: Immutable, IntoBytes);
unsafe_impl!(NonZeroI64: Immutable, IntoBytes);
unsafe_impl!(NonZeroU128: Immutable, IntoBytes);
unsafe_impl!(NonZeroI128: Immutable, IntoBytes);
unsafe_impl!(NonZeroUsize: Immutable, IntoBytes);
unsafe_impl!(NonZeroIsize: Immutable, IntoBytes);
unsafe_impl_try_from_bytes_for_nonzero!(
NonZeroU8[u8],
NonZeroI8[i8],
NonZeroU16[u16],
NonZeroI16[i16],
NonZeroU32[u32],
NonZeroI32[i32],
NonZeroU64[u64],
NonZeroI64[i64],
NonZeroU128[u128],
NonZeroI128[i128],
NonZeroUsize[usize],
NonZeroIsize[isize]
);
};
// SAFETY: `Option<NonZero*>` relies on the niche layout guaranteed by the
// standard library: it has the same size as the corresponding primitive,
// with all-zeros representing `None`. Every bit pattern is therefore valid
// (`TryFromBytes`, `FromZeros`, `FromBytes`), all bytes are always
// initialized (`IntoBytes`), and the 1-byte variants have alignment 1
// (`Unaligned`). `Immutable` is notably absent here — these impls assert
// only the byte-validity traits.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!(Option<NonZeroU8>: TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
unsafe_impl!(Option<NonZeroI8>: TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
assert_unaligned!(Option<NonZeroU8>, Option<NonZeroI8>);
unsafe_impl!(Option<NonZeroU16>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(Option<NonZeroI16>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(Option<NonZeroU32>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(Option<NonZeroI32>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(Option<NonZeroU64>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(Option<NonZeroI64>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(Option<NonZeroU128>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(Option<NonZeroI128>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(Option<NonZeroUsize>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
unsafe_impl!(Option<NonZeroIsize>: TryFromBytes, FromZeros, FromBytes, IntoBytes);
};
// SAFETY: `Immutable` concerns only the bytes of the `Box<T>` itself (a
// pointer), not the pointed-to `T` — which is why no `T: Immutable` bound is
// required. NOTE(review): this presumes the `Box` pointer representation
// contains no `UnsafeCell`s; confirm against the trait's safety contract.
#[cfg(feature = "alloc")]
const _: () = unsafe {
unsafe_impl!(
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
T: Sized => Immutable for Box<T>
)
};
// SAFETY: All of these `Option<...>` types wrap a non-null pointer type
// (`Box`, `&T`, `&mut T`, `NonNull`, and the various non-null fn-pointer
// shapes produced by `opt_fn!`/`opt_unsafe_fn!`/`opt_extern_c_fn!`/
// `opt_unsafe_extern_c_fn!`). Each relies on the guaranteed null-pointer
// optimization: `None` is represented as all-zeros, so `FromZeros` holds,
// and the `TryFromBytes` validity closure accepts *only* the all-zeros
// pattern (`pointer::is_zeroed`) — i.e. only `None` — since any non-zero
// pattern would assert the existence of a (possibly invalid) pointer.
// `unsafe_impl_for_power_set!` expands each fn-pointer impl for every arity
// from zero through the listed type parameters.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
#[cfg(feature = "alloc")]
unsafe_impl!(
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
T => TryFromBytes for Option<Box<T>>; |c| pointer::is_zeroed(c)
);
#[cfg(feature = "alloc")]
unsafe_impl!(
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
T => FromZeros for Option<Box<T>>
);
unsafe_impl!(
T => TryFromBytes for Option<&'_ T>; |c| pointer::is_zeroed(c)
);
unsafe_impl!(T => FromZeros for Option<&'_ T>);
unsafe_impl!(
T => TryFromBytes for Option<&'_ mut T>; |c| pointer::is_zeroed(c)
);
unsafe_impl!(T => FromZeros for Option<&'_ mut T>);
unsafe_impl!(
T => TryFromBytes for Option<NonNull<T>>; |c| pointer::is_zeroed(c)
);
unsafe_impl!(T => FromZeros for Option<NonNull<T>>);
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_fn!(...));
unsafe_impl_for_power_set!(
A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_fn!(...);
|c| pointer::is_zeroed(c)
);
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_unsafe_fn!(...));
unsafe_impl_for_power_set!(
A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_unsafe_fn!(...);
|c| pointer::is_zeroed(c)
);
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_extern_c_fn!(...));
unsafe_impl_for_power_set!(
A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_extern_c_fn!(...);
|c| pointer::is_zeroed(c)
);
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_unsafe_extern_c_fn!(...));
unsafe_impl_for_power_set!(
A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_unsafe_extern_c_fn!(...);
|c| pointer::is_zeroed(c)
);
};
// SAFETY: Function pointers (all arities/ABIs generated by the power-set
// macro) are plain code addresses — their byte representation contains no
// `UnsafeCell`s, so they are `Immutable`.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_fn!(...));
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_unsafe_fn!(...));
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_extern_c_fn!(...));
unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_unsafe_extern_c_fn!(...));
};
// Impls for the `core::sync::atomic` types. Gated on both a version probe
// (`target_has_atomic` was stabilized in Rust 1.60.0) and on the target
// actually providing at least one atomic width; each submodule below is
// further gated on its own width.
#[cfg(all(
not(no_zerocopy_target_has_atomics_1_60_0),
any(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64",
target_has_atomic = "ptr"
)
))]
#[cfg_attr(doc_cfg, doc(cfg(rust = "1.60.0")))]
mod atomics {
use super::*;
// For each `$atomics [$primitives]` pair, emits `KnownLayout` plus the
// four byte-validity traits, each derived by transmuting to/from the
// primitive (the std docs guarantee each atomic has the same size and
// bit validity as its underlying integer type).
macro_rules! impl_traits_for_atomics {
($($atomics:tt [$primitives:ty]),* $(,)?) => {
$(
impl_known_layout!($atomics);
impl_for_transmute_from!(=> FromZeros for $atomics [$primitives]);
impl_for_transmute_from!(=> FromBytes for $atomics [$primitives]);
impl_for_transmute_from!(=> TryFromBytes for $atomics [$primitives]);
impl_for_transmute_from!(=> IntoBytes for $atomics [$primitives]);
)*
};
}
// Asserts bidirectional `Valid -> Valid` transmutability between each
// atomic and its primitive (and between the atomic and
// `UnsafeCell<$prim>`), plus a `SizeEq` relation for the `ReadOnly`
// wrappers. The `__unsafe()` call forces the expansion site to be inside
// an `unsafe` block, making the caller acknowledge these claims.
macro_rules! unsafe_impl_transmute_from_for_atomic {
($($($tyvar:ident)? => $atomic:ty [$prim:ty]),*) => {{
crate::util::macros::__unsafe();
use crate::pointer::{SizeEq, TransmuteFrom, invariant::Valid};
$(
unsafe impl<$($tyvar)?> TransmuteFrom<$atomic, Valid, Valid> for $prim {}
unsafe impl<$($tyvar)?> TransmuteFrom<$prim, Valid, Valid> for $atomic {}
impl<$($tyvar)?> SizeEq<ReadOnly<$atomic>> for ReadOnly<$prim> {
type CastFrom = $crate::pointer::cast::CastSizedExact;
}
unsafe impl<$($tyvar)?> TransmuteFrom<$atomic, Valid, Valid> for core::cell::UnsafeCell<$prim> {}
unsafe impl<$($tyvar)?> TransmuteFrom<core::cell::UnsafeCell<$prim>, Valid, Valid> for $atomic {}
)*
}};
}
#[cfg(target_has_atomic = "8")]
#[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "8")))]
mod atomic_8 {
use core::sync::atomic::{AtomicBool, AtomicI8, AtomicU8};
use super::*;
impl_traits_for_atomics!(AtomicU8[u8], AtomicI8[i8]);
// `AtomicBool` gets the same treatment by hand, minus `FromBytes`:
// like `bool`, only 0x00/0x01 are valid, so arbitrary bytes are not.
impl_known_layout!(AtomicBool);
impl_for_transmute_from!(=> FromZeros for AtomicBool [bool]);
impl_for_transmute_from!(=> TryFromBytes for AtomicBool [bool]);
impl_for_transmute_from!(=> IntoBytes for AtomicBool [bool]);
// SAFETY: These are 1-byte types, and size-1 types have alignment 1
// (checked by `assert_unaligned!`).
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!(AtomicBool: Unaligned);
unsafe_impl!(AtomicU8: Unaligned);
unsafe_impl!(AtomicI8: Unaligned);
assert_unaligned!(AtomicBool, AtomicU8, AtomicI8);
};
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl_transmute_from_for_atomic!(
=> AtomicU8 [u8],
=> AtomicI8 [i8],
=> AtomicBool [bool]
)
};
}
#[cfg(target_has_atomic = "16")]
#[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "16")))]
mod atomic_16 {
use core::sync::atomic::{AtomicI16, AtomicU16};
use super::*;
impl_traits_for_atomics!(AtomicU16[u16], AtomicI16[i16]);
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl_transmute_from_for_atomic!(=> AtomicU16 [u16], => AtomicI16 [i16])
};
}
#[cfg(target_has_atomic = "32")]
#[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "32")))]
mod atomic_32 {
use core::sync::atomic::{AtomicI32, AtomicU32};
use super::*;
impl_traits_for_atomics!(AtomicU32[u32], AtomicI32[i32]);
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl_transmute_from_for_atomic!(=> AtomicU32 [u32], => AtomicI32 [i32])
};
}
#[cfg(target_has_atomic = "64")]
#[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "64")))]
mod atomic_64 {
use core::sync::atomic::{AtomicI64, AtomicU64};
use super::*;
impl_traits_for_atomics!(AtomicU64[u64], AtomicI64[i64]);
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl_transmute_from_for_atomic!(=> AtomicU64 [u64], => AtomicI64 [i64])
};
}
#[cfg(target_has_atomic = "ptr")]
#[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "ptr")))]
mod atomic_ptr {
use core::sync::atomic::{AtomicIsize, AtomicPtr, AtomicUsize};
use super::*;
impl_traits_for_atomics!(AtomicUsize[usize], AtomicIsize[isize]);
// `AtomicPtr<T>` is treated like `*mut T`: only `TryFromBytes`
// (all-zeros only, via the `*mut T` impl) and `FromZeros` — not
// `FromBytes`/`IntoBytes`, since arbitrary bytes would forge a
// pointer and pointer bytes may carry provenance.
impl_known_layout!(T => AtomicPtr<T>);
impl_for_transmute_from!(T => TryFromBytes for AtomicPtr<T> [*mut T]);
impl_for_transmute_from!(T => FromZeros for AtomicPtr<T> [*mut T]);
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl_transmute_from_for_atomic!(=> AtomicUsize [usize], => AtomicIsize [isize])
};
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe { unsafe_impl_transmute_from_for_atomic!(T => AtomicPtr<T> [*mut T]) };
}
}
// SAFETY: `PhantomData<T>` is zero-sized regardless of `T`, so — like `()` —
// it vacuously satisfies every trait here with no bound on `T` at all: no
// bytes means no `UnsafeCell`s, no invalid bit patterns, no uninitialized
// bytes, and alignment 1.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!(T: ?Sized => Immutable for PhantomData<T>);
unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData<T>);
unsafe_impl!(T: ?Sized => FromZeros for PhantomData<T>);
unsafe_impl!(T: ?Sized => FromBytes for PhantomData<T>);
unsafe_impl!(T: ?Sized => IntoBytes for PhantomData<T>);
unsafe_impl!(T: ?Sized => Unaligned for PhantomData<T>);
assert_unaligned!(PhantomData<()>, PhantomData<u8>, PhantomData<u64>);
};
// `Wrapping<T>` is a transparent wrapper around `T`, so each trait is
// forwarded from `T` (with the matching `T: Trait` bound) by transmuting
// between the wrapper and its inner type.
impl_for_transmute_from!(T: TryFromBytes => TryFromBytes for Wrapping<T>[T]);
impl_for_transmute_from!(T: FromZeros => FromZeros for Wrapping<T>[T]);
impl_for_transmute_from!(T: FromBytes => FromBytes for Wrapping<T>[T]);
impl_for_transmute_from!(T: IntoBytes => IntoBytes for Wrapping<T>[T]);
assert_unaligned!(Wrapping<()>, Wrapping<u8>);
// SAFETY: `Wrapping<T>` has the same representation as `T`, so it contains
// `UnsafeCell`s (resp. has alignment > 1) only if `T` does.
const _: () = unsafe { unsafe_impl!(T: Immutable => Immutable for Wrapping<T>) };
const _: () = unsafe { unsafe_impl!(T: Unaligned => Unaligned for Wrapping<T>) };
// SAFETY: `MaybeUninit<T>` has *no* bit validity constraints — any byte
// sequence, including uninitialized bytes, is a valid `MaybeUninit<T>` for
// every `T`. Hence `TryFromBytes`/`FromZeros`/`FromBytes` hold with no bound
// on `T`. It is deliberately *not* `IntoBytes`: its bytes may be
// uninitialized.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!(T => TryFromBytes for CoreMaybeUninit<T>);
unsafe_impl!(T => FromZeros for CoreMaybeUninit<T>);
unsafe_impl!(T => FromBytes for CoreMaybeUninit<T>);
};
// SAFETY: `MaybeUninit<T>` has the same layout as `T`, so it contains
// `UnsafeCell`s (resp. has alignment > 1) only if `T` does.
const _: () = unsafe { unsafe_impl!(T: Immutable => Immutable for CoreMaybeUninit<T>) };
const _: () = unsafe { unsafe_impl!(T: Unaligned => Unaligned for CoreMaybeUninit<T>) };
assert_unaligned!(CoreMaybeUninit<()>, CoreMaybeUninit<u8>);
// `ManuallyDrop<T>` is a transparent wrapper around `T` (including unsized
// `T`), so every trait is simply forwarded from `T` under the matching
// bound.
const _: () = unsafe { unsafe_impl!(T: ?Sized + Immutable => Immutable for ManuallyDrop<T>) };
impl_for_transmute_from!(T: ?Sized + TryFromBytes => TryFromBytes for ManuallyDrop<T>[T]);
impl_for_transmute_from!(T: ?Sized + FromZeros => FromZeros for ManuallyDrop<T>[T]);
impl_for_transmute_from!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop<T>[T]);
impl_for_transmute_from!(T: ?Sized + IntoBytes => IntoBytes for ManuallyDrop<T>[T]);
const _: () = unsafe { unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop<T>) };
assert_unaligned!(ManuallyDrop<()>, ManuallyDrop<u8>);
// Field-projection support for `ManuallyDrop<T>`: models it as a
// single-variant struct with one field named `value` of type `T`, mirroring
// what `#[derive]` would emit.
const _: () = {
// Marker type standing in for the field name `value`; uninhabited since
// it is only ever used at the type level.
#[allow(
non_camel_case_types,
missing_copy_implementations,
missing_debug_implementations,
missing_docs
)]
pub enum value {}
// SAFETY-relevant claim: `ManuallyDrop` has no enum tag (it is a struct
// shape), so its tag type is `()`.
unsafe impl<T: ?Sized> HasTag for ManuallyDrop<T> {
#[inline]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized,
{
}
type Tag = ();
type ProjectToTag = crate::pointer::cast::CastToUnit;
}
unsafe impl<T: ?Sized>
HasField<value, { crate::STRUCT_VARIANT_ID }, { crate::ident_id!(value) }>
for ManuallyDrop<T>
{
#[inline]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized,
{
}
type Type = T;
// Projects a `ManuallyDrop<T>` pointer to its inner `T`.
// NOTE(review): the bare pointer cast relies on `ManuallyDrop<T>`
// being layout-compatible with `T` (it is `#[repr(transparent)]` per
// the std docs), so the field lives at offset 0.
#[inline(always)]
fn project(slf: PtrInner<'_, Self>) -> *mut T {
#[allow(clippy::as_conversions)]
return slf.as_ptr() as *mut T;
}
}
};
// `Cell<T>` and `UnsafeCell<T>` have the same byte layout as `T`, so the
// byte-validity traits are forwarded from `T`. Neither gets `Immutable`
// (both *are* interior mutability), and `UnsafeCell`'s `TryFromBytes` needs
// a hand-written impl (below) because the generic transmute path cannot be
// used for it.
impl_for_transmute_from!(T: ?Sized + TryFromBytes => TryFromBytes for Cell<T>[T]);
impl_for_transmute_from!(T: ?Sized + FromZeros => FromZeros for Cell<T>[T]);
impl_for_transmute_from!(T: ?Sized + FromBytes => FromBytes for Cell<T>[T]);
impl_for_transmute_from!(T: ?Sized + IntoBytes => IntoBytes for Cell<T>[T]);
const _: () = unsafe { unsafe_impl!(T: ?Sized + Unaligned => Unaligned for Cell<T>) };
impl_for_transmute_from!(T: ?Sized + FromZeros => FromZeros for UnsafeCell<T>[T]);
impl_for_transmute_from!(T: ?Sized + FromBytes => FromBytes for UnsafeCell<T>[T]);
impl_for_transmute_from!(T: ?Sized + IntoBytes => IntoBytes for UnsafeCell<T>[T]);
const _: () = unsafe { unsafe_impl!(T: ?Sized + Unaligned => Unaligned for UnsafeCell<T>) };
assert_unaligned!(UnsafeCell<()>, UnsafeCell<u8>);
// SAFETY: `UnsafeCell<T>` has the same bit validity as `T`, so a candidate
// is a valid `UnsafeCell<T>` exactly when its bytes are a valid `T`. The
// check is delegated to `T::is_bit_valid` by transmuting the candidate
// pointer to point at the inner `T`.
unsafe impl<T: TryFromBytes + ?Sized> TryFromBytes for UnsafeCell<T> {
#[allow(clippy::missing_inline_in_public_items)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized,
{
}
#[inline(always)]
fn is_bit_valid<A>(candidate: Maybe<'_, Self, A>) -> bool
where
A: invariant::Alignment,
{
// Generic over any alignment invariant `A`; the transmute preserves
// the candidate's aliasing/alignment invariants while retargeting it
// at `T`.
T::is_bit_valid(candidate.transmute::<_, _, BecauseImmutable>())
}
}
// SAFETY: `[T; N]` and `[T]` are sequences of contiguous `T`s with no extra
// bytes, so each trait holds exactly when it holds for `T`. For
// `TryFromBytes`, an array/slice is valid iff every element is valid:
// - the array impl casts `[T; N]` to a `ReadOnly<[T]>` slice and delegates
//   to the slice impl;
// - the slice impl checks each element with `T::is_bit_valid` via `iter()`.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!(const N: usize, T: Immutable => Immutable for [T; N]);
unsafe_impl!(const N: usize, T: TryFromBytes => TryFromBytes for [T; N]; |c| {
let c: Ptr<'_, [ReadOnly<T>; N], _> = c.cast::<_, crate::pointer::cast::CastSized, _>();
let c: Ptr<'_, [ReadOnly<T>], _> = c.as_slice();
let c: Ptr<'_, ReadOnly<[T]>, _> = c.cast::<_, crate::pointer::cast::CastUnsized, _>();
<[T] as TryFromBytes>::is_bit_valid(c)
});
unsafe_impl!(const N: usize, T: FromZeros => FromZeros for [T; N]);
unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]);
unsafe_impl!(const N: usize, T: IntoBytes => IntoBytes for [T; N]);
unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]);
assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]);
unsafe_impl!(T: Immutable => Immutable for [T]);
unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c| {
let c: Ptr<'_, [ReadOnly<T>], _> = c.cast::<_, crate::pointer::cast::CastUnsized, _>();
c.iter().all(<T as TryFromBytes>::is_bit_valid)
});
unsafe_impl!(T: FromZeros => FromZeros for [T]);
unsafe_impl!(T: FromBytes => FromBytes for [T]);
unsafe_impl!(T: IntoBytes => IntoBytes for [T]);
unsafe_impl!(T: Unaligned => Unaligned for [T]);
};
// SAFETY: Raw pointers carry no interior mutability in their own bytes
// (`Immutable`). The null pointer (all-zeros) is valid, so `FromZeros`
// holds; `TryFromBytes` accepts *only* the all-zeros pattern
// (`pointer::is_zeroed`) — a non-null bit pattern would forge a pointer
// whose provenance cannot be validated from bytes alone.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!(T: ?Sized => Immutable for *const T);
unsafe_impl!(T: ?Sized => Immutable for *mut T);
unsafe_impl!(T => TryFromBytes for *const T; |c| pointer::is_zeroed(c));
unsafe_impl!(T => FromZeros for *const T);
unsafe_impl!(T => TryFromBytes for *mut T; |c| pointer::is_zeroed(c));
unsafe_impl!(T => FromZeros for *mut T);
};
// SAFETY: `NonNull<T>` is a wrapper around a raw pointer, whose own bytes
// contain no `UnsafeCell`s.
const _: () = unsafe { unsafe_impl!(T: ?Sized => Immutable for NonNull<T>) };
// SAFETY: `Immutable` concerns only the reference's own bytes (an address),
// not the referent — hence no bound on `T`.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
unsafe_impl!(T: ?Sized => Immutable for &'_ T);
unsafe_impl!(T: ?Sized => Immutable for &'_ mut T);
};
// SAFETY: `Option<T>` stores either nothing or a `T` (plus a discriminant),
// so it contains `UnsafeCell`s only if `T` does.
const _: () = unsafe { unsafe_impl!(T: Immutable => Immutable for Option<T>) };
// Trait impls for tuples of arity 1 through 26 (type params `A`..`Z`).
mod tuples {
use super::*;
// `impl_tuple!` takes the full `Type Index` list and recurses via `@all`,
// peeling one `Type Index` pair per step so that every prefix tuple
// `(A,)`, `(A, B)`, … gets its own set of impls. For each prefix it also
// invokes `@variants`, which walks the indices to emit per-field
// `HasField`/`ProjectField` impls.
macro_rules! impl_tuple {
($($T:ident $I:tt),+ $(,)?) => {
// Marker that forces the expansion site to be inside an `unsafe`
// block — the caller must acknowledge the unsafe impls below.
crate::util::macros::__unsafe();
impl_tuple!(@all [] [$($T $I)+]);
};
(@all [$($head_T:ident $head_I:tt)*] [$next_T:ident $next_I:tt $($tail:tt)*]) => {
// A tuple satisfies each trait iff every component does; for
// `TryFromBytes`, validity is checked field-by-field by projecting
// the candidate pointer to each element and `&&`-chaining the
// per-field `is_bit_valid` results (short-circuiting on failure).
unsafe_impl!($($head_T: Immutable,)* $next_T: Immutable => Immutable for ($($head_T,)* $next_T,));
unsafe_impl!($($head_T: TryFromBytes,)* $next_T: TryFromBytes => TryFromBytes for ($($head_T,)* $next_T,); |c| {
let mut c = c;
$(TryFromBytes::is_bit_valid(into_inner!(c.reborrow().project::<_, { crate::STRUCT_VARIANT_ID }, { crate::ident_id!($head_I) }>())) &&)*
TryFromBytes::is_bit_valid(into_inner!(c.reborrow().project::<_, { crate::STRUCT_VARIANT_ID }, { crate::ident_id!($next_I) }>()))
});
unsafe_impl!($($head_T: FromZeros,)* $next_T: FromZeros => FromZeros for ($($head_T,)* $next_T,));
unsafe_impl!($($head_T: FromBytes,)* $next_T: FromBytes => FromBytes for ($($head_T,)* $next_T,));
// Tuples are struct-shaped (single variant), so the tag is `()`.
unsafe impl<$($head_T,)* $next_T> crate::HasTag for ($($head_T,)* $next_T,) {
#[inline]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized
{}
type Tag = ();
type ProjectToTag = crate::pointer::cast::CastToUnit;
}
impl_tuple!(@variants
[$($head_T $head_I)* $next_T $next_I]
[]
[$($head_T $head_I)* $next_T $next_I]
);
// Recurse with this prefix consumed.
impl_tuple!(@all [$($head_T $head_I)* $next_T $next_I] [$($tail)*]);
};
// Base case: no more `Type Index` pairs to peel.
(@all [$($head_T:ident $head_I:tt)*] []) => {};
// `@variants` walks the fields of one tuple arity, emitting a
// `HasField` impl (the projection itself) and three `ProjectField`
// impls (one per validity invariant: Uninit/Initialized/Valid) for the
// current index, then recursing to the next.
(@variants
// The full tuple definition in type–index pairs.
[$($AllT:ident $AllI:tt)+]
// Types before the current index.
[$($BeforeT:ident)*]
// The types and indices at and after the current index.
[$CurrT:ident $CurrI:tt $($AfterT:ident $AfterI:tt)*]
) => {
unsafe impl<$($AllT),+> crate::HasField<
(),
{ crate::STRUCT_VARIANT_ID },
{ crate::ident_id!($CurrI)}
> for ($($AllT,)+) {
#[inline]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized
{}
type Type = $CurrT;
#[inline(always)]
fn project(slf: crate::PtrInner<'_, Self>) -> *mut Self::Type {
let slf = slf.as_non_null().as_ptr();
// SAFETY: `addr_of_mut!` computes the field address without
// creating a reference or reading the (possibly invalid)
// tuple; the place expression is never dereferenced.
unsafe { core::ptr::addr_of_mut!((*slf).$CurrI) }
}
}
unsafe impl<Aliasing, Alignment, $($AllT),+> crate::ProjectField<
(),
(Aliasing, Alignment, crate::invariant::Uninit),
{ crate::STRUCT_VARIANT_ID },
{ crate::ident_id!($CurrI)}
> for ($($AllT,)+)
where
Aliasing: crate::invariant::Aliasing,
Alignment: crate::invariant::Alignment,
{
#[inline]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized
{}
type Invariants = (Aliasing, Alignment, crate::invariant::Uninit);
type Error = core::convert::Infallible;
}
unsafe impl<Aliasing, Alignment, $($AllT),+> crate::ProjectField<
(),
(Aliasing, Alignment, crate::invariant::Initialized),
{ crate::STRUCT_VARIANT_ID },
{ crate::ident_id!($CurrI)}
> for ($($AllT,)+)
where
Aliasing: crate::invariant::Aliasing,
Alignment: crate::invariant::Alignment,
{
#[inline]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized
{}
type Invariants = (Aliasing, Alignment, crate::invariant::Initialized);
type Error = core::convert::Infallible;
}
unsafe impl<Aliasing, Alignment, $($AllT),+> crate::ProjectField<
(),
(Aliasing, Alignment, crate::invariant::Valid),
{ crate::STRUCT_VARIANT_ID },
{ crate::ident_id!($CurrI)}
> for ($($AllT,)+)
where
Aliasing: crate::invariant::Aliasing,
Alignment: crate::invariant::Alignment,
{
#[inline]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized
{}
type Invariants = (Aliasing, Alignment, crate::invariant::Valid);
type Error = core::convert::Infallible;
}
impl_tuple!(@variants [$($AllT $AllI)+] [$($BeforeT)* $CurrT] [$($AfterT $AfterI)*]);
};
// Base case: all fields of this arity processed.
(@variants [$($AllT:ident $AllI:tt)+] [$($BeforeT:ident)*] []) => {};
}
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
impl_tuple! {
A 0,
B 1,
C 2,
D 3,
E 4,
F 5,
G 6,
H 7,
I 8,
J 9,
K 10,
L 11,
M 12,
N 13,
O 14,
P 15,
Q 16,
R 17,
S 18,
T 19,
U 20,
V 21,
W 22,
X 23,
Y 24,
Z 25,
};
};
}
// SIMD vector type impls, gated on the `simd` feature.
#[cfg(feature = "simd")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))]
mod simd {
// For one target architecture, emits a module containing
// `impl_known_layout!` plus the five byte-validity traits for each listed
// vector type. NOTE(review): this asserts that every bit pattern is valid
// for these SIMD types — confirm against the per-arch layout guarantees.
// `#[allow(unused_macros)]` covers targets where no invocation's `cfg`
// matches.
#[allow(unused_macros)] macro_rules! simd_arch_mod {
($(#[cfg $cfg:tt])* $(#[cfg_attr $cfg_attr:tt])? $arch:ident, $mod:ident, $($typ:ident),*) => {
$(#[cfg $cfg])*
#[cfg_attr(doc_cfg, doc(cfg $($cfg)*))]
$(#[cfg_attr $cfg_attr])?
mod $mod {
use core::arch::$arch::{$($typ),*};
use crate::*;
impl_known_layout!($($typ),*);
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
$( unsafe_impl!($typ: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); )*
};
}
};
}
#[rustfmt::skip]
const _: () = {
simd_arch_mod!(
#[cfg(target_arch = "x86")]
x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i
);
// AVX-512 types require Rust >= 1.89.0 (version-probe cfg).
#[cfg(not(no_zerocopy_simd_x86_avx12_1_89_0))]
simd_arch_mod!(
#[cfg(target_arch = "x86")]
#[cfg_attr(doc_cfg, doc(cfg(rust = "1.89.0")))]
x86, x86_nightly, __m512bh, __m512, __m512d, __m512i
);
simd_arch_mod!(
#[cfg(target_arch = "x86_64")]
x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i
);
#[cfg(not(no_zerocopy_simd_x86_avx12_1_89_0))]
simd_arch_mod!(
#[cfg(target_arch = "x86_64")]
#[cfg_attr(doc_cfg, doc(cfg(rust = "1.89.0")))]
x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i
);
simd_arch_mod!(
#[cfg(target_arch = "wasm32")]
wasm32, wasm32, v128
);
// PowerPC vector types are still nightly-only (`simd-nightly`).
simd_arch_mod!(
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
);
simd_arch_mod!(
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
);
// aarch64 NEON: stable since 1.59.0 on little-endian; big-endian
// support arrived in 1.87.0 — hence the endianness-aware cfg.
#[cfg(not(no_zerocopy_aarch64_simd_1_59_0))]
simd_arch_mod!(
#[cfg(all(
target_arch = "aarch64",
any(
target_endian = "little",
not(no_zerocopy_aarch64_simd_be_1_87_0)
)
))]
#[cfg_attr(
doc_cfg,
doc(cfg(all(target_arch = "aarch64", any(
all(rust = "1.59.0", target_endian = "little"),
rust = "1.87.0",
))))
)]
aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x4x2_t, uint16x4x3_t,
uint16x4x4_t, uint16x8_t, uint32x2_t, uint32x4_t, uint64x1_t, uint64x2_t
);
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_impls() {
trait TryFromBytesTestable {
fn with_passing_test_cases<F: Fn(Box<ReadOnly<Self>>)>(f: F);
fn with_failing_test_cases<F: Fn(&mut [u8])>(f: F);
}
impl<T: FromBytes> TryFromBytesTestable for T {
fn with_passing_test_cases<F: Fn(Box<ReadOnly<Self>>)>(f: F) {
f(ReadOnly::<Self>::new_box_zeroed().unwrap());
let ffs = {
let mut t = ReadOnly::new(Self::new_zeroed());
let ptr: *mut T = ReadOnly::as_mut(&mut t);
unsafe { ptr::write_bytes(ptr.cast::<u8>(), 0xFF, mem::size_of::<T>()) };
t
};
f(Box::new(ffs));
}
fn with_failing_test_cases<F: Fn(&mut [u8])>(_f: F) {}
}
macro_rules! impl_try_from_bytes_testable_for_null_pointer_optimization {
($($tys:ty),*) => {
$(
impl TryFromBytesTestable for Option<$tys> {
fn with_passing_test_cases<F: Fn(Box<ReadOnly<Self>>)>(f: F) {
f(Box::new(ReadOnly::new(None)));
}
fn with_failing_test_cases<F: Fn(&mut [u8])>(f: F) {
for pos in 0..mem::size_of::<Self>() {
let mut bytes = [0u8; mem::size_of::<Self>()];
bytes[pos] = 0x01;
f(&mut bytes[..]);
}
}
}
)*
};
}
macro_rules! impl_try_from_bytes_testable {
(=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {};
($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
impl TryFromBytesTestable for $ty {
impl_try_from_bytes_testable!(
@methods @success $($success_case),*
$(, @failure $($failure_case),*)?
);
}
impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?);
};
($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => {
$(
impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*);
)*
};
(@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
fn with_passing_test_cases<F: Fn(Box<ReadOnly<Self>>)>(_f: F) {
$(
let bx = Box::<Self>::from($success_case);
let ro: Box<ReadOnly<_>> = {
let raw = Box::into_raw(bx);
#[allow(clippy::as_conversions)]
unsafe { Box::from_raw(raw as *mut _) }
};
_f(ro);
)*
}
fn with_failing_test_cases<F: Fn(&mut [u8])>(_f: F) {
$($(
let mut case = $failure_case;
_f(case.as_mut_bytes());
)*)?
}
};
}
impl_try_from_bytes_testable_for_null_pointer_optimization!(
Box<UnsafeCell<NotZerocopy>>,
&'static UnsafeCell<NotZerocopy>,
&'static mut UnsafeCell<NotZerocopy>,
NonNull<UnsafeCell<NotZerocopy>>,
fn(),
FnManyArgs,
extern "C" fn(),
ECFnManyArgs
);
macro_rules! bx {
($e:expr) => {
Box::new($e)
};
}
impl_try_from_bytes_testable!(
bool => @success true, false,
@failure 2u8, 3u8, 0xFFu8;
char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}',
@failure 0xD800u32, 0xDFFFu32, 0x110000u32;
str => @success "", "hello", "❤️🧡💛💚💙💜",
@failure [0, 159, 146, 150];
[u8] => @success vec![].into_boxed_slice(), vec![0, 1, 2].into_boxed_slice();
NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32,
NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128,
NonZeroUsize, NonZeroIsize
=> @success Self::new(1).unwrap(),
@failure Option::<Self>::None;
[bool; 0] => @success [];
[bool; 1]
=> @success [true], [false],
@failure [2u8], [3u8], [0xFFu8];
[bool]
=> @success vec![true, false].into_boxed_slice(), vec![false, true].into_boxed_slice(),
@failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8];
Unalign<bool>
=> @success Unalign::new(false), Unalign::new(true),
@failure 2u8, 0xFFu8;
ManuallyDrop<bool>
=> @success ManuallyDrop::new(false), ManuallyDrop::new(true),
@failure 2u8, 0xFFu8;
ManuallyDrop<[u8]>
=> @success bx!(ManuallyDrop::new([])), bx!(ManuallyDrop::new([0u8])), bx!(ManuallyDrop::new([0u8, 1u8]));
ManuallyDrop<[bool]>
=> @success bx!(ManuallyDrop::new([])), bx!(ManuallyDrop::new([false])), bx!(ManuallyDrop::new([false, true])),
@failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8];
ManuallyDrop<[UnsafeCell<u8>]>
=> @success bx!(ManuallyDrop::new([UnsafeCell::new(0)])), bx!(ManuallyDrop::new([UnsafeCell::new(0), UnsafeCell::new(1)]));
ManuallyDrop<[UnsafeCell<bool>]>
=> @success bx!(ManuallyDrop::new([UnsafeCell::new(false)])), bx!(ManuallyDrop::new([UnsafeCell::new(false), UnsafeCell::new(true)])),
@failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8];
Wrapping<bool>
=> @success Wrapping(false), Wrapping(true),
@failure 2u8, 0xFFu8;
*const NotZerocopy
=> @success ptr::null::<NotZerocopy>(),
@failure [0x01; mem::size_of::<*const NotZerocopy>()];
*mut NotZerocopy
=> @success ptr::null_mut::<NotZerocopy>(),
@failure [0x01; mem::size_of::<*mut NotZerocopy>()];
);
mod autoref_trick {
use super::*;
pub(super) struct AutorefWrapper<T: ?Sized>(pub(super) PhantomData<T>);
pub(super) trait TestIsBitValidShared<T: ?Sized> {
#[allow(clippy::needless_lifetimes)]
fn test_is_bit_valid_shared<'ptr>(&self, candidate: Maybe<'ptr, T>)
-> Option<bool>;
}
impl<T: TryFromBytes + Immutable + ?Sized> TestIsBitValidShared<T> for AutorefWrapper<T> {
#[allow(clippy::needless_lifetimes)]
fn test_is_bit_valid_shared<'ptr>(
&self,
candidate: Maybe<'ptr, T>,
) -> Option<bool> {
Some(T::is_bit_valid(candidate))
}
}
pub(super) trait TestTryFromRef<T: ?Sized> {
#[allow(clippy::needless_lifetimes)]
fn test_try_from_ref<'bytes>(
&self,
bytes: &'bytes [u8],
) -> Option<Option<&'bytes T>>;
}
impl<T: TryFromBytes + Immutable + KnownLayout + ?Sized> TestTryFromRef<T> for AutorefWrapper<T> {
#[allow(clippy::needless_lifetimes)]
fn test_try_from_ref<'bytes>(
&self,
bytes: &'bytes [u8],
) -> Option<Option<&'bytes T>> {
Some(T::try_ref_from_bytes(bytes).ok())
}
}
pub(super) trait TestTryFromMut<T: ?Sized> {
#[allow(clippy::needless_lifetimes)]
fn test_try_from_mut<'bytes>(
&self,
bytes: &'bytes mut [u8],
) -> Option<Option<&'bytes mut T>>;
}
impl<T: TryFromBytes + IntoBytes + KnownLayout + ?Sized> TestTryFromMut<T> for AutorefWrapper<T> {
#[allow(clippy::needless_lifetimes)]
fn test_try_from_mut<'bytes>(
&self,
bytes: &'bytes mut [u8],
) -> Option<Option<&'bytes mut T>> {
Some(T::try_mut_from_bytes(bytes).ok())
}
}
pub(super) trait TestTryReadFrom<T> {
fn test_try_read_from(&self, bytes: &[u8]) -> Option<Option<T>>;
}
impl<T: TryFromBytes> TestTryReadFrom<T> for AutorefWrapper<T> {
fn test_try_read_from(&self, bytes: &[u8]) -> Option<Option<T>> {
Some(T::try_read_from_bytes(bytes).ok())
}
}
pub(super) trait TestAsBytes<T: ?Sized> {
#[allow(clippy::needless_lifetimes)]
fn test_as_bytes<'slf, 't>(&'slf self, t: &'t ReadOnly<T>) -> Option<&'t [u8]>;
}
impl<T: IntoBytes + Immutable + ?Sized> TestAsBytes<T> for AutorefWrapper<T> {
#[allow(clippy::needless_lifetimes)]
fn test_as_bytes<'slf, 't>(&'slf self, t: &'t ReadOnly<T>) -> Option<&'t [u8]> {
Some(t.as_bytes())
}
}
}
use autoref_trick::*;
macro_rules! assert_on_allowlist {
($fn_name:ident($ty:ty) $(: $($tys:ty),*)?) => {{
use core::any::TypeId;
let allowlist: &[TypeId] = &[ $($(TypeId::of::<$tys>()),*)? ];
let allowlist_names: &[&str] = &[ $($(stringify!($tys)),*)? ];
let id = TypeId::of::<$ty>();
assert!(allowlist.contains(&id), "{} is not on allowlist for {}: {:?}", stringify!($ty), stringify!($fn_name), allowlist_names);
}};
}
// Asserts that `$ty` implements any `$trait` and doesn't implement any
// `!$trait`. Note that all `$trait`s must come before any `!$trait`s.
//
// For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success
// and failure cases.
macro_rules! assert_impls {
// Arm specialized for `TryFromBytes`: in addition to the static impl
// assertion, dynamically exercises `is_bit_valid`, `try_ref_from_bytes`,
// `try_mut_from_bytes`, and `try_read_from_bytes` on passing and failing
// test cases, using the autoref-specialization trick to skip operations
// that `$ty` doesn't support.
($ty:ty: TryFromBytes) => {
// "Default" implementations that match the "real"
// implementations defined in the `autoref_trick` module above.
// These are inherent methods taking `&mut self`, so method
// resolution prefers them over the trait methods (which take
// `&self`) whenever they apply; each returns `None` to signal
// that the operation is unsupported for `$ty`. Each fallback
// asserts that `$ty` is on an explicit allowlist so that we
// notice if a type unexpectedly stops supporting an operation.
#[allow(unused, non_local_definitions)]
impl AutorefWrapper<$ty> {
#[allow(clippy::needless_lifetimes)]
fn test_is_bit_valid_shared<'ptr>(
&mut self,
candidate: Maybe<'ptr, $ty>,
) -> Option<bool> {
assert_on_allowlist!(
test_is_bit_valid_shared($ty):
ManuallyDrop<UnsafeCell<()>>,
ManuallyDrop<[UnsafeCell<u8>]>,
ManuallyDrop<[UnsafeCell<bool>]>,
CoreMaybeUninit<NotZerocopy>,
CoreMaybeUninit<UnsafeCell<()>>,
Wrapping<UnsafeCell<()>>
);
None
}
#[allow(clippy::needless_lifetimes)]
fn test_try_from_ref<'bytes>(&mut self, _bytes: &'bytes [u8]) -> Option<Option<&'bytes $ty>> {
assert_on_allowlist!(
test_try_from_ref($ty):
ManuallyDrop<[UnsafeCell<bool>]>
);
None
}
#[allow(clippy::needless_lifetimes)]
fn test_try_from_mut<'bytes>(&mut self, _bytes: &'bytes mut [u8]) -> Option<Option<&'bytes mut $ty>> {
assert_on_allowlist!(
test_try_from_mut($ty):
Option<Box<UnsafeCell<NotZerocopy>>>,
Option<&'static UnsafeCell<NotZerocopy>>,
Option<&'static mut UnsafeCell<NotZerocopy>>,
Option<NonNull<UnsafeCell<NotZerocopy>>>,
Option<fn()>,
Option<FnManyArgs>,
Option<extern "C" fn()>,
Option<ECFnManyArgs>,
*const NotZerocopy,
*mut NotZerocopy
);
None
}
// NOTE: the return type is `Option<Option<&$ty>>` (not
// `Option<Option<$ty>>` like the trait's) so that this
// fallback also compiles for unsized `$ty`; since it always
// returns `None`, the inner type is never observed.
fn test_try_read_from(&mut self, _bytes: &[u8]) -> Option<Option<&$ty>> {
assert_on_allowlist!(
test_try_read_from($ty):
str,
ManuallyDrop<[u8]>,
ManuallyDrop<[bool]>,
ManuallyDrop<[UnsafeCell<bool>]>,
[u8],
[bool]
);
None
}
fn test_as_bytes(&mut self, _t: &ReadOnly<$ty>) -> Option<&[u8]> {
assert_on_allowlist!(
test_as_bytes($ty):
Option<&'static UnsafeCell<NotZerocopy>>,
Option<&'static mut UnsafeCell<NotZerocopy>>,
Option<NonNull<UnsafeCell<NotZerocopy>>>,
Option<Box<UnsafeCell<NotZerocopy>>>,
Option<fn()>,
Option<FnManyArgs>,
Option<extern "C" fn()>,
Option<ECFnManyArgs>,
CoreMaybeUninit<u8>,
CoreMaybeUninit<NotZerocopy>,
CoreMaybeUninit<UnsafeCell<()>>,
ManuallyDrop<UnsafeCell<()>>,
ManuallyDrop<[UnsafeCell<u8>]>,
ManuallyDrop<[UnsafeCell<bool>]>,
Wrapping<UnsafeCell<()>>,
*const NotZerocopy,
*mut NotZerocopy
);
None
}
}
<$ty as TryFromBytesTestable>::with_passing_test_cases(|mut val| {
// FIXME(#494): These tests only get exercised for types
// which are `IntoBytes`. Once we implement #494, we should
// be able to support non-`IntoBytes` types by zeroing
// padding.
// We define `w` and `ww` since, in the case of the inherent
// methods, Rust thinks they're both borrowed mutably at the
// same time (given how we use them below). If we just
// defined a single `w` and used it for multiple operations,
// this would conflict.
//
// We `#[allow(unused_mut)]` for the cases where the "real"
// impls are used, which take `&self`.
#[allow(unused_mut)]
let (mut w, mut ww) = (AutorefWrapper::<$ty>(PhantomData), AutorefWrapper::<$ty>(PhantomData));
let c = Ptr::from_ref(&*val);
let c = c.forget_aligned();
// SAFETY: FIXME(#899): This is unsound. `$ty` is not
// necessarily `IntoBytes`, but that's the corner we've
// backed ourselves into by using `Ptr::from_ref`.
let c = unsafe { c.assume_initialized() };
let res = w.test_is_bit_valid_shared(c);
if let Some(res) = res {
assert!(res, "{}::is_bit_valid (shared `Ptr`): got false, expected true", stringify!($ty));
}
let c = Ptr::from_mut(&mut *val);
let c = c.forget_aligned();
// SAFETY: FIXME(#899): This is unsound. `$ty` is not
// necessarily `IntoBytes`, but that's the corner we've
// backed ourselves into by using `Ptr::from_mut`.
let mut c = unsafe { c.assume_initialized() };
let res = <$ty as TryFromBytes>::is_bit_valid(c.reborrow_shared());
assert!(res, "{}::is_bit_valid (exclusive `Ptr`): got false, expected true", stringify!($ty));
// `bytes` is `Some(val.as_bytes())` if `$ty: IntoBytes +
// Immutable` and `None` otherwise.
let bytes = w.test_as_bytes(&*val);
// The inner closure returns
// `Some($ty::try_ref_from_bytes(bytes))` if `$ty:
// Immutable` and `None` otherwise.
let res = bytes.and_then(|bytes| ww.test_try_from_ref(bytes));
if let Some(res) = res {
assert!(res.is_some(), "{}::try_ref_from_bytes: got `None`, expected `Some`", stringify!($ty));
}
if let Some(bytes) = bytes {
// We need to get a mutable byte slice, and so we clone
// into a `Vec`. However, we also need these bytes to
// satisfy `$ty`'s alignment requirement, which isn't
// guaranteed for `Vec<u8>`. In order to get around
// this, we create a `Vec` which is twice as long as we
// need. There is guaranteed to be an aligned byte range
// of size `size_of_val(val)` within that range.
let val = &*val;
let size = mem::size_of_val(val);
let align = mem::align_of_val(val);
let mut vec = bytes.to_vec();
vec.extend(bytes);
let slc = vec.as_slice();
let offset = slc.as_ptr().align_offset(align);
let bytes_mut = &mut vec.as_mut_slice()[offset..offset+size];
bytes_mut.copy_from_slice(bytes);
let res = ww.test_try_from_mut(bytes_mut);
if let Some(res) = res {
assert!(res.is_some(), "{}::try_mut_from_bytes: got `None`, expected `Some`", stringify!($ty));
}
}
let res = bytes.and_then(|bytes| ww.test_try_read_from(bytes));
if let Some(res) = res {
assert!(res.is_some(), "{}::try_read_from_bytes: got `None`, expected `Some`", stringify!($ty));
}
});
#[allow(clippy::as_conversions)]
<$ty as TryFromBytesTestable>::with_failing_test_cases(|c| {
#[allow(unused_mut)] // For cases where the "real" impls are used, which take `&self`.
let mut w = AutorefWrapper::<$ty>(PhantomData);
// This is `Some($ty::try_ref_from_bytes(c))` if `$ty:
// Immutable` and `None` otherwise.
let res = w.test_try_from_ref(c);
if let Some(res) = res {
assert!(res.is_none(), "{}::try_ref_from_bytes({:?}): got Some, expected None", stringify!($ty), c);
}
let res = w.test_try_from_mut(c);
if let Some(res) = res {
assert!(res.is_none(), "{}::try_mut_from_bytes({:?}): got Some, expected None", stringify!($ty), c);
}
let res = w.test_try_read_from(c);
if let Some(res) = res {
assert!(res.is_none(), "{}::try_read_from_bytes({:?}): got Some, expected None", stringify!($ty), c);
}
});
#[allow(dead_code)]
const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); };
};
// Positive assertion: `$ty` must implement `$trait`.
($ty:ty: $trait:ident) => {
#[allow(dead_code)]
const _: () = { static_assertions::assert_impl_all!($ty: $trait); };
};
// Negative assertion: `$ty` must NOT implement `$trait`.
($ty:ty: !$trait:ident) => {
#[allow(dead_code)]
const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); };
};
// List form: expands to one assertion per trait. All positive traits
// (including the special-cased `TryFromBytes`) must precede negatives.
($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => {
$(
assert_impls!($ty: $trait);
)*
$(
assert_impls!($ty: !$negative_trait);
)*
};
}
// NOTE: The negative impl assertions here are not necessarily
// prescriptive. They merely serve as change detectors to make sure
// we're aware of what trait impls are getting added with a given
// change. Of course, some impls would be invalid (e.g., `bool:
// FromBytes`), and so this change detection is very important.
// The unit type: zero-sized, so trivially valid, initialized, and aligned.
assert_impls!(
(): KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
Unaligned
);
// Single-byte integers are `Unaligned`; all wider primitives are not.
assert_impls!(
u8: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
Unaligned
);
assert_impls!(
i8: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
Unaligned
);
assert_impls!(
u16: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
i16: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
u32: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
i32: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
u64: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
i64: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
u128: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
i128: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
usize: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
isize: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
// Floating-point primitives: every bit pattern is valid (including NaNs),
// so they are `FromBytes`. `f16`/`f128` are gated on nightly support.
#[cfg(feature = "float-nightly")]
assert_impls!(
f16: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
f32: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
assert_impls!(
f64: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
#[cfg(feature = "float-nightly")]
assert_impls!(
f128: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
!Unaligned
);
// Types with validity constraints (not every bit pattern is valid) are
// not `FromBytes`, but may still be `TryFromBytes` and `FromZeros`.
assert_impls!(
bool: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
IntoBytes,
Unaligned,
!FromBytes
);
assert_impls!(
char: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
str: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
IntoBytes,
Unaligned,
!FromBytes
);
// `NonZero*` types: the all-zeros pattern is invalid, so they are not
// even `FromZeros`.
assert_impls!(
NonZeroU8: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
Unaligned,
!FromZeros,
!FromBytes
);
assert_impls!(
NonZeroI8: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
Unaligned,
!FromZeros,
!FromBytes
);
assert_impls!(
NonZeroU16: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
NonZeroI16: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
NonZeroU32: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
NonZeroI32: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
NonZeroU64: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
NonZeroI64: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
NonZeroU128: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
NonZeroI128: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
NonZeroUsize: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
assert_impls!(
NonZeroIsize: KnownLayout,
Immutable,
TryFromBytes,
IntoBytes,
!FromBytes,
!Unaligned
);
// `Option<NonZero*>`: the niche makes zero represent `None`, so every
// bit pattern is once again valid, restoring `FromZeros`/`FromBytes`.
assert_impls!(Option<NonZeroU8>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
assert_impls!(Option<NonZeroI8>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
assert_impls!(Option<NonZeroU16>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
assert_impls!(Option<NonZeroI16>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
assert_impls!(Option<NonZeroU32>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
assert_impls!(Option<NonZeroI32>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
assert_impls!(Option<NonZeroU64>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
assert_impls!(Option<NonZeroI64>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
assert_impls!(Option<NonZeroU128>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
assert_impls!(Option<NonZeroI128>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
assert_impls!(Option<NonZeroUsize>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
assert_impls!(Option<NonZeroIsize>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned);
// Implements none of the ZC traits. Used below to verify that wrapper
// types do not spuriously gain trait impls from a non-zerocopy payload.
struct NotZerocopy;
// Fn-pointer type with the maximum arity for which zerocopy provides
// impls; exercises the variadic fn-pointer impl machinery.
#[rustfmt::skip]
type FnManyArgs = fn(
NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
) -> (NotZerocopy, NotZerocopy);
// Same, but for the `extern "C"` ABI.
// Allowed, because we're not actually using this type for FFI.
#[allow(improper_ctypes_definitions)]
#[rustfmt::skip]
type ECFnManyArgs = extern "C" fn(
NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
) -> (NotZerocopy, NotZerocopy);
// `Option`s of pointer-like types: the null-pointer niche makes the sized
// variants `FromZeros`; the fat-pointer (slice) variants have no such niche.
#[cfg(feature = "alloc")]
assert_impls!(Option<Box<UnsafeCell<NotZerocopy>>>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<Box<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<&'static UnsafeCell<NotZerocopy>>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<&'static [UnsafeCell<NotZerocopy>]>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<&'static mut UnsafeCell<NotZerocopy>>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<&'static mut [UnsafeCell<NotZerocopy>]>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<NonNull<UnsafeCell<NotZerocopy>>>: KnownLayout, TryFromBytes, FromZeros, Immutable, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<NonNull<[UnsafeCell<NotZerocopy>]>>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<fn()>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<FnManyArgs>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<extern "C" fn()>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Option<ECFnManyArgs>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned);
// `PhantomData` is zero-sized regardless of its parameter, so it
// implements everything even for non-zerocopy or unsized parameters.
assert_impls!(PhantomData<NotZerocopy>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
assert_impls!(PhantomData<UnsafeCell<()>>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
assert_impls!(PhantomData<[u8]>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
// `ManuallyDrop` forwards its payload's traits.
assert_impls!(ManuallyDrop<u8>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
// This test is important because it allows us to test our hand-rolled
// implementation of `<ManuallyDrop<T> as TryFromBytes>::is_bit_valid`.
assert_impls!(ManuallyDrop<bool>: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes);
assert_impls!(ManuallyDrop<[u8]>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
// This test is important because it allows us to test our hand-rolled
// implementation of `<ManuallyDrop<T> as TryFromBytes>::is_bit_valid`.
assert_impls!(ManuallyDrop<[bool]>: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes);
assert_impls!(ManuallyDrop<NotZerocopy>: !Immutable, !TryFromBytes, !KnownLayout, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(ManuallyDrop<[NotZerocopy]>: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
// Interior mutability (`UnsafeCell`) removes `Immutable`.
assert_impls!(ManuallyDrop<UnsafeCell<()>>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable);
assert_impls!(ManuallyDrop<[UnsafeCell<u8>]>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable);
assert_impls!(ManuallyDrop<[UnsafeCell<bool>]>: KnownLayout, TryFromBytes, FromZeros, IntoBytes, Unaligned, !Immutable, !FromBytes);
// `MaybeUninit` may contain uninitialized bytes, so it is never
// `IntoBytes`; any bit pattern (even uninit) is valid, so it is
// `FromBytes` even for non-zerocopy payloads.
assert_impls!(CoreMaybeUninit<u8>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, Unaligned, !IntoBytes);
assert_impls!(CoreMaybeUninit<NotZerocopy>: KnownLayout, TryFromBytes, FromZeros, FromBytes, !Immutable, !IntoBytes, !Unaligned);
assert_impls!(CoreMaybeUninit<UnsafeCell<()>>: KnownLayout, TryFromBytes, FromZeros, FromBytes, Unaligned, !Immutable, !IntoBytes);
// `Wrapping` forwards its payload's traits.
assert_impls!(Wrapping<u8>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
// This test is important because it allows us to test our hand-rolled
// implementation of `<Wrapping<T> as TryFromBytes>::is_bit_valid`.
assert_impls!(Wrapping<bool>: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes);
assert_impls!(Wrapping<NotZerocopy>: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(Wrapping<UnsafeCell<()>>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable);
// `Unalign` is always `Unaligned` by construction.
assert_impls!(Unalign<u8>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned);
// This test is important because it allows us to test our hand-rolled
// implementation of `<Unalign<T> as TryFromBytes>::is_bit_valid`.
assert_impls!(Unalign<bool>: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes);
assert_impls!(Unalign<NotZerocopy>: KnownLayout, Unaligned, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes);
// Slices and arrays forward their element type's traits.
assert_impls!(
[u8]: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
Unaligned
);
assert_impls!(
[bool]: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
IntoBytes,
Unaligned,
!FromBytes
);
assert_impls!([NotZerocopy]: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(
[u8; 0]: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
Unaligned,
);
assert_impls!(
[NotZerocopy; 0]: KnownLayout,
!Immutable,
!TryFromBytes,
!FromZeros,
!FromBytes,
!IntoBytes,
!Unaligned
);
assert_impls!(
[u8; 1]: KnownLayout,
Immutable,
TryFromBytes,
FromZeros,
FromBytes,
IntoBytes,
Unaligned,
);
assert_impls!(
[NotZerocopy; 1]: KnownLayout,
!Immutable,
!TryFromBytes,
!FromZeros,
!FromBytes,
!IntoBytes,
!Unaligned
);
// Raw pointers: thin pointers to sized types accept any bit pattern for
// `TryFromBytes`/`FromZeros`; fat pointers (slice/trait objects) do not.
assert_impls!(*const NotZerocopy: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(*mut NotZerocopy: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(*const [NotZerocopy]: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(*mut [NotZerocopy]: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(*const dyn Debug: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
assert_impls!(*mut dyn Debug: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned);
// Architecture-specific SIMD vector types, gated on the `simd` feature
// and further by target architecture and toolchain-version cfgs.
#[cfg(feature = "simd")]
{
// Asserts the full zerocopy trait set (everything but `Unaligned`) for
// each named SIMD type from `core::arch::$arch`.
#[allow(unused_macros)]
macro_rules! test_simd_arch_mod {
($arch:ident, $($typ:ident),*) => {
{
use core::arch::$arch::{$($typ),*};
use crate::*;
$( assert_impls!($typ: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); )*
}
};
}
#[cfg(target_arch = "x86")]
test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
// AVX-512 types were stabilized in Rust 1.89.0; the version-detect cfg
// gates them on toolchains that support them.
#[cfg(all(not(no_zerocopy_simd_x86_avx12_1_89_0), target_arch = "x86"))]
test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i);
#[cfg(target_arch = "x86_64")]
test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
#[cfg(all(not(no_zerocopy_simd_x86_avx12_1_89_0), target_arch = "x86_64"))]
test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i);
#[cfg(target_arch = "wasm32")]
test_simd_arch_mod!(wasm32, v128);
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
test_simd_arch_mod!(
powerpc,
vector_bool_long,
vector_double,
vector_signed_long,
vector_unsigned_long
);
#[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
test_simd_arch_mod!(
powerpc64,
vector_bool_long,
vector_double,
vector_signed_long,
vector_unsigned_long
);
#[cfg(all(target_arch = "aarch64", not(no_zerocopy_aarch64_simd_1_59_0)))]
#[rustfmt::skip]
test_simd_arch_mod!(
aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x4x2_t, uint16x4x3_t,
uint16x4x4_t, uint16x8_t, uint32x2_t, uint32x4_t, uint64x1_t, uint64x2_t
);
}
}
}