//! Utilities for "plain old data" types: zero-initializing values
//! ([`Zeroable`]), marking types whose bytes may be freely reinterpreted
//! ([`Pod`]), and casting between slices of such types without copying.

#![no_std]
#![warn(missing_docs)]

use core::{
  marker::PhantomData,
  mem::{align_of, size_of},
  num::*,
  ptr::NonNull,
};
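/// Branchless computation of the minimum of two integers.
///
/// Expands to `y ^ ((x ^ y) & ((x < y) as U).wrapping_neg())`, where `U` is
/// the integer type given as the third argument. When `x < y` the mask is all
/// ones and the expression collapses to `x`; otherwise the mask is zero and it
/// collapses to `y`.
///
/// ```ignore
/// // Illustrative only; `the_crate` stands in for this crate's (unstated) name.
/// use the_crate::branchless_min;
/// assert_eq!(branchless_min!(3u32, 7u32, u32), 3);
/// assert_eq!(branchless_min!(7u32, 3u32, u32), 3);
/// ```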
#[macro_export]
macro_rules! branchless_min {
  ($x:expr, $y:expr, $u:ty) => {
    $y ^ (($x ^ $y) & (<$u>::wrapping_neg(($x < $y) as $u)))
  };
}
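/// Branchless computation of the maximum of two integers.
///
/// The counterpart of [`branchless_min!`]: `x ^ ((x ^ y) & ((x < y) as U).wrapping_neg())`
/// collapses to `y` when `x < y` and to `x` otherwise.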
#[macro_export]
macro_rules! branchless_max {
  ($x:expr, $y:expr, $u:ty) => {
    $x ^ (($x ^ $y) & (<$u>::wrapping_neg(($x < $y) as $u)))
  };
}
// Implements an unsafe marker trait for arrays of the listed lengths, provided
// the element type also implements it.
macro_rules! impl_unsafe_marker_for_array {
  ( $marker:ident , $( $n:expr ),* ) => {
    $(unsafe impl<T> $marker for [T; $n] where T: $marker {})*
  }
}
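/// Marker trait for types whose all-zero bit pattern is a valid value.
///
/// # Safety
///
/// Implementing this trait asserts that a value of the type may be produced by
/// `core::mem::zeroed()`, i.e. that the all-zero byte pattern is not undefined
/// behavior for this type.
///
/// ```ignore
/// // Illustrative only; `the_crate` stands in for this crate's (unstated) name.
/// use the_crate::Zeroable;
/// let x: u32 = Zeroable::zeroed();
/// assert_eq!(x, 0);
/// let p: Option<core::ptr::NonNull<u8>> = Zeroable::zeroed();
/// assert!(p.is_none());
/// ```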
pub unsafe trait Zeroable: Sized {
  /// Produces the all-zero value of this type.
  fn zeroed() -> Self {
    unsafe { core::mem::zeroed() }
  }
}
unsafe impl Zeroable for () {}
unsafe impl Zeroable for bool {}
unsafe impl Zeroable for char {}
unsafe impl Zeroable for u8 {}
unsafe impl Zeroable for i8 {}
unsafe impl Zeroable for u16 {}
unsafe impl Zeroable for i16 {}
unsafe impl Zeroable for u32 {}
unsafe impl Zeroable for i32 {}
unsafe impl Zeroable for u64 {}
unsafe impl Zeroable for i64 {}
unsafe impl Zeroable for usize {}
unsafe impl Zeroable for isize {}
unsafe impl Zeroable for u128 {}
unsafe impl Zeroable for i128 {}
unsafe impl Zeroable for f32 {}
unsafe impl Zeroable for f64 {}
unsafe impl Zeroable for Option<NonZeroI8> {}
unsafe impl Zeroable for Option<NonZeroI16> {}
unsafe impl Zeroable for Option<NonZeroI32> {}
unsafe impl Zeroable for Option<NonZeroI64> {}
unsafe impl Zeroable for Option<NonZeroI128> {}
unsafe impl Zeroable for Option<NonZeroIsize> {}
unsafe impl Zeroable for Option<NonZeroU8> {}
unsafe impl Zeroable for Option<NonZeroU16> {}
unsafe impl Zeroable for Option<NonZeroU32> {}
unsafe impl Zeroable for Option<NonZeroU64> {}
unsafe impl Zeroable for Option<NonZeroU128> {}
unsafe impl Zeroable for Option<NonZeroUsize> {}
unsafe impl<T> Zeroable for *mut T {}
unsafe impl<T> Zeroable for *const T {}
unsafe impl<T> Zeroable for Option<NonNull<T>> {}
unsafe impl<T> Zeroable for PhantomData<T> where T: Zeroable {}
impl_unsafe_marker_for_array!(
  Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
  512, 1024
);
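/// Marker trait for "plain old data".
///
/// # Safety
///
/// In addition to being [`Zeroable`] and `Copy`, implementing this trait
/// asserts that the type can be treated as an arbitrary bag of bytes: every
/// bit pattern of the right size is a valid value, and the type contains no
/// padding bytes whose contents would be unspecified. The slice-casting
/// functions below rely on these guarantees.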
pub unsafe trait Pod: Zeroable + Copy {}
unsafe impl Pod for () {}
unsafe impl Pod for u8 {}
unsafe impl Pod for i8 {}
unsafe impl Pod for u16 {}
unsafe impl Pod for i16 {}
unsafe impl Pod for u32 {}
unsafe impl Pod for i32 {}
unsafe impl Pod for u64 {}
unsafe impl Pod for i64 {}
unsafe impl Pod for usize {}
unsafe impl Pod for isize {}
unsafe impl Pod for u128 {}
unsafe impl Pod for i128 {}
unsafe impl Pod for f32 {}
unsafe impl Pod for f64 {}
unsafe impl Pod for Option<NonZeroI8> {}
unsafe impl Pod for Option<NonZeroI16> {}
unsafe impl Pod for Option<NonZeroI32> {}
unsafe impl Pod for Option<NonZeroI64> {}
unsafe impl Pod for Option<NonZeroI128> {}
unsafe impl Pod for Option<NonZeroIsize> {}
unsafe impl Pod for Option<NonZeroU8> {}
unsafe impl Pod for Option<NonZeroU16> {}
unsafe impl Pod for Option<NonZeroU32> {}
unsafe impl Pod for Option<NonZeroU64> {}
unsafe impl Pod for Option<NonZeroU128> {}
unsafe impl Pod for Option<NonZeroUsize> {}
unsafe impl<T> Pod for *mut T {}
unsafe impl<T> Pod for *const T {}
unsafe impl<T> Pod for Option<NonNull<T>> {}
unsafe impl<T> Pod for PhantomData<T> where T: Pod {}
impl_unsafe_marker_for_array!(
  Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
  20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
  512, 1024
);
/// The possible failure modes of the slice-casting functions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SliceCastError {
  /// The target type is more strictly aligned than the source type and the
  /// input slice does not happen to satisfy the target's alignment.
  TargetAlignmentGreaterAndInputNotAligned,
  /// Exactly one of the two element types is zero-sized, so there is no
  /// sensible element count for the output slice.
  CantConvertBetweenZSTAndNonZST,
  /// The element sizes differ and the input's byte length is not a multiple
  /// of the output element size, so the cast would leave trailing bytes.
  OutputSliceWouldHaveSlop,
}
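/// Tries to reinterpret `&[A]` as `&[B]`, keeping the same region of memory.
///
/// The cast succeeds if the start of the slice is sufficiently aligned for `B`
/// and the total byte length divides evenly into `B`-sized elements; the
/// output length is adjusted accordingly. On failure a [`SliceCastError`]
/// describes which requirement was violated.
///
/// ```ignore
/// // Illustrative only; `the_crate` stands in for this crate's (unstated) name.
/// use the_crate::try_cast_slice;
/// let words: &[u32] = &[0x0102_0304, 0x0506_0708];
/// // u8 has alignment 1, so casting down to bytes always succeeds.
/// let bytes: &[u8] = try_cast_slice(words).unwrap();
/// assert_eq!(bytes.len(), 8);
/// ```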
pub fn try_cast_slice<A: Pod, B: Pod>(a: &[A]) -> Result<&[B], SliceCastError> {
  if align_of::<B>() > align_of::<A>() && (a.as_ptr() as usize) % align_of::<B>() != 0 {
    Err(SliceCastError::TargetAlignmentGreaterAndInputNotAligned)
  } else if size_of::<B>() == size_of::<A>() {
    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, a.len()) })
  } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
    Err(SliceCastError::CantConvertBetweenZSTAndNonZST)
  } else if core::mem::size_of_val(a) % size_of::<B>() != 0 {
    Err(SliceCastError::OutputSliceWouldHaveSlop)
  } else {
    let new_len = core::mem::size_of_val(a) / size_of::<B>();
    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) })
  }
}
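/// Reinterprets `&[A]` as `&[B]`, panicking on the error cases of
/// [`try_cast_slice`].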
pub fn cast_slice<A: Pod, B: Pod>(a: &[A]) -> &[B] {
  try_cast_slice(a).unwrap()
}
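/// Tries to reinterpret `&mut [A]` as `&mut [B]`, keeping the same region of
/// memory. The same alignment and size rules as [`try_cast_slice`] apply.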
pub fn try_cast_slice_mut<A: Pod, B: Pod>(a: &mut [A]) -> Result<&mut [B], SliceCastError> {
  if align_of::<B>() > align_of::<A>() && (a.as_ptr() as usize) % align_of::<B>() != 0 {
    Err(SliceCastError::TargetAlignmentGreaterAndInputNotAligned)
  } else if size_of::<B>() == size_of::<A>() {
    Ok(unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, a.len()) })
  } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
    Err(SliceCastError::CantConvertBetweenZSTAndNonZST)
  } else if core::mem::size_of_val(a) % size_of::<B>() != 0 {
    Err(SliceCastError::OutputSliceWouldHaveSlop)
  } else {
    let new_len = core::mem::size_of_val(a) / size_of::<B>();
    Ok(unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len) })
  }
}
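/// Reinterprets `&mut [A]` as `&mut [B]`, panicking on the error cases of
/// [`try_cast_slice_mut`].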
pub fn cast_slice_mut<A: Pod, B: Pod>(a: &mut [A]) -> &mut [B] {
  try_cast_slice_mut(a).unwrap()
}
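/// Views a `Pod` value as its underlying bytes.
///
/// Zero-sized types produce an empty slice.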
pub fn bytes_of<T: Pod>(t: &T) -> &[u8] {
  try_cast_slice::<T, u8>(core::slice::from_ref(t)).unwrap_or(&[])
}
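
// A few illustrative unit tests (a minimal sketch, not an exhaustive suite):
// they exercise the branchless macros, the slice casts, and `bytes_of` using
// only items defined above, so they carry no external dependencies.
#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn branchless_min_and_max() {
    assert_eq!(branchless_min!(3u32, 7u32, u32), 3);
    assert_eq!(branchless_min!(7u32, 3u32, u32), 3);
    assert_eq!(branchless_max!(3u32, 7u32, u32), 7);
    assert_eq!(branchless_max!(7u32, 3u32, u32), 7);
  }

  #[test]
  fn zeroed_option_nonzero_is_none() {
    let x: Option<NonZeroU32> = Zeroable::zeroed();
    assert!(x.is_none());
  }

  #[test]
  fn cast_slice_u32_to_u8() {
    let words: &[u32] = &[1, 2];
    let bytes: &[u8] = cast_slice(words);
    assert_eq!(bytes.len(), 2 * size_of::<u32>());
  }

  #[test]
  fn cast_slice_u8_to_u16_can_report_slop() {
    // Start from a u16-aligned buffer so the only possible failure is slop.
    let words = [0u16; 2];
    let bytes: &[u8] = cast_slice(&words);
    // Three bytes cannot be viewed as a whole number of u16 elements.
    assert_eq!(
      try_cast_slice::<u8, u16>(&bytes[..3]),
      Err(SliceCastError::OutputSliceWouldHaveSlop)
    );
  }

  #[test]
  fn bytes_of_u16() {
    let x = 0x0102u16;
    let bytes = bytes_of(&x);
    assert_eq!(bytes.len(), 2);
    assert_eq!(bytes, &x.to_ne_bytes()[..]);
  }
}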