#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(
feature = "const_convert_and_const_trait_impl",
feature(const_convert, const_trait_impl)
)]
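//! Unsigned integer types of arbitrary bit width, such as [`u5`], [`u48`] or [`u120`].
//!
//! The core type is [`UInt<T, BITS>`](UInt): it keeps its value in the storage type `T` and
//! guarantees that only the lowest `BITS` bits are ever set. The type aliases pick the smallest
//! native storage type that fits, and the [`Number`] trait abstracts over native and
//! arbitrary-width integers alike.
//!
//! A minimal usage sketch (assuming the crate is built under the name `arbitrary_int`):
//!
//! ```
//! use arbitrary_int::{u7, Number};
//!
//! // `new` panics if the value does not fit; `try_new` returns a `Result` instead.
//! let x = u7::new(100);
//! assert_eq!(x.value(), 100);
//! assert_eq!(u7::MAX.value(), 127);
//! assert!(u7::try_new(200).is_err());
//! ```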
use core::fmt::{Binary, Debug, Display, Formatter, LowerHex, Octal, UpperHex};
use core::hash::{Hash, Hasher};
#[cfg(feature = "num-traits")]
use core::num::Wrapping;
use core::ops::{
Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not, Shl,
ShlAssign, Shr, ShrAssign, Sub, SubAssign,
};
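/// Error returned by `try_new` when the given value does not fit into the requested number of bits.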
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct TryNewError;
impl Display for TryNewError {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
write!(f, "Value too large to fit within this integer type")
}
}
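/// Shared interface of the native unsigned integers and [`UInt`]: the bit width, the minimum and
/// maximum values, and construction from (and conversion back to) the underlying storage type.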
#[cfg_attr(feature = "const_convert_and_const_trait_impl", const_trait)]
pub trait Number: Sized {
type UnderlyingType: Debug
+ From<u8>
+ TryFrom<u16>
+ TryFrom<u32>
+ TryFrom<u64>
+ TryFrom<u128>;
const BITS: usize;
const MIN: Self;
const MAX: Self;
fn new(value: Self::UnderlyingType) -> Self;
fn try_new(value: Self::UnderlyingType) -> Result<Self, TryNewError>;
fn value(self) -> Self::UnderlyingType;
}
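// The impl macros below are defined twice: with the nightly `const_convert_and_const_trait_impl`
// feature they emit `impl const`, otherwise they emit plain impls for stable compilers.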
#[cfg(feature = "const_convert_and_const_trait_impl")]
macro_rules! impl_number_native {
($( $type:ty ),+) => {
$(
impl const Number for $type {
type UnderlyingType = $type;
const BITS: usize = Self::BITS as usize;
const MIN: Self = Self::MIN;
const MAX: Self = Self::MAX;
#[inline]
fn new(value: Self::UnderlyingType) -> Self { value }
#[inline]
fn try_new(value: Self::UnderlyingType) -> Result<Self, TryNewError> { Ok(value) }
#[inline]
fn value(self) -> Self::UnderlyingType { self }
}
)+
};
}
#[cfg(not(feature = "const_convert_and_const_trait_impl"))]
macro_rules! impl_number_native {
($( $type:ty ),+) => {
$(
impl Number for $type {
type UnderlyingType = $type;
const BITS: usize = Self::BITS as usize;
const MIN: Self = Self::MIN;
const MAX: Self = Self::MAX;
#[inline]
fn new(value: Self::UnderlyingType) -> Self { value }
#[inline]
fn try_new(value: Self::UnderlyingType) -> Result<Self, TryNewError> { Ok(value) }
#[inline]
fn value(self) -> Self::UnderlyingType { self }
}
)+
};
}
impl_number_native!(u8, u16, u32, u64, u128);
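// Compile-time assertion that `A <= B`. Referencing `SMALLER_OR_EQUAL` forces the `assert!` to be
// evaluated during monomorphization for the concrete `A` and `B`.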
struct CompileTimeAssert<const A: usize, const B: usize> {}
impl<const A: usize, const B: usize> CompileTimeAssert<A, B> {
pub const SMALLER_OR_EQUAL: () = {
assert!(A <= B);
};
}
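/// An unsigned integer that is statically guaranteed to hold at most `BITS` bits of the storage
/// type `T`; the unused upper bits of `T` are always zero.
///
/// Usually used through one of the aliases such as `u7` or `u48`. A short sketch (again assuming
/// the crate name `arbitrary_int`):
///
/// ```
/// use arbitrary_int::{u4, u6};
///
/// let small = u4::new(0b1010);
/// // `widen` converts into a type with at least as many bits.
/// let wide: u6 = small.widen();
/// assert_eq!(wide.value(), 0b1010);
/// ```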
#[derive(Copy, Clone, Eq, PartialEq, Default, Ord, PartialOrd)]
pub struct UInt<T, const BITS: usize> {
value: T,
}
impl<T: Copy, const BITS: usize> UInt<T, BITS> {
pub const BITS: usize = BITS;
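    /// Returns the stored value as the underlying storage type `T`.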
#[inline]
pub const fn value(self) -> T {
self.value
}
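    /// Creates an instance without checking that `value` fits into `BITS` bits.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that no bits above the lowest `BITS` bits are set in `value`;
    /// comparisons, `count_zeros`, the byte conversions and other methods rely on the unused
    /// upper bits being zero.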
#[inline]
pub const unsafe fn new_unchecked(value: T) -> Self {
Self { value }
}
}
impl<T, const BITS: usize> UInt<T, BITS>
where
Self: Number,
T: Copy,
{
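    /// Bit mask with exactly the lowest `BITS` bits set, i.e. the same value as
    /// `Self::MAX.value()`. Used internally to truncate results back into range.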
pub const MASK: T = Self::MAX.value;
}
#[cfg(feature = "const_convert_and_const_trait_impl")]
macro_rules! uint_impl_num {
($($type:ident),+) => {
$(
impl<const BITS: usize> const Number for UInt<$type, BITS> {
type UnderlyingType = $type;
const BITS: usize = BITS;
const MIN: Self = Self { value: 0 };
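            // MAX has exactly the lowest BITS bits set: shift the unused upper bits out of the
            // native type's MAX.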
const MAX: Self = Self { value: (<$type as Number>::MAX >> (<$type as Number>::BITS - Self::BITS)) };
#[inline]
fn try_new(value: Self::UnderlyingType) -> Result<Self, TryNewError> {
if value <= Self::MAX.value {
Ok(Self { value })
} else {
                    Err(TryNewError {})
}
}
#[inline]
fn new(value: $type) -> Self {
assert!(value <= Self::MAX.value);
Self { value }
}
#[inline]
fn value(self) -> $type {
self.value
}
}
)+
};
}
#[cfg(not(feature = "const_convert_and_const_trait_impl"))]
macro_rules! uint_impl_num {
($($type:ident),+) => {
$(
impl<const BITS: usize> Number for UInt<$type, BITS> {
type UnderlyingType = $type;
const BITS: usize = BITS;
const MIN: Self = Self { value: 0 };
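            // MAX has exactly the lowest BITS bits set: shift the unused upper bits out of the
            // native type's MAX.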
const MAX: Self = Self { value: (<$type as Number>::MAX >> (<$type as Number>::BITS - Self::BITS)) };
#[inline]
fn try_new(value: Self::UnderlyingType) -> Result<Self, TryNewError> {
if value <= Self::MAX.value {
Ok(Self { value })
} else {
Err(TryNewError{})
}
}
#[inline]
fn new(value: $type) -> Self {
assert!(value <= Self::MAX.value);
Self { value }
}
#[inline]
fn value(self) -> $type {
self.value
}
}
)+
};
}
uint_impl_num!(u8, u16, u32, u64, u128);
macro_rules! uint_impl {
($($type:ident),+) => {
$(
impl<const BITS: usize> UInt<$type, BITS> {
#[inline]
pub const fn new(value: $type) -> Self {
assert!(value <= Self::MAX.value);
Self { value }
}
#[inline]
pub const fn try_new(value: $type) -> Result<Self, TryNewError> {
if value <= Self::MAX.value {
Ok(Self { value })
} else {
Err(TryNewError {})
}
}
#[deprecated(note = "Use one of the specific functions like extract_u32")]
pub const fn extract(value: $type, start_bit: usize) -> Self {
assert!(start_bit + BITS <= $type::BITS as usize);
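                // Evaluating MAX forces the compile-time check that BITS fits into the storage
                // type; the same trick is used in the extract_* variants below.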
let _ = Self::MAX;
Self {
value: (value >> start_bit) & Self::MAX.value,
}
}
#[inline]
pub const fn extract_u8(value: u8, start_bit: usize) -> Self {
assert!(start_bit + BITS <= 8);
let _ = Self::MAX;
Self {
value: ((value >> start_bit) as $type) & Self::MAX.value,
}
}
#[inline]
pub const fn extract_u16(value: u16, start_bit: usize) -> Self {
assert!(start_bit + BITS <= 16);
let _ = Self::MAX;
Self {
value: ((value >> start_bit) as $type) & Self::MAX.value,
}
}
#[inline]
pub const fn extract_u32(value: u32, start_bit: usize) -> Self {
assert!(start_bit + BITS <= 32);
let _ = Self::MAX;
Self {
value: ((value >> start_bit) as $type) & Self::MAX.value,
}
}
#[inline]
pub const fn extract_u64(value: u64, start_bit: usize) -> Self {
assert!(start_bit + BITS <= 64);
let _ = Self::MAX;
Self {
value: ((value >> start_bit) as $type) & Self::MAX.value,
}
}
#[inline]
pub const fn extract_u128(value: u128, start_bit: usize) -> Self {
assert!(start_bit + BITS <= 128);
let _ = Self::MAX;
Self {
value: ((value >> start_bit) as $type) & Self::MAX.value,
}
}
            pub const fn widen<const BITS_RESULT: usize>(self) -> UInt<$type, BITS_RESULT> {
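                // Compile-time checks: the target width must be at least BITS and must itself
                // fit into the storage type.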
let _ = CompileTimeAssert::<BITS, BITS_RESULT>::SMALLER_OR_EQUAL;
let _ = UInt::<$type, BITS_RESULT>::MAX;
UInt::<$type, BITS_RESULT> { value: self.value }
}
pub const fn reverse_bits(self) -> Self {
let shift_right = (core::mem::size_of::<$type>() << 3) - BITS;
Self { value: self.value.reverse_bits() >> shift_right }
}
pub const fn count_ones(self) -> u32 {
self.value.count_ones()
}
pub const fn count_zeros(self) -> u32 {
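                // The unused upper bits of the underlying type are always zero, so they must not
                // be counted as zeros of the reduced-width value.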
let filler_bits = ((core::mem::size_of::<$type>() << 3) - BITS) as u32;
self.value.count_zeros() - filler_bits
}
pub const fn leading_ones(self) -> u32 {
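                // Shift the value to the top of the underlying type so that the native
                // leading_ones/leading_zeros only look at the BITS-wide window.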
let shift = ((core::mem::size_of::<$type>() << 3) - BITS) as u32;
(self.value << shift).leading_ones()
}
pub const fn leading_zeros(self) -> u32 {
let shift = ((core::mem::size_of::<$type>() << 3) - BITS) as u32;
(self.value << shift).leading_zeros()
}
pub const fn trailing_ones(self) -> u32 {
self.value.trailing_ones()
}
pub const fn trailing_zeros(self) -> u32 {
self.value.trailing_zeros()
}
pub const fn rotate_left(self, n: u32) -> Self {
let b = BITS as u32;
let n = if n >= b { n % b } else { n };
let moved_bits = (self.value << n) & Self::MASK;
let truncated_bits = self.value >> (b - n);
Self { value: moved_bits | truncated_bits }
}
pub const fn rotate_right(self, n: u32) -> Self {
let b = BITS as u32;
let n = if n >= b { n % b } else { n };
let moved_bits = self.value >> n;
let truncated_bits = (self.value << (b - n)) & Self::MASK;
Self { value: moved_bits | truncated_bits }
}
}
)+
};
}
uint_impl!(u8, u16, u32, u64, u128);
impl<T, const BITS: usize> Add for UInt<T, BITS>
where
Self: Number,
T: PartialEq
+ Copy
+ BitAnd<T, Output = T>
+ Not<Output = T>
+ Add<T, Output = T>
+ Sub<T, Output = T>
+ Shr<usize, Output = T>
+ Shl<usize, Output = T>
+ From<u8>,
{
type Output = UInt<T, BITS>;
fn add(self, rhs: Self) -> Self::Output {
let sum = self.value + rhs.value;
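        // Mirror the behaviour of the native integer types: panic on overflow in debug builds,
        // silently truncate to the bit mask in release builds.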
#[cfg(debug_assertions)]
if (sum & !Self::MASK) != T::from(0) {
panic!("attempt to add with overflow");
}
Self {
value: sum & Self::MASK,
}
}
}
impl<T, const BITS: usize> AddAssign for UInt<T, BITS>
where
Self: Number,
T: PartialEq
+ Eq
+ Not<Output = T>
+ Copy
+ AddAssign<T>
+ BitAnd<T, Output = T>
+ BitAndAssign<T>
+ Sub<T, Output = T>
+ Shr<usize, Output = T>
+ Shl<usize, Output = T>
+ From<u8>,
{
fn add_assign(&mut self, rhs: Self) {
self.value += rhs.value;
#[cfg(debug_assertions)]
if (self.value & !Self::MASK) != T::from(0) {
panic!("attempt to add with overflow");
}
self.value &= Self::MASK;
}
}
impl<T, const BITS: usize> Sub for UInt<T, BITS>
where
Self: Number,
T: Copy
+ BitAnd<T, Output = T>
+ Sub<T, Output = T>
+ Shl<usize, Output = T>
+ Shr<usize, Output = T>
+ From<u8>,
{
type Output = UInt<T, BITS>;
fn sub(self, rhs: Self) -> Self::Output {
Self {
value: (self.value - rhs.value) & Self::MASK,
}
}
}
impl<T, const BITS: usize> SubAssign for UInt<T, BITS>
where
Self: Number,
T: Copy
+ SubAssign<T>
+ BitAnd<T, Output = T>
+ BitAndAssign<T>
+ Sub<T, Output = T>
+ Shl<usize, Output = T>
+ Shr<usize, Output = T>
+ From<u8>,
{
fn sub_assign(&mut self, rhs: Self) {
self.value -= rhs.value;
self.value &= Self::MASK;
}
}
impl<T, const BITS: usize> BitAnd for UInt<T, BITS>
where
Self: Number,
T: Copy
+ BitAnd<T, Output = T>
+ Sub<T, Output = T>
+ Shl<usize, Output = T>
+ Shr<usize, Output = T>
+ From<u8>,
{
type Output = UInt<T, BITS>;
fn bitand(self, rhs: Self) -> Self::Output {
Self {
value: self.value & rhs.value,
}
}
}
impl<T, const BITS: usize> BitAndAssign for UInt<T, BITS>
where
T: Copy + BitAndAssign<T> + Sub<T, Output = T> + Shl<usize, Output = T> + From<u8>,
{
fn bitand_assign(&mut self, rhs: Self) {
self.value &= rhs.value;
}
}
impl<T, const BITS: usize> BitOr for UInt<T, BITS>
where
T: Copy + BitOr<T, Output = T> + Sub<T, Output = T> + Shl<usize, Output = T> + From<u8>,
{
type Output = UInt<T, BITS>;
fn bitor(self, rhs: Self) -> Self::Output {
Self {
value: self.value | rhs.value,
}
}
}
impl<T, const BITS: usize> BitOrAssign for UInt<T, BITS>
where
T: Copy + BitOrAssign<T> + Sub<T, Output = T> + Shl<usize, Output = T> + From<u8>,
{
fn bitor_assign(&mut self, rhs: Self) {
self.value |= rhs.value;
}
}
impl<T, const BITS: usize> BitXor for UInt<T, BITS>
where
T: Copy + BitXor<T, Output = T> + Sub<T, Output = T> + Shl<usize, Output = T> + From<u8>,
{
type Output = UInt<T, BITS>;
fn bitxor(self, rhs: Self) -> Self::Output {
Self {
value: self.value ^ rhs.value,
}
}
}
impl<T, const BITS: usize> BitXorAssign for UInt<T, BITS>
where
T: Copy + BitXorAssign<T> + Sub<T, Output = T> + Shl<usize, Output = T> + From<u8>,
{
fn bitxor_assign(&mut self, rhs: Self) {
self.value ^= rhs.value;
}
}
impl<T, const BITS: usize> Not for UInt<T, BITS>
where
Self: Number,
T: Copy
+ BitAnd<T, Output = T>
+ BitXor<T, Output = T>
+ Sub<T, Output = T>
+ Shl<usize, Output = T>
+ Shr<usize, Output = T>
+ From<u8>,
{
type Output = UInt<T, BITS>;
fn not(self) -> Self::Output {
Self {
value: self.value ^ Self::MASK,
}
}
}
impl<T, TSHIFTBITS, const BITS: usize> Shl<TSHIFTBITS> for UInt<T, BITS>
where
Self: Number,
T: Copy
+ BitAnd<T, Output = T>
+ Shl<TSHIFTBITS, Output = T>
+ Sub<T, Output = T>
+ Shl<usize, Output = T>
+ Shr<usize, Output = T>
+ From<u8>,
{
type Output = UInt<T, BITS>;
fn shl(self, rhs: TSHIFTBITS) -> Self::Output {
Self {
value: (self.value << rhs) & Self::MASK,
}
}
}
impl<T, TSHIFTBITS, const BITS: usize> ShlAssign<TSHIFTBITS> for UInt<T, BITS>
where
Self: Number,
T: Copy
+ BitAnd<T, Output = T>
+ BitAndAssign<T>
+ ShlAssign<TSHIFTBITS>
+ Sub<T, Output = T>
+ Shr<usize, Output = T>
+ Shl<usize, Output = T>
+ From<u8>,
{
fn shl_assign(&mut self, rhs: TSHIFTBITS) {
self.value <<= rhs;
self.value &= Self::MASK;
}
}
impl<T, TSHIFTBITS, const BITS: usize> Shr<TSHIFTBITS> for UInt<T, BITS>
where
T: Copy + Shr<TSHIFTBITS, Output = T> + Sub<T, Output = T> + Shl<usize, Output = T> + From<u8>,
{
type Output = UInt<T, BITS>;
fn shr(self, rhs: TSHIFTBITS) -> Self::Output {
Self {
value: self.value >> rhs,
}
}
}
impl<T, TSHIFTBITS, const BITS: usize> ShrAssign<TSHIFTBITS> for UInt<T, BITS>
where
T: Copy + ShrAssign<TSHIFTBITS> + Sub<T, Output = T> + Shl<usize, Output = T> + From<u8>,
{
fn shr_assign(&mut self, rhs: TSHIFTBITS) {
self.value >>= rhs;
}
}
impl<T, const BITS: usize> Display for UInt<T, BITS>
where
T: Display,
{
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
self.value.fmt(f)
}
}
impl<T, const BITS: usize> Debug for UInt<T, BITS>
where
T: Debug,
{
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
self.value.fmt(f)
}
}
impl<T, const BITS: usize> LowerHex for UInt<T, BITS>
where
T: LowerHex,
{
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
self.value.fmt(f)
}
}
impl<T, const BITS: usize> UpperHex for UInt<T, BITS>
where
T: UpperHex,
{
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
self.value.fmt(f)
}
}
impl<T, const BITS: usize> Octal for UInt<T, BITS>
where
T: Octal,
{
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
self.value.fmt(f)
}
}
impl<T, const BITS: usize> Binary for UInt<T, BITS>
where
T: Binary,
{
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
self.value.fmt(f)
}
}
impl<T, const BITS: usize> Hash for UInt<T, BITS>
where
T: Hash,
{
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.value.hash(state)
}
}
#[cfg(feature = "num-traits")]
impl<T, const NUM_BITS: usize> num_traits::WrappingAdd for UInt<T, NUM_BITS>
where
Self: Number,
T: PartialEq
+ Eq
+ Copy
+ Add<T, Output = T>
+ Sub<T, Output = T>
+ BitAnd<T, Output = T>
+ Not<Output = T>
+ Shr<usize, Output = T>
+ Shl<usize, Output = T>
+ From<u8>,
Wrapping<T>: Add<Wrapping<T>, Output = Wrapping<T>>,
{
#[inline]
fn wrapping_add(&self, rhs: &Self) -> Self {
let sum = (Wrapping(self.value) + Wrapping(rhs.value)).0;
Self {
value: sum & Self::MASK,
}
}
}
#[cfg(feature = "num-traits")]
impl<T, const NUM_BITS: usize> num_traits::WrappingSub for UInt<T, NUM_BITS>
where
Self: Number,
T: PartialEq
+ Eq
+ Copy
+ Add<T, Output = T>
+ Sub<T, Output = T>
+ BitAnd<T, Output = T>
+ Not<Output = T>
+ Shr<usize, Output = T>
+ Shl<usize, Output = T>
+ From<u8>,
Wrapping<T>: Sub<Wrapping<T>, Output = Wrapping<T>>,
{
#[inline]
fn wrapping_sub(&self, rhs: &Self) -> Self {
        let difference = (Wrapping(self.value) - Wrapping(rhs.value)).0;
        Self {
            value: difference & Self::MASK,
}
}
}
#[cfg(feature = "num-traits")]
impl<T, const NUM_BITS: usize> num_traits::bounds::Bounded for UInt<T, NUM_BITS>
where
Self: Number,
{
fn min_value() -> Self {
Self::MIN
}
fn max_value() -> Self {
Self::MAX
}
}
macro_rules! bytes_operation_impl {
($base_data_type:ty, $bits:expr, [$($indices:expr),+]) => {
impl UInt<$base_data_type, $bits>
{
#[inline]
pub const fn swap_bytes(&self) -> Self {
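                // Swapping all bytes of the underlying type moves the meaningful bytes to the
                // top; shift them back down into place.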
const SHIFT_RIGHT: usize = (core::mem::size_of::<$base_data_type>() << 3) - $bits;
Self { value: self.value.swap_bytes() >> SHIFT_RIGHT }
}
pub const fn to_le_bytes(&self) -> [u8; $bits >> 3] {
let v = self.value();
[ $( (v >> ($indices << 3)) as u8, )+ ]
}
pub const fn from_le_bytes(from: [u8; $bits >> 3]) -> Self {
let value = { 0 $( | (from[$indices] as $base_data_type) << ($indices << 3))+ };
Self { value }
}
pub const fn to_be_bytes(&self) -> [u8; $bits >> 3] {
let v = self.value();
[ $( (v >> ($bits - 8 - ($indices << 3))) as u8, )+ ]
}
pub const fn from_be_bytes(from: [u8; $bits >> 3]) -> Self {
let value = { 0 $( | (from[$indices] as $base_data_type) << ($bits - 8 - ($indices << 3)))+ };
Self { value }
}
#[inline]
pub const fn to_ne_bytes(&self) -> [u8; $bits >> 3] {
#[cfg(target_endian = "little")]
{
self.to_le_bytes()
}
#[cfg(target_endian = "big")]
{
self.to_be_bytes()
}
}
#[inline]
pub const fn from_ne_bytes(bytes: [u8; $bits >> 3]) -> Self {
#[cfg(target_endian = "little")]
{
Self::from_le_bytes(bytes)
}
#[cfg(target_endian = "big")]
{
Self::from_be_bytes(bytes)
}
}
#[inline]
pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(target_endian = "big")]
{
self.swap_bytes()
}
}
#[inline]
pub const fn to_be(self) -> Self {
#[cfg(target_endian = "little")]
{
self.swap_bytes()
}
#[cfg(target_endian = "big")]
{
self
}
}
#[inline]
pub const fn from_le(value: Self) -> Self {
value.to_le()
}
#[inline]
pub const fn from_be(value: Self) -> Self {
value.to_be()
}
}
};
}
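// Byte-level helpers are only generated for bit widths that span a whole number of bytes and are
// not already covered by a native integer type.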
bytes_operation_impl!(u32, 24, [0, 1, 2]);
bytes_operation_impl!(u64, 24, [0, 1, 2]);
bytes_operation_impl!(u128, 24, [0, 1, 2]);
bytes_operation_impl!(u64, 40, [0, 1, 2, 3, 4]);
bytes_operation_impl!(u128, 40, [0, 1, 2, 3, 4]);
bytes_operation_impl!(u64, 48, [0, 1, 2, 3, 4, 5]);
bytes_operation_impl!(u128, 48, [0, 1, 2, 3, 4, 5]);
bytes_operation_impl!(u64, 56, [0, 1, 2, 3, 4, 5, 6]);
bytes_operation_impl!(u128, 56, [0, 1, 2, 3, 4, 5, 6]);
bytes_operation_impl!(u128, 72, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
bytes_operation_impl!(u128, 80, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
bytes_operation_impl!(u128, 88, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
bytes_operation_impl!(u128, 96, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
bytes_operation_impl!(u128, 104, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
bytes_operation_impl!(u128, 112, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]);
bytes_operation_impl!(
u128,
120,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
);
#[cfg(feature = "const_convert_and_const_trait_impl")]
macro_rules! from_arbitrary_int_impl {
($from:ty, [$($into:ty),+]) => {
$(
impl<const BITS: usize, const BITS_FROM: usize> const From<UInt<$from, BITS_FROM>>
for UInt<$into, BITS>
{
#[inline]
fn from(item: UInt<$from, BITS_FROM>) -> Self {
let _ = CompileTimeAssert::<BITS_FROM, BITS>::SMALLER_OR_EQUAL;
Self { value: item.value as $into }
}
}
)+
};
}
#[cfg(not(feature = "const_convert_and_const_trait_impl"))]
macro_rules! from_arbitrary_int_impl {
($from:ty, [$($into:ty),+]) => {
$(
impl<const BITS: usize, const BITS_FROM: usize> From<UInt<$from, BITS_FROM>>
for UInt<$into, BITS>
{
#[inline]
fn from(item: UInt<$from, BITS_FROM>) -> Self {
let _ = CompileTimeAssert::<BITS_FROM, BITS>::SMALLER_OR_EQUAL;
Self { value: item.value as $into }
}
}
)+
};
}
#[cfg(feature = "const_convert_and_const_trait_impl")]
macro_rules! from_native_impl {
($from:ty, [$($into:ty),+]) => {
$(
impl<const BITS: usize> const From<$from> for UInt<$into, BITS> {
#[inline]
fn from(from: $from) -> Self {
let _ = CompileTimeAssert::<{ <$from>::BITS as usize }, BITS>::SMALLER_OR_EQUAL;
Self { value: from as $into }
}
}
impl<const BITS: usize> const From<UInt<$from, BITS>> for $into {
#[inline]
fn from(from: UInt<$from, BITS>) -> Self {
let _ = CompileTimeAssert::<BITS, { <$into>::BITS as usize }>::SMALLER_OR_EQUAL;
from.value as $into
}
}
)+
};
}
#[cfg(not(feature = "const_convert_and_const_trait_impl"))]
macro_rules! from_native_impl {
($from:ty, [$($into:ty),+]) => {
$(
impl<const BITS: usize> From<$from> for UInt<$into, BITS> {
#[inline]
fn from(from: $from) -> Self {
let _ = CompileTimeAssert::<{ <$from>::BITS as usize }, BITS>::SMALLER_OR_EQUAL;
Self { value: from as $into }
}
}
impl<const BITS: usize> From<UInt<$from, BITS>> for $into {
#[inline]
fn from(from: UInt<$from, BITS>) -> Self {
let _ = CompileTimeAssert::<BITS, { <$into>::BITS as usize }>::SMALLER_OR_EQUAL;
from.value as $into
}
}
)+
};
}
from_arbitrary_int_impl!(u8, [u16, u32, u64, u128]);
from_arbitrary_int_impl!(u16, [u8, u32, u64, u128]);
from_arbitrary_int_impl!(u32, [u8, u16, u64, u128]);
from_arbitrary_int_impl!(u64, [u8, u16, u32, u128]);
from_arbitrary_int_impl!(u128, [u8, u16, u32, u64]);
from_native_impl!(u8, [u8, u16, u32, u64, u128]);
from_native_impl!(u16, [u8, u16, u32, u64, u128]);
from_native_impl!(u32, [u8, u16, u32, u64, u128]);
from_native_impl!(u64, [u8, u16, u32, u64, u128]);
from_native_impl!(u128, [u8, u16, u32, u64, u128]);
macro_rules! type_alias {
($storage:ty, $(($name:ident, $bits:expr)),+) => {
$( pub type $name = crate::UInt<$storage, $bits>; )+
}
}
pub use aliases::*;
#[allow(non_camel_case_types)]
#[rustfmt::skip]
mod aliases {
type_alias!(u8, (u1, 1), (u2, 2), (u3, 3), (u4, 4), (u5, 5), (u6, 6), (u7, 7));
type_alias!(u16, (u9, 9), (u10, 10), (u11, 11), (u12, 12), (u13, 13), (u14, 14), (u15, 15));
type_alias!(u32, (u17, 17), (u18, 18), (u19, 19), (u20, 20), (u21, 21), (u22, 22), (u23, 23), (u24, 24), (u25, 25), (u26, 26), (u27, 27), (u28, 28), (u29, 29), (u30, 30), (u31, 31));
type_alias!(u64, (u33, 33), (u34, 34), (u35, 35), (u36, 36), (u37, 37), (u38, 38), (u39, 39), (u40, 40), (u41, 41), (u42, 42), (u43, 43), (u44, 44), (u45, 45), (u46, 46), (u47, 47), (u48, 48), (u49, 49), (u50, 50), (u51, 51), (u52, 52), (u53, 53), (u54, 54), (u55, 55), (u56, 56), (u57, 57), (u58, 58), (u59, 59), (u60, 60), (u61, 61), (u62, 62), (u63, 63));
type_alias!(u128, (u65, 65), (u66, 66), (u67, 67), (u68, 68), (u69, 69), (u70, 70), (u71, 71), (u72, 72), (u73, 73), (u74, 74), (u75, 75), (u76, 76), (u77, 77), (u78, 78), (u79, 79), (u80, 80), (u81, 81), (u82, 82), (u83, 83), (u84, 84), (u85, 85), (u86, 86), (u87, 87), (u88, 88), (u89, 89), (u90, 90), (u91, 91), (u92, 92), (u93, 93), (u94, 94), (u95, 95), (u96, 96), (u97, 97), (u98, 98), (u99, 99), (u100, 100), (u101, 101), (u102, 102), (u103, 103), (u104, 104), (u105, 105), (u106, 106), (u107, 107), (u108, 108), (u109, 109), (u110, 110), (u111, 111), (u112, 112), (u113, 113), (u114, 114), (u115, 115), (u116, 116), (u117, 117), (u118, 118), (u119, 119), (u120, 120), (u121, 121), (u122, 122), (u123, 123), (u124, 124), (u125, 125), (u126, 126), (u127, 127));
}
#[cfg(feature = "const_convert_and_const_trait_impl")]
macro_rules! boolu1 {
() => {
impl const From<bool> for u1 {
#[inline]
fn from(value: bool) -> Self {
u1::new(value as u8)
}
}
impl const From<u1> for bool {
#[inline]
fn from(value: u1) -> Self {
match value.value() {
0 => false,
1 => true,
                    _ => panic!("arbitrary_int_type already validates that this is unreachable"),
                }
}
}
};
}
#[cfg(not(feature = "const_convert_and_const_trait_impl"))]
macro_rules! boolu1 {
() => {
impl From<bool> for u1 {
#[inline]
fn from(value: bool) -> Self {
u1::new(value as u8)
}
}
impl From<u1> for bool {
#[inline]
fn from(value: u1) -> Self {
match value.value() {
0 => false,
1 => true,
                    _ => panic!("arbitrary_int_type already validates that this is unreachable"),
                }
}
}
};
}
boolu1!();
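// A few illustrative unit tests: a sketch rather than an exhaustive suite, exercising the API
// defined in this file.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_and_try_new_enforce_the_bit_width() {
        assert_eq!(u7::new(127).value(), 127);
        assert_eq!(u7::MAX.value(), 127);
        assert!(u7::try_new(128).is_err());
    }

    #[test]
    fn extract_reads_a_bit_field() {
        // Bits 4..=9 of 0b11_1101_0000 are 0b11_1101.
        assert_eq!(u6::extract_u16(0b11_1101_0000, 4).value(), 0b11_1101);
    }

    #[test]
    fn arithmetic_and_bit_operations_stay_within_the_mask() {
        let a = u4::new(9);
        let b = u4::new(6);
        assert_eq!((a + b).value(), 15);
        assert_eq!((a | b).value(), 15);
        assert_eq!((a & b).value(), 0);
        assert_eq!((a ^ b).value(), 15);
        assert_eq!((!u4::new(0)).value(), 15);
    }

    #[test]
    fn rotate_wraps_around_the_reduced_width() {
        let value = u6::new(0b10_0110);
        assert_eq!(value.rotate_left(2).value(), 0b01_1010);
        assert_eq!(value.rotate_right(2).value(), 0b10_1001);
    }

    #[test]
    fn byte_conversions_round_trip() {
        let value = u24::new(0x12_34_56);
        assert_eq!(value.to_be_bytes(), [0x12, 0x34, 0x56]);
        assert_eq!(value.to_le_bytes(), [0x56, 0x34, 0x12]);
        assert_eq!(u24::from_be_bytes([0x12, 0x34, 0x56]), value);
    }

    #[test]
    fn bool_and_u1_convert_in_both_directions() {
        assert_eq!(u1::from(true).value(), 1);
        assert!(bool::from(u1::new(1)));
    }
}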