use crate::{
access::BitAccess,
array::BitArray,
devel as dvl,
domain::{
Domain,
DomainMut,
},
index::BitMask,
mem::BitMemory,
order::{
BitOrder,
Lsb0,
Msb0,
},
slice::BitSlice,
store::BitStore,
view::BitView,
};
use core::{
mem,
ops::{
Shl,
Shr,
},
ptr,
};
use tap::pipe::Pipe;
#[cfg(feature = "alloc")]
use crate::{
boxed::BitBox,
vec::BitVec,
};
/// Permits a bit-slice region to act as a storage location for ordinary
/// integer values.
///
/// Implementors provide loads and stores in both little-endian and big-endian
/// *element* orderings; the un-suffixed `load`/`store` methods select the
/// ordering that matches the target's byte endianness at compile time.
pub trait BitField {
	/// Loads the region's bits into an `M` value, using the element ordering
	/// matching the target's byte endianness (`load_le` on little-endian
	/// targets, `load_be` on big-endian targets).
	#[inline(always)]
	#[cfg(not(tarpaulin_include))]
	fn load<M>(&self) -> M
	where M: BitMemory {
		//  Exactly one of these two statements survives compilation.
		#[cfg(target_endian = "little")]
		return self.load_le::<M>();
		#[cfg(target_endian = "big")]
		return self.load_be::<M>();
	}
	/// Stores an `M` value into the region's bits, using the element ordering
	/// matching the target's byte endianness (`store_le` on little-endian
	/// targets, `store_be` on big-endian targets).
	#[inline(always)]
	#[cfg(not(tarpaulin_include))]
	fn store<M>(&mut self, value: M)
	where M: BitMemory {
		//  Exactly one of these two statements survives compilation.
		#[cfg(target_endian = "little")]
		self.store_le(value);
		#[cfg(target_endian = "big")]
		self.store_be(value);
	}
	/// Loads the region into an `M`, with lower-addressed elements holding
	/// the less-significant fragments of the value.
	fn load_le<M>(&self) -> M
	where M: BitMemory;
	/// Loads the region into an `M`, with lower-addressed elements holding
	/// the more-significant fragments of the value.
	fn load_be<M>(&self) -> M
	where M: BitMemory;
	/// Stores an `M` into the region, with lower-addressed elements receiving
	/// the less-significant fragments of the value.
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory;
	/// Stores an `M` into the region, with lower-addressed elements receiving
	/// the more-significant fragments of the value.
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory;
}
/// Integer storage for `Lsb0`-ordered bit-slices.
///
/// Under `Lsb0`, a fragment's shift distance from an element's LSB is the
/// fragment's `head` index, so head fragments shift by `head.value()` and
/// tail fragments sit at shift `0`.
impl<T> BitField for BitSlice<Lsb0, T>
where T: BitStore
{
	/// Loads with little-endian element ordering: elements at lower memory
	/// addresses contribute the less-significant fragments of the value.
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		let len = self.len();
		//  The region must hold between 1 and `M::BITS` live bits.
		check("load", len, M::BITS);
		match self.domain() {
			//  All live bits reside within a single element.
			Domain::Enclave { head, elem, tail } => {
				get::<T, M>(elem, Lsb0::mask(head, tail), head.value())
			},
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;
				//  The trailing fragment is the most-significant chunk.
				if let Some((elem, tail)) = tail {
					accum = get::<T, M>(elem, Lsb0::mask(None, tail), 0);
				}
				//  Fold in fully-live elements from high address to low,
				//  opening room at the bottom of the accumulator each time.
				for elem in body.iter().rev().copied() {
					//  Skipped when the shift would span all of `M`.
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}
				//  The leading fragment is the least-significant chunk; its
				//  live width is `T::Mem::BITS - shamt`.
				if let Some((head, elem)) = head {
					let shamt = head.value();
					accum <<= T::Mem::BITS - shamt;
					accum |= get::<T, M>(elem, Lsb0::mask(head, None), shamt);
				}
				accum
			},
		}
	}
	/// Loads with big-endian element ordering: elements at lower memory
	/// addresses contribute the more-significant fragments of the value.
	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		let len = self.len();
		//  The region must hold between 1 and `M::BITS` live bits.
		check("load", len, M::BITS);
		match self.domain() {
			//  All live bits reside within a single element.
			Domain::Enclave { head, elem, tail } => {
				get::<T, M>(elem, Lsb0::mask(head, tail), head.value())
			},
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;
				//  The leading fragment is the most-significant chunk.
				if let Some((head, elem)) = head {
					accum =
						get::<T, M>(elem, Lsb0::mask(head, None), head.value());
				}
				//  Fold in fully-live elements from low address to high.
				for elem in body.iter().copied() {
					//  Skipped when the shift would span all of `M`.
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}
				//  The trailing fragment is the least-significant chunk of
				//  width `tail.value()`.
				if let Some((elem, tail)) = tail {
					accum <<= tail.value();
					accum |= get::<T, M>(elem, Lsb0::mask(None, tail), 0);
				}
				accum
			},
		}
	}
	/// Stores with little-endian element ordering: the value's less-
	/// significant fragments are written to lower memory addresses.
	#[inline]
	fn store_le<M>(&mut self, mut value: M)
	where M: BitMemory {
		let len = self.len();
		//  The region must hold between 1 and `M::BITS` live bits.
		check("store", len, M::BITS);
		match self.domain_mut() {
			//  All live bits reside within a single element.
			DomainMut::Enclave { head, elem, tail } => {
				set::<T, M>(elem, value, Lsb0::mask(head, tail), head.value())
			},
			DomainMut::Region { head, body, tail } => {
				//  The leading fragment receives the lowest bits of `value`.
				if let Some((head, elem)) = head {
					let shamt = head.value();
					set::<T, M>(elem, value, Lsb0::mask(head, None), shamt);
					value >>= T::Mem::BITS - shamt;
				}
				//  Fully-live elements consume the next chunks, in address
				//  order.
				for elem in body {
					*elem = resize(value);
					//  Skipped when the shift would span all of `M`.
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}
				//  The trailing fragment receives the remaining high bits.
				if let Some((elem, tail)) = tail {
					set::<T, M>(elem, value, Lsb0::mask(None, tail), 0);
				}
			},
		}
	}
	/// Stores with big-endian element ordering: the value's more-significant
	/// fragments are written to lower memory addresses.
	#[inline]
	fn store_be<M>(&mut self, mut value: M)
	where M: BitMemory {
		let len = self.len();
		//  The region must hold between 1 and `M::BITS` live bits.
		check("store", len, M::BITS);
		match self.domain_mut() {
			//  All live bits reside within a single element.
			DomainMut::Enclave { head, elem, tail } => {
				set::<T, M>(elem, value, Lsb0::mask(head, tail), head.value())
			},
			DomainMut::Region { head, body, tail } => {
				//  The trailing fragment receives the lowest bits of `value`.
				if let Some((elem, tail)) = tail {
					set::<T, M>(elem, value, Lsb0::mask(None, tail), 0);
					value >>= tail.value()
				}
				//  Fully-live elements consume the next chunks, from high
				//  address to low.
				for elem in body.iter_mut().rev() {
					*elem = resize(value);
					//  Skipped when the shift would span all of `M`.
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}
				//  The leading fragment receives the remaining high bits.
				if let Some((head, elem)) = head {
					set::<T, M>(
						elem,
						value,
						Lsb0::mask(head, None),
						head.value(),
					);
				}
			},
		}
	}
}
/// Integer storage for `Msb0`-ordered bit-slices.
///
/// Under `Msb0`, bit index `0` is an element's most-significant bit, so a
/// tail fragment of index `t` occupies the register's *high* bits and must be
/// shifted down by `T::Mem::BITS - t`, while a head fragment occupies the
/// register's *low* bits at shift `0`.
impl<T> BitField for BitSlice<Msb0, T>
where T: BitStore
{
	/// Loads with little-endian element ordering: elements at lower memory
	/// addresses contribute the less-significant fragments of the value.
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		let len = self.len();
		//  The region must hold between 1 and `M::BITS` live bits.
		check("load", len, M::BITS);
		match self.domain() {
			//  All live bits reside within a single element, ending
			//  `T::Mem::BITS - tail.value()` bits above the LSB.
			Domain::Enclave { head, elem, tail } => get::<T, M>(
				elem,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;
				//  The trailing fragment is the most-significant chunk.
				if let Some((elem, tail)) = tail {
					accum = get::<T, M>(
						elem,
						Msb0::mask(None, tail),
						T::Mem::BITS - tail.value(),
					);
				}
				//  Fold in fully-live elements from high address to low,
				//  opening room at the bottom of the accumulator each time.
				for elem in body.iter().rev().copied() {
					//  Skipped when the shift would span all of `M`.
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}
				//  The leading fragment is the least-significant chunk; its
				//  live width is `T::Mem::BITS - head.value()`.
				if let Some((head, elem)) = head {
					accum <<= T::Mem::BITS - head.value();
					accum |= get::<T, M>(elem, Msb0::mask(head, None), 0);
				}
				accum
			},
		}
	}
	/// Loads with big-endian element ordering: elements at lower memory
	/// addresses contribute the more-significant fragments of the value.
	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		let len = self.len();
		//  The region must hold between 1 and `M::BITS` live bits.
		check("load", len, M::BITS);
		match self.domain() {
			//  All live bits reside within a single element, ending
			//  `T::Mem::BITS - tail.value()` bits above the LSB.
			Domain::Enclave { head, elem, tail } => get::<T, M>(
				elem,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;
				//  The leading fragment is the most-significant chunk.
				if let Some((head, elem)) = head {
					accum = get::<T, M>(elem, Msb0::mask(head, None), 0);
				}
				//  Fold in fully-live elements from low address to high.
				for elem in body.iter().copied() {
					//  Skipped when the shift would span all of `M`.
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}
				//  The trailing fragment is the least-significant chunk of
				//  width `tail.value()`, taken from its element's high bits.
				if let Some((elem, tail)) = tail {
					let width = tail.value();
					accum <<= width;
					accum |= get::<T, M>(
						elem,
						Msb0::mask(None, tail),
						T::Mem::BITS - width,
					);
				}
				accum
			},
		}
	}
	/// Stores with little-endian element ordering: the value's less-
	/// significant fragments are written to lower memory addresses.
	#[inline]
	fn store_le<M>(&mut self, mut value: M)
	where M: BitMemory {
		let len = self.len();
		//  The region must hold between 1 and `M::BITS` live bits.
		check("store", len, M::BITS);
		match self.domain_mut() {
			//  All live bits reside within a single element.
			DomainMut::Enclave { head, elem, tail } => set::<T, M>(
				elem,
				value,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			DomainMut::Region { head, body, tail } => {
				//  The leading fragment receives the lowest bits of `value`;
				//  its width is `T::Mem::BITS - head.value()`.
				if let Some((head, elem)) = head {
					set::<T, M>(elem, value, Msb0::mask(head, None), 0);
					value >>= T::Mem::BITS - head.value();
				}
				//  Fully-live elements consume the next chunks, in address
				//  order.
				for elem in body.iter_mut() {
					*elem = resize(value);
					//  Skipped when the shift would span all of `M`.
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}
				//  The trailing fragment receives the remaining high bits,
				//  placed into its element's high-order region.
				if let Some((elem, tail)) = tail {
					set::<T, M>(
						elem,
						value,
						Msb0::mask(None, tail),
						T::Mem::BITS - tail.value(),
					);
				}
			},
		}
	}
	/// Stores with big-endian element ordering: the value's more-significant
	/// fragments are written to lower memory addresses.
	#[inline]
	fn store_be<M>(&mut self, mut value: M)
	where M: BitMemory {
		let len = self.len();
		//  The region must hold between 1 and `M::BITS` live bits.
		check("store", len, M::BITS);
		match self.domain_mut() {
			//  All live bits reside within a single element.
			DomainMut::Enclave { head, elem, tail } => set::<T, M>(
				elem,
				value,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			DomainMut::Region { head, body, tail } => {
				//  The trailing fragment receives the lowest bits of `value`,
				//  placed into its element's high-order region.
				if let Some((elem, tail)) = tail {
					set::<T, M>(
						elem,
						value,
						Msb0::mask(None, tail),
						T::Mem::BITS - tail.value(),
					);
					value >>= tail.value();
				}
				//  Fully-live elements consume the next chunks, from high
				//  address to low.
				for elem in body.iter_mut().rev() {
					*elem = resize(value);
					//  Skipped when the shift would span all of `M`.
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}
				//  The leading fragment receives the remaining high bits.
				if let Some((head, elem)) = head {
					set::<T, M>(elem, value, Msb0::mask(head, None), 0);
				}
			},
		}
	}
}
#[cfg(not(tarpaulin_include))]
impl<O, V> BitField for BitArray<O, V>
where
O: BitOrder,
V: BitView,
BitSlice<O, V::Store>: BitField,
{
#[inline]
fn load_le<M>(&self) -> M
where M: BitMemory {
self.as_bitslice().load_le()
}
#[inline]
fn load_be<M>(&self) -> M
where M: BitMemory {
self.as_bitslice().load_be()
}
#[inline]
fn store_le<M>(&mut self, value: M)
where M: BitMemory {
self.as_mut_bitslice().store_le(value)
}
#[inline]
fn store_be<M>(&mut self, value: M)
where M: BitMemory {
self.as_mut_bitslice().store_be(value)
}
}
#[cfg(feature = "alloc")]
#[cfg(not(tarpaulin_include))]
/// `BitBox` stores integers by delegating to its interior bit-slice.
impl<O, T> BitField for BitBox<O, T>
where
	O: BitOrder,
	T: BitStore,
	BitSlice<O, T>: BitField,
{
	/// Forwards to the little-endian load of the interior bit-slice.
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		BitField::load_le::<M>(self.as_bitslice())
	}
	/// Forwards to the big-endian load of the interior bit-slice.
	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		BitField::load_be::<M>(self.as_bitslice())
	}
	/// Forwards to the little-endian store of the interior bit-slice.
	#[inline]
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory {
		BitField::store_le::<M>(self.as_mut_bitslice(), value)
	}
	/// Forwards to the big-endian store of the interior bit-slice.
	#[inline]
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory {
		BitField::store_be::<M>(self.as_mut_bitslice(), value)
	}
}
#[cfg(feature = "alloc")]
#[cfg(not(tarpaulin_include))]
/// `BitVec` stores integers by delegating to its interior bit-slice.
impl<O, T> BitField for BitVec<O, T>
where
	O: BitOrder,
	T: BitStore,
	BitSlice<O, T>: BitField,
{
	/// Forwards to the little-endian load of the interior bit-slice.
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		BitField::load_le::<M>(self.as_bitslice())
	}
	/// Forwards to the big-endian load of the interior bit-slice.
	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		BitField::load_be::<M>(self.as_bitslice())
	}
	/// Forwards to the little-endian store of the interior bit-slice.
	#[inline]
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory {
		BitField::store_le::<M>(self.as_mut_bitslice(), value)
	}
	/// Forwards to the big-endian store of the interior bit-slice.
	#[inline]
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory {
		BitField::store_be::<M>(self.as_mut_bitslice(), value)
	}
}
/// Verifies that a `len`-bit region can exchange data with a `width`-bit
/// integer.
///
/// # Panics
///
/// Panics when `len` is zero or exceeds `width`.
#[inline]
fn check(action: &'static str, len: usize, width: u8) {
	let max = width as usize;
	if len < 1 || len > max {
		panic!("Cannot {} {} bits from a {}-bit region", action, width, len);
	}
}
#[inline]
fn get<T, M>(elem: &T, mask: BitMask<T::Mem>, shamt: u8) -> M
where
T: BitStore,
M: BitMemory,
{
elem.load_value()
.pipe(|val| mask & val)
.value()
.pipe(|val| Shr::<u8>::shr(val, shamt))
.pipe(resize::<T::Mem, M>)
}
#[inline]
fn set<T, M>(elem: &T::Alias, value: M, mask: BitMask<T::Mem>, shamt: u8)
where
T: BitStore,
M: BitMemory,
{
let elem = dvl::accessor(elem);
let mask = dvl::alias_mask::<T>(mask);
let value = value
.pipe(resize::<M, T::Mem>)
.pipe(dvl::alias_mem::<T>)
.pipe(|val| Shl::<u8>::shl(val, shamt))
.pipe(|val| mask & val);
elem.clear_bits(mask);
elem.set_bits(value);
}
/// Converts a `T` value into a `U` value, zero-extending or truncating at the
/// most-significant end as the relative widths require.
#[inline]
fn resize<T, U>(value: T) -> U
where
	T: BitMemory,
	U: BitMemory,
{
	let mut converted = U::ZERO;
	//  SAFETY: the references are valid for the byte counts passed, and
	//  `resize_inner` never copies more than the smaller of the two.
	unsafe {
		resize_inner::<T, U>(
			&value,
			&mut converted,
			mem::size_of::<T>(),
			mem::size_of::<U>(),
		);
	}
	converted
}
/// Byte-copy core of `resize` for little-endian targets.
///
/// On a little-endian machine, the least-significant bytes of both values are
/// their leading bytes, so copying the first `min(size_t, size_u)` bytes
/// performs the truncation or zero-extension.
///
/// # Safety
///
/// `size_t` and `size_u` must be the byte sizes of `T` and `U`, respectively.
#[inline(always)]
#[cfg(target_endian = "little")]
#[cfg(not(tarpaulin_include))]
unsafe fn resize_inner<T, U>(
	src: &T,
	dst: &mut U,
	size_t: usize,
	size_u: usize,
)
{
	//  Only the overlapping low-order span can be transferred.
	let width = if size_t < size_u { size_t } else { size_u };
	let from = src as *const T as *const u8;
	let into = dst as *mut U as *mut u8;
	ptr::copy_nonoverlapping(from, into, width);
}
/// Byte-copy core of `resize` for big-endian targets.
///
/// On a big-endian machine, the least-significant bytes of a value are its
/// trailing bytes, so truncation skips the leading source bytes and
/// zero-extension writes into the trailing destination bytes.
///
/// # Safety
///
/// `size_t` and `size_u` must be the byte sizes of `T` and `U`, respectively.
#[inline(always)]
#[cfg(target_endian = "big")]
#[cfg(not(tarpaulin_include))]
unsafe fn resize_inner<T, U>(
	src: &T,
	dst: &mut U,
	size_t: usize,
	size_u: usize,
)
{
	let from = src as *const T as *const u8;
	let into = dst as *mut U as *mut u8;
	if size_u < size_t {
		//  Narrowing: drop the high-order (leading) source bytes.
		ptr::copy_nonoverlapping(from.add(size_t - size_u), into, size_u);
	}
	else {
		//  Widening: fill only the low-order (trailing) destination bytes.
		ptr::copy_nonoverlapping(from, into.add(size_u - size_t), size_t);
	}
}
#[cfg(not(any(target_endian = "big", target_endian = "little")))]
compile_fail!(concat!(
"This architecture is currently not supported. File an issue at ",
env!(CARGO_PKG_REPOSITORY)
));
#[cfg(feature = "std")]
mod io;
#[cfg(test)]
mod tests;
#[cfg(all(test, feature = "std", not(miri), not(tarpaulin)))]
mod permutation_tests;