#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "alloc")]
use core::mem::ManuallyDrop;
use core::{
marker::PhantomData,
ops::RangeBounds,
ptr,
slice,
};
use funty::{
IsInteger,
IsNumber,
};
#[cfg(feature = "alloc")]
use tap::pipe::Pipe;
use crate::{
access::{
BitAccess,
BitSafe,
},
devel as dvl,
domain::{
BitDomain,
BitDomainMut,
Domain,
DomainMut,
},
index::BitMask,
mem::BitRegister,
mutability::{
Const,
Mut,
},
order::{
BitOrder,
Lsb0,
Msb0,
},
ptr::{
BitPtr,
BitPtrRange,
BitRef,
BitSpan,
BitSpanError,
},
store::BitStore,
};
#[cfg(feature = "alloc")]
use crate::{
ptr::Address,
vec::BitVec,
};
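/// A region of memory addressed as individual bits rather than as elements.
///
/// `O` chooses which bit of an element is index `0` within that element, and
/// `T` is the underlying storage element. The body is `[()]` so that
/// `&BitSlice` has slice-reference shape; the actual region description
/// (element address, starting bit, and bit count) is packed into the
/// reference value itself and decoded through `BitSpan`.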
#[repr(transparent)]
pub struct BitSlice<O = Lsb0, T = usize>
where
O: BitOrder,
T: BitStore,
{
_ord: PhantomData<O>,
_typ: PhantomData<[T]>,
_mem: [()],
}
impl<O, T> BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
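/// Views a single element as an immutable bit-slice over all of its bits.
///
/// A minimal usage sketch (assuming the crate prelude re-exports `BitSlice`
/// and the `Lsb0` ordering, and that `len` is provided by the `api`
/// submodule):
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let byte = 1u8;
/// let bits = BitSlice::<Lsb0, u8>::from_element(&byte);
/// assert_eq!(bits.len(), 8);
/// assert_eq!(bits.count_ones(), 1);
/// ```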
pub fn from_element(elem: &T) -> &Self {
unsafe { BitPtr::from_ref(elem).span_unchecked(T::Mem::BITS as usize) }
.to_bitslice_ref()
}
pub fn from_element_mut(elem: &mut T) -> &mut Self {
unsafe { BitPtr::from_mut(elem).span_unchecked(T::Mem::BITS as usize) }
.to_bitslice_mut()
}
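/// Attempts to view a slice of elements as an immutable bit-slice, failing
/// with `BitSpanError::TooLong` when the slice holds `MAX_ELTS` or more
/// elements. `from_slice_unchecked` performs the same view without the check.
///
/// A minimal usage sketch (assuming the crate prelude):
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let data = [0u8, 2u8];
/// let bits = BitSlice::<Msb0, u8>::from_slice(&data[..]).unwrap();
/// assert_eq!(bits.len(), 16);
/// assert_eq!(bits.count_ones(), 1);
/// ```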
pub fn from_slice(slice: &[T]) -> Result<&Self, BitSpanError<T>> {
let elts = slice.len();
if elts >= Self::MAX_ELTS {
return Err(BitSpanError::TooLong(
elts.saturating_mul(T::Mem::BITS as usize),
));
}
Ok(unsafe { Self::from_slice_unchecked(slice) })
}
pub fn from_slice_mut(
slice: &mut [T],
) -> Result<&mut Self, BitSpanError<T>> {
let elts = slice.len();
if elts >= Self::MAX_ELTS {
return Err(BitSpanError::TooLong(
elts.saturating_mul(T::Mem::BITS as usize),
));
}
Ok(unsafe { Self::from_slice_unchecked_mut(slice) })
}
pub unsafe fn from_slice_unchecked(slice: &[T]) -> &Self {
let bits = slice.len().wrapping_mul(T::Mem::BITS as usize);
BitPtr::from_slice(slice)
.span_unchecked(bits)
.to_bitslice_ref()
}
pub unsafe fn from_slice_unchecked_mut(slice: &mut [T]) -> &mut Self {
let bits = slice.len().wrapping_mul(T::Mem::BITS as usize);
BitPtr::from_mut_slice(slice)
.span_unchecked(bits)
.to_bitslice_mut()
}
pub fn empty<'a>() -> &'a Self {
BitSpan::<Const, O, T>::EMPTY.to_bitslice_ref()
}
pub fn empty_mut<'a>() -> &'a mut Self {
BitSpan::EMPTY.to_bitslice_mut()
}
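/// Writes `value` into the bit at `index`, panicking when `index` is out of
/// bounds. `set_unchecked` is the unchecked counterpart.
///
/// A minimal usage sketch (assuming the crate prelude):
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut byte = 0u8;
/// let bits = BitSlice::<Lsb0, u8>::from_element_mut(&mut byte);
/// bits.set(0, true);
/// assert_eq!(byte, 1);
/// ```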
pub fn set(&mut self, index: usize, value: bool) {
self.assert_in_bounds(index, 0 .. self.len());
unsafe {
self.set_unchecked(index, value);
}
}
pub fn set_aliased(&self, index: usize, value: bool)
where T: radium::Radium {
self.assert_in_bounds(index, 0 .. self.len());
unsafe {
self.set_aliased_unchecked(index, value);
}
}
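/// Tests whether any live bit in the slice is set. Partially-occupied edge
/// elements are masked first, so bits outside the slice cannot affect the
/// result.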
pub fn any(&self) -> bool {
match self.domain() {
Domain::Enclave { head, elem, tail } => {
(O::mask(head, tail) & elem.load_value()) != BitMask::ZERO
},
Domain::Region { head, body, tail } => {
head.map_or(false, |(head, elem)| {
(O::mask(head, None) & elem.load_value()) != BitMask::ZERO
}) || body.iter().any(|e| e.load_value() != T::Mem::ZERO)
|| tail.map_or(false, |(elem, tail)| {
(O::mask(None, tail) & elem.load_value()) != BitMask::ZERO
})
},
}
}
pub fn all(&self) -> bool {
match self.domain() {
Domain::Enclave { head, elem, tail } => {
(!O::mask(head, tail) | elem.load_value()) == BitMask::ALL
},
Domain::Region { head, body, tail } => {
head.map_or(true, |(head, elem)| {
(!O::mask(head, None) | elem.load_value()) == BitMask::ALL
}) && body
.iter()
.map(BitStore::load_value)
.all(|e| e == T::Mem::ALL)
&& tail.map_or(true, |(elem, tail)| {
(!O::mask(None, tail) | elem.load_value()) == BitMask::ALL
})
},
}
}
pub fn not_any(&self) -> bool {
!self.any()
}
pub fn not_all(&self) -> bool {
!self.all()
}
pub fn some(&self) -> bool {
self.any() && self.not_all()
}
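/// Counts the set bits in the slice. Edge elements are masked to their live
/// bits; fully-spanned interior elements are counted directly from their
/// loaded values.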
pub fn count_ones(&self) -> usize {
match self.domain() {
Domain::Enclave { head, elem, tail } => {
(O::mask(head, tail) & elem.load_value()).value().count_ones() as usize
},
Domain::Region { head, body, tail } => {
head.map_or(0, |(head, elem)| {
(O::mask(head, None) & elem.load_value())
.value()
.count_ones() as usize
}) + body
.iter()
.map(BitStore::load_value)
.map(|e| e.count_ones() as usize)
.sum::<usize>() + tail.map_or(0, |(elem, tail)| {
(O::mask(None, tail) & elem.load_value())
.value()
.count_ones() as usize
})
},
}
}
pub fn count_zeros(&self) -> usize {
match self.domain() {
Domain::Enclave { head, elem, tail } => {
(!O::mask(head, tail) | elem.load_value()).value().count_zeros() as usize
},
Domain::Region { head, body, tail } => {
head.map_or(0, |(head, elem)| {
(!O::mask(head, None) | elem.load_value())
.value()
.count_zeros() as usize
}) + body
.iter()
.map(BitStore::load_value)
.map(|e| e.count_zeros() as usize)
.sum::<usize>() + tail.map_or(0, |(elem, tail)| {
(!O::mask(None, tail) | elem.load_value())
.value()
.count_zeros() as usize
})
},
}
}
#[inline(always)]
#[cfg(not(tarpaulin_include))]
pub fn iter_ones(&self) -> IterOnes<O, T> {
IterOnes::new(self)
}
#[inline(always)]
#[cfg(not(tarpaulin_include))]
pub fn iter_zeros(&self) -> IterZeros<O, T> {
IterZeros::new(self)
}
#[inline]
pub fn first_one(&self) -> Option<usize> {
self.iter_ones().next()
}
#[inline]
pub fn first_zero(&self) -> Option<usize> {
self.iter_zeros().next()
}
#[inline]
pub fn last_one(&self) -> Option<usize> {
self.iter_ones().next_back()
}
#[inline]
pub fn last_zero(&self) -> Option<usize> {
self.iter_zeros().next_back()
}
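/// Counts the consecutive set bits at the front of the slice. When the slice
/// contains no `0` bit at all, every bit is a leading one, so the count is
/// `self.len()`.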
#[inline]
pub fn leading_ones(&self) -> usize {
self.first_zero().unwrap_or_else(|| self.len())
}
#[inline]
pub fn leading_zeros(&self) -> usize {
self.first_one().unwrap_or_else(|| self.len())
}
#[inline]
pub fn trailing_ones(&self) -> usize {
self.last_zero()
.map(|idx| self.len() - 1 - idx)
.unwrap_or_else(|| self.len())
}
#[inline]
pub fn trailing_zeros(&self) -> usize {
self.last_one()
.map(|idx| self.len() - 1 - idx)
.unwrap_or_else(|| self.len())
}
pub fn clone_from_bitslice<O2, T2>(&mut self, src: &BitSlice<O2, T2>)
where
O2: BitOrder,
T2: BitStore,
{
assert_eq!(
self.len(),
src.len(),
"Cloning between slices requires equal lengths"
);
if dvl::match_types::<O, T, O2, T2>() {
let that = src as *const _ as *const _;
unsafe {
self.copy_from_bitslice(&*that);
}
}
else {
for (to, from) in
self.as_mut_bitptr_range().zip(src.as_bitptr_range())
{
unsafe {
to.write(from.read());
}
}
}
}
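/// Copies all bits from `src` into `self`; both slices must have the same
/// length. When the two regions begin at the same bit index in their first
/// elements, the copy runs element-at-a-time through the domain views; when
/// the ordering is statically known to be `Lsb0` or `Msb0`, a specialized
/// routine is used; otherwise it falls back to copying one bit at a time.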
pub fn copy_from_bitslice(&mut self, src: &Self) {
assert_eq!(
self.len(),
src.len(),
"Copying between slices requires equal lengths"
);
let (d_head, s_head) =
(self.as_bitspan().head(), src.as_bitspan().head());
if d_head == s_head {
match (self.domain_mut(), src.domain()) {
(
DomainMut::Enclave {
elem: d_elem, tail, ..
},
Domain::Enclave { elem: s_elem, .. },
) => {
let mask = O::mask(d_head, tail);
d_elem.clear_bits(mask);
d_elem.set_bits(mask & s_elem.load_value());
},
(
DomainMut::Region {
head: d_head,
body: d_body,
tail: d_tail,
},
Domain::Region {
head: s_head,
body: s_body,
tail: s_tail,
},
) => {
if let (Some((h_idx, dh_elem)), Some((_, sh_elem))) =
(d_head, s_head)
{
let mask = O::mask(h_idx, None);
dh_elem.clear_bits(mask);
dh_elem.set_bits(mask & sh_elem.load_value());
}
for (dst, src) in d_body.iter_mut().zip(s_body.iter()) {
dst.store_value(src.load_value())
}
if let (Some((dt_elem, t_idx)), Some((st_elem, _))) =
(d_tail, s_tail)
{
let mask = O::mask(None, t_idx);
dt_elem.clear_bits(mask);
dt_elem.set_bits(mask & st_elem.load_value());
}
},
_ => unreachable!(
"Slices with equal type parameters, lengths, and heads \
will always have equal domains"
),
}
}
else if dvl::match_order::<O, Lsb0>() {
let this: &mut BitSlice<Lsb0, T> =
unsafe { &mut *(self as *mut _ as *mut _) };
let that: &BitSlice<Lsb0, T> =
unsafe { &*(src as *const _ as *const _) };
this.sp_copy_from_bitslice(that);
}
else if dvl::match_order::<O, Msb0>() {
let this: &mut BitSlice<Msb0, T> =
unsafe { &mut *(self as *mut _ as *mut _) };
let that: &BitSlice<Msb0, T> =
unsafe { &*(src as *const _ as *const _) };
this.sp_copy_from_bitslice(that);
}
else {
for (from, to) in
src.as_bitptr_range().zip(self.as_mut_bitptr_range())
{
unsafe {
to.write(from.read());
}
}
}
}
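/// Exchanges the contents of two bit-slices of equal length, one bit at a
/// time. The slices may differ in ordering and storage type.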
pub fn swap_with_bitslice<O2, T2>(&mut self, other: &mut BitSlice<O2, T2>)
where
O2: BitOrder,
T2: BitStore,
{
let len = self.len();
assert_eq!(
len,
other.len(),
"Swapping between slices requires equal lengths"
);
for (to, from) in unsafe {
self.iter_mut()
.remove_alias()
.zip(other.iter_mut().remove_alias())
} {
let (this, that) = (*to, *from);
to.set(that);
from.set(this);
}
}
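/// Shifts the contents of the slice `by` positions towards the front (index
/// `0`), filling the vacated bits at the back with `0`. This moves data
/// *within* the slice; it does not move the slice's position in memory.
///
/// A minimal usage sketch (assuming the crate prelude). Note that under
/// `Lsb0`, index `0` is the least significant bit, so moving data toward the
/// front lowers the numeric value of the element:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut byte = 0b0000_0010u8;
/// let bits = BitSlice::<Lsb0, u8>::from_element_mut(&mut byte);
/// bits.shift_left(1);
/// assert_eq!(byte, 0b0000_0001);
/// ```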
pub fn shift_left(&mut self, by: usize) {
let len = self.len();
if by == 0 {
return;
}
assert!(
by < len,
"Cannot shift a bit-slice by its full length or more: {} is not less than {}",
by,
len
);
unsafe {
self.copy_within_unchecked(by .., 0);
let trunc = len - by;
self.get_unchecked_mut(trunc ..).set_all(false);
}
}
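/// Shifts the contents of the slice `by` positions towards the back (higher
/// indices), filling the vacated bits at the front with `0`.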
pub fn shift_right(&mut self, by: usize) {
let len = self.len();
if by == 0 {
return;
}
assert!(
by < len,
"Cannot shift a bit-slice by its full length or more: {} is not less than {}",
by,
len
);
let trunc = len - by;
unsafe {
self.copy_within_unchecked(.. trunc, by);
self.get_unchecked_mut(.. by).set_all(false);
}
}
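/// Sets every live bit in the slice to `value`. Edge elements are written
/// through masked accessors so that neighboring bits outside the slice are
/// preserved; fully-spanned interior elements are overwritten wholesale with
/// `ptr::write_bytes`.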
pub fn set_all(&mut self, value: bool) {
let setter = <T::Access>::get_writers(value);
match self.domain_mut() {
DomainMut::Enclave { head, elem, tail } => {
setter(elem, O::mask(head, tail));
},
DomainMut::Region { head, body, tail } => {
if let Some((head, elem)) = head {
setter(elem, O::mask(head, None));
}
unsafe {
ptr::write_bytes(
body.as_mut_ptr(),
[0, !0][value as usize],
body.len(),
);
}
if let Some((elem, tail)) = tail {
setter(elem, O::mask(None, tail));
}
},
}
}
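/// Rewrites each bit in place with the result of `func(index, current_bit)`.
///
/// A minimal usage sketch (assuming the crate prelude) that sets every bit at
/// an even index:
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let mut byte = 0u8;
/// let bits = BitSlice::<Lsb0, u8>::from_element_mut(&mut byte);
/// bits.for_each(|idx, bit| if idx % 2 == 0 { !bit } else { bit });
/// assert_eq!(byte, 0b0101_0101);
/// ```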
pub fn for_each<F>(&mut self, mut func: F)
where F: FnMut(usize, bool) -> bool {
for idx in 0 .. self.len() {
unsafe {
let tmp = *self.get_unchecked(idx);
let new = func(idx, tmp);
self.set_unchecked(idx, new);
}
}
}
pub fn offset_from(&self, other: &Self) -> isize {
unsafe { other.as_bitptr().offset_from(self.as_bitptr()) }
}
#[doc(hidden)]
#[deprecated = "Use `BitPtr::offset_from`"]
pub fn electrical_distance(&self, _other: &Self) -> isize {
unimplemented!(
"This no longer exists! Offsets are only defined between two \
bit-pointers in the same bit-region, and `bitvec` considers two \
regions with different orderings, *even if they cover the same \
locations*, to be different. Use `BitPtr::offset_from`."
);
}
}
impl<O, T> BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
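/// Writes `value` into the bit at `index` without any bounds checking.
///
/// # Safety
///
/// `index` must be less than `self.len()`.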
pub unsafe fn set_unchecked(&mut self, index: usize, value: bool) {
self.as_mut_bitptr().add(index).write(value);
}
pub unsafe fn set_aliased_unchecked(&self, index: usize, value: bool)
where T: radium::Radium {
self.as_bitptr().add(index).assert_mut().write(value);
}
pub unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
let bit_a = *self.get_unchecked(a);
let bit_b = *self.get_unchecked(b);
self.set_unchecked(a, bit_b);
self.set_unchecked(b, bit_a);
}
pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&Self, &Self) {
(self.get_unchecked(.. mid), self.get_unchecked(mid ..))
}
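/// Splits the slice at `mid` into two exclusive halves without bounds
/// checking. Because both halves may share the element containing bit `mid`,
/// the storage parameter of each half is tainted to `T::Alias`.
///
/// # Safety
///
/// `mid` must not exceed `self.len()`.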
#[allow(clippy::type_complexity)]
pub unsafe fn split_at_unchecked_mut(
&mut self,
mid: usize,
) -> (&mut BitSlice<O, T::Alias>, &mut BitSlice<O, T::Alias>) {
let bp = self.alias_mut().as_mut_bitspan();
(
bp.to_bitslice_mut().get_unchecked_mut(.. mid),
bp.to_bitslice_mut().get_unchecked_mut(mid ..),
)
}
pub unsafe fn copy_within_unchecked<R>(&mut self, src: R, dest: usize)
where R: RangeBounds<usize> {
if dvl::match_order::<O, Lsb0>() {
let this: &mut BitSlice<Lsb0, T> = &mut *(self as *mut _ as *mut _);
this.sp_copy_within_unchecked(src, dest);
}
else if dvl::match_order::<O, Msb0>() {
let this: &mut BitSlice<Msb0, T> = &mut *(self as *mut _ as *mut _);
this.sp_copy_within_unchecked(src, dest);
}
else {
let source = dvl::normalize_range(src, self.len());
let source_len = source.len();
let rev = source.contains(&dest);
let iter = source.zip(dest .. dest + source_len);
if rev {
for (from, to) in iter.rev() {
let bit = *self.get_unchecked(from);
self.set_unchecked(to, bit);
}
}
else {
for (from, to) in iter {
let bit = *self.get_unchecked(from);
self.set_unchecked(to, bit);
}
}
}
}
}
#[cfg(not(tarpaulin_include))]
impl<O, T> BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
#[inline(always)]
pub fn as_bitptr(&self) -> BitPtr<Const, O, T> {
self.as_bitspan().as_bitptr()
}
#[inline(always)]
pub fn as_mut_bitptr(&mut self) -> BitPtr<Mut, O, T> {
self.as_mut_bitspan().as_bitptr()
}
pub fn as_bitptr_range(&self) -> BitPtrRange<Const, O, T> {
unsafe { self.as_bitptr().range(self.len()) }
}
pub fn as_mut_bitptr_range(&mut self) -> BitPtrRange<Mut, O, T> {
unsafe { self.as_mut_bitptr().range(self.len()) }
}
#[inline(always)]
#[cfg(not(tarpaulin_include))]
pub fn bit_domain(&self) -> BitDomain<O, T> {
BitDomain::new(self)
}
#[inline(always)]
#[cfg(not(tarpaulin_include))]
pub fn bit_domain_mut(&mut self) -> BitDomainMut<O, T> {
BitDomainMut::new(self)
}
#[inline(always)]
#[cfg(not(tarpaulin_include))]
pub fn domain(&self) -> Domain<T> {
Domain::new(self)
}
#[inline(always)]
#[cfg(not(tarpaulin_include))]
pub fn domain_mut(&mut self) -> DomainMut<T> {
DomainMut::new(self)
}
pub fn as_raw_slice(&self) -> &[T] {
let bitspan = self.as_bitspan();
let (base, elts) = (bitspan.address().to_const(), bitspan.elements());
unsafe { slice::from_raw_parts(base, elts) }
}
}
impl<O, T> BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
pub(crate) fn as_bitspan(&self) -> BitSpan<Const, O, T> {
BitSpan::from_bitslice_ptr(self)
}
pub(crate) fn as_mut_bitspan(&mut self) -> BitSpan<Mut, O, T> {
BitSpan::from_bitslice_ptr_mut(self)
}
pub(crate) fn assert_in_bounds<R>(&self, index: usize, bounds: R)
where R: RangeBounds<usize> {
assert!(
bounds.contains(&index),
"Index {} out of range: {:?}",
index,
bounds.end_bound()
);
}
pub(crate) fn alias(&self) -> &BitSlice<O, T::Alias> {
unsafe { &*(self as *const Self as *const BitSlice<O, T::Alias>) }
}
pub(crate) fn alias_mut(&mut self) -> &mut BitSlice<O, T::Alias> {
unsafe { &mut *(self as *mut Self as *mut BitSlice<O, T::Alias>) }
}
#[cfg(not(tarpaulin_include))]
pub(crate) unsafe fn unalias_mut(
this: &mut BitSlice<O, T::Alias>,
) -> &mut Self {
&mut *(this as *mut BitSlice<O, T::Alias> as *mut Self)
}
pub(crate) unsafe fn split_at_unchecked_mut_noalias(
&mut self,
mid: usize,
) -> (&mut Self, &mut Self) {
let (head, tail) = self.split_at_unchecked_mut(mid);
(Self::unalias_mut(head), Self::unalias_mut(tail))
}
}
impl<O, T> BitSlice<O, T>
where
O: BitOrder,
T: BitSafe + BitStore,
{
pub fn split_at_aliased_mut(
&mut self,
mid: usize,
) -> (&mut Self, &mut Self) {
let (head, tail) = self.split_at_mut(mid);
unsafe { (Self::unalias_mut(head), Self::unalias_mut(tail)) }
}
}
impl<O, T> BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
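/// The maximum number of bits a single bit-slice region can describe, as
/// bounded by the `BitSpan` pointer encoding.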
pub const MAX_BITS: usize = BitSpan::<Const, O, T>::REGION_MAX_BITS;
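/// The maximum number of storage elements a single bit-slice region can span.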
pub const MAX_ELTS: usize = BitSpan::<Const, O, T>::REGION_MAX_ELTS;
}
#[cfg(feature = "alloc")]
impl<O, T> BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
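/// Copies the viewed bits into a newly-allocated `BitVec`. The elements are
/// gathered through the `Domain` view into a `Vec`, and the original span's
/// head and length are re-applied over the new allocation, so the result
/// holds the same bits with unaliased `T::Unalias` storage.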
pub fn to_bitvec(&self) -> BitVec<O, T::Unalias> {
let mut bitspan = self.as_bitspan();
let mut vec = self.domain().collect::<Vec<_>>().pipe(ManuallyDrop::new);
let capacity = vec.capacity();
unsafe {
bitspan
.set_address(Address::new_unchecked(vec.as_mut_ptr() as usize));
BitVec::from_fields(
bitspan.assert_mut().cast::<T::Unalias>(),
capacity,
)
}
}
}
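/// Reconstructs a shared bit-slice from a bit-pointer and a bit count without
/// performing any checks.
///
/// # Safety
///
/// `data` must point into a live allocation that contains at least `len`
/// bits beginning at it, and `len` must not exceed `BitSlice::MAX_BITS`.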
pub unsafe fn from_raw_parts_unchecked<'a, O, T>(
data: BitPtr<Const, O, T>,
len: usize,
) -> &'a BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
data.span_unchecked(len).to_bitslice_ref()
}
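/// Exclusive counterpart to [`from_raw_parts_unchecked`]. The same safety
/// requirements apply, and the described region must also be writable and
/// unaliased for the produced lifetime.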
pub unsafe fn from_raw_parts_unchecked_mut<'a, O, T>(
data: BitPtr<Mut, O, T>,
len: usize,
) -> &'a mut BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
data.span_unchecked(len).to_bitslice_mut()
}
mod api;
mod iter;
mod ops;
mod specialization;
mod traits;
pub use self::{
api::{
from_mut,
from_raw_parts,
from_raw_parts_mut,
from_ref,
BitSliceIndex,
},
iter::{
Chunks,
ChunksExact,
ChunksExactMut,
ChunksMut,
Iter,
IterMut,
IterOnes,
IterZeros,
RChunks,
RChunksExact,
RChunksExactMut,
RChunksMut,
RSplit,
RSplitMut,
RSplitN,
RSplitNMut,
Split,
SplitMut,
SplitN,
SplitNMut,
Windows,
},
};
#[cfg(test)]
mod tests;