use crate::prelude::{bit_field_slice::*, *};
use crate::traits::ambassador_impl_Backend;
use crate::traits::{Backend, Word};
use crate::utils::PrimitiveUnsignedExt;
use crate::utils::{
CannotCastToAtomicError, transmute_boxed_slice_from_atomic, transmute_boxed_slice_into_atomic,
transmute_vec_from_atomic, transmute_vec_into_atomic,
};
use crate::{panic_if_out_of_bounds, panic_if_value};
use ambassador::Delegate;
use anyhow::{Result, bail};
use atomic_primitive::{Atomic, AtomicPrimitive, PrimitiveAtomic, PrimitiveAtomicUnsigned};
use mem_dbg::*;
use num_primitive::{PrimitiveInteger, PrimitiveNumber, PrimitiveNumberAs};
#[cfg(feature = "rayon")]
use rayon::prelude::*;
use std::iter::FusedIterator;
use std::sync::atomic::{Ordering, compiler_fence, fence};
use value_traits::slices::{SliceByValue, SliceByValueMut};
/// Creates a [`BitFieldVec`] with array-like syntax.
///
/// Supported forms:
/// - `bit_field_vec![w]`: an empty vector of bit width `w`;
/// - `bit_field_vec![w => v; n]`: a vector of bit width `w` containing `n`
///   copies of `v`;
/// - `bit_field_vec![w; v1, v2, ...]`: a vector of bit width `w` containing
///   the listed values.
///
/// The removed legacy form `bit_field_vec![w; n; v]` is rejected with a
/// compile error pointing at the replacement syntax.
#[macro_export]
macro_rules! bit_field_vec {
    // Empty vector with the given bit width.
    ($w:expr) => {
        <$crate::bits::BitFieldVec>::new($w, 0)
    };
    // Legacy syntax trap: fail compilation with a migration hint.
    ($w:expr; $n:expr; $v:expr) => {
        compile_error!(
            "the syntax bit_field_vec![width; length; value] has been removed: use bit_field_vec![width => value; length] instead"
        )
    };
    // `n` copies of `v`; the capacity is reserved up front.
    ($w:expr => $v:expr; $n:expr) => {
        {
            let mut bit_field_vec = <$crate::bits::BitFieldVec>::with_capacity($w, $n);
            let v: usize = $v;
            bit_field_vec.resize($n, v);
            bit_field_vec
        }
    };
    // Explicit list of values; length is known from the bracketed array.
    ($w:expr; $($x:expr),+ $(,)?) => {
        {
            let mut b = <$crate::bits::BitFieldVec>::with_capacity($w, [$($x),+].len());
            $(
                let x: usize = $x;
                b.push(x);
            )*
            b
        }
    };
}
/// A vector of values of fixed bit width.
///
/// Values are stored contiguously in the backend `B`; each value occupies
/// `bit_width` bits and may straddle two consecutive backend words.
#[derive(Debug, Clone, MemSize, MemDbg, Delegate, value_traits::Subslices)]
#[value_traits_subslices(bound = "B: AsRef<[B::Word]>")]
#[value_traits_subslices(bound = "B::Word: Word")]
#[derive(value_traits::SubslicesMut)]
#[value_traits_subslices_mut(bound = "B: AsRef<[B::Word]> + AsMut<[B::Word]>")]
#[value_traits_subslices_mut(bound = "B::Word: Word")]
#[cfg_attr(feature = "epserde", derive(epserde::Epserde))]
#[cfg_attr(
    feature = "epserde",
    epserde(bound(
        deser = "for<'a> <B as epserde::deser::DeserInner>::DeserType<'a>: Backend<Word = B::Word>"
    ))
)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[delegate(crate::traits::Backend, target = "bits")]
pub struct BitFieldVec<B: Backend = Vec<usize>> {
    // Backing word storage.
    bits: B,
    // Width in bits of each stored value (at most B::Word::BITS).
    bit_width: usize,
    // Precomputed mask with the lowest `bit_width` bits set.
    mask: B::Word,
    // Number of stored values.
    len: usize,
}
/// Returns a word with the lowest `bit_width` bits set.
///
/// A width of zero yields the all-zeros word; otherwise the all-ones word is
/// shifted right so that exactly `bit_width` low bits remain.
///
/// # Panics
///
/// Panics if `bit_width` exceeds the number of bits of `W`.
fn mask<W: Word>(bit_width: usize) -> W {
    match bit_width {
        0 => W::ZERO,
        width => {
            let shift = (W::BITS as usize)
                .checked_sub(width)
                .expect("bit_width > W::BITS as usize");
            W::MAX >> shift
        }
    }
}
impl<B: Backend<Word: Word>> BitFieldVec<B> {
    /// Builds a [`BitFieldVec`] from its raw components.
    ///
    /// # Safety
    ///
    /// No check is performed: the caller must guarantee that `bits` holds at
    /// least `len * bit_width` bits and that every stored value fits in
    /// `bit_width` bits.
    #[inline(always)]
    #[must_use]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }
    /// Consumes the vector and returns `(backend, bit_width, len)`.
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }
    /// Returns the mask with the lowest `bit_width` bits set.
    pub const fn mask(&self) -> B::Word {
        self.mask
    }
    /// Maps the backend into another backend type, preserving bit width and
    /// length.
    ///
    /// # Safety
    ///
    /// NOTE(review): presumably the caller must guarantee that `f` returns a
    /// backend representing the same bit content — confirm against the
    /// crate-level documentation.
    #[inline(always)]
    pub unsafe fn map<B2: Backend<Word: Word>>(self, f: impl FnOnce(B) -> B2) -> BitFieldVec<B2> {
        BitFieldVec {
            bits: f(self.bits),
            bit_width: self.bit_width,
            // The mask is recomputed because the word type may differ.
            mask: mask(self.bit_width),
            len: self.len,
        }
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> BitFieldVec<B> {
    /// Returns a pointer to the word containing the first bit of the value at
    /// `index`.
    pub fn addr_of(&self, index: usize) -> *const B::Word {
        let start_bit = index * self.bit_width;
        let word_index = start_bit / B::Word::BITS as usize;
        (&self.bits.as_ref()[word_index]) as *const _
    }
    /// Returns the value at `index` using a single unaligned word read.
    ///
    /// # Panics
    ///
    /// Panics if the bit width does not satisfy the unaligned-access
    /// requirements, if `index` is out of bounds, or if the unaligned read
    /// would go past the end of the backing storage.
    pub fn get_unaligned(&self, index: usize) -> B::Word {
        assert_unaligned!(B::Word, self.bit_width);
        panic_if_out_of_bounds!(index, self.len);
        // A full word is read starting at the containing byte; it must stay
        // within the backing storage.
        assert!(
            (index * self.bit_width) / 8 + size_of::<B::Word>()
                <= std::mem::size_of_val(self.bits.as_ref())
        );
        unsafe { self.get_unaligned_unchecked(index) }
    }
    /// Returns the value at `index` using a single unaligned word read,
    /// without bounds checks.
    ///
    /// # Safety
    ///
    /// `index` must be within bounds, the bit width must satisfy the
    /// unaligned-access requirements, and the word read starting at the byte
    /// containing the first bit must not go past the end of the storage.
    pub unsafe fn get_unaligned_unchecked(&self, index: usize) -> B::Word {
        debug_assert_unaligned!(B::Word, self.bit_width);
        let base_ptr = self.bits.as_ref().as_ptr() as *const u8;
        let start_bit = index * self.bit_width;
        debug_assert!(
            start_bit / 8 + size_of::<B::Word>() <= std::mem::size_of_val(self.bits.as_ref())
        );
        // SAFETY: the caller guarantees the read stays within the storage.
        let ptr = unsafe { base_ptr.add(start_bit / 8) } as *const B::Word;
        let word = unsafe { core::ptr::read_unaligned(ptr) };
        // Drop the sub-byte offset, then mask out the bits above the value.
        (word >> (start_bit % 8)) & self.mask
    }
    /// Returns the backing words as a slice.
    pub fn as_slice(&self) -> &[B::Word] {
        self.bits.as_ref()
    }
}
/// Iterator over mutable fixed-size chunks of a [`BitFieldVec`], returned by
/// `try_chunks_mut`.
pub struct ChunksMut<'a, W: Word> {
    // Number of values not yet yielded across all remaining chunks.
    remaining: usize,
    // Bit width of the underlying vector.
    bit_width: usize,
    // Number of values per chunk (the last chunk may be shorter).
    chunk_size: usize,
    // Underlying iterator over mutable word chunks.
    iter: std::slice::ChunksMut<'a, W>,
}
impl<'a, W: Word> Iterator for ChunksMut<'a, W> {
    type Item = BitFieldVec<&'a mut [W]>;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next().map(|chunk| {
            // The last chunk may contain fewer than `chunk_size` values.
            let size = Ord::min(self.chunk_size, self.remaining);
            // SAFETY: the word chunking performed by `try_chunks_mut`
            // guarantees that `chunk` holds at least `size` values of
            // `bit_width` bits each.
            let next = unsafe { BitFieldVec::from_raw_parts(chunk, self.bit_width, size) };
            self.remaining -= size;
            next
        })
    }
}
// Exactness is inherited from the underlying word-chunk iterator.
impl<'a, W: Word> ExactSizeIterator for ChunksMut<'a, W> where
    std::slice::ChunksMut<'a, W>: ExactSizeIterator
{
}
// Fusedness is inherited from the underlying word-chunk iterator.
impl<'a, W: Word> FusedIterator for ChunksMut<'a, W> where
    std::slice::ChunksMut<'a, W>: FusedIterator
{
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> BitFieldVec<B> {}
impl<W: Word> BitFieldVec<Vec<W>> {
    /// Creates a new zero-initialized vector of `len` values of `bit_width`
    /// bits each.
    #[must_use]
    pub fn new(bit_width: usize, len: usize) -> Self {
        // At least one word is always allocated, so word-level reads of an
        // empty vector stay in bounds.
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(W::BITS as usize));
        Self {
            bits: vec![W::ZERO; n_of_words],
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }
    /// Creates an empty vector with room for `capacity` values of
    /// `bit_width` bits without reallocating.
    #[must_use]
    pub fn with_capacity(bit_width: usize, capacity: usize) -> Self {
        let n_of_words = Ord::max(1, (capacity * bit_width).div_ceil(W::BITS as usize));
        Self {
            bits: Vec::with_capacity(n_of_words),
            bit_width,
            mask: mask(bit_width),
            len: 0,
        }
    }
    /// Sets the number of stored values.
    ///
    /// # Safety
    ///
    /// The backing storage must hold at least `len * bit_width` bits.
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len * self.bit_width <= self.bits.len() * W::BITS as usize);
        self.len = len;
    }
    /// Sets the length to zero, keeping the allocated storage.
    pub fn clear(&mut self) {
        self.len = 0;
    }
    /// Returns the width in bits of each stored value.
    pub const fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= W::BITS as usize);
        self.bit_width
    }
    /// Builds a vector from a generic [`BitFieldSlice`], using the minimum
    /// bit width able to represent all of its values.
    ///
    /// # Errors
    ///
    /// Returns an error if some value does not fit in a word of type `W`.
    pub fn from_slice<S: BitFieldSlice<Value: Word + PrimitiveNumberAs<W>>>(
        slice: &S,
    ) -> Result<Self> {
        // First pass: find the bit length of the largest value.
        let mut max_len: usize = 0;
        for i in 0..slice.len() {
            max_len = Ord::max(max_len, unsafe {
                slice.get_value_unchecked(i).bit_len() as usize
            });
        }
        if max_len > W::BITS as usize {
            bail!(
                "Cannot convert a slice of bit width {} into a slice with W = {}",
                max_len,
                std::any::type_name::<W>()
            );
        }
        // Second pass: copy the values over at the computed width.
        let mut result = Self::new(max_len, slice.len());
        for i in 0..slice.len() {
            unsafe { result.set_value_unchecked(i, slice.get_value_unchecked(i).as_to::<W>()) };
        }
        Ok(result)
    }
    /// Appends a value at the end of the vector.
    ///
    /// # Panics
    ///
    /// Panics if the value does not fit the bit width of the vector.
    pub fn push(&mut self, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        // Grow the backing storage by one word if the new value would not
        // fit in the currently allocated bits.
        if (self.len + 1) * self.bit_width > self.bits.len() * W::BITS as usize {
            self.bits.push(W::ZERO);
        }
        unsafe {
            self.set_value_unchecked(self.len, value);
        }
        self.len += 1;
    }
    /// Resizes the vector in place, filling any new position with `value`.
    ///
    /// # Panics
    ///
    /// Panics if the value does not fit the bit width of the vector.
    pub fn resize(&mut self, new_len: usize, value: W) {
        panic_if_value!(value, self.mask, self.bit_width);
        if new_len > self.len {
            if new_len * self.bit_width > self.bits.len() * W::BITS as usize {
                self.bits.resize(
                    (new_len * self.bit_width).div_ceil(W::BITS as usize),
                    W::ZERO,
                );
            }
            for i in self.len..new_len {
                unsafe {
                    self.set_value_unchecked(i, value);
                }
            }
        }
        self.len = new_len;
    }
    /// Removes and returns the last value, or [`None`] if the vector is
    /// empty.
    pub fn pop(&mut self) -> Option<W> {
        if self.len == 0 {
            return None;
        }
        let value = self.index_value(self.len - 1);
        self.len -= 1;
        Some(value)
    }
    /// Converts the vector into a boxed-slice variant guaranteed to have at
    /// least one word of storage past the stored data.
    pub fn into_padded(mut self) -> BitFieldVec<Box<[W]>> {
        let needed = (self.len * self.bit_width).div_ceil(W::BITS as usize);
        // Ensure at least one padding word beyond the data.
        if self.bits.len() <= needed {
            self.bits.push(W::ZERO);
        }
        // SAFETY: width and length are unchanged and the storage only grows.
        unsafe {
            BitFieldVec::from_raw_parts(self.bits.into_boxed_slice(), self.bit_width, self.len)
        }
    }
}
impl<W: Word> BitFieldVec<Box<[W]>> {
    /// Creates a new zero-initialized vector backed by a boxed slice with one
    /// extra padding word beyond what `len` values of `bit_width` bits need.
    pub fn new_padded(bit_width: usize, len: usize) -> Self {
        let needed_words = (len * bit_width).div_ceil(W::BITS as usize);
        let backing = vec![W::ZERO; needed_words + 1].into_boxed_slice();
        Self {
            bits: backing,
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]> + AsMut<[B::Word]>> BitFieldVec<B> {}
impl<B: Backend<Word: Word>> BitWidth for BitFieldVec<B> {
    /// Returns the width in bits of each stored value.
    #[inline(always)]
    fn bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= B::Word::BITS as usize);
        self.bit_width
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> SliceByValue for BitFieldVec<B> {
    type Value = B::Word;
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
    /// Reads the value at `index` without bounds checks.
    ///
    /// # Safety
    ///
    /// `index` must be smaller than the length of the vector.
    unsafe fn get_value_unchecked(&self, index: usize) -> B::Word {
        let bits = B::Word::BITS as usize;
        let pos = index * self.bit_width;
        let word_index = pos / bits;
        let bit_index = pos % bits;
        let data = self.bits.as_ref();
        unsafe {
            if bit_index + self.bit_width <= bits {
                // The value lies entirely within a single word.
                (*data.get_unchecked(word_index) >> bit_index) & self.mask
            } else {
                // The value straddles two consecutive words: combine the high
                // bits of the first word with the low bits of the second.
                ((*data.get_unchecked(word_index) >> bit_index)
                    | (*data.get_unchecked(word_index + 1) << (bits - bit_index)))
                    & self.mask
            }
        }
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> BitFieldSlice for BitFieldVec<B> {
    /// Returns the backing words as a slice.
    fn as_slice(&self) -> &[Self::Value] {
        self.bits.as_ref()
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]> + AsMut<[B::Word]>> BitFieldSliceMut
    for BitFieldVec<B>
{
    /// Zeroes all stored values.
    fn reset(&mut self) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / B::Word::BITS as usize;
        let residual = bit_len % B::Word::BITS as usize;
        let bits = self.bits.as_mut();
        bits[..full_words]
            .iter_mut()
            .for_each(|x| *x = B::Word::ZERO);
        // Clear only the low `residual` bits of the final, partial word.
        if residual != 0 {
            bits[full_words] &= B::Word::MAX << residual;
        }
    }
    /// Zeroes all stored values in parallel using rayon.
    #[cfg(feature = "rayon")]
    fn par_reset(&mut self) {
        let bit_len = self.len * self.bit_width;
        let full_words = bit_len / B::Word::BITS as usize;
        let residual = bit_len % B::Word::BITS as usize;
        let bits = self.bits.as_mut();
        bits[..full_words]
            .par_iter_mut()
            .with_min_len(crate::RAYON_MIN_LEN)
            .for_each(|x| *x = B::Word::ZERO);
        // Clear only the low `residual` bits of the final, partial word.
        if residual != 0 {
            bits[full_words] &= B::Word::MAX << residual;
        }
    }
    /// Returns the backing words as a mutable slice.
    fn as_mut_slice(&mut self) -> &mut [B::Word] {
        self.bits.as_mut()
    }
}
/// Error returned by `try_chunks_mut` when chunk boundaries would not fall on
/// word boundaries (so mutable chunks cannot be formed).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ChunksMutError<W: Word> {
    // Bit width of the vector for which chunking was requested.
    bit_width: usize,
    // Requested number of values per chunk.
    chunk_size: usize,
    // Ties the error to the word type for the Display message.
    _marker: core::marker::PhantomData<W>,
}
impl<W: Word> core::fmt::Display for ChunksMutError<W> {
    /// Explains the word-alignment requirement violated by the requested
    /// chunking.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(
            f,
            "try_chunks_mut needs that the bit width ({}) times the chunk size ({}) is a multiple of W::BITS ({}) to return more than one chunk",
            self.bit_width,
            self.chunk_size,
            W::BITS as usize
        )
    }
}
impl<W: Word> std::error::Error for ChunksMutError<W> {}
impl<B: Backend<Word: Word> + AsRef<[B::Word]> + AsMut<[B::Word]>> SliceByValueMut
    for BitFieldVec<B>
{
    /// Sets the value at `index`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds or if the value does not fit the
    /// bit width of the vector.
    #[inline(always)]
    fn set_value(&mut self, index: usize, value: B::Word) {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe {
            self.set_value_unchecked(index, value);
        }
    }
    /// Sets the value at `index` without bounds or value checks.
    ///
    /// # Safety
    ///
    /// `index` must be in bounds and `value` must fit in `bit_width` bits.
    unsafe fn set_value_unchecked(&mut self, index: usize, value: B::Word) {
        let bits = B::Word::BITS as usize;
        let pos = index * self.bit_width;
        let word_index = pos / bits;
        let bit_index = pos % bits;
        let data = self.bits.as_mut();
        unsafe {
            if bit_index + self.bit_width <= bits {
                // The value fits in a single word: clear its slot and OR the
                // new value in.
                let mut word = *data.get_unchecked_mut(word_index);
                word &= !(self.mask << bit_index);
                word |= value << bit_index;
                *data.get_unchecked_mut(word_index) = word;
            } else {
                // The value straddles two words: write the low part into the
                // high bits of the first word...
                let mut word = *data.get_unchecked_mut(word_index);
                word &= (B::Word::ONE << bit_index) - B::Word::ONE;
                word |= value << bit_index;
                *data.get_unchecked_mut(word_index) = word;
                // ...and the high part into the low bits of the second word.
                let mut word = *data.get_unchecked_mut(word_index + 1);
                word &= !(self.mask >> (bits - bit_index));
                word |= value >> (bits - bit_index);
                *data.get_unchecked_mut(word_index + 1) = word;
            }
        }
    }
    /// Replaces the value at `index`, returning the previous value.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds or if the value does not fit the
    /// bit width of the vector.
    fn replace_value(&mut self, index: usize, value: B::Word) -> B::Word {
        panic_if_out_of_bounds!(index, self.len);
        panic_if_value!(value, self.mask, self.bit_width);
        unsafe { self.replace_value_unchecked(index, value) }
    }
    /// Replaces the value at `index` without checks, returning the previous
    /// value.
    ///
    /// # Safety
    ///
    /// `index` must be in bounds and `value` must fit in `bit_width` bits.
    unsafe fn replace_value_unchecked(&mut self, index: usize, value: B::Word) -> B::Word {
        let bits = B::Word::BITS as usize;
        let pos = index * self.bit_width;
        let word_index = pos / bits;
        let bit_index = pos % bits;
        let data = self.bits.as_mut();
        unsafe {
            if bit_index + self.bit_width <= bits {
                // Single-word case: extract the old value, then overwrite.
                let mut word = *data.get_unchecked_mut(word_index);
                let old_value = (word >> bit_index) & self.mask;
                word &= !(self.mask << bit_index);
                word |= value << bit_index;
                *data.get_unchecked_mut(word_index) = word;
                old_value
            } else {
                // Two-word case: gather the old value across both words while
                // writing the new one.
                let mut word = *data.get_unchecked_mut(word_index);
                let mut old_value = word >> bit_index;
                word &= (B::Word::ONE << bit_index) - B::Word::ONE;
                word |= value << bit_index;
                *data.get_unchecked_mut(word_index) = word;
                let mut word = *data.get_unchecked_mut(word_index + 1);
                old_value |= word << (bits - bit_index);
                word &= !(self.mask >> (bits - bit_index));
                word |= value >> (bits - bit_index);
                *data.get_unchecked_mut(word_index + 1) = word;
                old_value & self.mask
            }
        }
    }
    /// Copies `len` values starting at `from` into `dst` starting at `to`,
    /// clamping `len` to the available source and destination elements.
    ///
    /// The copy is performed at the word level, distinguishing the relative
    /// bit alignment of source and destination.
    ///
    /// # Panics
    ///
    /// Panics if the bit widths of `self` and `dst` differ.
    /// NOTE(review): `dst.len - to` / `self.len - from` underflow (and panic
    /// in debug builds) when `to`/`from` exceed the respective lengths —
    /// confirm callers guarantee this.
    fn copy(&self, from: usize, dst: &mut Self, to: usize, len: usize) {
        assert_eq!(
            self.bit_width, dst.bit_width,
            "Bit widths must be equal (self: {}, dest: {})",
            self.bit_width, dst.bit_width
        );
        let len = Ord::min(Ord::min(len, dst.len - to), self.len - from);
        if len == 0 {
            return;
        }
        let bit_width = Ord::min(self.bit_width, dst.bit_width);
        let bit_len = len * bit_width;
        let src_pos = from * self.bit_width;
        let dst_pos = to * dst.bit_width;
        let bits = B::Word::BITS as usize;
        // Bit offsets within the first source/destination words.
        let src_bit = src_pos % bits;
        let dst_bit = dst_pos % bits;
        let src_first_word = src_pos / bits;
        let dst_first_word = dst_pos / bits;
        let src_last_word = (src_pos + bit_len - 1) / bits;
        let dst_last_word = (dst_pos + bit_len - 1) / bits;
        let source = self.bits.as_ref();
        let dest = dst.bits.as_mut();
        if src_first_word == src_last_word && dst_first_word == dst_last_word {
            // Both ranges fit in a single word each.
            let mask = B::Word::MAX >> (bits - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_first_word == src_last_word {
            // Source fits in one word, destination straddles two.
            let mask = B::Word::MAX >> (bits - bit_len);
            let word = (source[src_first_word] >> src_bit) & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= (word & mask) << dst_bit;
            dest[dst_last_word] &= !(mask >> (bits - dst_bit));
            dest[dst_last_word] |= (word & mask) >> (bits - dst_bit);
        } else if dst_first_word == dst_last_word {
            // Destination fits in one word, source straddles two.
            let mask = B::Word::MAX >> (bits - bit_len);
            let word = ((source[src_first_word] >> src_bit)
                | (source[src_last_word] << (bits - src_bit)))
                & mask;
            dest[dst_first_word] &= !(mask << dst_bit);
            dest[dst_first_word] |= word << dst_bit;
        } else if src_bit == dst_bit {
            // Same bit alignment: bulk-copy the interior words directly.
            let mask = B::Word::MAX << dst_bit;
            dest[dst_first_word] &= !mask;
            dest[dst_first_word] |= source[src_first_word] & mask;
            dest[(1 + dst_first_word)..dst_last_word]
                .copy_from_slice(&source[(1 + src_first_word)..src_last_word]);
            let residual = bit_len - (bits - src_bit) - (dst_last_word - dst_first_word - 1) * bits;
            let mask = B::Word::MAX >> (bits - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= source[src_last_word] & mask;
        } else if src_bit < dst_bit {
            // Source is left of destination: shift words left while copying.
            let dst_mask = B::Word::MAX << dst_bit;
            let src_mask = B::Word::MAX << src_bit;
            let shift = dst_bit - src_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) << shift;
            // Carry the bits shifted out of each word into the next one.
            let mut word = source[src_first_word] >> (bits - shift);
            for i in 1..dst_last_word - dst_first_word {
                dest[dst_first_word + i] = word | (source[src_first_word + i] << shift);
                word = source[src_first_word + i] >> (bits - shift);
            }
            let residual = bit_len - (bits - dst_bit) - (dst_last_word - dst_first_word - 1) * bits;
            let mask = B::Word::MAX >> (bits - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= (word | (source[src_last_word] << shift)) & mask;
        } else {
            // Source is right of destination: shift words right while copying.
            let dst_mask = B::Word::MAX << dst_bit;
            let src_mask = B::Word::MAX << src_bit;
            let shift = src_bit - dst_bit;
            dest[dst_first_word] &= !dst_mask;
            dest[dst_first_word] |= (source[src_first_word] & src_mask) >> shift;
            dest[dst_first_word] |= source[src_first_word + 1] << (bits - shift);
            let mut word = source[src_first_word + 1] >> shift;
            for i in 1..dst_last_word - dst_first_word {
                word |= source[src_first_word + i + 1] << (bits - shift);
                dest[dst_first_word + i] = word;
                word = source[src_first_word + i + 1] >> shift;
            }
            word |= source[src_last_word] << (bits - shift);
            let residual = bit_len - (bits - dst_bit) - (dst_last_word - dst_first_word - 1) * bits;
            let mask = B::Word::MAX >> (bits - residual);
            dest[dst_last_word] &= !mask;
            dest[dst_last_word] |= word & mask;
        }
    }
    /// Applies `f` to every stored value in place, streaming word by word
    /// with separate read and write buffers.
    ///
    /// # Safety
    ///
    /// `f` must return values that fit in `bit_width` bits.
    #[inline]
    unsafe fn apply_in_place_unchecked<F>(&mut self, mut f: F)
    where
        F: FnMut(Self::Value) -> Self::Value,
    {
        if self.is_empty() {
            return;
        }
        let bit_width = self.bit_width();
        // Zero-width values carry no information; nothing to rewrite.
        if bit_width == 0 {
            return;
        }
        let mask = self.mask;
        let number_of_words: usize = self.bits.as_ref().len();
        let last_word_idx = number_of_words.saturating_sub(1);
        let bits = B::Word::BITS as usize;
        let mut write_buffer: B::Word = B::Word::ZERO;
        let mut read_buffer: B::Word = *unsafe { self.bits.as_ref().get_unchecked(0) };
        if bit_width.is_power_of_two() {
            // Fast path: values never straddle word boundaries, so each word
            // can be consumed and rebuilt independently.
            let mut bits_in_buffer = 0;
            // Number of valid bits in the final (possibly partial) word.
            let mut buffer_limit = (self.len() * bit_width) % bits;
            if buffer_limit == 0 {
                buffer_limit = bits;
            }
            for read_idx in 1..number_of_words {
                let next_word: B::Word = *unsafe { self.bits.as_ref().get_unchecked(read_idx) };
                loop {
                    let next_bits_in_buffer = bits_in_buffer + bit_width;
                    if next_bits_in_buffer > bits {
                        break;
                    }
                    let value = read_buffer & mask;
                    read_buffer >>= bit_width;
                    let new_value = f(value);
                    write_buffer |= new_value << bits_in_buffer;
                    bits_in_buffer = next_bits_in_buffer;
                }
                // With power-of-two widths the word divides evenly.
                debug_assert_eq!(read_buffer, B::Word::ZERO);
                *unsafe { self.bits.as_mut().get_unchecked_mut(read_idx - 1) } = write_buffer;
                read_buffer = next_word;
                write_buffer = B::Word::ZERO;
                bits_in_buffer = 0;
            }
            // Flush the values remaining in the last word.
            while bits_in_buffer < buffer_limit {
                let value = read_buffer & mask;
                read_buffer >>= bit_width;
                let new_value = f(value);
                write_buffer |= new_value << bits_in_buffer;
                bits_in_buffer += bit_width;
            }
            *unsafe { self.bits.as_mut().get_unchecked_mut(last_word_idx) } = write_buffer;
            return;
        }
        // General path: values may straddle word boundaries, so track the
        // absolute bit index and the current word's bit range.
        let mut global_bit_index: usize = 0;
        let mut lower_word_limit = 0;
        let mut upper_word_limit = bits;
        for word_number in 0..last_word_idx {
            // Process the values fully contained in the current word.
            while global_bit_index + bit_width <= upper_word_limit {
                let offset = global_bit_index - lower_word_limit;
                global_bit_index += bit_width;
                let element = self.mask & (read_buffer >> offset);
                let new_element = f(element);
                write_buffer |= new_element << offset;
            }
            let next_word = *unsafe { self.bits.as_ref().get_unchecked(word_number + 1) };
            let mut new_write_buffer = B::Word::ZERO;
            if upper_word_limit != global_bit_index {
                // A value straddles this word and the next: read it from both
                // and split the rewritten value across both write buffers.
                let remainder = upper_word_limit - global_bit_index;
                let offset = global_bit_index - lower_word_limit;
                let element = ((read_buffer >> offset) | (next_word << remainder)) & self.mask;
                global_bit_index += bit_width;
                let new_element = f(element);
                write_buffer |= new_element << offset;
                new_write_buffer = new_element >> remainder;
            };
            read_buffer = next_word;
            *unsafe { self.bits.as_mut().get_unchecked_mut(word_number) } = write_buffer;
            write_buffer = new_write_buffer;
            lower_word_limit = upper_word_limit;
            upper_word_limit += bits;
        }
        // Flush the values remaining in the last word.
        let mut offset = global_bit_index - lower_word_limit;
        while offset < self.len() * bit_width - global_bit_index {
            let element = self.mask & (read_buffer >> offset);
            let new_element = f(element);
            write_buffer |= new_element << offset;
            offset += bit_width;
        }
        *unsafe { self.bits.as_mut().get_unchecked_mut(last_word_idx) } = write_buffer;
    }
    type ChunksMut<'a>
        = ChunksMut<'a, B::Word>
    where
        Self: 'a;
    type ChunksMutError = ChunksMutError<B::Word>;
    /// Splits the vector into mutable chunks of `chunk_size` values.
    ///
    /// # Errors
    ///
    /// Fails unless either a single chunk suffices or each chunk covers a
    /// whole number of words (`chunk_size * bit_width` multiple of
    /// `B::Word::BITS`), since mutable chunks cannot share words.
    fn try_chunks_mut(
        &mut self,
        chunk_size: usize,
    ) -> Result<Self::ChunksMut<'_>, ChunksMutError<B::Word>> {
        let len = self.len();
        let bit_width = self.bit_width();
        let bits = B::Word::BITS as usize;
        if len <= chunk_size || (chunk_size * bit_width) % bits == 0 {
            let words_per_chunk = Ord::max(1, (chunk_size * bit_width).div_ceil(bits));
            Ok(ChunksMut {
                remaining: len,
                bit_width: self.bit_width,
                chunk_size,
                iter: self.bits.as_mut()[..(len * bit_width).div_ceil(bits)]
                    .chunks_mut(words_per_chunk),
            })
        } else {
            Err(ChunksMutError {
                bit_width,
                chunk_size,
                _marker: core::marker::PhantomData,
            })
        }
    }
}
/// Forward unchecked iterator over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemSize, MemDbg)]
pub struct BitFieldVecUncheckedIter<'a, B: Backend<Word: Word>> {
    vec: &'a BitFieldVec<B>,
    // Index of the word the window was last refilled from.
    word_index: usize,
    // Buffer of not-yet-consumed bits, aligned to bit 0.
    window: B::Word,
    // Number of valid bits currently in `window`.
    fill: usize,
}
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> BitFieldVecUncheckedIter<'a, B> {
    /// Creates an iterator positioned at value `index`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than the length of the vector.
    fn new(vec: &'a BitFieldVec<B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let bits = B::Word::BITS as usize;
        let (fill, word_index);
        let window = if index == vec.len() {
            // Starting at the end: empty window; the iterator must not be
            // advanced further.
            word_index = 0;
            fill = 0;
            B::Word::ZERO
        } else {
            // Preload the word containing the first bit of value `index`,
            // discarding the bits below it.
            let bit_offset = index * vec.bit_width;
            let bit_index = bit_offset % bits;
            word_index = bit_offset / bits;
            fill = bits - bit_index;
            unsafe {
                *vec.bits.as_ref().get_unchecked(word_index) >> bit_index
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> crate::traits::UncheckedIterator
    for BitFieldVecUncheckedIter<'_, B>
{
    type Item = B::Word;
    /// Returns the next value without bounds checks.
    ///
    /// # Safety
    ///
    /// There must be a next value to return.
    unsafe fn next_unchecked(&mut self) -> B::Word {
        let bit_width = self.vec.bit_width;
        if self.fill >= bit_width {
            // The window holds a whole value: extract and shift it out.
            self.fill -= bit_width;
            let res = self.window & self.vec.mask;
            self.window >>= bit_width;
            return res;
        }
        // The value straddles the next word: keep the low bits already in
        // the window and refill from the following word.
        let res = self.window;
        self.word_index += 1;
        self.window = *unsafe { self.vec.bits.as_ref().get_unchecked(self.word_index) };
        let res = (res | (self.window << self.fill)) & self.vec.mask;
        // Discard the bits of the new word consumed by this value.
        let used = bit_width - self.fill;
        self.window >>= used;
        self.fill = B::Word::BITS as usize - used;
        res
    }
}
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> IntoUncheckedIterator for &'a BitFieldVec<B> {
    type Item = B::Word;
    type IntoUncheckedIter = BitFieldVecUncheckedIter<'a, B>;
    /// Returns an unchecked iterator starting at value `from`.
    fn into_unchecked_iter_from(self, from: usize) -> Self::IntoUncheckedIter {
        BitFieldVecUncheckedIter::new(self, from)
    }
}
/// Backward unchecked iterator over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemSize, MemDbg)]
pub struct BitFieldVecUncheckedBackIter<'a, B: Backend<Word: Word>> {
    vec: &'a BitFieldVec<B>,
    // Index of the word the window was last refilled from.
    word_index: usize,
    // Buffer of not-yet-consumed bits, aligned to the top of the word.
    window: B::Word,
    // Number of valid bits currently in `window`.
    fill: usize,
}
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> BitFieldVecUncheckedBackIter<'a, B> {
    /// Creates a backward iterator positioned just after value `index - 1`.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than the length of the vector.
    fn new(vec: &'a BitFieldVec<B>, index: usize) -> Self {
        if index > vec.len() {
            panic!("Start index out of bounds: {} > {}", index, vec.len());
        }
        let bits = B::Word::BITS as usize;
        let (word_index, fill);
        let window = if index == 0 {
            // Starting at the beginning: empty window; the iterator must not
            // be advanced further backward.
            word_index = 0;
            fill = 0;
            B::Word::ZERO
        } else {
            // Preload the word containing the last bit before `index`,
            // aligning the valid bits to the top of the window.
            let bit_offset = (index * vec.bit_width).saturating_sub(1);
            let bit_index = bit_offset % bits;
            word_index = bit_offset / bits;
            fill = bit_index + 1;
            unsafe {
                *vec.bits.as_ref().get_unchecked(word_index) << (bits - fill)
            }
        };
        Self {
            vec,
            word_index,
            window,
            fill,
        }
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> crate::traits::UncheckedIterator
    for BitFieldVecUncheckedBackIter<'_, B>
{
    type Item = B::Word;
    /// Returns the previous value without bounds checks.
    ///
    /// # Safety
    ///
    /// There must be a previous value to return.
    unsafe fn next_unchecked(&mut self) -> B::Word {
        let bit_width = self.vec.bit_width;
        if self.fill >= bit_width {
            // The window holds a whole value at its top: rotate it down into
            // the low bits and mask it out.
            self.fill -= bit_width;
            self.window = self.window.rotate_left(bit_width as u32);
            return self.window & self.vec.mask;
        }
        // The value straddles the previous word: take the high bits already
        // in the window and refill from the preceding word.
        let mut res = self.window.rotate_left(self.fill as u32);
        self.word_index -= 1;
        self.window = *unsafe { self.vec.bits.as_ref().get_unchecked(self.word_index) };
        let used = bit_width - self.fill;
        res = ((res << used) | (self.window >> (B::Word::BITS as usize - used))) & self.vec.mask;
        // Discard the bits of the new word consumed by this value.
        self.window <<= used;
        self.fill = B::Word::BITS as usize - used;
        res
    }
}
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> IntoUncheckedBackIterator
    for &'a BitFieldVec<B>
{
    type Item = B::Word;
    type IntoUncheckedIterBack = BitFieldVecUncheckedBackIter<'a, B>;
    /// Returns a backward unchecked iterator starting from the end.
    fn into_unchecked_iter_back(self) -> Self::IntoUncheckedIterBack {
        BitFieldVecUncheckedBackIter::new(self, self.len())
    }
    /// Returns a backward unchecked iterator starting from position `from`.
    fn into_unchecked_iter_back_from(self, from: usize) -> Self::IntoUncheckedIterBack {
        BitFieldVecUncheckedBackIter::new(self, from)
    }
}
/// Checked iterator over the values of a [`BitFieldVec`].
#[derive(Debug, Clone, MemSize, MemDbg)]
pub struct BitFieldVecIter<'a, B: Backend<Word: Word>> {
    // Underlying unchecked iterator (used for forward traversal).
    unchecked: BitFieldVecUncheckedIter<'a, B>,
    // Remaining index range; tracks both forward and backward consumption.
    range: core::ops::Range<usize>,
}
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> BitFieldVecIter<'a, B> {
    /// Creates a checked iterator positioned at value `from`.
    ///
    /// # Panics
    ///
    /// Panics if `from` is greater than the length of the vector.
    fn new(vec: &'a BitFieldVec<B>, from: usize) -> Self {
        let total = vec.len();
        if from > total {
            panic!("Start index out of bounds: {} > {}", from, total);
        }
        let unchecked = BitFieldVecUncheckedIter::new(vec, from);
        Self {
            unchecked,
            range: from..total,
        }
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> Iterator for BitFieldVecIter<'_, B> {
    type Item = B::Word;
    fn next(&mut self) -> Option<Self::Item> {
        if self.range.is_empty() {
            return None;
        }
        // SAFETY: the range is non-empty, so a next value exists.
        let res = unsafe { self.unchecked.next_unchecked() };
        self.range.start += 1;
        Some(res)
    }
    #[inline(always)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.range.len(), Some(self.range.len()))
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> ExactSizeIterator for BitFieldVecIter<'_, B> {
    /// Returns the exact number of values still to be yielded.
    #[inline(always)]
    fn len(&self) -> usize {
        self.range.len()
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> FusedIterator for BitFieldVecIter<'_, B> {}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> DoubleEndedIterator for BitFieldVecIter<'_, B> {
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.range.is_empty() {
            return None;
        }
        self.range.end -= 1;
        // Back-iteration reads the vector directly by index instead of using
        // the forward-only unchecked iterator.
        // SAFETY: `range.end` is now a valid index within the vector.
        let res = unsafe { self.unchecked.vec.get_value_unchecked(self.range.end) };
        Some(res)
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>, C: Backend<Word = B::Word> + AsRef<[B::Word]>>
    PartialEq<BitFieldVec<C>> for BitFieldVec<B>
{
    /// Two vectors are equal when they have the same bit width, the same
    /// length, and the same stored bits (garbage bits past the data are
    /// ignored).
    fn eq(&self, other: &BitFieldVec<C>) -> bool {
        if self.bit_width() != other.bit_width() {
            return false;
        }
        if self.len() != other.len() {
            return false;
        }
        let bits = B::Word::BITS as usize;
        let bit_len = self.len() * self.bit_width();
        // Compare the fully-used words directly.
        if self.bits.as_ref()[..bit_len / bits] != other.bits.as_ref()[..bit_len / bits] {
            return false;
        }
        // For a partial last word, shifting the XOR left discards the bits
        // above the data, so only the low `residual` bits are compared.
        let residual = bit_len % bits;
        residual == 0
            || (self.bits.as_ref()[bit_len / bits] ^ other.bits.as_ref()[bit_len / bits])
                << (bits - residual)
                == B::Word::ZERO
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> Eq for BitFieldVec<B> {}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> std::hash::Hash for BitFieldVec<B> {
    /// Hashes bit width, length, and stored bits, masking out garbage bits
    /// past the data so the hash is consistent with `PartialEq`.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.bit_width().hash(state);
        self.len().hash(state);
        let bits = B::Word::BITS as usize;
        let bit_len = self.len() * self.bit_width();
        let full_words = bit_len / bits;
        self.bits.as_ref()[..full_words].hash(state);
        let residual = bit_len % bits;
        if residual != 0 {
            // Same left shift used by `eq`: drops the bits above the data.
            (self.bits.as_ref()[full_words] << (bits - residual)).hash(state);
        }
    }
}
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> IntoIterator for &'a BitFieldVec<B> {
    type Item = B::Word;
    type IntoIter = BitFieldVecIter<'a, B>;
    /// Returns an iterator over all values, by value.
    fn into_iter(self) -> Self::IntoIter {
        BitFieldVecIter::new(self, 0)
    }
}
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> IntoIteratorFrom for &'a BitFieldVec<B> {
    type IntoIterFrom = BitFieldVecIter<'a, B>;
    /// Returns an iterator over the values starting at position `from`.
    fn into_iter_from(self, from: usize) -> Self::IntoIterFrom {
        BitFieldVecIter::new(self, from)
    }
}
impl<W: Word> core::iter::Extend<W> for BitFieldVec<Vec<W>> {
    /// Appends every value yielded by `iter`, growing the storage as needed.
    ///
    /// # Panics
    ///
    /// Panics if a yielded value does not fit the bit width of the vector.
    fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) {
        iter.into_iter().for_each(|value| self.push(value));
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> BitFieldVec<B> {
    /// Returns an iterator over the values starting at position `from`.
    ///
    /// # Panics
    ///
    /// Panics if `from` is greater than the length of the vector.
    pub fn iter_from(&self, from: usize) -> BitFieldVecIter<'_, B> {
        BitFieldVecIter::new(self, from)
    }
    /// Returns an iterator over all the values.
    pub fn iter(&self) -> BitFieldVecIter<'_, B> {
        BitFieldVecIter::new(self, 0)
    }
}
/// A vector of values of fixed bit width with atomic access, backed by a
/// backend of atomic words.
#[derive(Debug, Clone, Hash, MemSize, MemDbg, Delegate)]
#[cfg_attr(feature = "epserde", derive(epserde::Epserde))]
#[cfg_attr(
    feature = "epserde",
    epserde(bound(
        deser = "for<'a> <B as epserde::deser::DeserInner>::DeserType<'a>: Backend<Word = B::Word>"
    ))
)]
#[delegate(crate::traits::Backend, target = "bits")]
pub struct AtomicBitFieldVec<
    B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>> = Vec<Atomic<usize>>,
> {
    // Backing storage of atomic words.
    bits: B,
    // Width in bits of each stored value.
    bit_width: usize,
    // Precomputed mask with the lowest `bit_width` bits set.
    mask: <B::Word as PrimitiveAtomic>::Value,
    // Number of stored values.
    len: usize,
}
impl<B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>>> AtomicBitFieldVec<B> {
    /// Builds an [`AtomicBitFieldVec`] from its raw components.
    ///
    /// # Safety
    ///
    /// No check is performed: the caller must guarantee that `bits` holds at
    /// least `len * bit_width` bits and that every stored value fits in
    /// `bit_width` bits.
    #[inline(always)]
    #[must_use]
    pub unsafe fn from_raw_parts(bits: B, bit_width: usize, len: usize) -> Self {
        Self {
            bits,
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }
    /// Consumes the vector and returns `(backend, bit_width, len)`.
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize, usize) {
        (self.bits, self.bit_width, self.len)
    }
    /// Returns the mask with the lowest `bit_width` bits set.
    pub const fn mask(&self) -> <B::Word as PrimitiveAtomic>::Value {
        self.mask
    }
}
impl<B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>> + AsRef<[B::Word]>>
    AtomicBitFieldVec<B>
{
    /// Returns the backing atomic words as a slice.
    pub fn as_slice(&self) -> &[B::Word] {
        self.bits.as_ref()
    }
}
impl<A: PrimitiveAtomicUnsigned<Value: Word>> AtomicBitFieldVec<Vec<A>> {
    /// Creates a new zero-initialized atomic vector of `len` values of
    /// `bit_width` bits each.
    #[must_use]
    pub fn new(bit_width: usize, len: usize) -> AtomicBitFieldVec<Vec<A>> {
        // At least one word is always allocated, so word-level reads of an
        // empty vector stay in bounds.
        let n_of_words = Ord::max(1, (len * bit_width).div_ceil(A::Value::BITS as usize));
        AtomicBitFieldVec {
            bits: (0..n_of_words).map(|_| A::new(A::Value::ZERO)).collect(),
            bit_width,
            mask: mask(bit_width),
            len,
        }
    }
}
impl<B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>>> AtomicBitWidth
    for AtomicBitFieldVec<B>
{
    /// Returns the width in bits of each stored value.
    #[inline(always)]
    fn atomic_bit_width(&self) -> usize {
        debug_assert!(self.bit_width <= <B::Word as PrimitiveAtomic>::Value::BITS as usize);
        self.bit_width
    }
}
impl<B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>> + AsRef<[B::Word]>> SliceByValue
    for AtomicBitFieldVec<B>
{
    type Value = <B::Word as PrimitiveAtomic>::Value;
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
    /// Reads the value at `index` without bounds checks, using relaxed
    /// atomic loads.
    ///
    /// # Safety
    ///
    /// `index` must be smaller than the length of the vector.
    unsafe fn get_value_unchecked(&self, index: usize) -> Self::Value {
        unsafe { self.get_atomic_unchecked(index, Ordering::Relaxed) }
    }
}
impl<B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>> + AsRef<[B::Word]>>
AtomicBitFieldSlice<B::Word> for AtomicBitFieldVec<B>
{
#[inline]
fn len(&self) -> usize {
self.len
}
#[inline]
unsafe fn get_atomic_unchecked(
&self,
index: usize,
order: Ordering,
) -> <B::Word as PrimitiveAtomic>::Value {
let wbits = <B::Word as PrimitiveAtomic>::Value::BITS as usize;
let pos = index * self.bit_width;
let word_index = pos / wbits;
let bit_index = pos % wbits;
let data = self.bits.as_ref();
unsafe {
if bit_index + self.bit_width <= wbits {
(data.get_unchecked(word_index).load(order) >> bit_index) & self.mask
} else {
((data.get_unchecked(word_index).load(order) >> bit_index)
| (data.get_unchecked(word_index + 1).load(order) << (wbits - bit_index)))
& self.mask
}
}
}
#[inline(always)]
fn set_atomic(
&self,
index: usize,
value: <B::Word as PrimitiveAtomic>::Value,
order: Ordering,
) {
panic_if_out_of_bounds!(index, self.len);
panic_if_value!(value, self.mask, self.bit_width);
unsafe {
self.set_atomic_unchecked(index, value, order);
}
}
#[inline]
unsafe fn set_atomic_unchecked(
&self,
index: usize,
value: <B::Word as PrimitiveAtomic>::Value,
order: Ordering,
) {
unsafe {
let wbits = <B::Word as PrimitiveAtomic>::Value::BITS as usize;
debug_assert!(self.bit_width <= wbits);
let pos = index * self.bit_width;
let word_index = pos / wbits;
let bit_index = pos % wbits;
let data = self.bits.as_ref();
if bit_index + self.bit_width <= wbits {
let mut current = data.get_unchecked(word_index).load(order);
loop {
let mut new = current;
new &= !(self.mask << bit_index);
new |= value << bit_index;
match data
.get_unchecked(word_index)
.compare_exchange(current, new, order, order)
{
Ok(_) => break,
Err(e) => current = e,
}
}
} else {
let mut word = data.get_unchecked(word_index).load(order);
fence(Ordering::Acquire);
loop {
let mut new = word;
new &= (<B::Word as PrimitiveAtomic>::Value::ONE << bit_index)
- <B::Word as PrimitiveAtomic>::Value::ONE;
new |= value << bit_index;
match data
.get_unchecked(word_index)
.compare_exchange(word, new, order, order)
{
Ok(_) => break,
Err(e) => word = e,
}
}
fence(Ordering::Release);
compiler_fence(Ordering::SeqCst);
let mut word = data.get_unchecked(word_index + 1).load(order);
fence(Ordering::Acquire);
loop {
let mut new = word;
new &= !(self.mask >> (wbits - bit_index));
new |= value >> (wbits - bit_index);
match data
.get_unchecked(word_index + 1)
.compare_exchange(word, new, order, order)
{
Ok(_) => break,
Err(e) => word = e,
}
}
fence(Ordering::Release);
}
}
}
/// Zeroes all elements, storing with the given memory ordering.
fn reset_atomic(&mut self, ordering: Ordering) {
    // Total number of bits actually in use.
    let used_bits = self.len * self.bit_width;
    let word_bits = <B::Word as PrimitiveAtomic>::Value::BITS as usize;
    let whole_words = used_bits / word_bits;
    let spare_bits = used_bits % word_bits;
    let words = self.bits.as_ref();
    // Zero every fully used word.
    for word in &words[..whole_words] {
        word.store(<B::Word as PrimitiveAtomic>::Value::ZERO, ordering);
    }
    // Clear only the low `spare_bits` bits of the partially used word,
    // leaving the remaining (unused) high bits untouched.
    if spare_bits != 0 {
        words[whole_words].fetch_and(
            <B::Word as PrimitiveAtomic>::Value::MAX << spare_bits,
            ordering,
        );
    }
}
/// Zeroes all elements in parallel, storing with the given memory
/// ordering; the trailing partially used word is handled separately.
#[cfg(feature = "rayon")]
fn par_reset_atomic(&mut self, ordering: Ordering) {
    // Total number of bits actually in use.
    let used_bits = self.len * self.bit_width;
    let word_bits = <B::Word as PrimitiveAtomic>::Value::BITS as usize;
    let whole_words = used_bits / word_bits;
    let spare_bits = used_bits % word_bits;
    let words = self.bits.as_ref();
    // Zero every fully used word in parallel.
    words[..whole_words]
        .par_iter()
        .with_min_len(crate::RAYON_MIN_LEN)
        .for_each(|word| word.store(<B::Word as PrimitiveAtomic>::Value::ZERO, ordering));
    // Clear only the low `spare_bits` bits of the partially used word.
    if spare_bits != 0 {
        words[whole_words].fetch_and(
            <B::Word as PrimitiveAtomic>::Value::MAX << spare_bits,
            ordering,
        );
    }
}
}
impl<W: Word + AtomicPrimitive<Atomic: PrimitiveAtomicUnsigned>>
From<AtomicBitFieldVec<Vec<W::Atomic>>> for BitFieldVec<Vec<W>>
{
#[inline]
fn from(value: AtomicBitFieldVec<Vec<W::Atomic>>) -> Self {
BitFieldVec {
bits: transmute_vec_from_atomic(value.bits),
len: value.len,
bit_width: value.bit_width,
mask: value.mask,
}
}
}
impl<W: Word + AtomicPrimitive<Atomic: PrimitiveAtomicUnsigned>>
From<AtomicBitFieldVec<Box<[W::Atomic]>>> for BitFieldVec<Box<[W]>>
{
#[inline]
fn from(value: AtomicBitFieldVec<Box<[W::Atomic]>>) -> Self {
BitFieldVec {
bits: transmute_boxed_slice_from_atomic(value.bits),
len: value.len,
bit_width: value.bit_width,
mask: value.mask,
}
}
}
impl<'a, W: Word + AtomicPrimitive<Atomic: PrimitiveAtomicUnsigned>>
    From<AtomicBitFieldVec<&'a [W::Atomic]>> for BitFieldVec<&'a [W]>
{
    /// Reinterprets a shared slice of atomic words as plain words.
    #[inline]
    fn from(value: AtomicBitFieldVec<&'a [W::Atomic]>) -> Self {
        BitFieldVec {
            // SAFETY: `W::Atomic` is the atomic wrapper of `W`, which is
            // assumed to share its size and bit layout. Unlike the
            // `TryFrom` impls in the opposite direction, no alignment
            // check is needed here: a reference to the (at-least-as-
            // aligned) atomic type is always suitably aligned for `W`.
            bits: unsafe { core::mem::transmute::<&'a [W::Atomic], &'a [W]>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
impl<'a, W: Word + AtomicPrimitive<Atomic: PrimitiveAtomicUnsigned>>
    From<AtomicBitFieldVec<&'a mut [W::Atomic]>> for BitFieldVec<&'a mut [W]>
{
    /// Reinterprets an exclusive slice of atomic words as plain words.
    #[inline]
    fn from(value: AtomicBitFieldVec<&'a mut [W::Atomic]>) -> Self {
        BitFieldVec {
            // SAFETY: `W::Atomic` is the atomic wrapper of `W`, which is
            // assumed to share its size and bit layout; going from the
            // atomic to the plain type needs no alignment check (see the
            // `TryFrom` impls for the opposite direction, which do).
            bits: unsafe { core::mem::transmute::<&'a mut [W::Atomic], &'a mut [W]>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        }
    }
}
impl<W: Word + AtomicPrimitive<Atomic: PrimitiveAtomicUnsigned>> From<BitFieldVec<Vec<W>>>
for AtomicBitFieldVec<Vec<W::Atomic>>
{
#[inline]
fn from(value: BitFieldVec<Vec<W>>) -> Self {
AtomicBitFieldVec {
bits: transmute_vec_into_atomic(value.bits),
len: value.len,
bit_width: value.bit_width,
mask: value.mask,
}
}
}
impl<W: Word + AtomicPrimitive<Atomic: PrimitiveAtomicUnsigned>> From<BitFieldVec<Box<[W]>>>
for AtomicBitFieldVec<Box<[W::Atomic]>>
{
#[inline]
fn from(value: BitFieldVec<Box<[W]>>) -> Self {
AtomicBitFieldVec {
bits: transmute_boxed_slice_into_atomic(value.bits),
len: value.len,
bit_width: value.bit_width,
mask: value.mask,
}
}
}
impl<'a, W: Word + AtomicPrimitive<Atomic: PrimitiveAtomicUnsigned>> TryFrom<BitFieldVec<&'a [W]>>
    for AtomicBitFieldVec<&'a [W::Atomic]>
{
    type Error = CannotCastToAtomicError<W>;

    /// Reinterprets a shared slice of plain words as atomic words.
    ///
    /// Fails when the atomic wrapper requires a different alignment
    /// than the plain word (possible on some targets), since a `&[W]`
    /// is only guaranteed to be aligned for `W`.
    #[inline]
    fn try_from(value: BitFieldVec<&'a [W]>) -> Result<Self, Self::Error> {
        if core::mem::align_of::<W::Atomic>() != core::mem::align_of::<W>() {
            return Err(CannotCastToAtomicError::default());
        }
        Ok(AtomicBitFieldVec {
            // SAFETY: `W` and its atomic wrapper are assumed to share
            // size and bit layout, and alignment equality was checked
            // just above.
            bits: unsafe { core::mem::transmute::<&'a [W], &'a [W::Atomic]>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        })
    }
}
impl<'a, W: Word + AtomicPrimitive<Atomic: PrimitiveAtomicUnsigned>>
    TryFrom<BitFieldVec<&'a mut [W]>> for AtomicBitFieldVec<&'a mut [W::Atomic]>
{
    type Error = CannotCastToAtomicError<W>;

    /// Reinterprets an exclusive slice of plain words as atomic words.
    ///
    /// Fails when the atomic wrapper requires a different alignment
    /// than the plain word (possible on some targets), since a
    /// `&mut [W]` is only guaranteed to be aligned for `W`.
    #[inline]
    fn try_from(value: BitFieldVec<&'a mut [W]>) -> Result<Self, Self::Error> {
        if core::mem::align_of::<W::Atomic>() != core::mem::align_of::<W>() {
            return Err(CannotCastToAtomicError::default());
        }
        Ok(AtomicBitFieldVec {
            // SAFETY: `W` and its atomic wrapper are assumed to share
            // size and bit layout, and alignment equality was checked
            // just above.
            bits: unsafe { core::mem::transmute::<&'a mut [W], &'a mut [W::Atomic]>(value.bits) },
            len: value.len,
            bit_width: value.bit_width,
            mask: value.mask,
        })
    }
}
impl<W: Word> From<BitFieldVec<Vec<W>>> for BitFieldVec<Box<[W]>> {
    /// Converts the `Vec` backend into a boxed slice, dropping any
    /// excess capacity; the field metadata is carried over unchanged.
    fn from(value: BitFieldVec<Vec<W>>) -> Self {
        let BitFieldVec {
            bits,
            len,
            bit_width,
            mask,
        } = value;
        BitFieldVec {
            bits: bits.into_boxed_slice(),
            bit_width,
            mask,
            len,
        }
    }
}
impl<W: Word> From<BitFieldVec<Box<[W]>>> for BitFieldVec<Vec<W>> {
    /// Converts the boxed-slice backend into a `Vec` without copying;
    /// the field metadata is carried over unchanged.
    fn from(value: BitFieldVec<Box<[W]>>) -> Self {
        let BitFieldVec {
            bits,
            len,
            bit_width,
            mask,
        } = value;
        BitFieldVec {
            bits: bits.into_vec(),
            bit_width,
            mask,
            len,
        }
    }
}
// Lending-iterator GAT plumbing: the by-value iterator over a
// `BitFieldVec` borrows it for `'a` and yields `B::Word` values.
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValueGat<'a>
    for BitFieldVec<B>
{
    type Item = B::Word;
    type Iter = BitFieldVecIter<'a, B>;
}

impl<B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValue
    for BitFieldVec<B>
{
    // Iteration starts at the first element.
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.iter_from(0)
    }
}
// GAT plumbing for by-value iteration starting at an arbitrary position.
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValueFromGat<'a>
    for BitFieldVec<B>
{
    type Item = B::Word;
    type IterFrom = BitFieldVecIter<'a, B>;
}

impl<B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValueFrom
    for BitFieldVec<B>
{
    // Delegates to the vector's own positioned iterator.
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.iter_from(from)
    }
}
// By-value iteration for immutable subslices.
impl<'a, 'b, B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValueGat<'a>
    for BitFieldVecSubsliceImpl<'b, B>
{
    type Item = B::Word;
    type Iter = BitFieldVecIter<'a, B>;
}

impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValue
    for BitFieldVecSubsliceImpl<'a, B>
{
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        // NOTE(review): assumes `self.slice` is already restricted to the
        // subslice's range, so position 0 is the subslice's first element
        // — confirm against the `Subslices` derive.
        self.slice.iter_from(0)
    }
}

impl<'a, 'b, B: Backend<Word: Word> + AsRef<[B::Word]>>
    value_traits::iter::IterateByValueFromGat<'a> for BitFieldVecSubsliceImpl<'b, B>
{
    type Item = B::Word;
    type IterFrom = BitFieldVecIter<'a, B>;
}

impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValueFrom
    for BitFieldVecSubsliceImpl<'a, B>
{
    // Positioned iteration, again relative to the subslice.
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(from)
    }
}
// By-value iteration for mutable subslices (read-only iteration over a
// mutably borrowed subslice).
impl<'a, 'b, B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValueGat<'a>
    for BitFieldVecSubsliceImplMut<'b, B>
{
    type Item = B::Word;
    type Iter = BitFieldVecIter<'a, B>;
}

impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValue
    for BitFieldVecSubsliceImplMut<'a, B>
{
    fn iter_value(&self) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        // NOTE(review): assumes `self.slice` is already restricted to the
        // subslice's range, so position 0 is the subslice's first element
        // — confirm against the `SubslicesMut` derive.
        self.slice.iter_from(0)
    }
}

impl<'a, 'b, B: Backend<Word: Word> + AsRef<[B::Word]>>
    value_traits::iter::IterateByValueFromGat<'a> for BitFieldVecSubsliceImplMut<'b, B>
{
    type Item = B::Word;
    type IterFrom = BitFieldVecIter<'a, B>;
}

impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> value_traits::iter::IterateByValueFrom
    for BitFieldVecSubsliceImplMut<'a, B>
{
    // Positioned iteration, again relative to the subslice.
    fn iter_value_from(
        &self,
        from: usize,
    ) -> <Self as value_traits::iter::IterateByValueGat<'_>>::Iter {
        self.slice.iter_from(from)
    }
}
/// A wrapper around a [`BitFieldVec`] whose backing storage is known to
/// be large enough for unaligned, possibly word-straddling reads
/// (elements are fetched with `get_unaligned_unchecked`).
///
/// Instances are produced by `TryIntoUnaligned::try_into_unaligned`,
/// which appends a padding word when necessary.
#[derive(Debug, Clone, MemSize, MemDbg, value_traits::Subslices)]
#[value_traits_subslices(bound = "B: AsRef<[B::Word]>")]
#[value_traits_subslices(bound = "B::Word: Word")]
#[cfg_attr(feature = "epserde", derive(epserde::Epserde))]
#[repr(transparent)]
#[cfg_attr(
    feature = "epserde",
    epserde(bound(
        deser = "B: Backend + epserde::deser::DeserInner, for<'a> <B as epserde::deser::DeserInner>::DeserType<'a>: Backend<Word = B::Word>"
    ))
)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
    feature = "serde",
    serde(bound(
        serialize = "B: Backend + serde::Serialize, B::Word: serde::Serialize",
        deserialize = "B: Backend + serde::Deserialize<'de>, B::Word: serde::Deserialize<'de>"
    ))
)]
pub struct BitFieldVecU<B: Backend<Word: Word> = Vec<usize>>(BitFieldVec<B>);
impl<B: Backend<Word: Word>> BitFieldVecU<B> {
    /// Returns the mask selecting a single field, delegating to the
    /// inner [`BitFieldVec`].
    pub const fn mask(&self) -> B::Word {
        self.0.mask()
    }
}
// The wrapper exposes the same backend word type as the wrapped vector.
impl<B: Backend<Word: Word>> Backend for BitFieldVecU<B> {
    type Word = B::Word;
}

impl<B: Backend<Word: Word>> BitWidth for BitFieldVecU<B> {
    /// Returns the width in bits of each field, delegating to the inner
    /// vector.
    #[inline(always)]
    fn bit_width(&self) -> usize {
        self.0.bit_width()
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> BitFieldSlice for BitFieldVecU<B> {
    /// Returns the backing words, delegating to the inner vector.
    fn as_slice(&self) -> &[Self::Value] {
        self.0.as_slice()
    }
}

impl<B: Backend<Word: Word> + AsRef<[B::Word]>> AsRef<[B::Word]> for BitFieldVecU<B> {
    // Borrows the raw backing words of the inner vector.
    fn as_ref(&self) -> &[B::Word] {
        self.0.bits.as_ref()
    }
}
// Identity conversions: these word containers convert to themselves and
// never fail.
impl<W: Word> crate::traits::TryIntoUnaligned for Box<[W]> {
    type Unaligned = Box<[W]>;
    fn try_into_unaligned(
        self,
    ) -> Result<Self::Unaligned, crate::traits::UnalignedConversionError> {
        Ok(self)
    }
}

impl<W: Word> crate::traits::TryIntoUnaligned for Vec<W> {
    type Unaligned = Vec<W>;
    fn try_into_unaligned(
        self,
    ) -> Result<Self::Unaligned, crate::traits::UnalignedConversionError> {
        Ok(self)
    }
}

impl<W: Word, const N: usize> crate::traits::TryIntoUnaligned for [W; N] {
    type Unaligned = [W; N];
    fn try_into_unaligned(
        self,
    ) -> Result<Self::Unaligned, crate::traits::UnalignedConversionError> {
        Ok(self)
    }
}
impl<W: Word> crate::traits::TryIntoUnaligned for BitFieldVec<Box<[W]>> {
    type Unaligned = BitFieldVecU<Box<[W]>>;

    /// Converts this vector into one suitable for unaligned reads by
    /// ensuring that the backing slice holds at least one word beyond
    /// the minimum needed for `len * bit_width` bits, so that a
    /// word-straddling read never runs past the end of the slice.
    fn try_into_unaligned(
        self,
    ) -> Result<Self::Unaligned, crate::traits::UnalignedConversionError> {
        let bw = self.bit_width();
        // Crate macro; presumably rejects (word, width) combinations
        // unsupported for unaligned access — see its definition.
        ensure_unaligned!(W, bw);
        // Minimum number of words needed to hold all the bits.
        let needed = (SliceByValue::len(&self) * bw).div_ceil(W::BITS as usize);
        if self.as_slice().len() > needed {
            // Already at least one spare word: wrap as-is.
            Ok(BitFieldVecU(self))
        } else {
            // Append one zero padding word. `reserve_exact(1)` grows the
            // capacity to exactly len + 1, so `into_boxed_slice` below
            // does not have to reallocate a second time.
            let (raw_bits, bit_width, len) = self.into_raw_parts();
            let mut v = raw_bits.into_vec();
            v.reserve_exact(1);
            v.push(W::ZERO);
            Ok(BitFieldVecU(unsafe {
                BitFieldVec::from_raw_parts(v.into_boxed_slice(), bit_width, len)
            }))
        }
    }
}
impl<W: Word> From<BitFieldVecU<Box<[W]>>> for BitFieldVec<Box<[W]>> {
    /// Unwraps the inner [`BitFieldVec`]; any padding word in the
    /// backing slice is retained.
    fn from(unaligned: BitFieldVecU<Box<[W]>>) -> Self {
        unaligned.0
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> SliceByValue for BitFieldVecU<B> {
    type Value = B::Word;

    /// Returns the number of elements, delegating to the inner vector.
    #[inline(always)]
    fn len(&self) -> usize {
        SliceByValue::len(&self.0)
    }

    /// Reads the element at `index` with an unaligned access.
    ///
    /// # Safety
    /// `index` must be within bounds.
    #[inline(always)]
    unsafe fn get_value_unchecked(&self, index: usize) -> B::Word {
        unsafe { self.0.get_unaligned_unchecked(index) }
    }
}
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> IntoUncheckedIterator for &'a BitFieldVecU<B> {
    type Item = B::Word;
    type IntoUncheckedIter = BitFieldVecUncheckedIter<'a, B>;
    // Forward unchecked iteration over the inner vector starting at
    // `from`.
    fn into_unchecked_iter_from(self, from: usize) -> Self::IntoUncheckedIter {
        BitFieldVecUncheckedIter::new(&self.0, from)
    }
}

impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> IntoUncheckedBackIterator
    for &'a BitFieldVecU<B>
{
    type Item = B::Word;
    type IntoUncheckedIterBack = BitFieldVecUncheckedBackIter<'a, B>;
    // Backward unchecked iteration starting from the end of the vector.
    fn into_unchecked_iter_back(self) -> Self::IntoUncheckedIterBack {
        BitFieldVecUncheckedBackIter::new(&self.0, SliceByValue::len(&self.0))
    }
    // Backward unchecked iteration from the given position.
    fn into_unchecked_iter_back_from(self, from: usize) -> Self::IntoUncheckedIterBack {
        BitFieldVecUncheckedBackIter::new(&self.0, from)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Pushing exactly up to the initially reserved capacity must not
    /// cause a reallocation of the backing words.
    #[test]
    fn test_with_capacity() {
        let mut b = BitFieldVec::<Vec<usize>>::with_capacity(10, 100);
        let capacity = b.bits.capacity();
        for _ in 0..100 {
            b.push(0);
        }
        assert_eq!(b.bits.capacity(), capacity);
    }

    /// Reference element-by-element copy used to validate the optimized
    /// `BitFieldVec::copy`. Clamps `len` so neither slice is overrun.
    fn copy<
        B: Backend<Word: Word> + AsRef<[B::Word]>,
        C: Backend<Word = B::Word> + AsRef<[B::Word]> + AsMut<[B::Word]>,
    >(
        source: &BitFieldVec<B>,
        from: usize,
        dest: &mut BitFieldVec<C>,
        to: usize,
        len: usize,
    ) {
        let len = Ord::min(Ord::min(len, dest.len - to), source.len - from);
        for i in 0..len {
            dest.set_value(to + i, source.index_value(from + i));
        }
    }

    /// Asserts that `BitFieldVec::copy` agrees with the reference `copy`
    /// on 3-bit vectors of length `n` filled with the given patterns,
    /// copying `len` elements from position `from` to position `to`.
    fn check_copy(
        src_pattern: usize,
        dst_pattern: usize,
        n: usize,
        from: usize,
        to: usize,
        len: usize,
    ) {
        let source = bit_field_vec![3 => src_pattern; n];
        let mut dest_actual = bit_field_vec![3 => dst_pattern; n];
        let mut dest_expected = dest_actual.clone();
        source.copy(from, &mut dest_actual, to, len);
        copy(&source, from, &mut dest_expected, to, len);
        assert_eq!(dest_actual, dest_expected);
    }

    #[test]
    fn test_copy() {
        // Exercise every 3-bit source/destination fill pattern.
        for src_pattern in 0..8 {
            for dst_pattern in 0..8 {
                // Short copies at various offsets.
                check_copy(src_pattern, dst_pattern, 100, 1, 2, 10);
                check_copy(src_pattern, dst_pattern, 100, 1, 20, 10);
                check_copy(src_pattern, dst_pattern, 100, 20, 1, 10);
                // Longer copies whose source/destination alignments
                // differ and cross word boundaries.
                check_copy(src_pattern, dst_pattern, 1000, 3, 3 + 3 * 128, 40);
                check_copy(src_pattern, dst_pattern, 1000, 3, 3 + 3 * 128, 61);
                check_copy(src_pattern, dst_pattern, 1000, 3, 7 + 64 * 3, 40);
                check_copy(src_pattern, dst_pattern, 1000, 3, 7 + 64 * 3, 40 + 17);
                check_copy(src_pattern, dst_pattern, 1000, 7, 3 + 64 * 3, 40 + 64);
                check_copy(src_pattern, dst_pattern, 1000, 7, 3 + 64 * 3, 40 + 21 + 64);
            }
        }
    }
}