#[macro_use]
pub mod macros;
pub mod builder;
pub mod iter;
pub mod iter_mut;
#[cfg(feature = "parallel")]
pub mod parallel;
pub mod proxy;
pub mod slice;
pub mod traits;
pub mod atomic;
#[cfg(feature = "serde")]
mod serde;
use dsi_bitstream::{
prelude::Endianness,
traits::{BE, LE},
};
use mem_dbg::{MemDbg, MemSize};
use num_traits::ToPrimitive;
use std::{error::Error as StdError, fmt, iter::FromIterator, marker::PhantomData};
use traits::{Storable, Word};
use crate::fixed::{proxy::MutProxy, traits::DefaultParams};
pub use atomic::{AtomicFixedVec, SAtomicFixedVec, UAtomicFixedVec};
// Convenience aliases fixing the limb type and endianness of `FixedVec`.

/// `usize`-limbed, little-endian vector.
pub type UFixedVec<T, B = Vec<usize>> = FixedVec<T, usize, LE, B>;
/// `usize`-limbed, little-endian vector for signed-style elements.
// NOTE(review): currently identical to `UFixedVec`; signedness is carried
// entirely by the element type `T` — confirm this duplication is intended.
pub type SFixedVec<T, B = Vec<usize>> = FixedVec<T, usize, LE, B>;
/// Little-endian vector of `u64` values over `u64` limbs.
pub type LEFixedVec<B = Vec<u64>> = FixedVec<u64, u64, LE, B>;
/// Little-endian vector of `i64` values over `u64` limbs.
pub type LESFixedVec<B = Vec<u64>> = FixedVec<i64, u64, LE, B>;
/// Big-endian vector of `u64` values over `u64` limbs.
pub type BEFixedVec<B = Vec<u64>> = FixedVec<u64, u64, BE, B>;
/// Big-endian vector of `i64` values over `u64` limbs.
pub type BESFixedVec<B = Vec<u64>> = FixedVec<i64, u64, BE, B>;
/// How the per-element bit width is chosen when building a vector.
/// Exact semantics live in the `builder` module.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub enum BitWidth {
    /// Presumably the smallest width that fits every input value (default)
    /// — confirm against `builder`.
    #[default]
    Minimal,
    /// Presumably the minimal width rounded up to a power of two.
    PowerOfTwo,
    /// Use exactly the given number of bits.
    Explicit(usize),
}
/// Errors produced by `FixedVec` constructors and fallible mutators.
#[derive(Debug)]
pub enum Error {
    /// A value needs more than the configured number of bits.
    ValueTooLarge {
        /// The offending value, widened to `u128`.
        value: u128,
        /// Element index at which the value was rejected.
        index: usize,
        /// The configured per-element width in bits.
        bit_width: usize,
    },
    /// An invalid configuration (e.g. a bit width larger than the word size,
    /// or a backing buffer that is too small).
    InvalidParameters(String),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::ValueTooLarge {
value,
index,
bit_width,
} => write!(
f,
"value {value} at index {index} does not fit in {bit_width} bits"
),
Error::InvalidParameters(s) => write!(f, "Invalid parameters: {s}"),
}
}
}
impl StdError for Error {}
/// A compact vector of `len` elements, each occupying `bit_width` bits,
/// packed into a buffer of `W` limbs with endianness `E`.
#[derive(Debug, Clone, MemDbg, MemSize)]
pub struct FixedVec<T: Storable<W>, W: Word, E: Endianness, B: AsRef<[W]> = Vec<W>> {
    // Backing limb storage; constructors keep one trailing padding word so
    // the unaligned full-word read paths stay in bounds near the end.
    pub(crate) bits: B,
    // Bits per element; at most `W::BITS` (enforced by constructors).
    pub(crate) bit_width: usize,
    // Precomputed `(1 << bit_width) - 1` (all ones when bit_width == W::BITS).
    pub(crate) mask: W,
    // Number of logical elements.
    pub(crate) len: usize,
    // Ties the element type `T`, limb `W` and endianness `E` to the value.
    pub(crate) _phantom: PhantomData<(T, W, E)>,
}
// Constructors that go through the bit-stream builder; the extra bound
// requires an infallible in-memory bit writer for (W, E).
impl<T, W, E> FixedVec<T, W, E, Vec<W>>
where
    T: Storable<W>,
    W: Word,
    E: Endianness,
    dsi_bitstream::impls::BufBitWriter<E, dsi_bitstream::impls::MemWordWriterVec<W, Vec<W>>>:
        dsi_bitstream::prelude::BitWrite<E, Error = std::convert::Infallible>,
{
    /// Returns a configurable builder for constructing a vector.
    pub fn builder() -> builder::FixedVecBuilder<T, W, E> {
        builder::FixedVecBuilder::new()
    }
    /// Returns a builder that consumes `iter`, writing `bit_width`-bit elements.
    pub fn from_iter_builder<I: IntoIterator<Item = T>>(
        iter: I,
        bit_width: usize,
    ) -> builder::FixedVecFromIterBuilder<T, W, E, I> {
        builder::FixedVecFromIterBuilder::new(iter, bit_width)
    }
    /// Builds a vector from `slice` with the builder's default settings.
    pub fn from_slice(slice: &[T]) -> Result<Self, Error> {
        Self::builder().build(slice)
    }
}
impl<T, W, E, B> FixedVec<T, W, E, B>
where
T: Storable<W>,
W: Word,
E: Endianness,
B: AsRef<[W]>,
{
pub fn from_parts(bits: B, len: usize, bit_width: usize) -> Result<Self, Error> {
if bit_width > <W as traits::Word>::BITS {
return Err(Error::InvalidParameters(format!(
"bit_width ({}) cannot be greater than the word size ({})",
bit_width,
<W as traits::Word>::BITS
)));
}
let total_bits = len * bit_width;
let data_words = total_bits.div_ceil(<W as traits::Word>::BITS);
if bits.as_ref().len() < data_words + 1 {
return Err(Error::InvalidParameters(format!(
"The provided buffer is too small. It has {} words, but {} data words + 1 padding word are required.",
bits.as_ref().len(),
data_words
)));
}
Ok(unsafe { Self::new_unchecked(bits, len, bit_width) })
}
/// Returns the number of elements stored.
#[inline]
pub fn len(&self) -> usize {
    self.len
}
/// Returns `true` if the vector holds no elements.
#[inline]
pub fn is_empty(&self) -> bool {
    self.len == 0
}
/// Returns the configured number of bits per element.
#[inline]
pub fn bit_width(&self) -> usize {
    self.bit_width
}
/// Returns the raw backing limbs (including any trailing padding word).
#[inline]
pub fn as_limbs(&self) -> &[W] {
    self.bits.as_ref()
}
/// Returns a pointer to the first limb together with the limb count.
pub fn as_raw_parts(&self) -> (*const W, usize) {
    let slice = self.bits.as_ref();
    (slice.as_ptr(), slice.len())
}
/// Assembles a vector from raw parts without validation.
///
/// # Safety
/// Callers must guarantee `bit_width <= W::BITS` and that `bits` holds
/// enough limbs for `len` elements (plus the padding word expected by the
/// unaligned read paths).
pub(crate) unsafe fn new_unchecked(bits: B, len: usize, bit_width: usize) -> Self {
    // A full-word width would make the shift below overflow, so the
    // all-ones mask is special-cased.
    let mask = if bit_width == <W as traits::Word>::BITS {
        W::max_value()
    } else {
        (W::ONE << bit_width) - W::ONE
    };
    Self {
        bits,
        len,
        bit_width,
        mask,
        _phantom: PhantomData,
    }
}
/// Returns the element at `index`, or `None` when out of bounds.
#[inline]
pub fn get(&self, index: usize) -> Option<T> {
    // SAFETY: the boolean guard guarantees `index < self.len`.
    (index < self.len).then(|| unsafe { self.get_unchecked(index) })
}
/// Reads the element at `index` without bounds checking.
///
/// # Safety
/// `index` must be strictly less than `self.len`.
#[inline(always)]
pub unsafe fn get_unchecked(&self, index: usize) -> T {
    debug_assert!(index < self.len);
    let bits_per_word = <W as traits::Word>::BITS;
    // Fast path: full-word elements map 1:1 onto limbs.
    if self.bit_width == bits_per_word {
        let val = unsafe { *self.as_limbs().get_unchecked(index) };
        // Big-endian storage keeps limbs byte-swapped in memory.
        let final_val = if E::IS_BIG { val.to_be() } else { val };
        return <T as Storable<W>>::from_word(final_val);
    }
    let bit_pos = index * self.bit_width;
    let word_index = bit_pos / bits_per_word;
    let bit_offset = bit_pos % bits_per_word;
    let limbs = self.as_limbs();
    let final_word: W;
    if E::IS_LITTLE {
        if bit_offset + self.bit_width <= bits_per_word {
            // Element fully contained in a single limb.
            final_word =
                unsafe { (*limbs.get_unchecked(word_index) >> bit_offset) & self.mask };
        } else {
            // Element straddles two limbs: tail of the first combined with
            // the head of the second.
            let low = unsafe { *limbs.get_unchecked(word_index) >> bit_offset };
            let high =
                unsafe { *limbs.get_unchecked(word_index + 1) << (bits_per_word - bit_offset) };
            final_word = (low | high) & self.mask;
        }
    } else {
        // Big-endian: normalize the byte-swapped limb first.
        let word_hi = unsafe { (*limbs.get_unchecked(word_index)).to_be() };
        if bit_offset + self.bit_width <= bits_per_word {
            // Drop the leading bits, then right-align the element.
            final_word = (word_hi << bit_offset) >> (bits_per_word - self.bit_width);
        } else {
            let word_lo = unsafe { (*limbs.get_unchecked(word_index + 1)).to_be() };
            let bits_in_first = bits_per_word - bit_offset;
            // High part comes from the first limb, low part from the second.
            let high = word_hi << bit_offset >> (bits_per_word - bits_in_first);
            let low = word_lo >> (bits_per_word - (self.bit_width - bits_in_first));
            final_word = (high << (self.bit_width - bits_in_first)) | low;
        }
    }
    <T as Storable<W>>::from_word(final_word)
}
/// Bounds-checked read that prefers a single unaligned word load.
///
/// Falls back to [`Self::get_unchecked`] when the element width makes a
/// one-shot unaligned read insufficient.
#[inline]
pub fn get_unaligned(&self, index: usize) -> Option<T> {
    if index >= self.len {
        return None;
    }
    // SAFETY: index bounds-checked above; `is_safe` mirrors the conditions
    // asserted by `get_unaligned_unchecked`.
    unsafe {
        let bits_per_word = <W as Word>::BITS;
        let bit_width = self.bit_width;
        // Widths for which a single word-sized unaligned read always covers
        // the element (see the debug_assert in the unchecked variant).
        let is_safe = (bit_width <= bits_per_word.saturating_sub(6)) || (bit_width == bits_per_word.saturating_sub(4)) || (bit_width == bits_per_word);
        let value = if is_safe {
            self.get_unaligned_unchecked(index)
        } else {
            self.get_unchecked(index)
        };
        Some(value)
    }
}
/// Reads the element via one unaligned word load at its byte position.
///
/// Relies on the trailing padding word so the full-word read cannot run
/// past the buffer. On big-endian layouts it defers to the generic path.
///
/// # Safety
/// `index < len`, and `bit_width` must satisfy the conditions checked by
/// the `debug_assert!` below.
#[inline(always)]
pub unsafe fn get_unaligned_unchecked(&self, index: usize) -> T {
    debug_assert!(index < self.len);
    if E::IS_LITTLE {
        let bits_per_word = <W as Word>::BITS;
        let bit_width = self.bit_width;
        if bit_width == bits_per_word {
            return unsafe { self.get_unchecked(index) };
        }
        debug_assert!(
            {
                let is_safe_contiguous = bit_width <= bits_per_word.saturating_sub(6); let is_safe_case_60 = bit_width == bits_per_word.saturating_sub(4); is_safe_contiguous || is_safe_case_60
            },
            "get_unaligned_unchecked is not safe for this bit_width ({bit_width}). \
            The value may span more than {bits_per_word} bits, making a single read insufficient. \
            Use get_unaligned() for a safe version with an automatic fallback."
        );
        let bit_pos = index * bit_width;
        let byte_pos = bit_pos / 8;
        let bit_rem = bit_pos % 8;
        let limbs_ptr = self.as_limbs().as_ptr() as *const u8;
        // SAFETY: caller contract + padding word keep the W-sized read in
        // bounds; read_unaligned tolerates the arbitrary byte address.
        let word: W = unsafe { (limbs_ptr.add(byte_pos) as *const W).read_unaligned() };
        let extracted_word = word >> bit_rem;
        <T as Storable<W>>::from_word(extracted_word & self.mask)
    } else {
        // Big-endian limbs are byte-swapped in memory; the unaligned trick
        // does not apply.
        unsafe { self.get_unchecked(index) }
    }
}
/// Returns a checked iterator over all elements.
pub fn iter(&self) -> iter::FixedVecIter<'_, T, W, E, B> {
    iter::FixedVecIter::new(self)
}
/// Returns an iterator that skips per-element bounds checks.
///
/// # Safety
/// See `iter::FixedVecUncheckedIter` for the contract the caller must uphold.
pub unsafe fn iter_unchecked(&self) -> iter::FixedVecUncheckedIter<'_, T, W, E, B> {
    iter::FixedVecUncheckedIter::new(self)
}
/// Returns a read-only view of `len` elements starting at `start`, or
/// `None` when the range exceeds the vector.
pub fn slice(&self, start: usize, len: usize) -> Option<slice::FixedVecSlice<&Self>> {
    // Saturating add: a wrapping `start + len` must also be rejected.
    match start.saturating_add(len) <= self.len {
        true => Some(slice::FixedVecSlice::new(self, start..start + len)),
        false => None,
    }
}
/// Splits the vector into the views `[0, mid)` and `[mid, len)`, or `None`
/// when `mid` is past the end.
pub fn split_at(
    &self,
    mid: usize,
) -> Option<(slice::FixedVecSlice<&Self>, slice::FixedVecSlice<&Self>)> {
    (mid <= self.len).then(|| {
        (
            slice::FixedVecSlice::new(self, 0..mid),
            slice::FixedVecSlice::new(self, mid..self.len),
        )
    })
}
/// Returns an iterator over consecutive chunks of `chunk_size` elements.
pub fn chunks(&self, chunk_size: usize) -> iter::Chunks<'_, T, W, E, B> {
    iter::Chunks::new(self, chunk_size)
}
/// Returns an iterator over overlapping windows of `size` elements.
///
/// # Panics
/// Panics if `size` is zero.
pub fn windows(&self, size: usize) -> iter::Windows<'_, T, W, E, B> {
    assert!(size != 0, "window size cannot be zero");
    iter::Windows::new(self, size)
}
/// Returns a pointer to the limb containing the first bit of element
/// `index`, or `None` when the index (or the computed limb) is out of range.
pub fn addr_of(&self, index: usize) -> Option<*const W> {
    if index >= self.len {
        return None;
    }
    let word_idx = (index * self.bit_width) / <W as Word>::BITS;
    let limbs = self.as_limbs();
    (word_idx < limbs.len()).then(|| limbs.as_ptr().wrapping_add(word_idx))
}
/// Hints the CPU to prefetch the cache line holding element `index`.
/// No-op for out-of-range indices and on non-x86 targets.
pub fn prefetch(&self, index: usize) {
    if index >= self.len {
        return;
    }
    // BUG FIX: the cfg matched both x86 and x86_64, but the import came from
    // `std::arch::x86_64`, which does not exist on 32-bit x86 targets and
    // would break compilation there. Import from the matching arch module.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    {
        #[cfg(target_arch = "x86")]
        use std::arch::x86::{_MM_HINT_T0, _mm_prefetch};
        #[cfg(target_arch = "x86_64")]
        use std::arch::x86_64::{_MM_HINT_T0, _mm_prefetch};
        let bit_pos = index * self.bit_width;
        let byte_pos = bit_pos / 8;
        let limbs_ptr = self.as_limbs().as_ptr() as *const i8;
        // SAFETY: index < len keeps byte_pos inside the limb buffer; a
        // prefetch of any address is architecturally harmless anyway.
        unsafe {
            _mm_prefetch(limbs_ptr.add(byte_pos), _MM_HINT_T0);
        }
    }
}
/// Binary-searches a sorted vector for `value`.
///
/// Returns `Ok(index)` of a matching element, or `Err(insertion_point)`.
pub fn binary_search(&self, value: &T) -> Result<usize, usize>
where
    T: Ord,
{
    // Delegate to the comparator-based search using the natural ordering;
    // the probe sequence is identical to an inlined loop.
    self.binary_search_by(|probe| probe.cmp(value))
}
/// Binary-searches with a key-extraction function over a vector sorted by
/// that key.
pub fn binary_search_by_key<K: Ord, F>(&self, key: &K, mut f: F) -> Result<usize, usize>
where
    F: FnMut(T) -> K,
{
    // Compare the extracted key of each probed element against `key`.
    self.binary_search_by(|probe| f(*probe).cmp(key))
}
/// Binary-searches with a caller-supplied comparator.
///
/// Returns `Ok(index)` of an element comparing `Equal`, otherwise
/// `Err(insertion_point)`.
pub fn binary_search_by<F>(&self, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&T) -> std::cmp::Ordering,
{
    use std::cmp::Ordering;
    let (mut lo, mut hi) = (0usize, self.len());
    while lo < hi {
        let probe = lo + (hi - lo) / 2;
        // SAFETY: lo <= probe < hi <= len.
        let elem = unsafe { self.get_unchecked(probe) };
        match f(&elem) {
            Ordering::Less => lo = probe + 1,
            Ordering::Greater => hi = probe,
            Ordering::Equal => return Ok(probe),
        }
    }
    Err(lo)
}
/// Returns the index of the first element for which `pred` is false,
/// assuming the vector is partitioned with all `true` elements first.
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
    P: FnMut(&T) -> bool,
{
    let mut lo = 0;
    let mut hi = self.len();
    // Classic lower-bound loop; probes the same midpoints as the
    // length-halving formulation.
    while lo < hi {
        let mid = lo + (hi - lo) / 2;
        // SAFETY: lo <= mid < hi <= len.
        let value = unsafe { self.get_unchecked(mid) };
        if pred(&value) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    lo
}
}
/// Borrowing iteration: yields copies of the stored elements.
impl<'a, T, W, E, B> IntoIterator for &'a FixedVec<T, W, E, B>
where
    T: Storable<W>,
    W: Word,
    E: Endianness,
    B: AsRef<[W]>,
{
    type Item = T;
    type IntoIter = iter::FixedVecIter<'a, T, W, E, B>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// Consuming iteration over an owned, `Vec`-backed vector.
impl<T, W, E> IntoIterator for FixedVec<T, W, E, Vec<W>>
where
    T: Storable<W> + 'static,
    W: Word,
    E: Endianness,
{
    type Item = T;
    type IntoIter = iter::FixedVecIntoIter<T, W, E>;
    fn into_iter(self) -> Self::IntoIter {
        iter::FixedVecIntoIter::new(self)
    }
}
/// Collects an iterator via the builder with its default settings.
impl<T, W, E> FromIterator<T> for FixedVec<T, W, E, Vec<W>>
where
    T: Storable<W>,
    W: Word,
    E: Endianness,
    dsi_bitstream::impls::BufBitWriter<E, dsi_bitstream::impls::MemWordWriterVec<W, Vec<W>>>:
        dsi_bitstream::prelude::BitWrite<E, Error = std::convert::Infallible>,
{
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        // Buffer the items first: the builder needs a slice.
        let data: Vec<T> = iter.into_iter().collect();
        Self::builder().build(&data).unwrap()
    }
}
/// An empty vector with a 1-bit element width.
impl<T, W, E> Default for FixedVec<T, W, E, Vec<W>>
where
    T: Storable<W>,
    W: Word,
    E: Endianness,
{
    fn default() -> Self {
        // SAFETY: bit_width 1 <= W::BITS and len 0 needs no buffer.
        unsafe { Self::new_unchecked(Vec::new(), 0, 1) }
    }
}
impl<T, W, E> FixedVec<T, W, E, Vec<W>>
where
T: Storable<W> + ToPrimitive,
W: Word,
E: Endianness,
{
pub fn new(bit_width: usize) -> Result<Self, Error> {
if bit_width > <W as traits::Word>::BITS {
return Err(Error::InvalidParameters(format!(
"bit_width ({}) cannot be greater than the word size ({})",
bit_width,
<W as traits::Word>::BITS
)));
}
Ok(unsafe { Self::new_unchecked(Vec::new(), 0, bit_width) })
}
/// Appends `value` to the end of the vector.
///
/// # Panics
/// Panics if `value` does not fit in the configured bit width.
#[inline(always)]
pub fn push(&mut self, value: T) {
    let value_w = <T as Storable<W>>::into_word(value);
    if (value_w & !self.mask) != W::ZERO {
        panic!(
            "Value {:?} does not fit in the configured bit_width of {}",
            value_w, self.bit_width
        );
    }
    let bits_per_word = <W as traits::Word>::BITS;
    // BUG FIX: grow to `data words + 1`. The rest of the crate (from_parts,
    // resize, extend_from_slice, shrink_to_fit) maintains one trailing
    // padding word so `get_unaligned*` can issue a full-word unaligned read
    // at the last element without running past the buffer. The old code grew
    // by data words only, so a push-built vector violated that invariant.
    let required_vec_len =
        ((self.len + 1) * self.bit_width).div_ceil(bits_per_word) + 1;
    if self.bits.len() < required_vec_len {
        self.bits.resize(required_vec_len, W::ZERO);
    }
    // SAFETY: slot self.len now lies inside the (just grown) buffer and the
    // value fits in bit_width bits.
    unsafe {
        self.set_unchecked(self.len, value_w);
    }
    self.len += 1;
}
/// Removes and returns the last element, or `None` when empty.
pub fn pop(&mut self) -> Option<T> {
    // checked_sub doubles as the emptiness test.
    let last = self.len.checked_sub(1)?;
    let value = self.get(last);
    self.len = last;
    value
}
/// Drops all elements. The backing buffer keeps its contents and capacity.
pub fn clear(&mut self) {
    self.len = 0;
}
/// Creates an empty vector pre-allocating room for `capacity` elements.
///
/// # Errors
/// Returns `Error::InvalidParameters` when `bit_width` exceeds `W::BITS`.
pub fn with_capacity(bit_width: usize, capacity: usize) -> Result<Self, Error> {
    let word_bits = <W as traits::Word>::BITS;
    if bit_width > word_bits {
        return Err(Error::InvalidParameters(format!(
            "bit_width ({}) cannot be greater than the word size ({})",
            bit_width, word_bits
        )));
    }
    let buffer = match capacity {
        0 => Vec::new(),
        _ => {
            // Data words for `capacity` elements, plus one padding word.
            let words = capacity.saturating_mul(bit_width).div_ceil(word_bits);
            Vec::with_capacity(words + 1)
        }
    };
    // SAFETY: bit_width validated above; the buffer is empty and len is 0.
    Ok(unsafe { Self::new_unchecked(buffer, 0, bit_width) })
}
/// Returns how many elements fit without reallocating.
///
/// A zero `bit_width` reports unlimited capacity by convention. One limb of
/// the allocation is reserved as padding, hence the `- 1`.
pub fn capacity(&self) -> usize {
    if self.bit_width == 0 {
        return usize::MAX;
    }
    let word_capacity = self.bits.capacity();
    // With at most one limb there is no room after the padding word.
    if word_capacity <= 1 {
        return 0;
    }
    ((word_capacity - 1) * <W as traits::Word>::BITS) / self.bit_width
}
/// Returns the capacity of the backing buffer, in limbs.
pub fn word_capacity(&self) -> usize {
    self.bits.capacity()
}
/// Ensures capacity for at least `additional` more elements.
pub fn reserve(&mut self, additional: usize) {
    let target_element_capacity = self.len.saturating_add(additional);
    if self.capacity() >= target_element_capacity {
        return;
    }
    let bits_per_word = <W as Word>::BITS;
    let required_total_bits = target_element_capacity.saturating_mul(self.bit_width);
    let required_data_words = required_total_bits.div_ceil(bits_per_word);
    // One extra limb preserves the padding invariant used by the unaligned
    // read paths.
    let required_word_capacity = required_data_words + 1;
    let current_len = self.bits.len();
    // `Vec::reserve` counts beyond the current *length*, so reserve the
    // difference needed to reach the required word capacity.
    if self.bits.capacity() < required_word_capacity {
        self.bits.reserve(required_word_capacity - current_len);
    }
}
/// Resizes to `new_len`, filling new slots with `value` when growing.
/// Shrinking only lowers `len`; stale bits stay in the buffer.
///
/// # Panics
/// Panics if `value` does not fit in the configured bit width.
#[inline(always)]
pub fn resize(&mut self, new_len: usize, value: T) {
    if new_len > self.len {
        let value_w = <T as Storable<W>>::into_word(value);
        if (value_w & !self.mask) != W::ZERO {
            panic!(
                "Value {:?} does not fit in the configured bit_width of {}",
                value_w, self.bit_width
            );
        }
        let bits_per_word = <W as traits::Word>::BITS;
        let required_total_bits = new_len * self.bit_width;
        let required_data_words = required_total_bits.div_ceil(bits_per_word);
        // Keep one trailing padding limb for the unaligned read paths.
        let required_vec_len = required_data_words.saturating_add(1);
        if self.bits.len() < required_vec_len {
            self.bits.resize(required_vec_len, W::ZERO);
        }
        for i in self.len..new_len {
            unsafe {
                self.set_unchecked(i, value_w);
            }
        }
        self.len = new_len;
    } else {
        self.len = new_len;
    }
}
/// Releases unused buffer space, keeping the data limbs plus the one
/// trailing padding limb the unaligned read paths require.
pub fn shrink_to_fit(&mut self) {
    let keep = match self.len {
        0 => 0,
        n => {
            let word_bits = <W as traits::Word>::BITS;
            // Data limbs for `n` elements, plus the padding limb.
            n.saturating_mul(self.bit_width).div_ceil(word_bits) + 1
        }
    };
    if self.bits.len() > keep {
        self.bits.truncate(keep);
    }
    self.bits.shrink_to_fit();
}
/// Removes the element at `index`, shifting all later elements left.
///
/// # Panics
/// Panics if `index >= len`.
pub fn remove(&mut self, index: usize) -> T {
    assert!(index < self.len, "remove: index out of bounds");
    let value_to_return = self.get(index).unwrap();
    let start_bit = index * self.bit_width;
    let end_bit = self.len * self.bit_width;
    // All bits after the removed element slide down by one element width.
    let total_bits_to_shift = end_bit - (start_bit + self.bit_width);
    if total_bits_to_shift > 0 {
        self.shift_bits_left(start_bit, self.bit_width, total_bits_to_shift);
    }
    self.len -= 1;
    value_to_return
}
/// Inserts `element` at `index`, shifting later elements right.
///
/// # Panics
/// Panics if `index > len` or if `element` does not fit in the configured
/// bit width.
pub fn insert(&mut self, index: usize, element: T) {
    assert!(index <= self.len, "insert: index out of bounds");
    let value_w = <T as Storable<W>>::into_word(element);
    let bits_per_word = <W as Word>::BITS;
    // `limit` is the first value that no longer fits; a full-word width
    // cannot overflow and is therefore never checked.
    let limit = if self.bit_width < bits_per_word {
        W::ONE << self.bit_width
    } else {
        W::max_value()
    };
    if self.bit_width < bits_per_word && value_w >= limit {
        panic!(
            "Value {:?} does not fit in the configured bit_width of {}",
            value_w, self.bit_width
        );
    }
    self.reserve(1);
    // BUG FIX: `reserve` only grows the Vec's *capacity*, and the buffer
    // length was previously extended only inside `shift_bits_right`. When
    // inserting at the end (num_bits_to_move == 0) no shift happens, so
    // `set_unchecked` could write past `bits.len()`. Grow the buffer to the
    // required length (data words + 1 padding word) unconditionally.
    let required_vec_len =
        ((self.len + 1) * self.bit_width).div_ceil(bits_per_word) + 1;
    if self.bits.len() < required_vec_len {
        self.bits.resize(required_vec_len, W::ZERO);
    }
    let start_shift_bit = index * self.bit_width;
    let num_bits_to_move = (self.len - index) * self.bit_width;
    if num_bits_to_move > 0 {
        // Open a one-element gap for the new value.
        self.shift_bits_right(start_shift_bit, self.bit_width, num_bits_to_move);
    }
    // Bump the length first so the new slot is within the logical range.
    self.len += 1;
    // SAFETY: the buffer was grown above and the value fits in bit_width.
    unsafe {
        self.set_unchecked(index, value_w);
    }
}
/// Moves `num_bits_to_move` bits located `shift_amount` bits after
/// `start_bit` down to `start_bit` (i.e. shifts them toward bit 0).
/// Used by `remove` to close the gap left by the removed element.
///
// NOTE(review): the non-word-aligned path ignores whole-word components of
// `shift_amount` (it only uses `shift_rem`); that is fine for the single
// call site, where `shift_amount == bit_width <= W::BITS` — confirm before
// reusing with larger shifts.
fn shift_bits_left(&mut self, start_bit: usize, shift_amount: usize, num_bits_to_move: usize) {
    if num_bits_to_move == 0 {
        return;
    }
    let bits_per_word = <W as Word>::BITS;
    // Fast path: whole-word shifts reduce to a limb-level copy.
    if shift_amount.is_multiple_of(bits_per_word) {
        let start_write_word = start_bit / bits_per_word;
        let start_read_word = (start_bit + shift_amount) / bits_per_word;
        let num_words_to_move = num_bits_to_move.div_ceil(bits_per_word);
        if start_read_word < self.bits.len() {
            let read_end = (start_read_word + num_words_to_move).min(self.bits.len());
            self.bits
                .copy_within(start_read_word..read_end, start_write_word);
        }
        return;
    }
    let shift_rem = shift_amount % bits_per_word;
    let inv_shift_rem = bits_per_word - shift_rem;
    let start_write_bit = start_bit;
    let end_write_bit = start_bit + num_bits_to_move;
    let start_write_word = start_write_bit / bits_per_word;
    let end_write_word = (end_write_bit - 1) / bits_per_word;
    // Walk destination limbs left-to-right; reads are always ahead of
    // writes, so processing in place is safe.
    for write_word_idx in start_write_word..=end_write_word {
        let read_bit = write_word_idx * bits_per_word + shift_rem;
        let read_word_idx = read_bit / bits_per_word;
        // Reads past the end of the buffer act as zero padding.
        let low_part = self.bits.get(read_word_idx).copied().unwrap_or(W::ZERO) >> shift_rem;
        let high_part =
            self.bits.get(read_word_idx + 1).copied().unwrap_or(W::ZERO) << inv_shift_rem;
        let value_to_write = low_part | high_part;
        // Restrict the write to the affected bit range in the first and
        // last destination limbs.
        let mut mask = W::max_value();
        if write_word_idx == start_write_word {
            mask &= W::max_value() << (start_write_bit % bits_per_word);
        }
        if write_word_idx == end_write_word {
            let end_offset = end_write_bit % bits_per_word;
            if end_offset != 0 {
                mask &= (W::ONE << end_offset).wrapping_sub(W::ONE);
            }
        }
        self.bits[write_word_idx] =
            (self.bits[write_word_idx] & !mask) | (value_to_write & mask);
    }
}
/// Moves `num_bits_to_move` bits starting at `start_bit` up by
/// `shift_amount` bits (toward higher positions), growing the buffer as
/// needed and zeroing the vacated gap. Used by `insert` to open space.
fn shift_bits_right(&mut self, start_bit: usize, shift_amount: usize, num_bits_to_move: usize) {
    if num_bits_to_move == 0 {
        return;
    }
    let bits_per_word = <W as Word>::BITS;
    let required_end_bit = start_bit + shift_amount + num_bits_to_move;
    let required_words = required_end_bit.div_ceil(bits_per_word);
    // Grow to data words + 1 padding word before any writes happen.
    let required_vec_len = required_words.saturating_add(1);
    if self.bits.len() < required_vec_len {
        self.bits.resize(required_vec_len, W::ZERO);
    }
    // Fast path: whole-word shifts reduce to a limb-level copy.
    if shift_amount.is_multiple_of(bits_per_word) {
        let start_read_word = start_bit / bits_per_word;
        let start_write_word = (start_bit + shift_amount) / bits_per_word;
        let num_words_to_move = num_bits_to_move.div_ceil(bits_per_word);
        if start_read_word + num_words_to_move <= self.bits.len() {
            self.bits.copy_within(
                start_read_word..start_read_word + num_words_to_move,
                start_write_word,
            );
        }
    } else {
        let word_shift = shift_amount / bits_per_word;
        let shift_rem = shift_amount % bits_per_word;
        let inv_shift_rem = bits_per_word - shift_rem;
        let start_write_bit = start_bit + shift_amount;
        let end_write_bit = start_write_bit + num_bits_to_move;
        let start_write_word = start_write_bit / bits_per_word;
        let end_write_word = (end_write_bit - 1) / bits_per_word;
        // Walk destination limbs right-to-left so reads (at lower indices)
        // are never clobbered before they happen.
        for write_word_idx in (start_write_word..=end_write_word).rev() {
            let read_word_idx = write_word_idx - word_shift;
            let high_part =
                self.bits.get(read_word_idx).copied().unwrap_or(W::ZERO) << shift_rem;
            // Bits spilling in from the previous limb (none for limb 0).
            let low_part = if read_word_idx > 0 {
                self.bits.get(read_word_idx - 1).copied().unwrap_or(W::ZERO) >> inv_shift_rem
            } else {
                W::ZERO
            };
            let value_to_write = low_part | high_part;
            // Restrict the write to the affected bit range in the first and
            // last destination limbs.
            let mut mask = W::max_value();
            if write_word_idx == start_write_word {
                mask &= W::max_value() << (start_write_bit % bits_per_word);
            }
            if write_word_idx == end_write_word {
                let end_offset = end_write_bit % bits_per_word;
                if end_offset != 0 {
                    mask &= (W::ONE << end_offset).wrapping_sub(W::ONE);
                }
            }
            self.bits[write_word_idx] =
                (self.bits[write_word_idx] & !mask) | (value_to_write & mask);
        }
    }
    // Zero the gap [start_bit, start_bit + shift_amount) vacated by the
    // shift, one (partial) limb at a time.
    let mut clear_bit = start_bit;
    let end_clear_bit = start_bit + shift_amount;
    while clear_bit < end_clear_bit {
        let word_idx = clear_bit / bits_per_word;
        let offset = clear_bit % bits_per_word;
        let bits_to_clear = (bits_per_word - offset).min(end_clear_bit - clear_bit);
        let mask = if bits_to_clear == bits_per_word {
            W::max_value()
        } else {
            ((W::ONE << bits_to_clear).wrapping_sub(W::ONE)) << offset
        };
        if word_idx < self.bits.len() {
            self.bits[word_idx] &= !mask;
        }
        clear_bit += bits_to_clear;
    }
}
/// Removes the element at `index` in O(1) by replacing it with the last
/// element. Does not preserve ordering.
///
/// # Panics
/// Panics if `index >= len`.
pub fn swap_remove(&mut self, index: usize) -> T {
    assert!(index < self.len, "swap_remove: index out of bounds");
    // Detach the last element first; popping only lowers `len`, leaving the
    // stored bits untouched.
    let last = self.pop().unwrap();
    if index == self.len {
        // The removed element *was* the last one.
        last
    } else {
        // SAFETY: index != old len - 1, so index < new len.
        let removed = unsafe { self.get_unchecked(index) };
        self.set(index, last);
        removed
    }
}
/// Appends `value`, returning `Error::ValueTooLarge` instead of panicking
/// when it does not fit in the configured bit width.
pub fn try_push(&mut self, value: T) -> Result<(), Error> {
    let word_bits = <W as traits::Word>::BITS;
    let value_w = <T as Storable<W>>::into_word(value);
    // A full-word width accepts every value; otherwise reject values at or
    // above 1 << bit_width (the shift is safe thanks to short-circuiting).
    let too_large =
        self.bit_width < word_bits && value_w >= (W::ONE << self.bit_width);
    if too_large {
        return Err(Error::ValueTooLarge {
            value: value_w.to_u128().unwrap(),
            index: self.len,
            bit_width: self.bit_width,
        });
    }
    self.push(value);
    Ok(())
}
/// Appends every element of `other`.
///
/// # Panics
/// Panics if any element does not fit in the configured bit width (the
/// whole slice is validated *before* anything is written).
pub fn extend_from_slice(&mut self, other: &[T]) {
    if other.is_empty() {
        return;
    }
    self.reserve(other.len());
    let bits_per_word = <W as traits::Word>::BITS;
    // First value that does not fit; irrelevant at full-word width.
    let limit = if self.bit_width < bits_per_word {
        W::ONE << self.bit_width
    } else {
        W::max_value()
    };
    // Validate up front so a panic cannot leave a half-written vector.
    if self.bit_width < bits_per_word {
        for (i, &value) in other.iter().enumerate() {
            let value_w = <T as Storable<W>>::into_word(value);
            if value_w >= limit {
                panic!(
                    "Value at index {} of slice ({:?}) does not fit in the configured bit_width of {}",
                    i, value_w, self.bit_width
                );
            }
        }
    }
    let old_len = self.len;
    let new_len = old_len + other.len();
    let required_total_bits = new_len * self.bit_width;
    let required_data_words = required_total_bits.div_ceil(bits_per_word);
    // Data words + 1 trailing padding word for the unaligned read paths.
    let required_vec_len = required_data_words.saturating_add(1);
    if self.bits.len() < required_vec_len {
        self.bits.resize(required_vec_len, W::ZERO);
    }
    for (i, &value) in other.iter().enumerate() {
        unsafe {
            self.set_unchecked(old_len + i, <T as Storable<W>>::into_word(value));
        }
    }
    self.len = new_len;
}
}
impl<T, W, E, B> FixedVec<T, W, E, B>
where
T: Storable<W>,
W: Word,
E: Endianness,
B: AsRef<[W]> + AsMut<[W]>,
{
/// Returns a mutable proxy to the element at `index`, or `None` when out of
/// bounds.
pub fn at_mut(&mut self, index: usize) -> Option<MutProxy<'_, T, W, E, B>> {
    match index < self.len {
        true => Some(MutProxy::new(self, index)),
        false => None,
    }
}
/// Returns mutable access to the raw limbs.
///
/// # Safety
/// Callers must not write bit patterns that break the packing invariants
/// (e.g. element values wider than `bit_width`).
pub unsafe fn as_mut_limbs(&mut self) -> &mut [W] {
    self.bits.as_mut()
}
/// Stores `value` at `index`.
///
/// # Panics
/// Panics when `index` is out of bounds or `value` does not fit in the
/// configured bit width.
pub fn set(&mut self, index: usize, value: T) {
    assert!(
        index < self.len,
        "Index out of bounds: expected index < {}, got {}",
        self.len,
        index
    );
    let word_bits = <W as traits::Word>::BITS;
    let value_w = <T as Storable<W>>::into_word(value);
    // At full-word width every value fits; otherwise reject anything at or
    // above 1 << bit_width (shift safe thanks to short-circuiting).
    if self.bit_width < word_bits && value_w >= (W::ONE << self.bit_width) {
        panic!(
            "Value {:?} does not fit in the configured bit_width of {}",
            value_w, self.bit_width
        );
    }
    // SAFETY: index bounds-checked by the assert above.
    unsafe { self.set_unchecked(index, value_w) };
}
/// Writes the already-converted word `value_w` into slot `index` without
/// bounds or range checks.
///
/// # Safety
/// `index` must address a slot whose bits lie inside the limb buffer, and
/// `value_w` must fit in `bit_width` bits (extra bits would corrupt
/// neighboring elements).
#[inline(always)]
pub unsafe fn set_unchecked(&mut self, index: usize, value_w: W) {
    let bits_per_word = <W as traits::Word>::BITS;
    // Fast path: full-word elements map 1:1 onto limbs; big-endian limbs
    // are kept byte-swapped in memory.
    if self.bit_width == bits_per_word {
        unsafe {
            *self.bits.as_mut().get_unchecked_mut(index) = if E::IS_LITTLE {
                value_w
            } else {
                value_w.to_be()
            };
        }
        return;
    }
    let bit_pos = index * self.bit_width;
    let word_index = bit_pos / bits_per_word;
    let bit_offset = bit_pos % bits_per_word;
    let limbs = self.bits.as_mut();
    if E::IS_LITTLE {
        if bit_offset + self.bit_width <= bits_per_word {
            // Element contained in a single limb: clear its slot, then OR
            // the new value in.
            let mut word = unsafe { *limbs.get_unchecked(word_index) };
            word &= !(self.mask << bit_offset);
            word |= value_w << bit_offset;
            unsafe { *limbs.get_unchecked_mut(word_index) = word };
        } else {
            // Element straddles two limbs; split_at_mut gives disjoint
            // mutable views of both.
            let remaining_bits_in_first_word = bits_per_word - bit_offset;
            let (left, right) = unsafe { limbs.split_at_mut_unchecked(word_index + 1) };
            let mut low_word_val = unsafe { *left.get_unchecked(word_index) };
            // Keep only the bits below the element in the first limb.
            let low_mask = (<W as num_traits::NumCast>::from(1u8).unwrap() << bit_offset)
                .wrapping_sub(<W as num_traits::NumCast>::from(1u8).unwrap());
            low_word_val &= low_mask;
            low_word_val |= value_w << bit_offset;
            unsafe { *left.get_unchecked_mut(word_index) = low_word_val };
            // Second limb receives the element's high bits at position 0.
            let mut high_word_val = unsafe { *right.get_unchecked(0) };
            high_word_val &= !(self.mask >> remaining_bits_in_first_word);
            high_word_val |= value_w >> remaining_bits_in_first_word;
            unsafe { *right.get_unchecked_mut(0) = high_word_val };
        }
    } else {
        if bit_offset + self.bit_width <= bits_per_word {
            // Big-endian, single limb: place the element left-aligned after
            // `bit_offset`, operating on byte-swapped limb images.
            let shift = bits_per_word - self.bit_width - bit_offset;
            let mask = self.mask << shift;
            let word = unsafe { limbs.get_unchecked_mut(word_index) };
            *word &= !mask.to_be();
            *word |= (value_w << shift).to_be();
        } else {
            // Big-endian, straddling two limbs.
            let (left, right) = unsafe { limbs.split_at_mut_unchecked(word_index + 1) };
            let high_word = unsafe { left.get_unchecked_mut(word_index) };
            let low_word = unsafe { right.get_unchecked_mut(0) };
            let bits_in_first = bits_per_word - bit_offset;
            let bits_in_second = self.bit_width - bits_in_first;
            // Top `bits_in_first` bits of the value go at the end of the
            // first limb...
            let high_mask =
                (self.mask >> bits_in_second) << (bits_per_word - bits_in_first - bit_offset);
            let high_value = value_w >> bits_in_second;
            *high_word &= !high_mask.to_be();
            *high_word |= (high_value << (bits_per_word - bits_in_first - bit_offset)).to_be();
            // ...and the remaining `bits_in_second` bits at the start of the
            // second limb.
            let low_mask = self.mask << (bits_per_word - bits_in_second);
            let low_value = value_w << (bits_per_word - bits_in_second);
            *low_word &= !low_mask.to_be();
            *low_word |= low_value.to_be();
        }
    }
}
/// Stores `value` at `index`, returning `Error::ValueTooLarge` instead of
/// panicking when it does not fit.
///
/// # Panics
/// Still panics when `index` is out of bounds.
pub fn try_set(&mut self, index: usize, value: T) -> Result<(), Error> {
    assert!(index < self.len, "try_set: index out of bounds");
    let word_bits = <W as traits::Word>::BITS;
    let value_w = <T as Storable<W>>::into_word(value);
    // Same range rule as `set`: full-word widths accept every value.
    let too_large =
        self.bit_width < word_bits && value_w >= (W::ONE << self.bit_width);
    if too_large {
        return Err(Error::ValueTooLarge {
            value: value_w.to_u128().unwrap(),
            index,
            bit_width: self.bit_width,
        });
    }
    // SAFETY: index bounds-checked by the assert above.
    unsafe { self.set_unchecked(index, value_w) };
    Ok(())
}
/// Returns an iterator over mutable, non-overlapping chunks of
/// `chunk_size` elements.
pub fn chunks_mut(&mut self, chunk_size: usize) -> iter_mut::ChunksMut<'_, T, W, E, B> {
    iter_mut::ChunksMut::new(self, chunk_size)
}
/// Returns a mutable iterator that skips per-element bounds checks.
///
/// # Safety
/// See `iter_mut::IterMutUnchecked` for the contract the caller must uphold.
pub unsafe fn iter_mut_unchecked(&mut self) -> iter_mut::IterMutUnchecked<'_, T, W, E, B> {
    iter_mut::IterMutUnchecked::new(self)
}
/// Applies `f` to every element without range-checking the results.
///
/// # Safety
/// `f` must return values that fit in `bit_width` bits; wider values would
/// corrupt neighboring elements.
pub unsafe fn map_in_place_unchecked<F>(&mut self, mut f: F)
where
    F: FnMut(T) -> T,
{
    if E::IS_LITTLE {
        // Little-endian layouts can use the word-streaming fast path below;
        // lift `f` to operate on raw words.
        let mut word_op = |word: W| -> W {
            let val_t = <T as Storable<W>>::from_word(word);
            let new_val_t = f(val_t);
            <T as Storable<W>>::into_word(new_val_t)
        };
        unsafe { self.map_in_place_generic_word_op(&mut word_op) };
    } else {
        // Big-endian: plain element-by-element get/set.
        for i in 0..self.len {
            let old_val_t = unsafe { self.get_unchecked(i) };
            let new_val_t = f(old_val_t);
            unsafe { self.set_unchecked(i, <T as Storable<W>>::into_word(new_val_t)) };
        }
    }
}
/// Little-endian streaming rewrite: applies `f_w` to every packed element
/// while walking the limb buffer once.
///
/// # Safety
/// `f_w` must return values fitting in `bit_width` bits.
unsafe fn map_in_place_generic_word_op<FW>(&mut self, f_w: &mut FW)
where
    FW: FnMut(W) -> W,
{
    let bit_width = self.bit_width;
    if self.len == 0 || bit_width == 0 {
        return;
    }
    let bits_per_word = <W as Word>::BITS;
    // Fast path: power-of-two widths dividing the word size mean no element
    // ever straddles a limb boundary.
    if bit_width.is_power_of_two() && bits_per_word % bit_width == 0 {
        let elems_per_word = bits_per_word / bit_width;
        let mask = self.mask;
        let num_full_words = self.len / elems_per_word;
        for word_idx in 0..num_full_words {
            let old_word = unsafe { *self.bits.as_ref().get_unchecked(word_idx) };
            let mut new_word = W::ZERO;
            // Rebuild the limb element by element.
            for i in 0..elems_per_word {
                let shift = i * bit_width;
                let old_val_w = (old_word >> shift) & mask;
                new_word |= f_w(old_val_w) << shift;
            }
            unsafe { *self.bits.as_mut().get_unchecked_mut(word_idx) = new_word };
        }
        // Trailing elements in a partially-filled last limb.
        let start_idx = num_full_words * elems_per_word;
        for i in start_idx..self.len {
            let old_val_t = self.get(i).unwrap();
            let old_val_w = <T as Storable<W>>::into_word(old_val_t);
            unsafe { self.set_unchecked(i, f_w(old_val_w)) };
        }
        return;
    }
    // General path: stream limbs with a read buffer one limb ahead of the
    // write buffer so straddling elements can be reassembled.
    let limbs = self.bits.as_mut();
    let num_words = (self.len * bit_width).div_ceil(bits_per_word);
    let last_word_idx = num_words.saturating_sub(1);
    let mut write_buffer = W::ZERO;
    let mut read_buffer = unsafe { *limbs.get_unchecked(0) };
    let mut global_bit_offset = 0;
    for word_idx in 0..last_word_idx {
        let lower_word_boundary = word_idx * bits_per_word;
        let upper_word_boundary = lower_word_boundary + bits_per_word;
        // Elements fully contained in the current limb.
        while global_bit_offset + bit_width <= upper_word_boundary {
            let offset_in_word = global_bit_offset - lower_word_boundary;
            let element = (read_buffer >> offset_in_word) & self.mask;
            write_buffer |= f_w(element) << offset_in_word;
            global_bit_offset += bit_width;
        }
        let next_word = unsafe { *limbs.get_unchecked(word_idx + 1) };
        let mut new_write_buffer = W::ZERO;
        // Element straddling into the next limb (if any).
        if upper_word_boundary != global_bit_offset {
            let elem_idx = global_bit_offset / bit_width;
            // Past the logical end: flush what we have and stop.
            if elem_idx >= self.len {
                unsafe { *limbs.get_unchecked_mut(word_idx) = write_buffer };
                return;
            }
            let remainder_in_word = upper_word_boundary - global_bit_offset;
            let offset_in_word = global_bit_offset - lower_word_boundary;
            let element = ((read_buffer >> offset_in_word) | (next_word << remainder_in_word))
                & self.mask;
            let new_element = f_w(element);
            // Low part completes the current limb; high part seeds the next
            // write buffer.
            write_buffer |= new_element << offset_in_word;
            new_write_buffer = new_element >> remainder_in_word;
            global_bit_offset += bit_width;
        }
        unsafe { *limbs.get_unchecked_mut(word_idx) = write_buffer };
        read_buffer = next_word;
        write_buffer = new_write_buffer;
    }
    // Remaining elements, all inside the final limb.
    let lower_word_boundary = last_word_idx * bits_per_word;
    while global_bit_offset < self.len * bit_width {
        let offset_in_word = global_bit_offset - lower_word_boundary;
        let element = (read_buffer >> offset_in_word) & self.mask;
        write_buffer |= f_w(element) << offset_in_word;
        global_bit_offset += bit_width;
    }
    unsafe { *limbs.get_unchecked_mut(last_word_idx) = write_buffer };
}
/// Applies `f` to every element in place.
///
/// # Panics
/// Panics if `f` returns a value that does not fit in the configured bit
/// width.
pub fn map_in_place<F>(&mut self, mut f: F)
where
    F: FnMut(T) -> T,
{
    let bit_width = self.bit_width;
    // First value that does not fit; irrelevant at full-word width.
    let limit = if bit_width < <W as Word>::BITS {
        W::ONE << bit_width
    } else {
        W::max_value()
    };
    // Wrap the user closure with a range check so the unchecked fast path
    // can never store an overflowing value.
    let safe_f = |value: T| {
        let new_value = f(value);
        let new_value_w = <T as Storable<W>>::into_word(new_value);
        if bit_width < <W as Word>::BITS && new_value_w >= limit {
            panic!(
                "map_in_place: returned value {new_value_w:?} does not fit in bit_width {bit_width}"
            );
        }
        new_value
    };
    // SAFETY: safe_f panics before any out-of-range value reaches storage.
    unsafe {
        self.map_in_place_unchecked(safe_f);
    }
}
/// Stores `value` at `index` and returns the previous element.
///
/// # Panics
/// Panics on out-of-bounds `index` or a value that does not fit.
pub fn replace(&mut self, index: usize, value: T) -> T {
    assert!(index < self.len, "replace: index out of bounds");
    // SAFETY: index bounds-checked by the assert above.
    let previous = unsafe { self.get_unchecked(index) };
    self.set(index, value);
    previous
}
/// Swaps the elements at positions `a` and `b`.
///
/// # Panics
/// Panics if either index is out of bounds.
pub fn swap(&mut self, a: usize, b: usize) {
    assert!(a < self.len, "swap: index a out of bounds");
    assert!(b < self.len, "swap: index b out of bounds");
    if a != b {
        // SAFETY: both indices bounds-checked above; values being swapped
        // already fit in bit_width bits.
        unsafe {
            let value_a = self.get_unchecked(a);
            let value_b = self.get_unchecked(b);
            self.set_unchecked(a, <T as Storable<W>>::into_word(value_b));
            self.set_unchecked(b, <T as Storable<W>>::into_word(value_a));
        }
    }
}
/// Returns a mutable proxy to the first element, or `None` when empty.
pub fn first_mut(&mut self) -> Option<MutProxy<'_, T, W, E, B>> {
    // `at_mut` already yields `None` for an empty vector.
    self.at_mut(0)
}
/// Returns a mutable proxy to the last element, or `None` when empty.
pub fn last_mut(&mut self) -> Option<MutProxy<'_, T, W, E, B>> {
    let last = self.len().checked_sub(1)?;
    self.at_mut(last)
}
/// Returns the first element, or `None` when empty.
pub fn first(&self) -> Option<T> {
    self.get(0)
}
/// Returns the last element, or `None` when empty.
pub fn last(&self) -> Option<T> {
    let last = self.len().checked_sub(1)?;
    self.get(last)
}
/// Splits the vector into two mutable element views at `mid`.
///
/// # Panics
/// Panics if `mid > len`.
pub fn split_at_mut(
    &mut self,
    mid: usize,
) -> (
    slice::FixedVecSlice<&mut Self>,
    slice::FixedVecSlice<&mut Self>,
) {
    assert!(mid <= self.len, "mid > len in split_at_mut");
    // NOTE(review): both halves alias `*self` through a raw pointer; this
    // is sound only if `FixedVecSlice` confines every access to its own
    // range — confirm that invariant in the `slice` module.
    unsafe {
        let ptr = self as *mut Self;
        let left = slice::FixedVecSlice::new(&mut *ptr, 0..mid);
        let right = slice::FixedVecSlice::new(&mut *ptr, mid..self.len());
        (left, right)
    }
}
/// Returns a checked iterator yielding mutable proxies.
pub fn iter_mut(&mut self) -> iter_mut::IterMut<'_, T, W, E, B> {
    iter_mut::IterMut::new(self)
}
/// Rotates the vector left so the element at `mid` becomes the first.
///
/// # Panics
/// Panics if `mid > len`.
pub fn rotate_left(&mut self, mid: usize) {
    assert!(mid <= self.len, "mid > len in rotate_left");
    if mid == 0 || mid == self.len || self.is_empty() {
        return;
    }
    // Stash the first `mid` elements, slide the remainder down, then append
    // the stash at the tail.
    // SAFETY: all indices below are < self.len.
    let head: Vec<T> = (0..mid).map(|i| unsafe { self.get_unchecked(i) }).collect();
    for i in mid..self.len {
        let moved = unsafe { self.get_unchecked(i) };
        unsafe { self.set_unchecked(i - mid, <T as Storable<W>>::into_word(moved)) };
    }
    let tail_start = self.len - mid;
    for (offset, moved) in head.into_iter().enumerate() {
        unsafe { self.set_unchecked(tail_start + offset, <T as Storable<W>>::into_word(moved)) };
    }
}
/// Rotates the vector right by `k` positions.
///
/// # Panics
/// Panics if `k > len`.
pub fn rotate_right(&mut self, k: usize) {
    assert!(k <= self.len, "k > len in rotate_right");
    if k == 0 || k == self.len || self.is_empty() {
        return;
    }
    // A right rotation by k is a left rotation by len - k.
    self.rotate_left(self.len - k);
}
/// Sets every element to `value`.
///
/// # Panics
/// Panics if `value` does not fit in the configured bit width.
pub fn fill(&mut self, value: T) {
    let word_bits = <W as traits::Word>::BITS;
    let value_w = <T as Storable<W>>::into_word(value);
    // Full-word widths accept everything; otherwise reject values at or
    // above 1 << bit_width (shift safe thanks to short-circuiting).
    if self.bit_width < word_bits && value_w >= (W::ONE << self.bit_width) {
        panic!(
            "Value {:?} does not fit in the configured bit_width of {}",
            value_w, self.bit_width
        );
    }
    // SAFETY: i ranges over 0..len and value_w fits in bit_width bits.
    (0..self.len()).for_each(|i| unsafe { self.set_unchecked(i, value_w) });
}
/// Fills the vector with values produced by repeatedly calling `f`, one
/// call per element, in index order.
///
/// # Panics
///
/// Panics if any produced value needs more than the configured `bit_width`
/// bits (only possible when the width is narrower than a whole word).
pub fn fill_with<F>(&mut self, mut f: F)
where
    F: FnMut() -> T,
{
    let word_bits = <W as traits::Word>::BITS;
    let narrow = self.bit_width < word_bits;
    // Exclusive upper bound for encoded values; hoisted out of the loop.
    let limit = if narrow {
        W::ONE << self.bit_width
    } else {
        W::max_value()
    };
    for idx in 0..self.len() {
        let encoded = <T as Storable<W>>::into_word(f());
        if narrow && encoded >= limit {
            panic!(
                "Value {:?} returned by closure does not fit in bit_width {}",
                encoded, self.bit_width
            );
        }
        // SAFETY: idx < len and the encoded value was validated above.
        unsafe { self.set_unchecked(idx, encoded) };
    }
}
/// Copies `src[src_range]` into `self` starting at `dest_index`.
///
/// Both vectors must share the same `bit_width`. When both regions start
/// exactly on a word boundary a bulk word copy is used; otherwise the
/// elements are decoded and re-encoded one at a time.
///
/// # Panics
///
/// Panics if the bit widths differ, if `src_range` is reversed or out of
/// bounds for `src`, or if the destination range exceeds `self.len()`.
pub fn copy_from_slice(
    &mut self,
    src: &Self,
    src_range: std::ops::Range<usize>,
    dest_index: usize,
) {
    assert_eq!(
        self.bit_width, src.bit_width,
        "bit_width mismatch in copy_from_slice"
    );
    assert!(src_range.start <= src_range.end, "source range start > end");
    assert!(src_range.end <= src.len(), "source range out of bounds");
    let len = src_range.len();
    assert!(
        dest_index + len <= self.len(),
        "destination range out of bounds"
    );
    if len == 0 {
        return;
    }
    let bit_width = self.bit_width;
    let bits_per_word = <W as Word>::BITS;
    let src_bit_offset = (src_range.start * bit_width) % bits_per_word;
    let dest_bit_offset = (dest_index * bit_width) % bits_per_word;
    // Bulk word copy is only correct when BOTH regions start exactly on a
    // word boundary. With a shared non-zero offset the first destination
    // word also holds bits of elements before `dest_index`, and a word
    // count based purely on `total_bits` ignores the offset — copying
    // whole words would clobber those preceding elements and truncate the
    // tail of the copy. (Previously this path was taken whenever the two
    // offsets were merely equal, corrupting data for non-zero offsets.)
    if src_bit_offset == 0 && dest_bit_offset == 0 {
        let src_word_start = (src_range.start * bit_width) / bits_per_word;
        let dest_word_start = (dest_index * bit_width) / bits_per_word;
        let total_bits_to_copy = len * bit_width;
        let num_words_to_copy = total_bits_to_copy.div_ceil(bits_per_word);
        let src_words = &src.bits.as_ref()[src_word_start..src_word_start + num_words_to_copy];
        let dest_words =
            &mut self.bits.as_mut()[dest_word_start..dest_word_start + num_words_to_copy];
        let residual_bits = total_bits_to_copy % bits_per_word;
        if residual_bits == 0 {
            // The copy covers whole words exactly.
            dest_words.copy_from_slice(src_words);
        } else {
            // Copy the full words, then splice only the low `residual_bits`
            // of the final word so the trailing bits already in `self` are
            // preserved.
            // NOTE(review): the residual mask assumes LSB-first packing
            // within a word — confirm this holds for the BE endianness
            // parameter as well.
            if num_words_to_copy > 1 {
                dest_words[..num_words_to_copy - 1]
                    .copy_from_slice(&src_words[..num_words_to_copy - 1]);
            }
            let last_word_mask = (W::ONE << residual_bits).wrapping_sub(W::ONE);
            let dest_last_word = &mut dest_words[num_words_to_copy - 1];
            let src_last_word = &src_words[num_words_to_copy - 1];
            *dest_last_word =
                (*dest_last_word & !last_word_mask) | (*src_last_word & last_word_mask);
        }
    } else {
        // Element-wise fallback. `&mut self` and `&src` cannot alias in safe
        // code, but copy backwards when the destination starts inside the
        // source range so storage shared through unsafe construction is
        // still handled correctly.
        if dest_index > src_range.start && dest_index < src_range.end {
            for i in (0..len).rev() {
                // SAFETY: indices were validated by the asserts above.
                let val = unsafe { src.get_unchecked(src_range.start + i) };
                unsafe {
                    self.set_unchecked(dest_index + i, <T as Storable<W>>::into_word(val))
                };
            }
        } else {
            for i in 0..len {
                // SAFETY: indices were validated by the asserts above.
                let val = unsafe { src.get_unchecked(src_range.start + i) };
                unsafe {
                    self.set_unchecked(dest_index + i, <T as Storable<W>>::into_word(val))
                };
            }
        }
    }
}
}
/// Two `FixedVec`s compare equal when they have the same length, the same
/// bit width, and identical backing limbs.
impl<T, W, E, B, B2> PartialEq<FixedVec<T, W, E, B2>> for FixedVec<T, W, E, B>
where
    T: Storable<W> + PartialEq,
    W: Word,
    E: Endianness,
    B: AsRef<[W]>,
    B2: AsRef<[W]>,
{
    fn eq(&self, other: &FixedVec<T, W, E, B2>) -> bool {
        self.len() == other.len()
            && self.bit_width() == other.bit_width()
            && self.as_limbs() == other.as_limbs()
    }
}
/// Element-wise comparison of a `FixedVec` against a plain slice.
impl<T, W, E, B, T2> PartialEq<&[T2]> for FixedVec<T, W, E, B>
where
    T: Storable<W> + PartialEq<T2>,
    W: Word,
    E: Endianness,
    B: AsRef<[W]>,
    T2: Clone,
{
    fn eq(&self, other: &&[T2]) -> bool {
        self.len() == other.len()
            && self.iter().zip(other.iter()).all(|(a, b)| a == *b)
    }
}
impl<T, W, E> Extend<T> for FixedVec<T, W, E, Vec<W>>
where
    T: Storable<W> + ToPrimitive,
    W: Word,
    E: Endianness,
{
    /// Appends every item yielded by `iter`, reserving capacity up front
    /// from the iterator's lower size-hint bound to avoid repeated growth.
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        let items = iter.into_iter();
        self.reserve(items.size_hint().0);
        items.for_each(|item| self.push(item));
    }
}
impl<T, W, E> From<FixedVec<T, W, E, Vec<W>>> for FixedVec<T, W, E, Box<[W]>>
where
T: Storable<W>,
W: Word,
E: Endianness,
{
fn from(vec: FixedVec<T, W, E, Vec<W>>) -> Self {
unsafe { Self::new_unchecked(vec.bits.into_boxed_slice(), vec.len, vec.bit_width) }
}
}
impl<T, W, E> From<FixedVec<T, W, E, Box<[W]>>> for FixedVec<T, W, E, Vec<W>>
where
T: Storable<W>,
W: Word,
E: Endianness,
{
fn from(vec: FixedVec<T, W, E, Box<[W]>>) -> Self {
unsafe { Self::new_unchecked(vec.bits.into_vec(), vec.len, vec.bit_width) }
}
}
impl<'a, T> TryFrom<&'a [T]> for FixedVec<T, <T as DefaultParams>::W, <T as DefaultParams>::E>
where
    T: Storable<<T as DefaultParams>::W> + DefaultParams,
    <T as DefaultParams>::W: Word,
    <T as DefaultParams>::E: Endianness,
    dsi_bitstream::impls::BufBitWriter<
        <T as DefaultParams>::E,
        dsi_bitstream::impls::MemWordWriterVec<
            <T as DefaultParams>::W,
            Vec<<T as DefaultParams>::W>,
        >,
    >: dsi_bitstream::prelude::BitWrite<<T as DefaultParams>::E, Error = std::convert::Infallible>,
{
    type Error = Error;

    /// Builds a vector from `slice` using the minimal bit width that can
    /// represent all of its values.
    fn try_from(slice: &'a [T]) -> Result<Self, Self::Error> {
        let builder = Self::builder().bit_width(BitWidth::Minimal);
        builder.build(slice)
    }
}