use crate::ambassador_impl_Index;
use crate::bits::{assert_unaligned, debug_assert_unaligned, test_unaligned};
use crate::traits::ambassador_impl_Backend;
use crate::traits::ambassador_impl_BitLength;
use crate::traits::{
AtomicBitIter, AtomicBitVecOps, Backend, BitIter, BitVecOps, BitVecValueOps, Word,
};
use crate::utils::SelectInWord;
use crate::{
traits::{bit_vec_ops::BitLength, rank_sel::*},
utils::{
CannotCastToAtomicError, transmute_boxed_slice_from_atomic,
transmute_boxed_slice_into_atomic, transmute_vec_from_atomic, transmute_vec_into_atomic,
},
};
use ambassador::Delegate;
use atomic_primitive::{Atomic, AtomicPrimitive, PrimitiveAtomic, PrimitiveAtomicUnsigned};
#[allow(unused_imports)] use core::borrow::BorrowMut;
use core::fmt;
use mem_dbg::*;
use num_primitive::PrimitiveInteger;
use std::mem::size_of;
use std::{ops::Index, sync::atomic::Ordering};
/// A bit vector over a word-storage backend `B` (default: `Vec<usize>`).
///
/// Bit `i` lives in word `i / W::BITS`, at bit position `i % W::BITS`
/// (least-significant bit first), as used by `push` and
/// `get_value_unchecked` below.
#[derive(Debug, Clone, MemSize, MemDbg, Delegate)]
#[cfg_attr(feature = "epserde", derive(epserde::Epserde))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[delegate(crate::traits::Backend, target = "bits")]
pub struct BitVec<B = Vec<usize>> {
    // Word storage; the constructors (see `with_value`) keep the unused
    // high bits of the last word at zero.
    bits: B,
    // Number of logically valid bits.
    len: usize,
}
/// Convenience constructor macro for [`BitVec`], mirroring `vec!` syntax.
///
/// An optional leading `$W:ty:` selects the word type; otherwise `usize`
/// is used. Supported forms:
/// - `bit_vec![]` — empty vector;
/// - `bit_vec![false; n]` / `bit_vec![0; n]` — `n` zero bits;
/// - `bit_vec![true; n]` / `bit_vec![1; n]` — `n` one bits;
/// - `bit_vec![a, b, c]` — one bit per expression, set iff the value
///   compares not-equal to `0`.
#[macro_export]
macro_rules! bit_vec {
    // `bit_vec![W]`: empty vector with word type `W`.
    ($W:ty) => {
        $crate::bits::BitVec::<Vec<$W>>::new(0)
    };
    // `bit_vec![W: false; n]` / `bit_vec![W: 0; n]`: `n` zero bits.
    ($W:ty: false; $n:expr) => {
        $crate::bits::BitVec::<Vec<$W>>::new($n)
    };
    ($W:ty: 0; $n:expr) => {
        $crate::bits::BitVec::<Vec<$W>>::new($n)
    };
    // `bit_vec![W: true; n]` / `bit_vec![W: 1; n]`: `n` one bits.
    ($W:ty: true; $n:expr) => {
        {
            $crate::bits::BitVec::<Vec<$W>>::with_value($n, true)
        }
    };
    ($W:ty: 1; $n:expr) => {
        {
            $crate::bits::BitVec::<Vec<$W>>::with_value($n, true)
        }
    };
    // `bit_vec![W: a, b, c]`: explicit list; each element is pushed as
    // `element != 0`.
    ($W:ty: $($x:expr),+ $(,)?) => {
        {
            let mut b = $crate::bits::BitVec::<Vec<$W>>::with_capacity([$($x),+].len());
            $( b.push($x != 0); )*
            b
        }
    };
    // Same forms with the default `usize` word type.
    () => {
        $crate::bits::BitVec::<Vec<usize>>::new(0)
    };
    (false; $n:expr) => {
        $crate::bits::BitVec::<Vec<usize>>::new($n)
    };
    (0; $n:expr) => {
        $crate::bits::BitVec::<Vec<usize>>::new($n)
    };
    (true; $n:expr) => {
        {
            $crate::bits::BitVec::<Vec<usize>>::with_value($n, true)
        }
    };
    (1; $n:expr) => {
        {
            $crate::bits::BitVec::<Vec<usize>>::with_value($n, true)
        }
    };
    ($($x:expr),+ $(,)?) => {
        {
            let mut b = $crate::bits::BitVec::<Vec<usize>>::with_capacity([$($x),+].len());
            $( b.push($x != 0); )*
            b
        }
    };
}
impl<B> BitVec<B> {
    /// Returns the number of bits in the vector.
    #[inline(always)]
    pub const fn len(&self) -> usize {
        self.len
    }
    /// Builds a `BitVec` directly from a backend and a bit length.
    ///
    /// # Safety
    /// `bits` must provide storage for at least `len` bits; no check is
    /// performed here.
    #[inline(always)]
    #[must_use]
    pub const unsafe fn from_raw_parts(bits: B, len: usize) -> Self {
        Self { bits, len }
    }
    /// Decomposes the vector into its backend and bit length.
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize) {
        (self.bits, self.len)
    }
    /// Maps the backend through `f`, keeping the bit length.
    ///
    /// # Safety
    /// `f` must return a backend that still provides storage for (and,
    /// for meaningful results, the same content as) the original `len`
    /// bits; this is not verified.
    #[inline(always)]
    pub unsafe fn map<B2>(self, f: impl FnOnce(B) -> B2) -> BitVec<B2> {
        BitVec {
            bits: f(self.bits),
            len: self.len,
        }
    }
}
impl<W: Word> BitVec<Vec<W>> {
    /// Creates a bit vector of `len` zero bits.
    #[must_use]
    pub fn new(len: usize) -> Self {
        Self::with_value(len, false)
    }
    /// Creates a bit vector of `len` bits, all set to `value`.
    ///
    /// The storage is canonical: exactly `len.div_ceil(W::BITS)` words,
    /// with the unused high bits of the last word zeroed. `append`,
    /// `append_value` and `push` rely on this invariant to compute word
    /// positions from `len`.
    #[must_use]
    pub fn with_value(len: usize, value: bool) -> Self {
        let bits_per_word = W::BITS as usize;
        let n_of_words = len.div_ceil(bits_per_word);
        let extra_bits = (n_of_words * bits_per_word) - len;
        let word_value = if value { !W::ZERO } else { W::ZERO };
        let mut bits = vec![word_value; n_of_words];
        if extra_bits > 0 {
            // Zero the unused high bits of the last word.
            let last_word_value = word_value >> extra_bits;
            bits[n_of_words - 1] = last_word_value;
        }
        Self { bits, len }
    }
    /// Creates an empty bit vector with room for `capacity` bits before
    /// the backing vector needs to reallocate.
    #[must_use]
    pub fn with_capacity(capacity: usize) -> Self {
        let bits_per_word = W::BITS as usize;
        let n_of_words = capacity.div_ceil(bits_per_word);
        Self {
            bits: Vec::with_capacity(n_of_words),
            len: 0,
        }
    }
    /// Returns the number of bits the vector can hold without reallocating.
    pub fn capacity(&self) -> usize {
        self.bits.capacity() * W::BITS as usize
    }
    /// Appends a single bit.
    pub fn push(&mut self, b: bool) {
        let bits_per_word = W::BITS as usize;
        // All words are full: open a fresh zero word.
        if self.bits.len() * bits_per_word == self.len {
            self.bits.push(W::ZERO);
        }
        let word_index = self.len / bits_per_word;
        let bit_index = self.len % bits_per_word;
        // Clear-then-set so the target bit ends up exactly `b`.
        self.bits[word_index] &= !(W::ONE << bit_index);
        if b {
            self.bits[word_index] |= W::ONE << bit_index;
        }
        self.len += 1;
    }
    /// Appends the low `width` bits of `value`.
    ///
    /// # Panics
    /// Panics if `width > W::BITS`.
    pub fn append_value(&mut self, value: W, width: usize) {
        assert!(
            width <= W::BITS as usize,
            "width {} must be at most W::BITS ({})",
            width,
            W::BITS
        );
        if width == 0 {
            return;
        }
        let bits_per_word = W::BITS as usize;
        // Mask `value` down to its low `width` bits.
        let l = bits_per_word - width;
        let value = (value << l) >> l;
        let new_len = self.len + width;
        let needed_words = new_len.div_ceil(bits_per_word);
        self.bits.resize(needed_words, W::ZERO);
        let word_idx = self.len / bits_per_word;
        let bit_idx = self.len % bits_per_word;
        self.bits[word_idx] |= value << bit_idx;
        if bit_idx + width > bits_per_word {
            // Value straddles a word boundary: spill the high part.
            // `wrapping_shr` masks the shift, so this is `value >> (BITS - bit_idx)`.
            self.bits[word_idx + 1] = value.wrapping_shr(bit_idx.wrapping_neg() as u32);
        }
        self.len = new_len;
    }
    /// Removes and returns the last bit, or `None` if the vector is empty.
    pub fn pop(&mut self) -> Option<bool> {
        if self.len == 0 {
            return None;
        }
        let bits_per_word = W::BITS as usize;
        let last_pos = self.len - 1;
        let result = unsafe { BitVecOps::<W>::get_unchecked(self, last_pos) };
        // Clear the popped bit and drop a now-unneeded trailing word so the
        // storage stays canonical (exactly `len.div_ceil(BITS)` words, zero
        // bits beyond `len`); `append` and `append_value` OR into / index
        // the last word assuming exactly that layout.
        self.bits[last_pos / bits_per_word] &= !(W::ONE << (last_pos % bits_per_word));
        self.len = last_pos;
        self.bits.truncate(self.len.div_ceil(bits_per_word));
        Some(result)
    }
    /// Reserves storage for at least `additional` more bits.
    pub fn reserve(&mut self, additional: usize) {
        let needed_words = (self.len + additional).div_ceil(W::BITS as usize);
        self.bits
            .reserve(needed_words.saturating_sub(self.bits.len()));
    }
    /// Reserves the minimum storage for exactly `additional` more bits.
    pub fn reserve_exact(&mut self, additional: usize) {
        let needed_words = (self.len + additional).div_ceil(W::BITS as usize);
        self.bits
            .reserve_exact(needed_words.saturating_sub(self.bits.len()));
    }
    /// Appends all bits of `other`, preserving their order.
    pub fn append<B2: AsRef<[W]>>(&mut self, other: &BitVec<B2>) {
        let other_len = other.len;
        if other_len == 0 {
            return;
        }
        let bpw = W::BITS as usize;
        let offset = self.len % bpw;
        let src: &[W] = other.bits.as_ref();
        let src_words = other_len.div_ceil(bpw);
        let new_total = self.len + other_len;
        let new_word_count = new_total.div_ceil(bpw);
        if offset == 0 {
            // Word-aligned: bulk-copy the source words.
            self.bits.extend_from_slice(&src[..src_words]);
        } else {
            self.bits.reserve(new_word_count - self.bits.len());
            // Splice the low part of src[0] into the current partial word,
            // then shift-and-merge the remaining words.
            let last_idx = self.bits.len() - 1;
            self.bits[last_idx] |= src[0] << offset;
            let shift_right = bpw - offset;
            for i in 1..src_words {
                self.bits
                    .push((src[i - 1] >> shift_right) | (src[i] << offset));
            }
            if new_word_count > self.bits.len() {
                // The tail of the last source word needs one more word.
                self.bits.push(src[src_words - 1] >> shift_right);
            }
        }
        self.len = new_total;
    }
    /// Resizes to `new_len` bits, filling any new bits with `value`.
    ///
    /// Both growing and shrinking keep the storage canonical (exactly
    /// `new_len.div_ceil(W::BITS)` words, zero bits beyond `new_len`),
    /// which `append` and `append_value` depend on.
    pub fn resize(&mut self, new_len: usize, value: bool) {
        let bits_per_word = W::BITS as usize;
        if new_len > self.len {
            let old_len = self.len;
            let old_word = old_len / bits_per_word;
            let old_bit = old_len % bits_per_word;
            let word_value = if value { !W::ZERO } else { W::ZERO };
            self.bits
                .resize(new_len.div_ceil(bits_per_word), word_value);
            if old_bit != 0 {
                // Splice the fill value into the upper part of the old
                // partial word, preserving the bits below `old_len`.
                let mask = !W::ZERO << old_bit;
                self.bits[old_word] = (self.bits[old_word] & !mask) | (word_value & mask);
                self.bits[old_word + 1..].fill(word_value);
            } else {
                self.bits[old_word..].fill(word_value);
            }
            // When filling with ones, zero the unused high bits of the new
            // last word, matching `with_value`.
            let residual = new_len % bits_per_word;
            if value && residual != 0 {
                let last = self.bits.len() - 1;
                self.bits[last] &= !W::ZERO >> (bits_per_word - residual);
            }
        } else if new_len < self.len {
            // Drop excess words and zero the now-unused bits of the last
            // word; leaving them in place would corrupt later `append`s.
            self.bits.truncate(new_len.div_ceil(bits_per_word));
            let residual = new_len % bits_per_word;
            if residual != 0 {
                let last = self.bits.len() - 1;
                self.bits[last] &= !W::ZERO >> (bits_per_word - residual);
            }
        }
        self.len = new_len;
    }
    /// Converts into a boxed-slice vector carrying at least one extra
    /// padding word (as required for unaligned reads; see `BitVecU`).
    pub fn into_padded(mut self) -> BitVec<Box<[W]>> {
        let needed = self.len.div_ceil(W::BITS as usize);
        if self.bits.len() <= needed {
            self.bits.push(W::ZERO);
        }
        // SAFETY: content and length are unchanged; only padding was added.
        unsafe { BitVec::from_raw_parts(self.bits.into_boxed_slice(), self.len) }
    }
    /// Creates a zeroed, boxed-slice vector of `len` bits with one extra
    /// padding word.
    pub fn new_padded(len: usize) -> BitVec<Box<[W]>> {
        let n_of_words = len.div_ceil(W::BITS as usize);
        // SAFETY: `n_of_words + 1` words cover `len` bits plus padding.
        unsafe { BitVec::from_raw_parts(vec![W::ZERO; n_of_words + 1].into_boxed_slice(), len) }
    }
}
impl<W: Word> Extend<bool> for BitVec<Vec<W>> {
    /// Appends every bit yielded by `i`, in order, via [`BitVec::push`].
    fn extend<T: IntoIterator<Item = bool>>(&mut self, i: T) {
        i.into_iter().for_each(|bit| self.push(bit));
    }
}
impl<W: Word> FromIterator<bool> for BitVec<Vec<W>> {
    /// Builds a bit vector by pushing each yielded bit in order.
    fn from_iter<T: IntoIterator<Item = bool>>(iter: T) -> Self {
        let mut bv = Self::new(0);
        for bit in iter {
            bv.push(bit);
        }
        bv
    }
}
impl<B: ToOwned> BitVec<B> {
    /// Returns a `BitVec` owning a copy of the backing storage (e.g.
    /// turning a `BitVec<&[W]>` into a `BitVec<Vec<W>>`).
    pub fn to_owned(&self) -> BitVec<<B as ToOwned>::Owned> {
        BitVec {
            bits: self.bits.to_owned(),
            len: self.len,
        }
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> BitVecValueOps<B::Word> for BitVec<B> {
    /// Reads `width` bits starting at bit `pos` as an unsigned value.
    ///
    /// # Panics
    /// Panics if `width > B::Word::BITS` or `pos + width > self.len`.
    fn get_value(&self, pos: usize, width: usize) -> B::Word {
        assert!(
            width <= B::Word::BITS as usize,
            "width {} must be at most W::BITS ({})",
            width,
            B::Word::BITS
        );
        assert!(
            pos + width <= self.len,
            "bit range {}..{} out of bounds for length {}",
            pos,
            pos + width,
            self.len
        );
        unsafe { self.get_value_unchecked(pos, width) }
    }
    /// Reads `width` bits starting at bit `pos`, without bounds checks.
    ///
    /// # Safety
    /// `pos + width` must be at most `self.len` and `width` at most
    /// `B::Word::BITS`; otherwise this reads out of bounds.
    #[inline]
    unsafe fn get_value_unchecked(&self, pos: usize, width: usize) -> B::Word {
        let bits = B::Word::BITS as usize;
        let word_index = pos / bits;
        let bit_index = pos % bits;
        // Number of high bits that must be discarded from a word to keep
        // only `width` bits.
        let l = bits - width;
        let data = self.bits.as_ref();
        if width == 0 {
            // Early-out avoids shifting by the full word size below.
            return B::Word::ZERO;
        }
        unsafe {
            if bit_index <= l {
                // Value fits in one word: shift it to the top to clear the
                // high bits, then back down to position 0.
                (*data.get_unchecked(word_index) << (l - bit_index)) >> l
            } else {
                // Value straddles two words: take the high part of the
                // first word and merge in the low part of the second.
                (*data.get_unchecked(word_index) >> bit_index)
                    | ((*data.get_unchecked(word_index + 1))
                        .wrapping_shl(l.wrapping_sub(bit_index) as u32)
                        >> l)
            }
        }
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> BitVec<B> {
    /// Reads `width` bits at bit `pos` using a single unaligned word load.
    ///
    /// Unlike `get_value`, this reads a full word starting at byte
    /// `pos / 8`, so the storage must extend at least `size_of::<Word>()`
    /// bytes past that byte (padded vectors do; see `into_padded`).
    ///
    /// # Panics
    /// Panics if the range is out of bounds, if the load would exceed the
    /// allocation, or if `assert_unaligned!` rejects `width` (its exact
    /// constraint is defined elsewhere — presumably that `width` plus the
    /// sub-byte offset always fits in one word).
    pub fn get_value_unaligned(&self, pos: usize, width: usize) -> B::Word {
        assert_unaligned!(B::Word, width);
        assert!(
            pos + width <= self.len,
            "bit range {}..{} out of bounds for length {}",
            pos,
            pos + width,
            self.len
        );
        assert!(
            pos / 8 + size_of::<B::Word>() <= std::mem::size_of_val(self.bits.as_ref()),
            "unaligned read at bit position {} would exceed allocation",
            pos,
        );
        unsafe { self.get_value_unaligned_unchecked(pos, width) }
    }
    /// Unchecked unaligned read of `width` bits at bit `pos`.
    ///
    /// # Safety
    /// `pos + width` must be within the vector, `width` must satisfy
    /// `assert_unaligned!`, and the full-word load at byte `pos / 8` must
    /// stay inside the backing allocation.
    #[inline]
    pub unsafe fn get_value_unaligned_unchecked(&self, pos: usize, width: usize) -> B::Word {
        debug_assert_unaligned!(B::Word, width);
        if width == 0 {
            return B::Word::ZERO;
        }
        let base_ptr = self.bits.as_ref().as_ptr() as *const u8;
        debug_assert!(
            pos / 8 + size_of::<B::Word>() <= std::mem::size_of_val(self.bits.as_ref()),
            "unaligned read at bit position {} would exceed allocation",
            pos,
        );
        // Load one word starting at the byte containing `pos`.
        let ptr = unsafe { base_ptr.add(pos / 8) } as *const B::Word;
        let word = unsafe { core::ptr::read_unaligned(ptr) };
        // Drop the sub-byte offset, then mask down to `width` bits.
        let l = B::Word::BITS as usize - width;
        ((word >> (pos % 8)) << l) >> l
    }
}
impl<B> BitLength for BitVec<B> {
    /// Returns the number of bits (the stored `len`).
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> BitCount for BitVec<B> {
    /// Counts the ones by summing per-word popcounts; the final partial
    /// word is shifted left so bits at or beyond `len` are discarded.
    fn count_ones(&self) -> usize {
        let word_bits = B::Word::BITS as usize;
        let full_words = self.len() / word_bits;
        let residual = self.len() % word_bits;
        let words: &[B::Word] = self.as_ref();
        let mut total = 0usize;
        for w in &words[..full_words] {
            total += w.count_ones() as usize;
        }
        if residual != 0 {
            total += (words[full_words] << (word_bits - residual)).count_ones() as usize;
        }
        total
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>, C: Backend<Word = B::Word> + AsRef<[B::Word]>>
    PartialEq<BitVec<C>> for BitVec<B>
{
    /// Two bit vectors are equal when they have the same length and agree
    /// on every valid bit; garbage beyond `len` is ignored.
    fn eq(&self, other: &BitVec<C>) -> bool {
        if self.len() != other.len() {
            return false;
        }
        let word_bits = B::Word::BITS as usize;
        let full_words = self.len() / word_bits;
        let lhs: &[B::Word] = self.as_ref();
        let rhs: &[B::Word] = other.as_ref();
        if lhs[..full_words] != rhs[..full_words] {
            return false;
        }
        match self.len() % word_bits {
            0 => true,
            // Shift out the bits beyond `len` before comparing the last word.
            residual => {
                (lhs[full_words] ^ rhs[full_words]) << (word_bits - residual) == B::Word::ZERO
            }
        }
    }
}
// The `PartialEq` impl above is reflexive, symmetric and transitive on
// word comparisons, so full equivalence (`Eq`) holds.
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> Eq for BitVec<B> {}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> std::hash::Hash for BitVec<B> {
    /// Hashes the length, the full words, and the masked final partial
    /// word — the same data `PartialEq` compares, so equal vectors hash
    /// equally.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        let len = self.len();
        len.hash(state);
        let word_bits = B::Word::BITS as usize;
        let full_words = len / word_bits;
        let words: &[B::Word] = self.as_ref();
        words[..full_words].hash(state);
        match len % word_bits {
            0 => {}
            // Mask out bits beyond `len` before hashing the last word.
            residual => (words[full_words] << (word_bits - residual)).hash(state),
        }
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> Index<usize> for BitVec<B> {
    type Output = bool;
    /// Returns a reference to a static `bool` matching the bit at `index`
    /// (delegates the bounds check to `BitVecOps::get`).
    fn index(&self, index: usize) -> &Self::Output {
        if BitVecOps::<B::Word>::get(self, index) {
            &true
        } else {
            &false
        }
    }
}
impl<'a, B: Backend<Word: Word> + AsRef<[B::Word]>> IntoIterator for &'a BitVec<B> {
    type IntoIter = BitIter<'a, B::Word, B>;
    type Item = bool;
    /// Iterates over the first `len` bits of the backing words.
    fn into_iter(self) -> Self::IntoIter {
        BitIter::new(&self.bits, self.len())
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> fmt::Display for BitVec<B> {
    /// Formats the vector as `[0110…]`, one binary digit per bit.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("[")?;
        for bit in self {
            f.write_str(if bit { "1" } else { "0" })?;
        }
        f.write_str("]")
    }
}
/// A bit vector over atomic-word storage `B` (default:
/// `Box<[Atomic<usize>]>`), allowing concurrent access through
/// `AtomicBitVecOps`. The bit layout mirrors [`BitVec`].
#[derive(Debug, Clone, MemSize, MemDbg, Delegate)]
#[delegate(crate::traits::Backend, target = "bits")]
pub struct AtomicBitVec<B = Box<[Atomic<usize>]>> {
    // Atomic word storage.
    bits: B,
    // Number of logically valid bits.
    len: usize,
}
impl<B> AtomicBitVec<B> {
    /// Returns the number of bits in the vector.
    #[inline(always)]
    pub const fn len(&self) -> usize {
        self.len
    }
    /// Builds an `AtomicBitVec` directly from a backend and a bit length.
    ///
    /// # Safety
    /// `bits` must provide storage for at least `len` bits; no check is
    /// performed here.
    #[inline(always)]
    #[must_use]
    pub const unsafe fn from_raw_parts(bits: B, len: usize) -> Self {
        Self { bits, len }
    }
    /// Decomposes the vector into its backend and bit length.
    #[inline(always)]
    pub fn into_raw_parts(self) -> (B, usize) {
        (self.bits, self.len)
    }
}
impl<B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>> + From<Vec<B::Word>>> AtomicBitVec<B> {
    /// Creates an atomic bit vector of `len` zero bits.
    #[must_use]
    pub fn new(len: usize) -> Self {
        Self::with_value(len, false)
    }
    /// Creates an atomic bit vector of `len` bits, all set to `value`.
    ///
    /// As in `BitVec::with_value`, the unused high bits of the last word
    /// are zeroed.
    #[must_use]
    pub fn with_value(len: usize, value: bool) -> Self {
        let bits_per_word = <B::Word as PrimitiveAtomic>::Value::BITS as usize;
        let n_of_words = len.div_ceil(bits_per_word);
        let extra_bits = (n_of_words * bits_per_word) - len;
        let word_value = if value {
            !<B::Word as PrimitiveAtomic>::Value::ZERO
        } else {
            <B::Word as PrimitiveAtomic>::Value::ZERO
        };
        let mut bits: Vec<B::Word> = (0..n_of_words).map(|_| B::Word::new(word_value)).collect();
        if extra_bits > 0 {
            // Zero the unused high bits of the last word.
            let last_word_value = word_value >> extra_bits;
            bits[n_of_words - 1] = B::Word::new(last_word_value);
        }
        Self {
            bits: B::from(bits),
            len,
        }
    }
}
impl<B> BitLength for AtomicBitVec<B> {
    /// Returns the number of bits (the stored `len`).
    #[inline(always)]
    fn len(&self) -> usize {
        self.len
    }
}
impl<B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>> + AsRef<[B::Word]>> BitCount
    for AtomicBitVec<B>
{
    /// Counts the ones currently stored, ignoring bits beyond `len`.
    ///
    /// The scan is not atomic as a whole: the result is only exact when no
    /// other thread is concurrently mutating the vector.
    fn count_ones(&self) -> usize {
        let bits_per_word = <B::Word as PrimitiveAtomic>::Value::BITS as usize;
        let full_words = self.len() / bits_per_word;
        let residual = self.len() % bits_per_word;
        let bits: &[B::Word] = self.as_ref();
        let mut num_ones;
        // NOTE(review): the SeqCst fence appears intended to order this
        // scan after earlier writes by other threads so the per-word loads
        // can be Relaxed — confirm the intended memory-model contract.
        core::sync::atomic::fence(Ordering::SeqCst);
        num_ones = bits[..full_words]
            .iter()
            .map(|x| x.load(Ordering::Relaxed).count_ones() as usize)
            .sum();
        if residual != 0 {
            // Shift out bits at or beyond `len` in the last partial word.
            num_ones += (bits[full_words].load(Ordering::Relaxed) << (bits_per_word - residual))
                .count_ones() as usize
        }
        num_ones
    }
}
impl<B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>> + AsRef<[B::Word]>> Index<usize>
    for AtomicBitVec<B>
{
    type Output = bool;
    /// Returns a reference to a static `bool` matching the bit at `index`,
    /// loaded with `Ordering::Relaxed` (bounds-checked by
    /// `AtomicBitVecOps::get`).
    fn index(&self, index: usize) -> &Self::Output {
        if AtomicBitVecOps::<B::Word>::get(self, index, Ordering::Relaxed) {
            &true
        } else {
            &false
        }
    }
}
impl<'a, B: Backend<Word: PrimitiveAtomicUnsigned<Value: Word>> + AsRef<[B::Word]>> IntoIterator
    for &'a AtomicBitVec<B>
{
    type IntoIter = AtomicBitIter<'a, B::Word, B>;
    type Item = bool;
    /// Iterates over the first `len` bits of the atomic words.
    fn into_iter(self) -> Self::IntoIter {
        AtomicBitIter::new(&self.bits, self.len())
    }
}
impl<'a, W: AtomicPrimitive> TryFrom<BitVec<&'a [W]>> for AtomicBitVec<&'a [W::Atomic]> {
    type Error = CannotCastToAtomicError<W>;
    /// Reinterprets a borrowed word slice as its atomic counterpart,
    /// failing when `W` and `W::Atomic` differ in alignment (in which
    /// case the in-place cast would be unsound).
    fn try_from(value: BitVec<&'a [W]>) -> Result<Self, Self::Error> {
        if core::mem::align_of::<W>() != core::mem::align_of::<W::Atomic>() {
            return Err(CannotCastToAtomicError::default());
        }
        Ok(AtomicBitVec {
            // SAFETY: alignment was checked above; assumes `W::Atomic` has
            // the same size and representation as `W` (the usual guarantee
            // for atomic wrappers) — confirm `AtomicPrimitive`'s contract.
            bits: unsafe { core::mem::transmute::<&'a [W], &'a [W::Atomic]>(value.bits) },
            len: value.len,
        })
    }
}
impl<'a, W: AtomicPrimitive> TryFrom<BitVec<&'a mut [W]>> for AtomicBitVec<&'a mut [W::Atomic]> {
    type Error = CannotCastToAtomicError<W>;
    /// Reinterprets a mutably borrowed word slice as its atomic
    /// counterpart, failing when `W` and `W::Atomic` differ in alignment.
    fn try_from(value: BitVec<&'a mut [W]>) -> Result<Self, Self::Error> {
        if core::mem::align_of::<W>() != core::mem::align_of::<W::Atomic>() {
            return Err(CannotCastToAtomicError::default());
        }
        Ok(AtomicBitVec {
            // SAFETY: alignment was checked above; assumes `W::Atomic` has
            // the same size and representation as `W` — confirm
            // `AtomicPrimitive`'s contract.
            bits: unsafe { core::mem::transmute::<&'a mut [W], &'a mut [W::Atomic]>(value.bits) },
            len: value.len,
        })
    }
}
impl<W: AtomicPrimitive> From<AtomicBitVec<Box<[W::Atomic]>>> for BitVec<Vec<W>> {
    /// Converts owned atomic storage back to a plain word `Vec` without
    /// copying (the cast is done by `transmute_vec_from_atomic`).
    fn from(value: AtomicBitVec<Box<[W::Atomic]>>) -> Self {
        BitVec {
            bits: transmute_vec_from_atomic::<W::Atomic>(value.bits.into_vec()),
            len: value.len,
        }
    }
}
impl<W: AtomicPrimitive> From<BitVec<Vec<W>>> for AtomicBitVec<Box<[W::Atomic]>> {
    /// Converts an owned word `Vec` into atomic boxed storage without
    /// copying (the cast is done by `transmute_vec_into_atomic`).
    fn from(value: BitVec<Vec<W>>) -> Self {
        AtomicBitVec {
            bits: transmute_vec_into_atomic(value.bits).into_boxed_slice(),
            len: value.len,
        }
    }
}
impl<W: AtomicPrimitive> From<AtomicBitVec<Box<[W::Atomic]>>> for BitVec<Box<[W]>> {
    /// Converts owned atomic boxed storage back to plain boxed words
    /// without copying.
    fn from(value: AtomicBitVec<Box<[W::Atomic]>>) -> Self {
        BitVec {
            bits: transmute_boxed_slice_from_atomic::<W::Atomic>(value.bits),
            len: value.len,
        }
    }
}
impl<W: AtomicPrimitive + Copy> From<BitVec<Box<[W]>>> for AtomicBitVec<Box<[W::Atomic]>> {
    /// Converts plain boxed words into atomic boxed storage without
    /// copying.
    fn from(value: BitVec<Box<[W]>>) -> Self {
        AtomicBitVec {
            bits: transmute_boxed_slice_into_atomic::<W>(value.bits),
            len: value.len,
        }
    }
}
impl<'a, W: AtomicPrimitive> From<AtomicBitVec<&'a [W::Atomic]>> for BitVec<&'a [W]> {
    /// Reinterprets a borrowed atomic slice as plain words.
    fn from(value: AtomicBitVec<&'a [W::Atomic]>) -> Self {
        BitVec {
            // SAFETY: assumes `W` and `W::Atomic` share size and
            // representation (the usual atomic-wrapper guarantee); a plain
            // `W` never has stricter alignment than its atomic form —
            // confirm `AtomicPrimitive`'s contract.
            bits: unsafe { core::mem::transmute::<&'a [W::Atomic], &'a [W]>(value.bits) },
            len: value.len,
        }
    }
}
impl<'a, W: AtomicPrimitive> From<AtomicBitVec<&'a mut [W::Atomic]>> for BitVec<&'a mut [W]> {
    /// Reinterprets a mutably borrowed atomic slice as plain words.
    fn from(value: AtomicBitVec<&'a mut [W::Atomic]>) -> Self {
        BitVec {
            // SAFETY: assumes `W` and `W::Atomic` share size and
            // representation; exclusive access rules out concurrent
            // atomic use — confirm `AtomicPrimitive`'s contract.
            bits: unsafe { core::mem::transmute::<&'a mut [W::Atomic], &'a mut [W]>(value.bits) },
            len: value.len,
        }
    }
}
impl<W> From<BitVec<Vec<W>>> for BitVec<Box<[W]>> {
fn from(value: BitVec<Vec<W>>) -> Self {
BitVec {
bits: value.bits.into_boxed_slice(),
len: value.len,
}
}
}
impl<W> From<BitVec<Box<[W]>>> for BitVec<Vec<W>> {
fn from(value: BitVec<Box<[W]>>) -> Self {
BitVec {
bits: value.bits.into_vec(),
len: value.len,
}
}
}
impl<W: Word, B: AsRef<[W]>> AsRef<[W]> for BitVec<B> {
    /// Exposes the backing words as a shared slice.
    #[inline(always)]
    fn as_ref(&self) -> &[W] {
        self.bits.as_ref()
    }
}
impl<W: Word, B: AsMut<[W]>> AsMut<[W]> for BitVec<B> {
    /// Exposes the backing words as a mutable slice.
    #[inline(always)]
    fn as_mut(&mut self) -> &mut [W] {
        self.bits.as_mut()
    }
}
impl<B: Backend + AsRef<[B::Word]>> AsRef<[B::Word]> for AtomicBitVec<B> {
    /// Exposes the backing atomic words as a shared slice.
    #[inline(always)]
    fn as_ref(&self) -> &[B::Word] {
        self.bits.as_ref()
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> RankHinted for BitVec<B> {
    /// Returns the number of ones in `[0, pos)`, resuming from a hint:
    /// `hint_pos` is a word index and `hint_rank` the number of ones
    /// before that word (as the word-stepping loops below assume).
    ///
    /// # Safety
    /// `pos` must be within the bit vector and the hint must be valid
    /// (`hint_pos` in range, `hint_rank` correct for it); otherwise the
    /// unchecked word reads are out of bounds.
    #[inline(always)]
    unsafe fn rank_hinted<const WORDS_PER_SUBBLOCK: usize>(
        &self,
        pos: usize,
        hint_pos: usize,
        hint_rank: usize,
    ) -> usize {
        let bits_per_word = B::Word::BITS as usize;
        let bits: &[B::Word] = self.as_ref();
        let mut rank = hint_rank;
        let mut hp = hint_pos;
        debug_assert!(hp < bits.len(), "hint_pos: {}, len: {}", hp, bits.len());
        // Prefetch the target word when a subblock spans more than 64
        // bytes (presumably one cache line).
        if WORDS_PER_SUBBLOCK * std::mem::size_of::<B::Word>() > 64 {
            crate::utils::prefetch_index(bits, pos / bits_per_word);
        }
        if WORDS_PER_SUBBLOCK == usize::MAX {
            // Unbounded subblocks: walk word by word until the word
            // containing `pos`.
            while (hp + 1) * bits_per_word <= pos {
                rank += unsafe { bits.get_unchecked(hp) }.count_ones() as usize;
                hp += 1;
            }
        } else {
            // Bounded subblocks: at most WORDS_PER_SUBBLOCK - 1 full words
            // can precede the word containing `pos`.
            for _ in 0..WORDS_PER_SUBBLOCK - 1 {
                if (hp + 1) * bits_per_word > pos {
                    break;
                }
                rank += unsafe { bits.get_unchecked(hp) }.count_ones() as usize;
                hp += 1;
            }
        }
        // Add the ones below `pos` inside the final (partial) word; the
        // wrapping_sub turns `1 << (pos % BITS)` into a low-bit mask, and
        // yields an empty mask when `pos` is word-aligned.
        rank + (unsafe { *bits.get_unchecked(hp) }
            & (B::Word::ONE << (pos % bits_per_word) as u32).wrapping_sub(B::Word::ONE))
            .count_ones() as usize
    }
}
impl<B: Backend<Word: Word + SelectInWord> + AsRef<[B::Word]>> SelectHinted for BitVec<B> {
    /// Returns the position of the one of the given `rank`, starting the
    /// scan at bit `hint_pos` with `hint_rank` ones known before it.
    ///
    /// # Safety
    /// The hint must be valid and a one of rank `rank` must exist within
    /// the words the loop may visit; otherwise this reads out of bounds
    /// or hits `unreachable!()`.
    #[inline(always)]
    unsafe fn select_hinted<const WORDS_PER_SUBBLOCK: usize>(
        &self,
        rank: usize,
        hint_pos: usize,
        hint_rank: usize,
    ) -> usize {
        let bits_per_word = B::Word::BITS as usize;
        let bits: &[B::Word] = self.as_ref();
        let mut word_index = hint_pos / bits_per_word;
        let bit_index = hint_pos % bits_per_word;
        // Ones still to skip before reaching the target.
        let mut residual = rank - hint_rank;
        // First word with the bits below `hint_pos` cleared.
        let mut word = (unsafe { *bits.get_unchecked(word_index) } >> bit_index) << bit_index;
        let limit = if WORDS_PER_SUBBLOCK == usize::MAX {
            usize::MAX
        } else {
            WORDS_PER_SUBBLOCK
        };
        for _ in 0..limit {
            let bit_count = word.count_ones() as usize;
            if residual < bit_count {
                // Target is in this word: locate it with in-word select.
                return word_index * bits_per_word + word.select_in_word(residual);
            }
            word_index += 1;
            word = *unsafe { bits.get_unchecked(word_index) };
            residual -= bit_count;
        }
        unreachable!()
    }
}
impl<B: Backend<Word: Word + SelectInWord> + AsRef<[B::Word]>> SelectZeroHinted for BitVec<B> {
    /// Returns the position of the zero of the given `rank`, starting the
    /// scan at bit `hint_pos` with `hint_rank` zeros known before it.
    /// Implemented as select-one on the complemented words.
    ///
    /// # Safety
    /// The hint must be valid and a zero of rank `rank` must exist within
    /// the words the loop may visit; otherwise this reads out of bounds
    /// or hits `unreachable!()`.
    #[inline(always)]
    unsafe fn select_zero_hinted<const WORDS_PER_SUBBLOCK: usize>(
        &self,
        rank: usize,
        hint_pos: usize,
        hint_rank: usize,
    ) -> usize {
        let bits_per_word = B::Word::BITS as usize;
        let bits: &[B::Word] = self.as_ref();
        let mut word_index = hint_pos / bits_per_word;
        let bit_index = hint_pos % bits_per_word;
        // Zeros still to skip before reaching the target.
        let mut residual = rank - hint_rank;
        // Complement the first word and clear the bits below `hint_pos`,
        // so zeros appear as ones.
        let mut word = (!unsafe { *bits.get_unchecked(word_index) } >> bit_index) << bit_index;
        let limit = if WORDS_PER_SUBBLOCK == usize::MAX {
            usize::MAX
        } else {
            WORDS_PER_SUBBLOCK
        };
        for _ in 0..limit {
            let bit_count = word.count_ones() as usize;
            if residual < bit_count {
                // Target is in this word: in-word select on the complement.
                return word_index * bits_per_word + word.select_in_word(residual);
            }
            word_index += 1;
            word = unsafe { !*bits.get_unchecked(word_index) };
            residual -= bit_count;
        }
        unreachable!()
    }
}
/// A [`BitVec`] wrapper whose value reads use unaligned memory loads
/// (its `BitVecValueOps` delegates to `get_value_unaligned`), which
/// require at least one word of padding past the bits; construct it via
/// `TryIntoUnaligned`, which guarantees that padding.
#[derive(Debug, Clone, MemSize, MemDbg, Delegate)]
#[cfg_attr(feature = "epserde", derive(epserde::Epserde))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[delegate(Index<usize>, target = "0")]
#[delegate(crate::traits::Backend, target = "0")]
#[delegate(crate::traits::bit_vec_ops::BitLength, target = "0")]
pub struct BitVecU<B>(BitVec<B>);
impl<W: Word> From<BitVecU<Box<[W]>>> for BitVec<Box<[W]>> {
    /// Unwraps the unaligned-access wrapper, recovering the plain vector
    /// (padding words, if any, are kept).
    fn from(unaligned: BitVecU<Box<[W]>>) -> Self {
        unaligned.0
    }
}
impl<W: Word> crate::traits::TryIntoUnaligned for BitVec<Box<[W]>> {
    type Unaligned = BitVecU<Box<[W]>>;
    /// Wraps the vector for unaligned reads, ensuring the storage holds
    /// strictly more than the minimum number of words so a full-word
    /// unaligned load at any valid bit stays inside the allocation.
    fn try_into_unaligned(
        self,
    ) -> Result<Self::Unaligned, crate::traits::UnalignedConversionError> {
        let min_words = self.len().div_ceil(W::BITS as usize);
        if self.as_ref().len() > min_words {
            // Already padded: wrap as-is.
            return Ok(BitVecU(self));
        }
        // Append exactly one zero padding word.
        let (raw, len) = self.into_raw_parts();
        let mut words = raw.into_vec();
        words.reserve_exact(1);
        words.push(W::ZERO);
        // SAFETY: the bit content and the length are unchanged; only a
        // padding word was appended.
        Ok(BitVecU(unsafe {
            BitVec::from_raw_parts(words.into_boxed_slice(), len)
        }))
    }
}
impl<B: Backend<Word: Word> + AsRef<[B::Word]>> BitVecValueOps<B::Word> for BitVecU<B> {
    /// Reads `width` bits at `pos` using an unaligned memory load.
    #[inline(always)]
    fn get_value(&self, pos: usize, width: usize) -> B::Word {
        self.0.get_value_unaligned(pos, width)
    }
    /// Unchecked unaligned read.
    ///
    /// # Safety
    /// Same contract as `BitVec::get_value_unaligned_unchecked`.
    #[inline(always)]
    unsafe fn get_value_unchecked(&self, pos: usize, width: usize) -> B::Word {
        unsafe { self.0.get_value_unaligned_unchecked(pos, width) }
    }
}
impl<B: Backend + AsRef<[B::Word]>> AsRef<[B::Word]> for BitVecU<B> {
    /// Exposes the backing words (including padding) as a shared slice.
    #[inline(always)]
    fn as_ref(&self) -> &[B::Word] {
        self.0.bits.as_ref()
    }
}