use crate::FastVec;
use crate::error::{Result, ZiporaError};
use std::fmt;
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
use std::arch::x86_64::{
__m256i, _mm256_and_si256, _mm256_loadu_si256, _mm256_or_si256, _mm256_storeu_si256,
_mm256_xor_si256,
};
/// A growable vector of bits packed into 64-bit blocks.
///
/// Bit `i` lives in `blocks[i / 64]` at position `i % 64` (LSB-first within
/// a block). Bits at positions >= `len` in allocated blocks are kept at 0;
/// `fast_ensure_set1` debug-asserts this slack-bits-are-zero invariant.
pub struct BitVector {
    // Backing storage; each u64 holds 64 bits.
    blocks: FastVec<u64>,
    // Number of logical bits currently stored.
    len: usize,
}

/// Number of bits packed into each storage block (`u64`).
const BITS_PER_BLOCK: usize = 64;
impl BitVector {
    /// Creates an empty bit vector with no pre-allocated storage.
    #[inline]
    pub fn new() -> Self {
        Self {
            blocks: FastVec::new(),
            len: 0,
        }
    }

    /// Creates an empty bit vector with room for at least `capacity` bits.
    ///
    /// # Errors
    /// Returns an error if the underlying block allocation fails.
    pub fn with_capacity(capacity: usize) -> Result<Self> {
        // Round the bit capacity up to whole 64-bit blocks.
        let block_capacity = (capacity + BITS_PER_BLOCK - 1) / BITS_PER_BLOCK;
        Ok(Self {
            blocks: FastVec::with_capacity(block_capacity)?,
            len: 0,
        })
    }

    /// Creates a bit vector containing `size` copies of `value`.
    ///
    /// # Errors
    /// Returns an error if allocation fails while growing to `size`.
    pub fn with_size(size: usize, value: bool) -> Result<Self> {
        let mut bv = Self::with_capacity(size)?;
        bv.resize(size, value)?;
        Ok(bv)
    }
pub fn from_raw_bits(raw_bits: Vec<u64>, total_bits: usize) -> Result<Self> {
let required_blocks = (total_bits + BITS_PER_BLOCK - 1) / BITS_PER_BLOCK;
if raw_bits.len() < required_blocks {
return Err(ZiporaError::invalid_data(format!(
"Insufficient raw bits: need {} blocks for {} bits, got {}",
required_blocks, total_bits, raw_bits.len()
)));
}
let mut blocks = FastVec::with_capacity(required_blocks)?;
for (i, &word) in raw_bits.iter().enumerate() {
if i < required_blocks {
blocks.push(word)?;
}
}
if total_bits > 0 {
let last_block_idx = required_blocks - 1;
let bits_in_last_block = total_bits % BITS_PER_BLOCK;
if bits_in_last_block > 0 {
let mask = (1u64 << bits_in_last_block) - 1;
blocks[last_block_idx] &= mask;
}
}
Ok(Self {
blocks,
len: total_bits,
})
}
    /// Returns the number of bits stored.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` when the vector contains no bits.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns how many bits fit in the currently reserved blocks.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.blocks.capacity() * BITS_PER_BLOCK
    }

    /// Returns bit `index`, or `None` when `index >= len`.
    #[inline]
    pub fn get(&self, index: usize) -> Option<bool> {
        if index >= self.len {
            return None;
        }
        let block_index = index / BITS_PER_BLOCK;
        let bit_index = index % BITS_PER_BLOCK;
        Some((self.blocks[block_index] >> bit_index) & 1 == 1)
    }
    /// Returns bit `index` without bounds checking.
    ///
    /// # Safety
    /// `index` must be less than `self.len()`; otherwise the block read is
    /// out of bounds and behavior is undefined.
    #[inline]
    pub unsafe fn get_unchecked(&self, index: usize) -> bool {
        debug_assert!(index < self.len);
        let block_index = index / BITS_PER_BLOCK;
        let bit_index = index % BITS_PER_BLOCK;
        // SAFETY: caller guarantees index < len, so block_index is in bounds.
        (unsafe { self.blocks.get_unchecked(block_index) } >> bit_index) & 1 == 1
    }
pub fn set(&mut self, index: usize, value: bool) -> Result<()> {
if index >= self.len {
return Err(ZiporaError::out_of_bounds(index, self.len));
}
let block_index = index / BITS_PER_BLOCK;
let bit_index = index % BITS_PER_BLOCK;
if value {
self.blocks[block_index] |= 1u64 << bit_index;
} else {
self.blocks[block_index] &= !(1u64 << bit_index);
}
Ok(())
}
    /// Sets bit `index` to `value` without bounds checking.
    ///
    /// # Safety
    /// `index` must be less than `self.len()`; otherwise the block write is
    /// out of bounds and behavior is undefined.
    #[inline]
    pub unsafe fn set_unchecked(&mut self, index: usize, value: bool) {
        debug_assert!(index < self.len);
        let block_index = index / BITS_PER_BLOCK;
        let bit_index = index % BITS_PER_BLOCK;
        if value {
            // SAFETY: caller guarantees index < len, so block_index is in bounds.
            unsafe {
                *self.blocks.get_unchecked_mut(block_index) |= 1u64 << bit_index;
            }
        } else {
            // SAFETY: as above.
            unsafe {
                *self.blocks.get_unchecked_mut(block_index) &= !(1u64 << bit_index);
            }
        }
    }
pub fn push(&mut self, value: bool) -> Result<()> {
let block_index = self.len / BITS_PER_BLOCK;
let bit_index = self.len % BITS_PER_BLOCK;
while self.blocks.len() <= block_index {
self.blocks.push(0)?;
}
if value {
self.blocks[block_index] |= 1u64 << bit_index;
} else {
self.blocks[block_index] &= !(1u64 << bit_index);
}
self.len += 1;
Ok(())
}
    /// Sets bit `i` to 1, growing the vector (zero-filled) when `i` is at or
    /// past the current length.
    ///
    /// # Errors
    /// Returns an error if growing the block storage fails.
    pub fn ensure_set1(&mut self, i: usize) -> Result<()> {
        if i < self.len {
            let block_index = i / BITS_PER_BLOCK;
            let bit_index = i % BITS_PER_BLOCK;
            self.blocks[block_index] |= 1u64 << bit_index;
        } else {
            self.ensure_set1_slow_path(i)?;
        }
        Ok(())
    }

    /// Out-of-line growth path for `ensure_set1`: extends storage with zeroed
    /// blocks to cover bit `i`, bumps `len`, then sets the bit.
    #[cold]
    #[inline(never)]
    fn ensure_set1_slow_path(&mut self, i: usize) -> Result<()> {
        let new_len = i + 1;
        let new_block_count = (new_len + BITS_PER_BLOCK - 1) / BITS_PER_BLOCK;
        while self.blocks.len() < new_block_count {
            self.blocks.push(0)?;
        }
        self.len = new_len;
        let block_index = i / BITS_PER_BLOCK;
        let bit_index = i % BITS_PER_BLOCK;
        self.blocks[block_index] |= 1u64 << bit_index;
        Ok(())
    }
    /// Sets bit `i` to 1 like `ensure_set1`, but when `i` is past `len` yet
    /// still inside already-allocated blocks it only bumps `len` instead of
    /// zero-filling — relying on the invariant that all bits past `len` are
    /// already 0.
    ///
    /// # Errors
    /// Returns an error if growing the block storage fails.
    pub fn fast_ensure_set1(&mut self, i: usize) -> Result<()> {
        let capacity = self.blocks.len() * BITS_PER_BLOCK;
        if i < capacity {
            if self.len <= i {
                // Debug-only check that the slack bits being claimed are 0.
                debug_assert!(
                    (self.len..=i).all(|j| {
                        let block_idx = j / BITS_PER_BLOCK;
                        let bit_idx = j % BITS_PER_BLOCK;
                        block_idx >= self.blocks.len() || (self.blocks[block_idx] >> bit_idx) & 1 == 0
                    }),
                    "fast_ensure_set1 invariant violated: unused bits must be 0"
                );
                self.len = i + 1;
            }
            let block_index = i / BITS_PER_BLOCK;
            let bit_index = i % BITS_PER_BLOCK;
            self.blocks[block_index] |= 1u64 << bit_index;
            Ok(())
        } else {
            self.fast_ensure_set1_slow_path(i)
        }
    }
#[cold]
#[inline(never)]
fn fast_ensure_set1_slow_path(&mut self, i: usize) -> Result<()> {
#[cfg(debug_assertions)]
{
let capacity = self.blocks.len() * BITS_PER_BLOCK;
for j in self.len..capacity {
let block_idx = j / BITS_PER_BLOCK;
let bit_idx = j % BITS_PER_BLOCK;
if block_idx < self.blocks.len() {
debug_assert!(
(self.blocks[block_idx] >> bit_idx) & 1 == 0,
"fast_ensure_set1 invariant violated at bit {}: unused bits must be 0",
j
);
}
}
}
let old_capacity = self.blocks.len() * BITS_PER_BLOCK;
let new_len = i + 1;
let new_block_count = (new_len + BITS_PER_BLOCK - 1) / BITS_PER_BLOCK;
while self.blocks.len() < new_block_count {
self.blocks.push(0)?;
}
let new_capacity = self.blocks.len() * BITS_PER_BLOCK;
if old_capacity < new_capacity {
}
self.len = new_len;
let block_index = i / BITS_PER_BLOCK;
let bit_index = i % BITS_PER_BLOCK;
self.blocks[block_index] |= 1u64 << bit_index;
Ok(())
}
pub fn insert(&mut self, index: usize, value: bool) -> Result<()> {
if index > self.len {
return Err(ZiporaError::out_of_bounds(index, self.len));
}
self.push(false)?;
for i in (index + 1..self.len).rev() {
let bit = self.get(i - 1).unwrap_or(false);
self.set(i, bit)?;
}
self.set(index, value)?;
Ok(())
}
    /// Returns a mutable proxy for bit `index`, or `None` when out of bounds.
    pub fn get_mut(&mut self, index: usize) -> Option<BitRef<'_>> {
        if index >= self.len {
            return None;
        }
        Some(BitRef {
            bit_vector: self,
            index,
        })
    }
pub fn pop(&mut self) -> Option<bool> {
if self.len == 0 {
return None;
}
self.len -= 1;
let block_index = self.len / BITS_PER_BLOCK;
let bit_index = self.len % BITS_PER_BLOCK;
let value = (self.blocks[block_index] >> bit_index) & 1 == 1;
self.blocks[block_index] &= !(1u64 << bit_index);
Some(value)
}
pub fn resize(&mut self, new_len: usize, value: bool) -> Result<()> {
if new_len > self.len {
for _ in self.len..new_len {
self.push(value)?;
}
} else if new_len < self.len {
self.len = new_len;
if new_len > 0 {
let last_block_index = (new_len - 1) / BITS_PER_BLOCK;
let bits_in_last_block = new_len % BITS_PER_BLOCK;
if bits_in_last_block > 0 {
let mask = (1u64 << bits_in_last_block) - 1;
self.blocks[last_block_index] &= mask;
}
}
}
Ok(())
}
pub fn clear(&mut self) {
self.blocks.clear();
self.len = 0;
}
pub fn count_ones(&self) -> usize {
let mut count = 0;
let complete_blocks = self.len / BITS_PER_BLOCK;
for i in 0..complete_blocks {
count += self.blocks[i].count_ones() as usize;
}
let remaining_bits = self.len % BITS_PER_BLOCK;
if remaining_bits > 0 && complete_blocks < self.blocks.len() {
let mask = (1u64 << remaining_bits) - 1;
count += (self.blocks[complete_blocks] & mask).count_ones() as usize;
}
count
}
#[inline]
pub fn count_zeros(&self) -> usize {
self.len - self.count_ones()
}
pub fn rank1(&self, pos: usize) -> usize {
if pos == 0 {
return 0;
}
let pos = pos.min(self.len);
let mut count = 0;
let complete_blocks = pos / BITS_PER_BLOCK;
for i in 0..complete_blocks {
count += self.blocks[i].count_ones() as usize;
}
let remaining_bits = pos % BITS_PER_BLOCK;
if remaining_bits > 0 && complete_blocks < self.blocks.len() {
let mask = (1u64 << remaining_bits) - 1;
count += (self.blocks[complete_blocks] & mask).count_ones() as usize;
}
count
}
#[inline]
pub fn rank0(&self, pos: usize) -> usize {
if pos == 0 {
return 0;
}
let pos = pos.min(self.len);
pos - self.rank1(pos)
}
#[inline]
pub fn blocks(&self) -> &[u64] {
self.blocks.as_slice()
}
pub fn reserve(&mut self, additional: usize) -> Result<()> {
let new_len = self.len + additional;
let new_block_count = (new_len + BITS_PER_BLOCK - 1) / BITS_PER_BLOCK;
let additional_blocks = new_block_count.saturating_sub(self.blocks.len());
if additional_blocks > 0 {
self.blocks.reserve(additional_blocks)?;
}
Ok(())
}
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
pub fn rank1_bulk_simd(&self, positions: &[usize]) -> Vec<usize> {
let mut results = Vec::with_capacity(positions.len());
if is_x86_feature_detected!("avx2") {
self.rank1_bulk_simd_avx2(positions, &mut results);
} else {
for &pos in positions {
results.push(self.rank1(pos));
}
}
results
}
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
fn rank1_bulk_simd_avx2(&self, positions: &[usize], results: &mut Vec<usize>) {
let chunk_size = 4;
for chunk in positions.chunks(chunk_size) {
unsafe {
let mut pos_array = [0u64; 4];
for (i, &pos) in chunk.iter().enumerate() {
pos_array[i] = pos as u64;
}
let _positions_vec = _mm256_loadu_si256(pos_array.as_ptr() as *const __m256i);
for &pos in chunk {
results.push(self.rank1(pos));
}
}
}
}
    /// Scalar fallback for `rank1_bulk_simd` on non-x86_64 / non-SIMD builds.
    #[cfg(not(all(target_arch = "x86_64", feature = "simd")))]
    pub fn rank1_bulk_simd(&self, positions: &[usize]) -> Vec<usize> {
        positions.iter().map(|&pos| self.rank1(pos)).collect()
    }
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
pub fn set_range_simd(&mut self, start: usize, end: usize, value: bool) -> Result<()> {
if start > end || end > self.len {
return Err(ZiporaError::out_of_bounds(end, self.len));
}
if is_x86_feature_detected!("avx2") {
self.set_range_simd_avx2(start, end, value)?;
} else {
for i in start..end {
self.set(i, value)?;
}
}
Ok(())
}
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
fn set_range_simd_avx2(&mut self, start: usize, end: usize, value: bool) -> Result<()> {
let start_block = start / BITS_PER_BLOCK;
let end_block = (end - 1) / BITS_PER_BLOCK;
for block_idx in start_block..=end_block {
if block_idx >= self.blocks.len() {
break;
}
let block_start = block_idx * BITS_PER_BLOCK;
let block_end = ((block_idx + 1) * BITS_PER_BLOCK).min(end);
let range_start = start.max(block_start);
let range_end = end.min(block_end);
if range_start >= range_end {
continue;
}
let start_bit = range_start - block_start;
let end_bit = range_end - block_start;
let mask = if end_bit >= BITS_PER_BLOCK {
!((1u64 << start_bit) - 1)
} else {
((1u64 << end_bit) - 1) & !((1u64 << start_bit) - 1)
};
if value {
self.blocks[block_idx] |= mask;
} else {
self.blocks[block_idx] &= !mask;
}
}
Ok(())
}
#[cfg(not(all(target_arch = "x86_64", feature = "simd")))]
pub fn set_range_simd(&mut self, start: usize, end: usize, value: bool) -> Result<()> {
if start > end || end > self.len {
return Err(ZiporaError::out_of_bounds(end, self.len));
}
for i in start..end {
self.set(i, value)?;
}
Ok(())
}
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
pub fn bulk_bitwise_op_simd(
&mut self,
other: &BitVector,
op: BitwiseOp,
start: usize,
end: usize,
) -> Result<()> {
if start > end || end > self.len || end > other.len {
return Err(ZiporaError::invalid_data(
"Invalid range for bulk operation".to_string(),
));
}
if is_x86_feature_detected!("avx2") {
self.bulk_bitwise_op_simd_avx2(other, op, start, end)?;
} else {
for i in start..end {
let other_bit = other.get(i).unwrap_or(false);
let self_bit = self.get(i).unwrap_or(false);
let result = match op {
BitwiseOp::And => self_bit & other_bit,
BitwiseOp::Or => self_bit | other_bit,
BitwiseOp::Xor => self_bit ^ other_bit,
};
self.set(i, result)?;
}
}
Ok(())
}
#[cfg(all(target_arch = "x86_64", feature = "simd"))]
fn bulk_bitwise_op_simd_avx2(
&mut self,
other: &BitVector,
op: BitwiseOp,
start: usize,
end: usize,
) -> Result<()> {
let start_block = start / BITS_PER_BLOCK;
let end_block = (end - 1) / BITS_PER_BLOCK;
let avx2_blocks = 4;
let mut block_idx = start_block;
unsafe {
while block_idx + avx2_blocks <= end_block + 1
&& block_idx + avx2_blocks <= self.blocks.len()
&& block_idx + avx2_blocks <= other.blocks.len()
{
let self_ptr = self.blocks.as_ptr().add(block_idx) as *const __m256i;
let other_ptr = other.blocks.as_ptr().add(block_idx) as *const __m256i;
let result_ptr = self.blocks.as_mut_ptr().add(block_idx) as *mut __m256i;
let self_vec = _mm256_loadu_si256(self_ptr);
let other_vec = _mm256_loadu_si256(other_ptr);
let result_vec = match op {
BitwiseOp::And => _mm256_and_si256(self_vec, other_vec),
BitwiseOp::Or => _mm256_or_si256(self_vec, other_vec),
BitwiseOp::Xor => _mm256_xor_si256(self_vec, other_vec),
};
_mm256_storeu_si256(result_ptr, result_vec);
block_idx += avx2_blocks;
}
}
while block_idx <= end_block
&& block_idx < self.blocks.len()
&& block_idx < other.blocks.len()
{
match op {
BitwiseOp::And => self.blocks[block_idx] &= other.blocks[block_idx],
BitwiseOp::Or => self.blocks[block_idx] |= other.blocks[block_idx],
BitwiseOp::Xor => self.blocks[block_idx] ^= other.blocks[block_idx],
}
block_idx += 1;
}
Ok(())
}
#[cfg(not(all(target_arch = "x86_64", feature = "simd")))]
pub fn bulk_bitwise_op_simd(
&mut self,
other: &BitVector,
op: BitwiseOp,
start: usize,
end: usize,
) -> Result<()> {
if start > end || end > self.len || end > other.len {
return Err(ZiporaError::invalid_data(
"Invalid range for bulk operation".to_string(),
));
}
for i in start..end {
let other_bit = other.get(i).unwrap_or(false);
let self_bit = self.get(i).unwrap_or(false);
let result = match op {
BitwiseOp::And => self_bit & other_bit,
BitwiseOp::Or => self_bit | other_bit,
BitwiseOp::Xor => self_bit ^ other_bit,
};
self.set(i, result)?;
}
Ok(())
}
}
/// Selector for the binary bitwise operation used by the bulk-combine APIs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BitwiseOp {
    /// Bitwise AND.
    And,
    /// Bitwise OR.
    Or,
    /// Bitwise XOR.
    Xor,
}
/// Mutable proxy to a single bit of a `BitVector`, produced by
/// `BitVector::get_mut` (which validates the index).
pub struct BitRef<'a> {
    // Exclusive borrow of the parent vector for the proxy's lifetime.
    bit_vector: &'a mut BitVector,
    // Bit position this proxy refers to.
    index: usize,
}

impl<'a> BitRef<'a> {
    /// Reads the referenced bit (`false` if the index is somehow out of range).
    #[inline]
    pub fn get(&self) -> bool {
        self.bit_vector.get(self.index).unwrap_or(false)
    }

    /// Writes the referenced bit.
    ///
    /// # Errors
    /// Returns an out-of-bounds error if the index is no longer valid.
    #[inline]
    pub fn set(&mut self, value: bool) -> Result<()> {
        self.bit_vector.set(self.index, value)
    }
}
impl<'a> std::ops::Deref for BitRef<'a> {
    type Target = bool;

    /// Dereferences to a static `bool` matching the current bit value.
    fn deref(&self) -> &Self::Target {
        match self.get() {
            true => &true,
            false => &false,
        }
    }
}
impl Default for BitVector {
    /// Equivalent to `BitVector::new()`.
    fn default() -> Self {
        Self::new()
    }
}
impl fmt::Debug for BitVector {
    /// Formats as `BitVector { len: N, bits: [0101...] }`, truncating the
    /// bit listing to the first 64 bits (with a `...` marker beyond that).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "BitVector {{ len: {}, bits: [", self.len)?;
        let shown = self.len.min(64);
        for i in 0..shown {
            let ch = if self.get(i).expect("index within bitvector length") {
                '1'
            } else {
                '0'
            };
            write!(f, "{}", ch)?;
        }
        if self.len > 64 {
            write!(f, "...")?;
        }
        write!(f, "] }}")
    }
}
impl Clone for BitVector {
fn clone(&self) -> Self {
Self {
blocks: self.blocks.clone(),
len: self.len,
}
}
}
impl PartialEq for BitVector {
    /// Two vectors are equal when they have the same length and the same
    /// first-`len` bit pattern; slack bits past `len` are masked out of the
    /// comparison of the final partial block.
    fn eq(&self, other: &Self) -> bool {
        if self.len != other.len {
            return false;
        }
        let complete_blocks = self.len / BITS_PER_BLOCK;
        let lhs = self.blocks.as_slice();
        let rhs = other.blocks.as_slice();
        if lhs[..complete_blocks] != rhs[..complete_blocks] {
            return false;
        }
        let remaining_bits = self.len % BITS_PER_BLOCK;
        if remaining_bits > 0
            && complete_blocks < lhs.len()
            && complete_blocks < rhs.len()
        {
            let mask = (1u64 << remaining_bits) - 1;
            if lhs[complete_blocks] & mask != rhs[complete_blocks] & mask {
                return false;
            }
        }
        true
    }
}

impl Eq for BitVector {}
// Unit tests covering construction, bit access, rank/count queries, growth
// helpers (ensure_set1 / fast_ensure_set1), and trait implementations.
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_new() {
let bv = BitVector::new();
assert_eq!(bv.len(), 0);
assert!(bv.is_empty());
}
#[test]
fn test_push_pop() {
let mut bv = BitVector::new();
bv.push(true).unwrap();
bv.push(false).unwrap();
bv.push(true).unwrap();
assert_eq!(bv.len(), 3);
assert_eq!(bv.get(0), Some(true));
assert_eq!(bv.get(1), Some(false));
assert_eq!(bv.get(2), Some(true));
// Pop returns bits in LIFO order.
assert_eq!(bv.pop(), Some(true));
assert_eq!(bv.pop(), Some(false));
assert_eq!(bv.len(), 1);
}
#[test]
fn test_set_get() {
let mut bv = BitVector::with_size(10, false).unwrap();
bv.set(0, true).unwrap();
bv.set(5, true).unwrap();
bv.set(9, true).unwrap();
assert_eq!(bv.get(0), Some(true));
assert_eq!(bv.get(1), Some(false));
assert_eq!(bv.get(5), Some(true));
assert_eq!(bv.get(9), Some(true));
// Out-of-range reads yield None rather than panicking.
assert_eq!(bv.get(10), None);
}
#[test]
fn test_count_operations() {
let mut bv = BitVector::with_size(10, false).unwrap();
bv.set(0, true).unwrap();
bv.set(3, true).unwrap();
bv.set(7, true).unwrap();
assert_eq!(bv.count_ones(), 3);
assert_eq!(bv.count_zeros(), 7);
}
#[test]
fn test_rank_operations() {
let mut bv = BitVector::with_size(10, false).unwrap();
bv.set(1, true).unwrap();
bv.set(3, true).unwrap();
bv.set(7, true).unwrap();
// rank counts bits strictly before the queried position.
assert_eq!(bv.rank1(0), 0);
assert_eq!(bv.rank1(2), 1);
assert_eq!(bv.rank1(4), 2);
assert_eq!(bv.rank1(8), 3);
assert_eq!(bv.rank1(10), 3);
assert_eq!(bv.rank0(2), 1);
assert_eq!(bv.rank0(4), 2);
assert_eq!(bv.rank0(8), 5);
}
#[test]
fn test_large_bitvector() {
// Spans multiple 64-bit blocks (200 bits = 4 blocks).
let mut bv = BitVector::new();
for i in 0..200 {
bv.push(i % 3 == 0).unwrap();
}
assert_eq!(bv.len(), 200);
let mut expected_ones = 0;
for i in 0..200 {
if i % 3 == 0 {
expected_ones += 1;
assert_eq!(bv.get(i), Some(true));
} else {
assert_eq!(bv.get(i), Some(false));
}
}
assert_eq!(bv.count_ones(), expected_ones);
}
#[test]
fn test_resize() {
let mut bv = BitVector::new();
bv.resize(5, true).unwrap();
assert_eq!(bv.len(), 5);
assert_eq!(bv.count_ones(), 5);
// Shrink keeps the surviving prefix intact.
bv.resize(3, false).unwrap();
assert_eq!(bv.len(), 3);
assert_eq!(bv.count_ones(), 3);
// Growing with `false` must not resurrect previously-truncated bits.
bv.resize(8, false).unwrap();
assert_eq!(bv.len(), 8);
assert_eq!(bv.count_ones(), 3);
}
#[test]
fn test_equality() {
let mut bv1 = BitVector::new();
let mut bv2 = BitVector::new();
for &bit in &[true, false, true, true, false] {
bv1.push(bit).unwrap();
bv2.push(bit).unwrap();
}
assert_eq!(bv1, bv2);
bv2.set(2, false).unwrap();
assert_ne!(bv1, bv2);
}
#[test]
fn test_unsafe_operations() {
let mut bv = BitVector::new();
bv.push(true).unwrap();
bv.push(false).unwrap();
bv.push(true).unwrap();
// All indices below are < len, satisfying the unchecked contracts.
unsafe {
assert_eq!(bv.get_unchecked(0), true);
assert_eq!(bv.get_unchecked(1), false);
assert_eq!(bv.get_unchecked(2), true);
bv.set_unchecked(1, true);
assert_eq!(bv.get_unchecked(1), true);
bv.set_unchecked(0, false);
assert_eq!(bv.get_unchecked(0), false);
}
}
#[test]
fn test_blocks_access() {
let mut bv = BitVector::new();
for i in 0..128 {
bv.push(i % 3 == 0).unwrap();
}
let blocks = bv.blocks();
assert!(blocks.len() >= 2);
// Bit 0 (i = 0, divisible by 3) must be the LSB of the first block.
let first_block = blocks[0];
let first_bit = first_block & 1;
assert_eq!(first_bit, 1); }
#[test]
fn test_reserve() {
let mut bv = BitVector::new();
bv.reserve(1000).unwrap();
assert!(bv.capacity() >= 1000);
for i in 0..1000 {
bv.push(i % 7 == 0).unwrap();
}
assert_eq!(bv.len(), 1000);
}
#[test]
fn test_partial_block_operations() {
// Exactly one full block, then spill into a partial second block.
let mut bv = BitVector::new();
for i in 0..64 {
bv.push(i % 2 == 0).unwrap();
}
assert_eq!(bv.len(), 64);
assert_eq!(bv.count_ones(), 32);
bv.push(true).unwrap();
bv.push(false).unwrap();
bv.push(true).unwrap();
assert_eq!(bv.len(), 67);
assert_eq!(bv.count_ones(), 34);
assert_eq!(bv.rank1(64), 32);
assert_eq!(bv.rank1(67), 34);
}
#[test]
fn test_edge_case_resize() {
let mut bv = BitVector::new();
bv.resize(0, true).unwrap();
assert_eq!(bv.len(), 0);
assert!(bv.is_empty());
bv.resize(10, true).unwrap();
assert_eq!(bv.len(), 10);
assert_eq!(bv.count_ones(), 10);
bv.resize(5, false).unwrap();
assert_eq!(bv.len(), 5);
assert_eq!(bv.count_ones(), 5);
bv.resize(15, false).unwrap();
assert_eq!(bv.len(), 15);
assert_eq!(bv.count_ones(), 5); }
#[test]
fn test_boundary_conditions() {
// First block all ones, second block all zeros.
let mut bv = BitVector::new();
for i in 0..128 {
bv.push(i < 64).unwrap(); }
assert_eq!(bv.rank1(0), 0);
assert_eq!(bv.rank1(64), 64);
assert_eq!(bv.rank1(128), 64);
assert_eq!(bv.rank0(64), 0);
assert_eq!(bv.rank0(128), 64);
}
#[test]
fn test_error_conditions() {
let mut bv = BitVector::new();
bv.push(true).unwrap();
bv.push(false).unwrap();
// set() past len (including len itself) must fail.
assert!(bv.set(5, true).is_err());
assert!(bv.set(2, true).is_err());
assert!(bv.set(0, false).is_ok());
assert_eq!(bv.get(0), Some(false));
}
#[test]
fn test_clone_and_equality_edge_cases() {
let mut original = BitVector::new();
let cloned_empty = original.clone();
assert_eq!(original, cloned_empty);
// 130 bits exercises a partial third block in clone/eq.
for i in 0..130 {
original.push(i % 5 == 0).unwrap();
}
let cloned = original.clone();
assert_eq!(original, cloned);
let mut shorter = original.clone();
shorter.resize(100, false).unwrap();
assert_ne!(original, shorter);
let mut different = original.clone();
different.set(50, !different.get(50).unwrap()).unwrap();
assert_ne!(original, different);
}
#[test]
fn test_debug_output() {
let mut bv = BitVector::new();
for i in 0..10 {
bv.push(i % 2 == 0).unwrap();
}
let debug_str = format!("{:?}", bv);
assert!(debug_str.contains("BitVector"));
assert!(debug_str.contains("len: 10"));
assert!(debug_str.contains("1010101010"));
// Vectors longer than 64 bits get an elided listing.
let mut long_bv = BitVector::new();
for i in 0..100 {
long_bv.push(i % 2 == 0).unwrap();
}
let long_debug = format!("{:?}", long_bv);
assert!(long_debug.contains("..."));
assert!(long_debug.contains("len: 100"));
}
#[test]
fn test_default() {
let bv = BitVector::default();
assert!(bv.is_empty());
assert_eq!(bv.len(), 0);
assert_eq!(bv.capacity(), 0);
}
#[test]
fn test_ensure_set1_basic() {
let mut bv = BitVector::new();
bv.ensure_set1(0).unwrap();
assert_eq!(bv.len(), 1);
assert_eq!(bv.get(0), Some(true));
// Growing sets the target bit and zero-fills the gap.
bv.ensure_set1(5).unwrap();
assert_eq!(bv.len(), 6);
assert_eq!(bv.get(5), Some(true));
for i in 1..5 {
assert_eq!(bv.get(i), Some(false));
}
// Idempotent when the bit is already set.
bv.ensure_set1(5).unwrap();
assert_eq!(bv.len(), 6);
assert_eq!(bv.get(5), Some(true));
}
#[test]
fn test_ensure_set1_sequential() {
let mut bv = BitVector::new();
for i in (0..10).step_by(2) {
bv.ensure_set1(i).unwrap();
}
assert_eq!(bv.len(), 9);
assert_eq!(bv.count_ones(), 5);
for i in 0..9 {
assert_eq!(bv.get(i), Some(i % 2 == 0));
}
}
#[test]
fn test_ensure_set1_across_blocks() {
// Exercises both sides of every 64-bit block boundary.
let mut bv = BitVector::new();
bv.ensure_set1(0).unwrap();
bv.ensure_set1(63).unwrap();
bv.ensure_set1(64).unwrap();
bv.ensure_set1(127).unwrap();
bv.ensure_set1(128).unwrap();
assert_eq!(bv.len(), 129);
assert_eq!(bv.count_ones(), 5);
assert_eq!(bv.get(0), Some(true));
assert_eq!(bv.get(63), Some(true));
assert_eq!(bv.get(64), Some(true));
assert_eq!(bv.get(127), Some(true));
assert_eq!(bv.get(128), Some(true));
}
#[test]
fn test_fast_ensure_set1_sequential() {
let mut bv = BitVector::new();
for i in 0..100 {
bv.fast_ensure_set1(i).unwrap();
}
assert_eq!(bv.len(), 100);
assert_eq!(bv.count_ones(), 100); }
#[test]
fn test_fast_ensure_set1_sparse() {
let mut bv = BitVector::new();
for i in (0..1000).step_by(10) {
bv.fast_ensure_set1(i).unwrap();
}
// Last index set is 990, so len is 991.
assert_eq!(bv.len(), 991);
assert_eq!(bv.count_ones(), 100);
for i in (0..1000).step_by(10) {
if i < bv.len() {
assert_eq!(bv.get(i), Some(true), "bit {} should be set", i);
}
}
}
#[test]
fn test_ensure_set1_vs_fast_ensure_set1_equivalence() {
// The fast path must produce bit-identical results to the plain path.
let indices: Vec<usize> = vec![0, 5, 10, 63, 64, 100, 127, 128, 200];
let mut bv1 = BitVector::new();
let mut bv2 = BitVector::new();
for &i in &indices {
bv1.ensure_set1(i).unwrap();
bv2.fast_ensure_set1(i).unwrap();
}
assert_eq!(bv1.len(), bv2.len());
assert_eq!(bv1.count_ones(), bv2.count_ones());
for i in 0..bv1.len() {
assert_eq!(bv1.get(i), bv2.get(i), "mismatch at bit {}", i);
}
}
#[test]
fn test_ensure_set1_integer_set_use_case() {
// Using the bit vector as a sparse integer set.
let integers = vec![3, 7, 15, 31, 63];
let mut bv = BitVector::new();
for &i in &integers {
bv.fast_ensure_set1(i).unwrap();
}
assert_eq!(bv.get(3), Some(true));
assert_eq!(bv.get(7), Some(true));
assert_eq!(bv.get(15), Some(true));
assert_eq!(bv.get(31), Some(true));
assert_eq!(bv.get(63), Some(true));
assert_eq!(bv.get(0), Some(false));
assert_eq!(bv.get(4), Some(false));
assert_eq!(bv.get(8), Some(false));
}
}