use std::cmp::max;
use std::fmt;
use std::hash;
use std::iter::{DoubleEndedIterator, ExactSizeIterator, FromIterator};
use std::mem::{forget, replace, size_of};
use std::ops::{Range, Index};
use std::slice;
/// Create a `SmallBitVec` from a list of bits.
///
/// `sbvec![val; n]` repeats `val` `n` times (via `from_elem`);
/// `sbvec![a, b, c]` collects the listed booleans. A trailing comma
/// is accepted and forwarded to the comma-less arm.
#[macro_export]
macro_rules! sbvec {
    ($elem:expr; $n:expr) => (
        $crate::SmallBitVec::from_elem($n, $elem)
    );
    ($($x:expr),*) => (
        [$($x),*].iter().cloned().collect::<$crate::SmallBitVec>()
    );
    ($($x:expr,)*) => (
        sbvec![$($x),*]
    );
}
// Unit tests live in a sibling file; compiled only under `cargo test`.
#[cfg(test)]
mod tests;
/// A bit vector that stores short payloads inline in a single word.
///
/// `data` holds one of two representations, distinguished by the low
/// `HEAP_FLAG` bit:
/// * inline (low bit 0): bits are packed from the most-significant bit
///   downward, followed by a single sentinel `1` bit whose position
///   encodes the length; all bits below the sentinel are zero.
/// * heap (low bit 1): the remaining bits are a pointer to an allocation
///   laid out as a `Header` followed by a `Storage` buffer.
pub struct SmallBitVec {
    data: usize,
}
/// Total number of bits in the inline word (the machine word size).
fn inline_bits() -> usize {
    8 * size_of::<usize>()
}
/// Number of data bits the inline form can hold: two bits are reserved,
/// one for the `HEAP_FLAG` tag and one for the length sentinel.
fn inline_capacity() -> usize {
    inline_bits() - 2
}
/// Bit position (counting from the LSB) of inline bit `n`; bit 0 lives
/// just below the most-significant bit.
fn inline_shift(n: usize) -> usize {
    debug_assert!(n <= inline_capacity());
    inline_bits() - 1 - n
}
/// Single-bit mask selecting inline bit `n`.
fn inline_index(n: usize) -> usize {
    1 << inline_shift(n)
}
/// Mask with the top `n` bits set — i.e. inline bits `0..n`.
/// Handled separately for `n == 0` because a shift by the full word
/// width would be undefined.
fn inline_ones(n: usize) -> usize {
    match n {
        0 => 0,
        _ => !0 << (inline_bits() - n),
    }
}
/// Low-bit tag: set on `data` when it is a pointer to a heap allocation.
const HEAP_FLAG: usize = 1;
/// Unit of heap storage.
type Storage = usize;

/// Number of bits held by one `Storage` word.
#[inline(always)]
fn bits_per_storage() -> usize {
    8 * size_of::<Storage>()
}
/// Metadata stored at the front of a heap allocation, immediately
/// followed by the `Storage` buffer.
struct Header {
    // Number of bits currently in use.
    len: Storage,
    // Number of `Storage` words in the buffer that follows.
    buffer_len: Storage,
}
impl Header {
    /// Allocate a header plus a buffer large enough for `cap` bits, with
    /// every storage word initialized to all-ones if `val` is true (all
    /// zeros otherwise), and record `len` as the bit count in use.
    ///
    /// The allocation is created as a `Vec<Storage>` and leaked with
    /// `forget`; it is reclaimed by rebuilding the `Vec` (see `Drop`).
    fn new(cap: usize, len: usize, val: bool) -> *mut Header {
        let alloc_len = header_len() + buffer_len(cap);
        let init = if val { !0 } else { 0 };
        let v: Vec<Storage> = vec![init; alloc_len];
        // Record however much buffer space the Vec actually allocated,
        // which may exceed the amount requested.
        let buffer_len = v.capacity() - header_len();
        let header_ptr = v.as_ptr() as *mut Header;
        forget(v);
        unsafe {
            (*header_ptr).len = len;
            (*header_ptr).buffer_len = buffer_len;
        }
        header_ptr
    }
}
/// Number of `Storage` words occupied by the `Header` at the front of
/// a heap allocation.
fn header_len() -> usize {
    size_of::<Header>() / size_of::<Storage>()
}
fn buffer_len(cap: usize) -> usize {
(cap + bits_per_storage() - 1) / bits_per_storage()
}
impl SmallBitVec {
/// Create an empty vector, stored inline.
#[inline]
pub fn new() -> SmallBitVec {
    // Start with only the length-0 sentinel bit set.
    let data = inline_index(0);
    SmallBitVec { data }
}
/// Create a vector containing `len` copies of `val`.
pub fn from_elem(len: usize, val: bool) -> SmallBitVec {
    if len > inline_capacity() {
        // Too large for the inline form; allocate on the heap.
        let header_ptr = Header::new(len, len, val);
        return SmallBitVec {
            data: (header_ptr as usize) | HEAP_FLAG,
        };
    }
    let data = if val {
        // `len` one-bits followed immediately by the sentinel bit.
        inline_ones(len + 1)
    } else {
        // All zeros; the sentinel alone marks the length.
        inline_index(len)
    };
    SmallBitVec { data }
}
/// Create an empty vector with room for at least `cap` bits before
/// any reallocation is needed.
pub fn with_capacity(cap: usize) -> SmallBitVec {
    if cap > inline_capacity() {
        let header_ptr = Header::new(cap, 0, false);
        return SmallBitVec {
            data: (header_ptr as usize) | HEAP_FLAG,
        };
    }
    // Anything that fits inline needs no allocation at all.
    SmallBitVec::new()
}
/// The number of bits stored in the vector.
#[inline]
pub fn len(&self) -> usize {
    if self.is_heap() {
        self.header().len
    } else {
        // The sentinel is the lowest set bit (everything below it is
        // kept zero by set_len), so its position encodes the length.
        inline_bits() - self.data.trailing_zeros() as usize - 1
    }
}
/// Returns `true` if the vector holds no bits.
#[inline]
pub fn is_empty(&self) -> bool {
    self.len() == 0
}
/// The number of bits the vector can hold without reallocating.
#[inline]
pub fn capacity(&self) -> usize {
    if self.is_heap() {
        self.header().buffer_len * bits_per_storage()
    } else {
        inline_capacity()
    }
}
/// Return the bit at index `n`, or `None` if `n` is out of bounds.
#[inline]
pub fn get(&self, n: usize) -> Option<bool> {
    match n < self.len() {
        // The index was just bounds-checked, so the unchecked read is fine.
        true => Some(unsafe { self.get_unchecked(n) }),
        false => None,
    }
}
/// Return the bit at index `n` without bounds checking.
///
/// # Safety
///
/// The caller must guarantee `n < self.len()`.
pub unsafe fn get_unchecked(&self, n: usize) -> bool {
    if self.is_inline() {
        self.data & inline_index(n) != 0
    } else {
        // Heap bits are packed LSB-first within each storage word.
        let buffer = self.buffer();
        let i = n / bits_per_storage();
        let offset = n % bits_per_storage();
        *buffer.get_unchecked(i) & (1 << offset) != 0
    }
}
/// Set the bit at index `n` to `val`.
///
/// Panics if `n` is out of bounds.
pub fn set(&mut self, n: usize, val: bool) {
    assert!(n < self.len(), "Index {} out of bounds", n);
    unsafe { self.set_unchecked(n, val); }
}
/// Set the bit at index `n` to `val` without bounds checking.
///
/// # Safety
///
/// The caller must guarantee `n < self.len()`.
pub unsafe fn set_unchecked(&mut self, n: usize, val: bool) {
    if self.is_inline() {
        if val {
            self.data |= inline_index(n);
        } else {
            self.data &= !inline_index(n);
        }
    } else {
        // Heap bits are packed LSB-first within each storage word.
        let buffer = self.buffer_mut();
        let i = n / bits_per_storage();
        let offset = n % bits_per_storage();
        if val {
            *buffer.get_unchecked_mut(i) |= 1 << offset;
        } else {
            *buffer.get_unchecked_mut(i) &= !(1 << offset);
        }
    }
}
/// Append a bit to the end of the vector, growing the buffer if full.
#[inline]
pub fn push(&mut self, val: bool) {
    let idx = self.len();
    if idx == self.capacity() {
        self.reserve(1);
    }
    unsafe {
        // Grow first: in the inline form set_len relocates the sentinel
        // over the new slot, which set_unchecked then overwrites.
        self.set_len(idx + 1);
        self.set_unchecked(idx, val);
    }
}
/// Remove and return the last bit, or `None` if the vector is empty.
pub fn pop(&mut self) -> Option<bool> {
    // checked_sub doubles as the emptiness test.
    let new_len = self.len().checked_sub(1)?;
    unsafe {
        // new_len is a valid index into the current contents.
        let val = self.get_unchecked(new_len);
        self.set_len(new_len);
        Some(val)
    }
}
/// Remove and return the bit at index `idx`, shifting all later bits
/// toward the front.
///
/// Panics (via `Index`) if `idx` is out of bounds.
pub fn remove(&mut self, idx: usize) -> bool {
    let len = self.len();
    let val = self[idx];
    if self.is_inline() {
        // Shift every bit after idx (including the sentinel) one place
        // toward the MSB; moving the sentinel up implicitly decrements
        // the stored length, so no explicit set_len is needed here.
        let mask = !inline_ones(idx);
        let new_vals = (self.data & mask) << 1;
        self.data = (self.data & !mask) | (new_vals & mask);
    } else {
        let first = idx / bits_per_storage();
        let offset = idx % bits_per_storage();
        let count = buffer_len(len);
        {
            // Within the word containing idx, shift the bits at and
            // above `offset` down by one; the masked-out bit at
            // offset-1 is the removed bit itself.
            let buf = self.buffer_mut();
            let mask = !0 << offset;
            let new_vals = (buf[first] & mask) >> 1;
            buf[first] = (buf[first] & !mask) | (new_vals & mask);
        }
        for i in (first + 1)..count {
            // Carry the lowest bit of each later word into the top of
            // the previous word, then shift the word itself.
            let bit_idx = i * bits_per_storage();
            unsafe {
                let first_bit = self.get_unchecked(bit_idx);
                self.set_unchecked(bit_idx - 1, first_bit);
            }
            self.buffer_mut()[i] >>= 1;
        }
        unsafe {
            self.set_len(len - 1);
        }
    }
    val
}
/// Remove all bits, keeping the current allocation.
pub fn clear(&mut self) {
    // Zero never exceeds the capacity, so this is always valid.
    unsafe { self.set_len(0) }
}
/// Ensure capacity for at least `additional` more bits, at least
/// doubling the current capacity when growing to amortize cost.
///
/// Panics with "capacity overflow" if the required bit count does not
/// fit in a `usize`.
pub fn reserve(&mut self, additional: usize) {
    let current = self.capacity();
    let needed = self.len().checked_add(additional).expect("capacity overflow");
    if needed > current {
        let doubled = current.saturating_mul(2);
        self.reallocate(max(needed, doubled));
    }
}
/// Set the length without touching the stored bits.
///
/// # Safety
///
/// `len` must not exceed the capacity. When growing, the newly exposed
/// slots hold whatever bits were already in the buffer (inline, the old
/// sentinel becomes a data bit), so callers must initialize them.
unsafe fn set_len(&mut self, len: usize) {
    debug_assert!(len <= self.capacity());
    if self.is_inline() {
        // Place the sentinel for the new length and clear everything
        // below it, keeping the inline word canonical.
        let sentinel = inline_index(len);
        let mask = !(sentinel - 1);
        self.data |= sentinel;
        self.data &= mask;
    } else {
        self.header_mut().len = len;
    }
}
/// Return an iterator over the bits, front to back.
pub fn iter(&self) -> Iter {
    Iter {
        range: 0..self.len(),
        vec: self,
    }
}
/// Returns `true` if every bit is 0 (vacuously true when empty).
pub fn all_false(&self) -> bool {
    let mut len = self.len();
    if len == 0 {
        return true
    }
    if self.is_inline() {
        // Select only the `len` data bits, excluding the sentinel.
        let mask = inline_ones(len);
        self.data & mask == 0
    } else {
        for &storage in self.buffer() {
            if len >= bits_per_storage() {
                // A fully used word must be entirely zero.
                if storage != 0 {
                    return false
                }
                len -= bits_per_storage();
            } else {
                // Final, partially used word: check only the low `len`
                // bits; unused capacity beyond them is ignored.
                let mask = (1 << len) - 1;
                if storage & mask != 0 {
                    return false
                }
                break
            }
        }
        true
    }
}
/// Returns `true` if every bit is 1 (vacuously true when empty).
pub fn all_true(&self) -> bool {
    let mut len = self.len();
    if len == 0 {
        return true
    }
    if self.is_inline() {
        // Select only the `len` data bits, excluding the sentinel.
        let mask = inline_ones(len);
        self.data & mask == mask
    } else {
        for &storage in self.buffer() {
            if len >= bits_per_storage() {
                // A fully used word must be entirely ones.
                if storage != !0 {
                    return false
                }
                len -= bits_per_storage();
            } else {
                // Final, partially used word: check only the low `len`
                // bits; unused capacity beyond them is ignored.
                let mask = (1 << len) - 1;
                if storage & mask != mask {
                    return false
                }
                break
            }
        }
        true
    }
}
/// Grow the buffer so it can hold at least `cap` bits; no-op when the
/// current capacity already suffices.
fn reallocate(&mut self, cap: usize) {
    let old_cap = self.capacity();
    if cap <= old_cap {
        return
    }
    assert!(self.len() <= cap);
    if self.is_heap() {
        let old_buffer_len = self.header().buffer_len;
        let new_buffer_len = buffer_len(cap);
        let old_alloc_len = header_len() + old_buffer_len;
        let new_alloc_len = header_len() + new_buffer_len;
        // Re-adopt the leaked allocation as a Vec so it can be resized.
        let old_ptr = self.header_raw() as *mut Storage;
        let mut v = unsafe {
            Vec::from_raw_parts(old_ptr, old_alloc_len, old_alloc_len)
        };
        v.resize(new_alloc_len, 0);
        // NOTE(review): shrink_to_fit only *requests* capacity == len;
        // if the allocator keeps extra capacity, the alloc_len that Drop
        // later passes to Vec::from_raw_parts may understate the true
        // capacity — TODO confirm this invariant.
        v.shrink_to_fit();
        self.data = v.as_ptr() as usize | HEAP_FLAG;
        forget(v);
        self.header_mut().buffer_len = new_buffer_len;
    } else {
        // Inline -> heap: allocate the new buffer, then copy the bits
        // across one at a time.
        let old_self = replace(self, SmallBitVec::with_capacity(cap));
        unsafe {
            self.set_len(old_self.len());
            for i in 0..old_self.len() {
                self.set_unchecked(i, old_self.get_unchecked(i));
            }
        }
    }
}
/// Pointer to the heap allocation, or `None` for the inline form.
pub fn heap_ptr(&self) -> Option<*const usize> {
    if self.is_heap() {
        // Strip the tag bit to recover the real address.
        Some((self.data & !HEAP_FLAG) as *const Storage)
    } else {
        None
    }
}
// True if the bits are stored inline in `data` (tag bit clear).
fn is_inline(&self) -> bool {
    self.data & HEAP_FLAG == 0
}
fn is_heap(&self) -> bool {
!self.is_inline()
}
/// Raw pointer to the heap `Header`.
///
/// Panics if the vector is in the inline form.
fn header_raw(&self) -> *mut Header {
    assert!(self.is_heap());
    (self.data & !HEAP_FLAG) as *mut Header
}
// Mutable reference to the heap header; panics (via header_raw) if inline.
fn header_mut(&mut self) -> &mut Header {
    unsafe { &mut *self.header_raw() }
}
// Shared reference to the heap header; panics (via header_raw) if inline.
fn header(&self) -> &Header {
    unsafe { &*self.header_raw() }
}
fn buffer_raw(&self) -> *mut [Storage] {
unsafe {
let header_ptr = self.header_raw();
let buffer_len = (*header_ptr).buffer_len;
let buffer_ptr = (header_ptr as *mut Storage)
.offset((size_of::<Header>() / size_of::<Storage>()) as isize);
slice::from_raw_parts_mut(buffer_ptr, buffer_len)
}
}
// Mutable view of the heap storage buffer.
fn buffer_mut(&mut self) -> &mut [Storage] {
    unsafe { &mut *self.buffer_raw() }
}
// Shared view of the heap storage buffer.
fn buffer(&self) -> &[Storage] {
    unsafe { &*self.buffer_raw() }
}
}
impl fmt::Debug for SmallBitVec {
    /// Format as a list of 0/1 digits, e.g. `[1, 0, 1]`.
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_list().entries(self.iter().map(|b| b as u8)).finish()
    }
}
impl PartialEq for SmallBitVec {
    fn eq(&self, other: &Self) -> bool {
        // Fast path: the inline word encodes both length (sentinel bit)
        // and contents, and is kept canonical by set_len, so a plain
        // word comparison is exact.
        if self.is_inline() && other.is_inline() {
            return self.data == other.data
        }
        let len = self.len();
        if len != other.len() {
            return false
        }
        // Fast path: compare heap buffers word by word, masking off the
        // unused high bits of the final partial word.
        if self.is_heap() && other.is_heap() {
            let buf0 = self.buffer();
            let buf1 = other.buffer();
            let full_blocks = len / bits_per_storage();
            let remainder = len % bits_per_storage();
            if buf0[..full_blocks] != buf1[..full_blocks] {
                return false
            }
            if remainder != 0 {
                let mask = (1 << remainder) - 1;
                if buf0[full_blocks] & mask != buf1[full_blocks] & mask {
                    return false
                }
            }
            return true
        }
        // Mixed representations: fall back to bit-by-bit comparison.
        Iterator::eq(self.iter(), other.iter())
    }
}
// Bit-vector equality is reflexive, so the full `Eq` contract holds.
impl Eq for SmallBitVec {}
impl Drop for SmallBitVec {
    fn drop(&mut self) {
        if self.is_heap() {
            unsafe {
                // Rebuild the Vec that Header::new leaked; letting it
                // drop at the end of this statement frees the allocation.
                let header_ptr = self.header_raw();
                let alloc_ptr = header_ptr as *mut Storage;
                let alloc_len = header_len() + (*header_ptr).buffer_len;
                Vec::from_raw_parts(alloc_ptr, alloc_len, alloc_len);
            }
        }
    }
}
impl Clone for SmallBitVec {
    fn clone(&self) -> Self {
        if self.is_inline() {
            // The inline form is a plain word; copy it directly.
            return SmallBitVec { data: self.data }
        }
        // Copy the entire allocation (header + buffer) into a fresh Vec,
        // then leak it exactly like Header::new does. The copied header
        // carries the original len and buffer_len values.
        let buffer_len = self.header().buffer_len;
        let alloc_len = header_len() + buffer_len;
        let ptr = self.header_raw() as *mut Storage;
        let raw_allocation = unsafe {
            slice::from_raw_parts(ptr, alloc_len)
        };
        let v = raw_allocation.to_vec();
        let header_ptr = v.as_ptr() as *mut Header;
        forget(v);
        SmallBitVec {
            data: (header_ptr as usize) | HEAP_FLAG
        }
    }
}
impl Index<usize> for SmallBitVec {
    type Output = bool;

    /// Read-only indexing; returns a reference to a promoted static
    /// `bool` since bits have no addressable storage.
    ///
    /// Panics if `i` is out of range.
    #[inline]
    fn index(&self, i: usize) -> &bool {
        assert!(i < self.len(), "index out of range");
        match self.get(i).unwrap() {
            true => &true,
            false => &false,
        }
    }
}
impl hash::Hash for SmallBitVec {
    /// Hash the length followed by each bit in order, so equal vectors
    /// hash equally regardless of representation.
    #[inline]
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.len().hash(state);
        for bit in self {
            bit.hash(state);
        }
    }
}
impl Extend<bool> for SmallBitVec {
    /// Append every bit produced by `iter`, reserving space up front
    /// from the iterator's lower size bound.
    #[inline]
    fn extend<I: IntoIterator<Item=bool>>(&mut self, iter: I) {
        let iter = iter.into_iter();
        // size_hint's lower bound is already a usize, so the old
        // `assert!(min <= usize::max_value())` could never fire and has
        // been removed as dead code; reserve() still guards against
        // genuine capacity overflow via checked_add.
        let (min, _) = iter.size_hint();
        self.reserve(min);
        for element in iter {
            self.push(element)
        }
    }
}
impl FromIterator<bool> for SmallBitVec {
    /// Build a vector by pushing every bit the iterator yields.
    #[inline]
    fn from_iter<I: IntoIterator<Item=bool>>(iter: I) -> Self {
        let mut bits = SmallBitVec::new();
        bits.extend(iter);
        bits
    }
}
impl IntoIterator for SmallBitVec {
    type Item = bool;
    type IntoIter = IntoIter;

    /// Consume the vector, yielding its bits front to back.
    #[inline]
    fn into_iter(self) -> IntoIter {
        let range = 0..self.len();
        IntoIter { vec: self, range }
    }
}
impl<'a> IntoIterator for &'a SmallBitVec {
    type Item = bool;
    type IntoIter = Iter<'a>;

    /// Borrowing iteration simply delegates to `SmallBitVec::iter`.
    #[inline]
    fn into_iter(self) -> Iter<'a> {
        self.iter()
    }
}
/// Owning iterator returned by `SmallBitVec::into_iter`.
pub struct IntoIter {
    // The vector being consumed; freed when the iterator drops.
    vec: SmallBitVec,
    // Remaining indices still to be yielded.
    range: Range<usize>,
}
impl Iterator for IntoIter {
    type Item = bool;

    #[inline]
    fn next(&mut self) -> Option<bool> {
        let idx = self.range.next()?;
        // Indices produced by the range are always in bounds.
        Some(unsafe { self.vec.get_unchecked(idx) })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.range.size_hint()
    }
}
impl DoubleEndedIterator for IntoIter {
    #[inline]
    fn next_back(&mut self) -> Option<bool> {
        let idx = self.range.next_back()?;
        // Indices produced by the range are always in bounds.
        Some(unsafe { self.vec.get_unchecked(idx) })
    }
}
// `range` always knows its exact remaining length.
impl ExactSizeIterator for IntoIter {}
/// Borrowing iterator returned by `SmallBitVec::iter`.
pub struct Iter<'a> {
    // The vector being read.
    vec: &'a SmallBitVec,
    // Remaining indices still to be yielded.
    range: Range<usize>,
}
impl<'a> Iterator for Iter<'a> {
    type Item = bool;

    #[inline]
    fn next(&mut self) -> Option<bool> {
        let idx = self.range.next()?;
        // Indices produced by the range are always in bounds.
        Some(unsafe { self.vec.get_unchecked(idx) })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.range.size_hint()
    }
}
impl<'a> DoubleEndedIterator for Iter<'a> {
    #[inline]
    fn next_back(&mut self) -> Option<bool> {
        let idx = self.range.next_back()?;
        // Indices produced by the range are always in bounds.
        Some(unsafe { self.vec.get_unchecked(idx) })
    }
}
// `range` always knows its exact remaining length.
impl<'a> ExactSizeIterator for Iter<'a> {}