use ::core::{
fmt,
mem::{align_of, size_of},
ptr::{self, NonNull},
};
use alloc::alloc::{alloc, alloc_zeroed, dealloc, handle_alloc_error, realloc, Layout};
use super::Core;
/// A storage core that tracks slot occupancy in a separate bit vector.
///
/// Two parallel allocations are kept in sync:
/// - `elem_ptr`: a buffer of `cap` (possibly uninitialized) `T` slots,
/// - `bit_ptr`: a buffer of `num_usizes_for(cap)` usizes holding one bit per
///   slot (bit set ⇔ the slot contains an initialized element).
pub struct BitVecCore<T> {
    // Element buffer; dangling while `cap == 0` and never dereferenced then.
    elem_ptr: NonNull<T>,
    // Occupancy bitmap; dangling while `cap == 0`.
    bit_ptr: NonNull<usize>,
    // Number of slots allocated in both buffers (0 = nothing allocated).
    cap: usize,
    // Number of slots in use; `len <= cap`. NOTE(review): `clear` only scans
    // indices < len, so the invariant appears to be that elements only ever
    // exist below `len` — confirm against the `Core` trait contract.
    len: usize,
}
/// Number of bits in one `usize` — the block size of the occupancy bitmap.
const BITS_PER_USIZE: usize = size_of::<usize>() * 8;
impl<T> BitVecCore<T> {
    /// Frees both buffers and resets `cap` to 0; a no-op when nothing is
    /// allocated. Does NOT drop elements — the caller must do that first.
    unsafe fn dealloc(&mut self) {
        if self.cap == 0 {
            return;
        }

        // ZSTs never received an element allocation, so there is nothing
        // to free on that side.
        if size_of::<T>() != 0 {
            dealloc(self.elem_ptr.as_ptr() as *mut _, self.old_elem_layout());
        }
        dealloc(self.bit_ptr.as_ptr() as *mut _, self.old_bit_layout());

        // Mark as unallocated so a second call is a no-op.
        self.cap = 0;
    }

    /// Layout of the currently allocated element buffer (`cap` slots of `T`).
    /// Only meaningful while `cap != 0`.
    unsafe fn old_elem_layout(&self) -> Layout {
        Layout::from_size_align_unchecked(size_of::<T>() * self.cap, align_of::<T>())
    }

    /// Layout of the currently allocated occupancy bitmap.
    /// Only meaningful while `cap != 0`.
    unsafe fn old_bit_layout(&self) -> Layout {
        Layout::from_size_align_unchecked(
            num_usizes_for(self.cap) * size_of::<usize>(),
            align_of::<usize>(),
        )
    }
}
impl<T> Core<T> for BitVecCore<T> {
    /// Creates an empty, unallocated core. The dangling pointers are never
    /// dereferenced while `cap == 0`.
    fn new() -> Self {
        Self {
            elem_ptr: NonNull::dangling(),
            bit_ptr: NonNull::dangling(),
            cap: 0,
            len: 0,
        }
    }

    fn len(&self) -> usize {
        self.len
    }

    unsafe fn set_len(&mut self, new_len: usize) {
        debug_assert!(new_len <= self.cap());
        self.len = new_len;
    }

    fn cap(&self) -> usize {
        self.cap
    }

    /// Reallocates both buffers to hold exactly `new_cap` slots.
    ///
    /// - `new_cap == 0` deallocates everything.
    /// - Newly added bitmap blocks are zeroed ("slot empty"); new element
    ///   slots stay uninitialized.
    /// - Panics on size overflow; aborts via `handle_alloc_error` on
    ///   allocation failure.
    #[inline(never)]
    #[cold]
    unsafe fn realloc(&mut self, new_cap: usize) {
        debug_assert!(new_cap >= self.len());
        debug_assert!(new_cap <= isize::max_value() as usize);

        // Outlined cold panic path keeps the allocation path small.
        // (Fixed: the message previously lacked its closing parenthesis.)
        #[inline(never)]
        #[cold]
        fn capacity_overflow() -> ! {
            panic!("capacity overflow in `stable_vec::BitVecCore::realloc` (attempt \
                to allocate more than `usize::MAX` bytes)");
        }

        // `realloc(0)` means: just deallocate everything.
        if new_cap == 0 {
            self.dealloc();
            return;
        }

        // (1) Element buffer. ZSTs need no element storage at all.
        if size_of::<T>() != 0 {
            let size = new_cap.checked_mul(size_of::<T>())
                .unwrap_or_else(|| capacity_overflow());

            let new_elem_layout = Layout::from_size_align_unchecked(size, align_of::<T>());
            let ptr = if self.cap == 0 {
                // Nothing allocated yet -> fresh allocation.
                alloc(new_elem_layout)
            } else {
                realloc(self.elem_ptr.as_ptr() as *mut _, self.old_elem_layout(), size)
            };

            if ptr.is_null() {
                handle_alloc_error(new_elem_layout);
            }
            self.elem_ptr = NonNull::new_unchecked(ptr as *mut _);
        }

        // (2) Occupancy bitmap. Always allocated, even for ZSTs.
        {
            let size = size_of::<usize>() * num_usizes_for(new_cap);
            let new_bit_layout = Layout::from_size_align_unchecked(size, align_of::<usize>());
            let ptr = if self.cap == 0 {
                // A fresh bitmap must start all-zero ("every slot empty").
                alloc_zeroed(new_bit_layout)
            } else {
                realloc(self.bit_ptr.as_ptr() as *mut _, self.old_bit_layout(), size)
            };
            let ptr = ptr as *mut usize;

            if ptr.is_null() {
                handle_alloc_error(new_bit_layout);
            }

            // `realloc` does not zero freshly grown memory, so clear any
            // bitmap blocks beyond the previously initialized ones.
            if self.cap != 0 {
                let initialized_usizes = num_usizes_for(self.cap);
                let new_usizes = num_usizes_for(new_cap);
                if new_usizes > initialized_usizes {
                    ptr::write_bytes(
                        ptr.add(initialized_usizes),
                        0,
                        new_usizes - initialized_usizes,
                    );
                }
            }

            self.bit_ptr = NonNull::new_unchecked(ptr as *mut _);
        }

        self.cap = new_cap;
    }

    unsafe fn has_element_at(&self, idx: usize) -> bool {
        debug_assert!(idx < self.cap());

        // Locate the bit for `idx`: bitmap block index + position inside it.
        let usize_pos = idx / BITS_PER_USIZE;
        let bit_pos = idx % BITS_PER_USIZE;
        let block = *self.bit_ptr.as_ptr().add(usize_pos);
        ((block >> bit_pos) & 0b1) != 0
    }

    unsafe fn insert_at(&mut self, idx: usize, elem: T) {
        debug_assert!(idx < self.cap());
        debug_assert!(self.has_element_at(idx) == false);

        // Write the element, then mark its slot as occupied.
        ptr::write(self.elem_ptr.as_ptr().add(idx), elem);
        let usize_pos = idx / BITS_PER_USIZE;
        let bit_pos = idx % BITS_PER_USIZE;
        let mask = 1 << bit_pos;
        *self.bit_ptr.as_ptr().add(usize_pos) |= mask;
    }

    unsafe fn remove_at(&mut self, idx: usize) -> T {
        debug_assert!(idx < self.cap());
        debug_assert!(self.has_element_at(idx));

        // Mark the slot as empty first, then move the element out.
        let usize_pos = idx / BITS_PER_USIZE;
        let bit_pos = idx % BITS_PER_USIZE;
        let mask = !(1 << bit_pos);
        *self.bit_ptr.as_ptr().add(usize_pos) &= mask;
        ptr::read(self.elem_ptr.as_ptr().add(idx))
    }

    unsafe fn get_unchecked(&self, idx: usize) -> &T {
        debug_assert!(idx < self.cap());
        debug_assert!(self.has_element_at(idx));
        &*self.elem_ptr.as_ptr().add(idx)
    }

    unsafe fn get_unchecked_mut(&mut self, idx: usize) -> &mut T {
        debug_assert!(idx < self.cap());
        debug_assert!(self.has_element_at(idx));
        &mut *self.elem_ptr.as_ptr().add(idx)
    }

    /// Drops all existing elements, clears the bitmap and sets `len` to 0.
    /// Capacity (and both allocations) are kept.
    fn clear(&mut self) {
        unsafe {
            // Drop every element present below `len`.
            for idx in 0..self.len {
                if self.has_element_at(idx) {
                    ptr::drop_in_place(self.get_unchecked_mut(idx));
                }
            }

            // Zero every bitmap block that could contain a set bit.
            for bit_idx in 0..num_usizes_for(self.len) {
                *self.bit_ptr.as_ptr().add(bit_idx) = 0;
            }

            self.len = 0;
        }
    }

    /// Swaps slots `a` and `b`, including their occupancy bits.
    unsafe fn swap(&mut self, a: usize, b: usize) {
        let a_existed = self.has_element_at(a);
        let b_existed = self.has_element_at(b);

        // If exactly one slot is occupied, both bits must flip; if both or
        // neither are occupied, neither bit changes. XORing each bit with
        // `a_existed ^ b_existed` does exactly that, branch-free.
        let swap_bit = (a_existed ^ b_existed) as usize;

        let usize_pos = a / BITS_PER_USIZE;
        let bit_pos = a % BITS_PER_USIZE;
        let mask = swap_bit << bit_pos;
        *self.bit_ptr.as_ptr().add(usize_pos) ^= mask;

        let usize_pos = b / BITS_PER_USIZE;
        let bit_pos = b % BITS_PER_USIZE;
        let mask = swap_bit << bit_pos;
        *self.bit_ptr.as_ptr().add(usize_pos) ^= mask;

        // Swap the raw slot contents unconditionally (uninitialized bytes in
        // empty slots are swapped as-is, matching the original behavior).
        ptr::swap(
            self.elem_ptr.as_ptr().add(a),
            self.elem_ptr.as_ptr().add(b),
        );
    }
}
impl<T> Drop for BitVecCore<T> {
    fn drop(&mut self) {
        // First run every remaining element's destructor...
        self.clear();

        // ...then release the raw allocations. Sound because `clear` left no
        // live elements, and `dealloc` is a no-op when `cap == 0`.
        unsafe { self.dealloc() }
    }
}
impl<T: Clone> Clone for BitVecCore<T> {
    fn clone(&self) -> Self {
        // Build the clone from scratch: allocate to the same capacity, clone
        // each existing element, then copy the occupancy bitmap wholesale.
        let mut out = Self::new();
        if self.cap != 0 {
            unsafe {
                out.realloc(self.cap);
                // Clone the actual element data. Skipped for ZSTs, which have
                // no element storage.
                // NOTE(review): for ZSTs this never calls `T::clone`, so any
                // side effects of a ZST's `Clone` impl are skipped — confirm
                // this matches the intended `Clone` contract.
                if size_of::<T>() != 0 {
                    let mut idx = 0;
                    while let Some(next) = self.first_filled_slot_from(idx) {
                        let clone = self.get_unchecked(next).clone();
                        ptr::write(out.elem_ptr.as_ptr().add(next), clone);
                        idx = next + 1;
                    }
                }
                // The bitmap is copied only after all clones succeeded: if a
                // `clone()` above panics, `out` still has `len == 0` and an
                // all-zero bitmap (fresh buffers come from `alloc_zeroed`),
                // so dropping `out` cannot touch the partially written
                // elements — they leak, but there is no double drop.
                ptr::copy_nonoverlapping(
                    self.bit_ptr.as_ptr(),
                    out.bit_ptr.as_ptr(),
                    num_usizes_for(self.cap)
                );
                out.set_len(self.len);
            }
        }
        out
    }
}
impl<T> fmt::Debug for BitVecCore<T> {
    /// Shows only the bookkeeping fields; element data is not printed
    /// (so `T` need not be `Debug`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut dbg = f.debug_struct("BitVecCore");
        dbg.field("len", &self.len());
        dbg.field("cap", &self.cap());
        dbg.finish()
    }
}
// SAFETY: `BitVecCore<T>` owns its allocations outright — the raw pointers
// are private, never aliased externally, and only used as owned storage —
// so it can be sent/shared across threads exactly when `T` can.
unsafe impl<T: Send> Send for BitVecCore<T> {}
unsafe impl<T: Sync> Sync for BitVecCore<T> {}
/// Returns how many `usize` bitmap blocks are needed to store `cap` bits,
/// i.e. `ceil(cap / BITS_PER_USIZE)`.
#[inline(always)]
fn num_usizes_for(cap: usize) -> usize {
    // Overflow-free ceiling division. The previous form
    // `(cap + (BITS_PER_USIZE - 1)) / BITS_PER_USIZE` wraps for
    // `cap > usize::MAX - (BITS_PER_USIZE - 1)`; this one is exact for every
    // input and identical elsewhere.
    cap / BITS_PER_USIZE + (cap % BITS_PER_USIZE != 0) as usize
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Checks the bitmap block count around word-size boundaries on both
    /// common pointer widths.
    #[test]
    fn num_usizes() {
        assert_eq!(num_usizes_for(0), 0);
        assert_eq!(num_usizes_for(1), 1);
        assert_eq!(num_usizes_for(2), 1);
        assert_eq!(num_usizes_for(3), 1);

        #[cfg(target_pointer_width = "64")]
        {
            assert_eq!(num_usizes_for(63), 1);
            assert_eq!(num_usizes_for(64), 1);
            assert_eq!(num_usizes_for(65), 2);
            assert_eq!(num_usizes_for(66), 2);
            // Was a duplicated `66` check; `67` completes the triple, matching
            // the 33/34/35 pattern of the 32-bit section below.
            assert_eq!(num_usizes_for(67), 2);
            assert_eq!(num_usizes_for(255), 4);
            assert_eq!(num_usizes_for(256), 4);
            assert_eq!(num_usizes_for(257), 5);
            assert_eq!(num_usizes_for(258), 5);
            assert_eq!(num_usizes_for(259), 5);
        }
        #[cfg(target_pointer_width = "32")]
        {
            assert_eq!(num_usizes_for(31), 1);
            assert_eq!(num_usizes_for(32), 1);
            assert_eq!(num_usizes_for(33), 2);
            assert_eq!(num_usizes_for(34), 2);
            assert_eq!(num_usizes_for(35), 2);
            assert_eq!(num_usizes_for(127), 4);
            assert_eq!(num_usizes_for(128), 4);
            assert_eq!(num_usizes_for(129), 5);
            assert_eq!(num_usizes_for(130), 5);
            assert_eq!(num_usizes_for(131), 5);
        }
    }
}