use crate::{bits::CountBitVec, traits::*};
use anyhow::Result;
use common_traits::SelectInWord;
use epserde::*;
use mem_dbg::*;
/// A two-level select-zero index over a bit vector implementing
/// [`SelectZeroHinted`]: a sampled inventory of zero positions, each followed
/// by a short subinventory, is used to compute a tight starting hint for the
/// underlying hinted select-zero search.
#[derive(Epserde, Debug, Clone, MemDbg, MemSize)]
pub struct SelectZeroFixed2<
    B: SelectZeroHinted = CountBitVec,
    I: AsRef<[u64]> = Vec<u64>,
    const LOG2_ZEROS_PER_INVENTORY: usize = 10,
    const LOG2_U64_PER_SUBINVENTORY: usize = 2,
> {
    // The underlying bit vector; queries fall through to its hinted
    // select-zero once the inventory has produced a starting position.
    bits: B,
    // Flat inventory: for each group of 2^LOG2_ZEROS_PER_INVENTORY zeros, one
    // u64 absolute bit position (bit 63 flags the subinventory width) followed
    // by 2^LOG2_U64_PER_SUBINVENTORY u64 words of relative offsets, plus one
    // final sentinel position (see `new`).
    inventory: I,
}
impl<
        B: SelectZeroHinted,
        I: AsRef<[u64]>,
        const LOG2_ZEROS_PER_INVENTORY: usize,
        const LOG2_U64_PER_SUBINVENTORY: usize,
    > SelectZeroFixed2<B, I, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
    // NOTE(review): despite the `ONES_*` names, these constants count zeros of
    // the underlying bit vector — construction and queries operate on
    // complemented words. The names look inherited from a select-ones variant.

    // Zeros covered by each inventory entry.
    const ONES_PER_INVENTORY: usize = 1 << LOG2_ZEROS_PER_INVENTORY;
    // u64 words of subinventory stored after each inventory position word.
    const U64_PER_SUBINVENTORY: usize = 1 << LOG2_U64_PER_SUBINVENTORY;
    // Total u64 words per inventory entry: 1 position word + the subinventory.
    const U64_PER_INVENTORY: usize = 1 + Self::U64_PER_SUBINVENTORY;
    // Zeros covered by each subinventory slot when slots are full u64 offsets.
    const LOG2_ZEROS_PER_SUB64: usize = LOG2_ZEROS_PER_INVENTORY - LOG2_U64_PER_SUBINVENTORY;
    const ONES_PER_SUB64: usize = 1 << Self::LOG2_ZEROS_PER_SUB64;
    // Zeros covered by each subinventory slot when slots are u16 offsets
    // (four u16 per u64, hence the extra -2 in the shift).
    const LOG2_ZEROS_PER_SUB16: usize = Self::LOG2_ZEROS_PER_SUB64 - 2;
    const ONES_PER_SUB16: usize = 1 << Self::LOG2_ZEROS_PER_SUB16;
    // Mask of the low 63 bits: strips the flag bit (bit 63) that marks an
    // inventory entry whose subinventory stores u64 rather than u16 offsets.
    const INVENTORY_MASK: u64 = (1 << 63) - 1;
}
impl<
        B: SelectZeroHinted + BitLength + BitCount + AsRef<[usize]>,
        const LOG2_ZEROS_PER_INVENTORY: usize,
        const LOG2_U64_PER_SUBINVENTORY: usize,
    > SelectZeroFixed2<B, Vec<u64>, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
    /// Builds the two-level select-zero index over `bitvec`.
    ///
    /// First pass: scan the complemented words of the bit vector and record
    /// the absolute position of every `ONES_PER_INVENTORY`-th zero, leaving
    /// zeroed room for the subinventory words, then append a sentinel equal
    /// to the bit length. Second pass: fill each subinventory with offsets
    /// relative to its inventory position, as u16 values when the entry span
    /// fits in a `u16` and as u64 values (flagged via bit 63 of the position
    /// word) otherwise.
    pub fn new(bitvec: B) -> Self {
        let num_zeros = bitvec.len() - bitvec.count();
        // One entry (1 position word + subinventory) per ONES_PER_INVENTORY
        // zeros, plus the final sentinel word.
        let inventory_size = num_zeros.div_ceil(Self::ONES_PER_INVENTORY);
        let inventory_len = inventory_size * Self::U64_PER_INVENTORY + 1;
        let mut inventory = Vec::with_capacity(inventory_len);

        // First pass. Words are complemented, so `count_ones` /
        // `select_in_word` on `word` operate on the zeros of the original
        // bit vector.
        let mut zeros_so_far = 0;
        let mut next_quantum = 0;
        'outer: for (i, word) in bitvec.as_ref().iter().copied().map(|x| !x).enumerate() {
            let zeros_in_word = word.count_ones() as u64;
            // The `min` caps the count at num_zeros: the complement of the
            // last word may contain spurious ones past the end of the vector.
            while (zeros_so_far + zeros_in_word).min(num_zeros as u64) > next_quantum {
                let in_word_index = word.select_in_word((next_quantum - zeros_so_far) as usize);
                let index = (i * u64::BITS as usize) + in_word_index;
                inventory.push(index as u64);
                // Reserve zeroed space for this entry's subinventory.
                inventory.resize(inventory.len() + Self::U64_PER_SUBINVENTORY, 0);
                next_quantum += Self::ONES_PER_INVENTORY as u64;
                if next_quantum >= num_zeros as u64 {
                    break 'outer;
                }
            }
            zeros_so_far += zeros_in_word;
        }
        // Sentinel so the last inventory entry has a well-defined span.
        inventory.push(BitLength::len(&bitvec) as u64);
        debug_assert_eq!(inventory_len, inventory.len());

        // Second pass: fill the subinventories.
        for inventory_idx in 0..inventory_size {
            let start_idx = inventory_idx * Self::U64_PER_INVENTORY;
            let end_idx = start_idx + Self::U64_PER_INVENTORY;
            let start_bit_idx = inventory[start_idx];
            let end_bit_idx = inventory[end_idx];
            let span = end_bit_idx - start_bit_idx;
            let mut word_idx = start_bit_idx / u64::BITS as u64;
            let bit_idx = start_bit_idx % u64::BITS as u64;
            // Complement the word and clear the bits before start_bit_idx.
            let mut word = !(bitvec.as_ref()[word_idx as usize]) >> bit_idx << bit_idx;
            let mut zeros_so_far = inventory_idx * Self::ONES_PER_INVENTORY;
            let mut next_quantum = zeros_so_far;
            // u16 offsets when every offset in the span fits in a u16;
            // otherwise full u64 offsets, flagged by setting bit 63 of the
            // inventory position word.
            let quantum = if span <= u16::MAX as u64 {
                Self::ONES_PER_SUB16
            } else {
                inventory[start_idx] |= 1_u64 << 63;
                Self::ONES_PER_SUB64
            };
            let end_word_idx = end_bit_idx.div_ceil(u64::BITS as u64);
            // Slot 0 (offset of the inventory position itself) is implicitly
            // zero, so writing starts at subinventory slot 1.
            let mut subinventory_idx = 1;
            next_quantum += quantum;
            'outer: loop {
                let zeros_in_word = word.count_ones() as usize;
                while (zeros_so_far + zeros_in_word).min(num_zeros) > next_quantum {
                    debug_assert!(next_quantum <= end_bit_idx as _);
                    let in_word_index = word.select_in_word(next_quantum - zeros_so_far);
                    let bit_index = (word_idx * u64::BITS as u64) + in_word_index as u64;
                    let sub_offset = bit_index - start_bit_idx;
                    if span <= u16::MAX as u64 {
                        // SAFETY: u64 alignment satisfies u16 alignment, so
                        // `align_to_mut` yields empty head/tail slices; the
                        // u16 index order matches the one used at query time.
                        let subinventory: &mut [u16] =
                            unsafe { inventory[start_idx + 1..end_idx].align_to_mut().1 };
                        subinventory[subinventory_idx] = sub_offset as u16;
                    } else {
                        inventory[start_idx + 1 + subinventory_idx] = sub_offset;
                    }
                    subinventory_idx += 1;
                    if subinventory_idx == (1 << LOG2_ZEROS_PER_INVENTORY) / quantum {
                        break 'outer;
                    }
                    next_quantum += quantum;
                }
                zeros_so_far += zeros_in_word;
                word_idx += 1;
                if word_idx == end_word_idx {
                    break;
                }
                word = !bitvec.as_ref()[word_idx as usize];
            }
        }
        Self {
            bits: bitvec,
            inventory,
        }
    }
}
impl<
        B: SelectZeroHinted + BitLength + BitCount,
        I: AsRef<[u64]>,
        const LOG2_ZEROS_PER_INVENTORY: usize,
        const LOG2_U64_PER_SUBINVENTORY: usize,
    > SelectZero for SelectZeroFixed2<B, I, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
    /// Returns the position of the `rank`-th zero using the inventory to
    /// compute a starting hint for the underlying hinted search.
    ///
    /// # Safety
    /// `rank` must be smaller than the number of zeros in the bit vector;
    /// otherwise the unchecked inventory accesses below are out of bounds.
    #[inline(always)]
    unsafe fn select_zero_unchecked(&self, rank: usize) -> usize {
        let inventory_index = rank / Self::ONES_PER_INVENTORY;
        let subrank = rank % Self::ONES_PER_INVENTORY;
        let start_idx = inventory_index * (1 + Self::U64_PER_SUBINVENTORY);
        let inventory_rank = *self.inventory.as_ref().get_unchecked(start_idx);
        let u64s = self
            .inventory
            .as_ref()
            .get_unchecked(start_idx + 1..start_idx + 1 + Self::U64_PER_SUBINVENTORY);
        // Bit 63 of the inventory word flags a u64 subinventory; when it is
        // clear (the word is non-negative viewed as signed) the subinventory
        // stores u16 offsets relative to `inventory_rank`.
        let (pos, residual) = if inventory_rank as isize >= 0 {
            // u64 alignment satisfies u16 alignment, so `_pre`/`_post` are
            // empty and the u16 index order matches construction time.
            let (_pre, u16s, _post) = u64s.align_to::<u16>();
            (
                inventory_rank + u16s[subrank / Self::ONES_PER_SUB16] as u64,
                subrank % Self::ONES_PER_SUB16,
            )
        } else {
            (
                // Mask off the flag bit to recover the absolute position.
                (inventory_rank & Self::INVENTORY_MASK) + u64s[subrank / Self::ONES_PER_SUB64],
                subrank % Self::ONES_PER_SUB64,
            )
        };
        // `pos` is the position of the (rank - residual)-th zero; the hinted
        // search scans forward from there for the remaining `residual` zeros.
        self.bits
            .select_zero_hinted_unchecked(rank, pos as usize, rank - residual)
    }
}
impl<
        B: SelectZeroHinted + BitLength + BitCount,
        I: AsRef<[u64]>,
        const LOG2_ZEROS_PER_INVENTORY: usize,
        const LOG2_U64_PER_SUBINVENTORY: usize,
    > ConvertTo<B> for SelectZeroFixed2<B, I, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
    /// Discards the select-zero inventory and hands back the underlying
    /// bit vector. Infallible; always returns `Ok`.
    #[inline(always)]
    fn convert_to(self) -> Result<B> {
        // Destructure: keep the bit vector, drop the inventory.
        let Self { bits, .. } = self;
        Ok(bits)
    }
}
impl<
        B: SelectZeroHinted + BitLength + BitCount + AsRef<[usize]>,
        const LOG2_ZEROS_PER_INVENTORY: usize,
        const LOG2_U64_PER_SUBINVENTORY: usize,
    > ConvertTo<SelectZeroFixed2<B, Vec<u64>, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>>
    for B
{
    /// Consumes the bit vector and builds a select-zero structure over it.
    /// Infallible; always returns `Ok`.
    #[inline(always)]
    fn convert_to(
        self,
    ) -> Result<SelectZeroFixed2<B, Vec<u64>, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>>
    {
        let with_index = SelectZeroFixed2::new(self);
        Ok(with_index)
    }
}
impl<
B: SelectZeroHinted + BitLength,
I: AsRef<[u64]>,
const LOG2_ZEROS_PER_INVENTORY: usize,
const LOG2_U64_PER_SUBINVENTORY: usize,
> BitLength for SelectZeroFixed2<B, I, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
#[inline(always)]
fn len(&self) -> usize {
self.bits.len()
}
}
impl<
B: SelectZeroHinted + BitCount,
I: AsRef<[u64]>,
const LOG2_ZEROS_PER_INVENTORY: usize,
const LOG2_U64_PER_SUBINVENTORY: usize,
> BitCount for SelectZeroFixed2<B, I, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
#[inline(always)]
fn count(&self) -> usize {
self.bits.count()
}
}
impl<
B: SelectZeroHinted + Select,
I: AsRef<[u64]>,
const LOG2_ZEROS_PER_INVENTORY: usize,
const LOG2_U64_PER_SUBINVENTORY: usize,
> Select for SelectZeroFixed2<B, I, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
#[inline(always)]
fn select(&self, rank: usize) -> Option<usize> {
self.bits.select(rank)
}
#[inline(always)]
unsafe fn select_unchecked(&self, rank: usize) -> usize {
self.bits.select_unchecked(rank)
}
}
impl<
B: SelectZeroHinted + Rank,
I: AsRef<[u64]>,
const LOG2_ZEROS_PER_INVENTORY: usize,
const LOG2_U64_PER_SUBINVENTORY: usize,
> Rank for SelectZeroFixed2<B, I, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
fn rank(&self, pos: usize) -> usize {
self.bits.rank(pos)
}
unsafe fn rank_unchecked(&self, pos: usize) -> usize {
self.bits.rank_unchecked(pos)
}
}
impl<
B: SelectZeroHinted + RankZero,
I: AsRef<[u64]>,
const LOG2_ZEROS_PER_INVENTORY: usize,
const LOG2_U64_PER_SUBINVENTORY: usize,
> RankZero for SelectZeroFixed2<B, I, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
fn rank_zero(&self, pos: usize) -> usize {
self.bits.rank_zero(pos)
}
unsafe fn rank_zero_unchecked(&self, pos: usize) -> usize {
self.bits.rank_zero_unchecked(pos)
}
}
impl<
        B: SelectZeroHinted + AsRef<[usize]>,
        I: AsRef<[u64]>,
        const LOG2_ZEROS_PER_INVENTORY: usize,
        const LOG2_U64_PER_SUBINVENTORY: usize,
    > AsRef<[usize]>
    for SelectZeroFixed2<B, I, LOG2_ZEROS_PER_INVENTORY, LOG2_U64_PER_SUBINVENTORY>
{
    /// Exposes the raw words of the underlying bit vector.
    fn as_ref(&self) -> &[usize] {
        // Fully qualified to pin the `[usize]` view of `bits`.
        <B as AsRef<[usize]>>::as_ref(&self.bits)
    }
}