// vers_vecs/bit_vec/fast_rs_vec/mod.rs
1//! A fast succinct bit vector implementation with rank and select queries. Rank computes in
2//! constant-time, select on average in constant-time, with a logarithmic worst case.
3
4use std::mem::size_of;
5
6#[cfg(all(
7 feature = "simd",
8 target_arch = "x86_64",
9 target_feature = "avx",
10 target_feature = "avx2",
11 target_feature = "avx512f",
12 target_feature = "avx512bw",
13))]
14pub use bitset::*;
15pub use iter::*;
16
17use crate::util::impl_vector_iterator;
18use crate::BitVec;
19
20use super::WORD_SIZE;
21
/// Size of a block in the bitvector, in bits. Blocks are the smallest unit with
/// precomputed rank metadata; queries popcount at most one block's words.
const BLOCK_SIZE: usize = 512;

/// Size of a super block in the bitvector, in bits. Super-blocks exist to decrease the memory
/// overhead of block descriptors.
/// Increasing or decreasing the super block size has negligible effect on performance of rank
/// instruction. This means we want to make the super block size as large as possible, as long as
/// the zero-counter in normal blocks still fits in a reasonable amount of bits. However, this has
/// impact on the performance of select queries. The larger the super block size, the deeper will
/// a binary search be. We found 2^13 to be a good compromise between memory overhead and
/// performance.
const SUPER_BLOCK_SIZE: usize = 1 << 13;

/// Size of a select block. The select block is used to speed up select queries. The select block
/// contains the indices of every `SELECT_BLOCK_SIZE`'th 1-bit and 0-bit in the bitvector.
/// The smaller this block-size, the faster are select queries, but the more memory is used.
const SELECT_BLOCK_SIZE: usize = 1 << 13;
39
/// Meta-data for a block. The `zeros` field stores the number of zeros up to the block,
/// beginning from the last super-block boundary. This means the first block in a super-block
/// always stores the number zero, which serves as a sentinel value to avoid special-casing the
/// first block in a super-block (which would be a performance hit due to branch prediction
/// failures).
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
struct BlockDescriptor {
    // number of 0-bits from the enclosing super-block's start up to (excluding) this block;
    // fits in u16 because a super-block spans 2^13 < 2^16 bits
    zeros: u16,
}
49
/// Meta-data for a super-block. The `zeros` field stores the number of zeros up to this
/// super-block (counted from the start of the vector). This allows the `BlockDescriptor` to
/// store its zero count relative to the super-block boundary in a much smaller space.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
struct SuperBlockDescriptor {
    // absolute number of 0-bits before this super-block
    zeros: usize,
}
58
/// Meta-data for the select query. Each entry i in the select vector contains the indices to find
/// the i * `SELECT_BLOCK_SIZE`'th 0- and 1-bit in the bitvector. Those indices may be very far apart.
/// The indices do not point into the bit-vector, but into the super-block vector.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
struct SelectSuperBlockDescriptor {
    // super-block index containing the i * SELECT_BLOCK_SIZE'th 0-bit
    index_0: usize,
    // super-block index containing the i * SELECT_BLOCK_SIZE'th 1-bit
    index_1: usize,
}
68
/// A bitvector that supports constant-time rank and select queries and is optimized for fast queries.
/// The bitvector is stored as a vector of `u64`s. The bit-vector stores meta-data for constant-time
/// rank and select queries, which takes sub-linear additional space. The space overhead is
/// 28 bits per 512 bits of user data (~5.47%).
///
/// # Example
/// ```rust
/// use vers_vecs::{BitVec, RsVec};
///
/// let mut bit_vec = BitVec::new();
/// bit_vec.append_word(u64::MAX);
///
/// let rs_vec = RsVec::from_bit_vec(bit_vec);
/// assert_eq!(rs_vec.rank1(64), 64);
/// assert_eq!(rs_vec.select1(64), 64);
///```
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct RsVec {
    // bit data in 64-bit limbs, padded to a multiple of BLOCK_SIZE bits
    data: Vec<u64>,
    // logical length in bits (excludes padding)
    len: usize,
    // per-block zero counts, relative to the enclosing super-block
    blocks: Vec<BlockDescriptor>,
    // per-super-block absolute zero counts
    super_blocks: Vec<SuperBlockDescriptor>,
    // select acceleration structure (see SelectSuperBlockDescriptor)
    select_blocks: Vec<SelectSuperBlockDescriptor>,
    // total number of 0-bits in the vector
    pub(crate) rank0: usize,
    // total number of 1-bits in the vector
    pub(crate) rank1: usize,
}
96
97impl RsVec {
    /// Build an [`RsVec`] from a [`BitVec`]. This will consume the [`BitVec`]. Since [`RsVec`]s are
    /// immutable, this is the only way to construct an [`RsVec`].
    ///
    /// # Example
    /// See the example for [`RsVec`].
    ///
    /// [`BitVec`]: ../struct.BitVec.html
    /// [`RsVec`]: struct.RsVec.html
    #[must_use]
    pub fn from_bit_vec(mut vec: BitVec) -> RsVec {
        // Construct the block descriptor meta data. Each block descriptor contains the number of
        // zeros in the super-block, up to but excluding the block.
        let mut blocks = Vec::with_capacity(vec.len() / BLOCK_SIZE + 1);
        let mut super_blocks = Vec::with_capacity(vec.len() / SUPER_BLOCK_SIZE + 1);
        let mut select_blocks = Vec::new();

        // sentinel value, so entry i of the select vector corresponds to the
        // i * SELECT_BLOCK_SIZE'th bit without an off-by-one adjustment
        select_blocks.push(SelectSuperBlockDescriptor {
            index_0: 0,
            index_1: 0,
        });

        // zeros before the current super-block / zeros inside the current super-block
        let mut total_zeros: usize = 0;
        let mut current_zeros: usize = 0;
        // index of the last select-block entry whose 0-/1-index was actually written;
        // used after the loop to place the dummy terminator entries
        let mut last_zero_select_block: usize = 0;
        let mut last_one_select_block: usize = 0;

        for (idx, &word) in vec.data.iter().enumerate() {
            // if we moved past a block boundary, append the block information for the previous
            // block and reset the counter if we moved past a super-block boundary.
            if idx % (BLOCK_SIZE / WORD_SIZE) == 0 {
                if idx % (SUPER_BLOCK_SIZE / WORD_SIZE) == 0 {
                    total_zeros += current_zeros;
                    current_zeros = 0;
                    super_blocks.push(SuperBlockDescriptor { zeros: total_zeros });
                }

                // this cannot overflow because a super block isn't 2^16 bits long
                #[allow(clippy::cast_possible_truncation)]
                blocks.push(BlockDescriptor {
                    zeros: current_zeros as u16,
                });
            }

            // count the zeros in the current word and add them to the counter
            // the last word may contain padding zeros, which should not be counted,
            // but since we do not append the last block descriptor, this is not a problem
            let new_zeros = word.count_zeros() as usize;
            let all_zeros = total_zeros + current_zeros + new_zeros;
            // did this word cross a multiple of SELECT_BLOCK_SIZE zeros?
            if all_zeros / SELECT_BLOCK_SIZE > (total_zeros + current_zeros) / SELECT_BLOCK_SIZE {
                if all_zeros / SELECT_BLOCK_SIZE == select_blocks.len() {
                    select_blocks.push(SelectSuperBlockDescriptor {
                        index_0: super_blocks.len() - 1,
                        index_1: 0,
                    });
                } else {
                    // entry was already created by the 1-bit bookkeeping below; fill its 0-index
                    select_blocks[all_zeros / SELECT_BLOCK_SIZE].index_0 = super_blocks.len() - 1;
                }

                last_zero_select_block += 1;
            }

            // same bookkeeping for 1-bits, derived from the zero counts
            let total_bits = (idx + 1) * WORD_SIZE;
            let all_ones = total_bits - all_zeros;
            if all_ones / SELECT_BLOCK_SIZE
                > (idx * WORD_SIZE - total_zeros - current_zeros) / SELECT_BLOCK_SIZE
            {
                if all_ones / SELECT_BLOCK_SIZE == select_blocks.len() {
                    select_blocks.push(SelectSuperBlockDescriptor {
                        index_0: 0,
                        index_1: super_blocks.len() - 1,
                    });
                } else {
                    select_blocks[all_ones / SELECT_BLOCK_SIZE].index_1 = super_blocks.len() - 1;
                }

                last_one_select_block += 1;
            }

            current_zeros += new_zeros;
        }

        // insert dummy select blocks at the end that just report the same index like the last real
        // block, so the bound check for binary search doesn't overflow
        // this is technically the incorrect value, but since all valid queries will be smaller,
        // this will only tell select to stay in the current super block, which is correct.
        // we cannot use a real value here, because this would change the size of the super-block
        if last_zero_select_block == select_blocks.len() - 1 {
            select_blocks.push(SelectSuperBlockDescriptor {
                index_0: select_blocks[last_zero_select_block].index_0,
                index_1: 0,
            });
        } else {
            debug_assert!(select_blocks[last_zero_select_block + 1].index_0 == 0);
            select_blocks[last_zero_select_block + 1].index_0 =
                select_blocks[last_zero_select_block].index_0;
        }
        if last_one_select_block == select_blocks.len() - 1 {
            select_blocks.push(SelectSuperBlockDescriptor {
                index_0: 0,
                index_1: select_blocks[last_one_select_block].index_1,
            });
        } else {
            debug_assert!(select_blocks[last_one_select_block + 1].index_1 == 0);
            select_blocks[last_one_select_block + 1].index_1 =
                select_blocks[last_one_select_block].index_1;
        }

        // pad the internal vector to be block-aligned, so SIMD operations don't try to read
        // past the end of the vector. Note that this does not affect the content of the vector,
        // because those bits are not considered part of the vector.
        // Note further, that currently no SIMD implementation exists.
        while vec.data.len() % (BLOCK_SIZE / WORD_SIZE) != 0 {
            vec.data.push(0);
        }

        RsVec {
            data: vec.data,
            len: vec.len,
            blocks,
            super_blocks,
            select_blocks,
            // the last block may contain padding zeros, which should not be counted
            rank0: total_zeros + current_zeros - ((WORD_SIZE - (vec.len % WORD_SIZE)) % WORD_SIZE),
            rank1: vec.len
                - (total_zeros + current_zeros - ((WORD_SIZE - (vec.len % WORD_SIZE)) % WORD_SIZE)),
        }
    }
226
227 /// Return the 0-rank of the bit at the given position. The 0-rank is the number of
228 /// 0-bits in the vector up to but excluding the bit at the given position. Calling this
229 /// function with an index larger than the length of the bit-vector will report the total
230 /// number of 0-bits in the bit-vector.
231 ///
232 /// # Parameters
233 /// - `pos`: The position of the bit to return the rank of.
234 #[must_use]
235 pub fn rank0(&self, pos: usize) -> usize {
236 self.rank(true, pos)
237 }
238
239 /// Return the 1-rank of the bit at the given position. The 1-rank is the number of
240 /// 1-bits in the vector up to but excluding the bit at the given position. Calling this
241 /// function with an index larger than the length of the bit-vector will report the total
242 /// number of 1-bits in the bit-vector.
243 ///
244 /// # Parameters
245 /// - `pos`: The position of the bit to return the rank of.
246 #[must_use]
247 pub fn rank1(&self, pos: usize) -> usize {
248 self.rank(false, pos)
249 }
250
    // Shared implementation of rank0/rank1. `zero` is constant at every call site, so with
    // forced inlining the compiler eliminates the dead branch in each caller.
    // I measured 5-10% improvement with this. I don't know why it's not inlined by default, the
    // branch elimination profits alone should make it worth it.
    #[allow(clippy::inline_always)]
    #[inline(always)]
    fn rank(&self, zero: bool, pos: usize) -> usize {
        #[allow(clippy::collapsible_else_if)]
        // readability and more obvious where dead branch elimination happens
        if zero {
            // out-of-range queries report the total 0-count
            if pos >= self.len() {
                return self.rank0;
            }
        } else {
            // out-of-range queries report the total 1-count
            if pos >= self.len() {
                return self.rank1;
            }
        }

        let index = pos / WORD_SIZE;
        let block_index = pos / BLOCK_SIZE;
        let super_block_index = pos / SUPER_BLOCK_SIZE;
        let mut rank = 0;

        // at first add the number of zeros/ones before the current super block
        rank += if zero {
            self.super_blocks[super_block_index].zeros
        } else {
            // 1-count = total bits before the super-block minus its 0-count
            (super_block_index * SUPER_BLOCK_SIZE) - self.super_blocks[super_block_index].zeros
        };

        // then add the number of zeros/ones before the current block
        rank += if zero {
            self.blocks[block_index].zeros as usize
        } else {
            // block counters are relative to the enclosing super-block boundary
            ((block_index % (SUPER_BLOCK_SIZE / BLOCK_SIZE)) * BLOCK_SIZE)
                - self.blocks[block_index].zeros as usize
        };

        // naive popcount of the full words between the block start and the word holding `pos`
        for &i in &self.data[(block_index * BLOCK_SIZE) / WORD_SIZE..index] {
            rank += if zero {
                i.count_zeros() as usize
            } else {
                i.count_ones() as usize
            };
        }

        // finally count the bits of the partial word up to, but excluding, `pos`
        rank += if zero {
            (!self.data[index] & ((1 << (pos % WORD_SIZE)) - 1)).count_ones() as usize
        } else {
            (self.data[index] & ((1 << (pos % WORD_SIZE)) - 1)).count_ones() as usize
        };

        rank
    }
305
306 /// Return the length of the vector, i.e. the number of bits it contains.
307 #[must_use]
308 pub fn len(&self) -> usize {
309 self.len
310 }
311
312 /// Return whether the vector is empty.
313 #[must_use]
314 pub fn is_empty(&self) -> bool {
315 self.len() == 0
316 }
317
318 /// Return the bit at the given position. The bit takes the least significant
319 /// bit of the returned u64 word.
320 /// If the position is larger than the length of the vector, `None` is returned.
321 #[must_use]
322 pub fn get(&self, pos: usize) -> Option<u64> {
323 if pos >= self.len() {
324 None
325 } else {
326 Some(self.get_unchecked(pos))
327 }
328 }
329
330 /// Return the bit at the given position. The bit takes the least significant
331 /// bit of the returned u64 word.
332 ///
333 /// # Panics
334 /// This function may panic if `pos >= self.len()` (alternatively, it may return garbage).
335 #[must_use]
336 pub fn get_unchecked(&self, pos: usize) -> u64 {
337 (self.data[pos / WORD_SIZE] >> (pos % WORD_SIZE)) & 1
338 }
339
340 /// Return multiple bits at the given position. The number of bits to return is given by `len`.
341 /// At most 64 bits can be returned.
342 /// If the position at the end of the query is larger than the length of the vector,
343 /// None is returned (even if the query partially overlaps with the vector).
344 /// If the length of the query is larger than 64, None is returned.
345 #[must_use]
346 pub fn get_bits(&self, pos: usize, len: usize) -> Option<u64> {
347 if len > WORD_SIZE {
348 return None;
349 }
350 if pos + len > self.len {
351 None
352 } else {
353 Some(self.get_bits_unchecked(pos, len))
354 }
355 }
356
    /// Return multiple bits at the given position. The number of bits to return is given by `len`.
    /// At most 64 bits can be returned.
    ///
    /// This function is always inlined, because it gains a lot from loop optimization and
    /// can utilize the processor pre-fetcher better if it is.
    ///
    /// # Errors
    /// If the length of the query is larger than 64, unpredictable data will be returned.
    /// Use [`get_bits`] to properly handle this case with an `Option`.
    ///
    /// # Panics
    /// If the position or interval is larger than the length of the vector,
    /// the function will either return unpredictable data, or panic.
    ///
    /// [`get_bits`]: #method.get_bits
    #[must_use]
    #[allow(clippy::comparison_chain)] // readability
    #[allow(clippy::cast_possible_truncation)] // parameter must be out of scope for this to happen
    pub fn get_bits_unchecked(&self, pos: usize, len: usize) -> u64 {
        debug_assert!(len <= WORD_SIZE);
        // shift the first limb so the requested range starts at bit 0
        let partial_word = self.data[pos / WORD_SIZE] >> (pos % WORD_SIZE);
        if pos % WORD_SIZE + len == WORD_SIZE {
            // the range ends exactly at the limb boundary: no masking needed
            partial_word
        } else if pos % WORD_SIZE + len < WORD_SIZE {
            // the range lies fully inside one limb: mask off the high bits
            partial_word & ((1 << (len % WORD_SIZE)) - 1)
        } else {
            // the range straddles two limbs: splice in the low bits of the next limb;
            // checked_shl handles len == 64, where (1 << 64) - 1 must become u64::MAX
            (partial_word | (self.data[pos / WORD_SIZE + 1] << (WORD_SIZE - pos % WORD_SIZE)))
                & 1u64.checked_shl(len as u32).unwrap_or(0).wrapping_sub(1)
        }
    }
387
    /// Check if two `RsVec`s are equal. For sparse vectors (either sparsely filled with 1-bits or
    /// 0-bits), this is faster than comparing the vectors bit by bit.
    /// Choose the value of `ZERO` depending on which bits are more sparse.
    ///
    /// This method is faster than [`full_equals`] for sparse vectors beginning at roughly 1
    /// million bits. Above 4 million bits, this method becomes faster than full equality in general.
    ///
    /// # Parameters
    /// - `other`: The other `RsVec` to compare to.
    /// - `ZERO`: Whether to compare the sparse 0-bits (true) or the sparse 1-bits (false).
    ///
    /// # Returns
    /// `true` if the vectors' contents are equal, `false` otherwise.
    ///
    /// [`full_equals`]: RsVec::full_equals
    #[must_use]
    pub fn sparse_equals<const ZERO: bool>(&self, other: &Self) -> bool {
        // cheap pre-checks: differing lengths or bit totals rule out equality immediately
        if self.len() != other.len() {
            return false;
        }

        if self.rank0 != other.rank0 || self.rank1 != other.rank1 {
            return false;
        }

        // iterate the positions of the sparse bit kind in `self` and verify that `other`
        // has the same bit value and the same rank at each of those positions
        let iter: SelectIter<ZERO> = self.select_iter();

        for (rank, bit_index) in iter.enumerate() {
            // since rank is inlined, we get dead code elimination depending on ZERO
            if (other.get_unchecked(bit_index) == 0) != ZERO || other.rank(ZERO, bit_index) != rank
            {
                return false;
            }
        }

        true
    }
425
426 /// Check if two `RsVec`s are equal. This compares limb by limb. This is usually faster than a
427 /// [`sparse_equals`] call for small vectors.
428 ///
429 /// # Parameters
430 /// - `other`: The other `RsVec` to compare to.
431 ///
432 /// # Returns
433 /// `true` if the vectors' contents are equal, `false` otherwise.
434 ///
435 /// [`sparse_equals`]: RsVec::sparse_equals
436 #[must_use]
437 pub fn full_equals(&self, other: &Self) -> bool {
438 if self.len() != other.len() {
439 return false;
440 }
441
442 if self.rank0 != other.rank0 || self.rank1 != other.rank1 {
443 return false;
444 }
445
446 if self.data[..self.len / 64]
447 .iter()
448 .zip(other.data[..other.len / 64].iter())
449 .any(|(a, b)| a != b)
450 {
451 return false;
452 }
453
454 // if last incomplete block exists, test it without junk data
455 if self.len % 64 > 0
456 && self.data[self.len / 64] & ((1 << (self.len % 64)) - 1)
457 != other.data[self.len / 64] & ((1 << (other.len % 64)) - 1)
458 {
459 return false;
460 }
461
462 true
463 }
464
465 /// Returns the number of bytes used on the heap for this vector. This does not include
466 /// allocated space that is not used (e.g. by the allocation behavior of `Vec`).
467 #[must_use]
468 pub fn heap_size(&self) -> usize {
469 self.data.len() * size_of::<u64>()
470 + self.blocks.len() * size_of::<BlockDescriptor>()
471 + self.super_blocks.len() * size_of::<SuperBlockDescriptor>()
472 + self.select_blocks.len() * size_of::<SelectSuperBlockDescriptor>()
473 }
474}
475
476impl_vector_iterator! { RsVec, RsVecIter, RsVecRefIter }
477
478impl PartialEq for RsVec {
479 /// Check if two `RsVec`s are equal. This method calls [`sparse_equals`] if the vector has more
480 /// than 4'000'000 bits, and [`full_equals`] otherwise.
481 ///
482 /// This was determined with benchmarks on an `x86_64` machine,
483 /// on which [`sparse_equals`] outperforms [`full_equals`] consistently above this threshold.
484 ///
485 /// # Parameters
486 /// - `other`: The other `RsVec` to compare to.
487 ///
488 /// # Returns
489 /// `true` if the vectors' contents are equal, `false` otherwise.
490 ///
491 /// [`sparse_equals`]: RsVec::sparse_equals
492 /// [`full_equals`]: RsVec::full_equals
493 fn eq(&self, other: &Self) -> bool {
494 if self.len > 4_000_000 {
495 if self.rank1 > self.rank0 {
496 self.sparse_equals::<true>(other)
497 } else {
498 self.sparse_equals::<false>(other)
499 }
500 } else {
501 self.full_equals(other)
502 }
503 }
504}
505
506impl From<BitVec> for RsVec {
507 /// Build an [`RsVec`] from a [`BitVec`]. This will consume the [`BitVec`]. Since [`RsVec`]s are
508 /// immutable, this is the only way to construct an [`RsVec`].
509 ///
510 /// # Example
511 /// See the example for [`RsVec`].
512 ///
513 /// [`BitVec`]: BitVec
514 /// [`RsVec`]: RsVec
515 fn from(vec: BitVec) -> Self {
516 RsVec::from_bit_vec(vec)
517 }
518}
519
520// iter code in here to keep it more organized
521mod iter;
522// select code in here to keep it more organized
523mod select;
524
525#[cfg(all(
526 feature = "simd",
527 target_arch = "x86_64",
528 target_feature = "avx",
529 target_feature = "avx2",
530 target_feature = "avx512f",
531 target_feature = "avx512bw",
532))]
533mod bitset;
534
535#[cfg(test)]
536mod tests;