use core::ops::AddAssign;
use crate::hash::block::{
block_hash, block_size, BlockHashSize, BlockHashSizes, ConstrainedBlockHashSize,
ConstrainedBlockHashSizes,
};
use crate::hash::{fuzzy_raw_type, FuzzyHashData, LongRawFuzzyHash, RawFuzzyHash};
use crate::intrinsics::{likely, unlikely};
use crate::macros::{invariant, optionally_unsafe};
/// An FNV-1-based partial hash whose output is reduced into the block hash
/// alphabet range (`0..block_hash::ALPHABET_SIZE`).
///
/// The single `u8` field is the current hash state; use [`PartialFNVHash::value()`]
/// to read it (the reduction strategy depends on the
/// `opt-reduce-fnv-table` feature).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PartialFNVHash(u8);
impl PartialFNVHash {
// Initial state of the original (full 32-bit) hash; only its residue
// modulo the alphabet size is actually used (see FNV_HASH_INIT).
const OLD_HASH_INIT: u32 = 0x28021967;
/// Initial state of the partial hash
/// (`OLD_HASH_INIT` reduced modulo `block_hash::ALPHABET_SIZE`; 0x27,
/// as checked by a `const_assert` at the bottom of this file).
pub(super) const FNV_HASH_INIT: u8 =
(Self::OLD_HASH_INIT % block_hash::ALPHABET_SIZE as u32) as u8;
/// 32-bit FNV prime used for the state transition.
pub(super) const FNV_HASH_PRIME: u32 = 0x01000193;
/// Precomputed state-transition table:
/// `FNV_TABLE[state][ch] == ((state * FNV_HASH_PRIME) as u8 ^ ch) % ALPHABET_SIZE`.
///
/// NOTE(review): the loop bounds hard-code `64`, which must equal
/// `block_hash::ALPHABET_SIZE` — keep in sync if that constant changes.
#[cfg(not(feature = "opt-reduce-fnv-table"))]
pub(super) const FNV_TABLE: [[u8; block_hash::ALPHABET_SIZE]; block_hash::ALPHABET_SIZE] = {
let mut array = [[0u8; block_hash::ALPHABET_SIZE]; block_hash::ALPHABET_SIZE];
let mut state = 0u8;
while state < 64 {
let mut ch = 0u8;
while ch < 64 {
array[state as usize][ch as usize] =
(((state as u32).wrapping_mul(Self::FNV_HASH_PRIME) as u8) ^ ch)
% block_hash::ALPHABET_SIZE as u8;
ch += 1;
}
state += 1;
}
array
};
/// Creates a new hash with the initial (already-reduced) state.
#[inline]
pub fn new() -> Self {
PartialFNVHash(Self::FNV_HASH_INIT)
}
/// Feeds one byte into the hash and returns `self` for chaining.
///
/// Table variant: one table lookup; only the low bits of `ch`
/// (masked by `ALPHABET_SIZE - 1`) participate, and the stored state
/// stays reduced at all times.
/// Non-table variant: plain FNV-1 step (`state * prime ^ ch`) on the raw
/// byte; reduction is deferred to `value()`.
#[inline]
pub fn update_by_byte(&mut self, ch: u8) -> &mut Self {
cfg_if::cfg_if! {
if #[cfg(not(feature = "opt-reduce-fnv-table"))] {
optionally_unsafe! {
// State is always < ALPHABET_SIZE here, so the table
// indexing below cannot go out of bounds.
invariant!((self.value() as usize) < block_hash::ALPHABET_SIZE);
self.0 = Self::FNV_TABLE
[self.value() as usize] [(ch & (block_hash::ALPHABET_SIZE as u8).wrapping_sub(1)) as usize]; }
}
else {
self.0 = ((self.0 as u32).wrapping_mul(Self::FNV_HASH_PRIME) ^ (ch as u32)) as u8;
}
}
self
}
/// Feeds every byte produced by `iter`, in order.
pub fn update_by_iter(&mut self, iter: impl Iterator<Item = u8>) -> &mut Self {
for ch in iter {
self.update_by_byte(ch);
}
self
}
/// Feeds every byte of `buf`, in order.
pub fn update(&mut self, buf: &[u8]) -> &mut Self {
for &ch in buf.iter() {
self.update_by_byte(ch);
}
self
}
/// Returns the current hash value, reduced into
/// `0..block_hash::ALPHABET_SIZE`.
///
/// Table variant: the state is kept reduced by `update_by_byte`, so it is
/// returned as-is. Non-table variant: the raw state is masked here
/// (valid because `ALPHABET_SIZE` is a power of two, per the
/// `const_assert` at the bottom of this file).
#[inline]
pub fn value(&self) -> u8 {
cfg_if::cfg_if! {
if #[cfg(not(feature = "opt-reduce-fnv-table"))] {
optionally_unsafe! {
invariant!(self.0 < (block_hash::ALPHABET_SIZE as u8));
}
self.0
}
else {
self.0 & (block_hash::ALPHABET_SIZE as u8).wrapping_sub(1)
}
}
}
}
impl Default for PartialFNVHash {
fn default() -> Self {
Self::new()
}
}
/// `hash += slice` feeds every byte of the slice into the hash.
impl AddAssign<&[u8]> for PartialFNVHash {
    #[inline(always)]
    fn add_assign(&mut self, buffer: &[u8]) {
        // `update` returns `&mut Self` for chaining; discard it here.
        let _ = self.update(buffer);
    }
}
/// `hash += array_ref` feeds every byte of a fixed-size array into the hash.
impl<const N: usize> AddAssign<&[u8; N]> for PartialFNVHash {
    #[inline(always)]
    fn add_assign(&mut self, buffer: &[u8; N]) {
        // `&[u8; N]` coerces to `&[u8]` at the call site.
        self.update(buffer);
    }
}
/// `hash += byte` feeds a single byte into the hash.
impl AddAssign<u8> for PartialFNVHash {
    #[inline(always)]
    fn add_assign(&mut self, byte: u8) {
        let _ = self.update_by_byte(byte);
    }
}
const ROLLING_WINDOW: usize = 7;
/// A rolling hash over the last [`ROLLING_WINDOW`] input bytes.
///
/// Its value is the wrapping sum of three components (`h1 + h2 + h3`),
/// each updated incrementally per input byte (see `update_by_byte`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RollingHash {
/// Next write position inside `window` (always `< ROLLING_WINDOW`).
index: u32,
/// Plain sum of the bytes currently in the window.
h1: u32,
/// Weighted sum: each update subtracts `h1` and adds `ROLLING_WINDOW * ch`.
h2: u32,
/// Shift/XOR mixer: shifted left by `H3_LSHIFT` and XORed with each byte.
h3: u32,
/// Circular buffer holding the last `ROLLING_WINDOW` bytes.
window: [u8; ROLLING_WINDOW],
}
impl RollingHash {
/// Public alias of [`ROLLING_WINDOW`].
pub const WINDOW_SIZE: usize = ROLLING_WINDOW;
// Per-byte left-shift applied to `h3`. A const-asserted test below checks
// that WINDOW_SIZE * H3_LSHIFT >= 32, so a byte's contribution to `h3` is
// fully shifted out after WINDOW_SIZE further updates.
const H3_LSHIFT: usize = 5;
/// Creates an empty rolling hash (all components zero, window zeroed).
pub fn new() -> Self {
RollingHash {
index: 0,
h1: 0,
h2: 0,
h3: 0,
window: [0; ROLLING_WINDOW],
}
}
/// Feeds one byte, evicting the oldest byte of the window.
///
/// All arithmetic is wrapping; the statement order matters (`h2` must be
/// adjusted with the *old* `h1` before `h1` itself is updated).
#[inline]
pub fn update_by_byte(&mut self, ch: u8) -> &mut Self {
optionally_unsafe! {
invariant!((self.index as usize) < Self::WINDOW_SIZE);
}
// h2: remove the previous window sum, add the new byte at full weight.
self.h2 = self.h2.wrapping_sub(self.h1);
self.h2 = self
.h2
.wrapping_add(u32::wrapping_mul(ROLLING_WINDOW as u32, ch as u32));
// h1: add the incoming byte, subtract the byte being evicted, then
// store the new byte and advance the circular index.
self.h1 = self.h1.wrapping_add(ch as u32);
self.h1 = self
.h1
.wrapping_sub(self.window[self.index as usize] as u32); self.window[self.index as usize] = ch; self.index += 1;
if self.index as usize == ROLLING_WINDOW {
self.index = 0;
}
// h3: shift-and-XOR mixing.
self.h3 <<= Self::H3_LSHIFT;
self.h3 ^= ch as u32;
self
}
/// Feeds every byte produced by `iter`, in order.
pub fn update_by_iter(&mut self, iter: impl Iterator<Item = u8>) -> &mut Self {
for ch in iter {
self.update_by_byte(ch);
}
self
}
/// Feeds every byte of `buf`, in order.
pub fn update(&mut self, buf: &[u8]) -> &mut Self {
for &ch in buf.iter() {
self.update_by_byte(ch);
}
self
}
/// Returns the current rolling hash value (`h1 + h2 + h3`, wrapping).
#[inline]
pub fn value(&self) -> u32 {
self.h1.wrapping_add(self.h2).wrapping_add(self.h3)
}
}
impl Default for RollingHash {
fn default() -> Self {
Self::new()
}
}
/// `hash += slice` feeds every byte of the slice into the rolling hash.
impl AddAssign<&[u8]> for RollingHash {
    #[inline(always)]
    fn add_assign(&mut self, buffer: &[u8]) {
        let _ = self.update(buffer);
    }
}
/// `hash += array_ref` feeds a fixed-size array into the rolling hash.
impl<const N: usize> AddAssign<&[u8; N]> for RollingHash {
    #[inline(always)]
    fn add_assign(&mut self, buffer: &[u8; N]) {
        // `&[u8; N]` coerces to `&[u8]` at the call site.
        self.update(buffer);
    }
}
/// `hash += byte` feeds a single byte into the rolling hash.
impl AddAssign<u8> for RollingHash {
    #[inline(always)]
    fn add_assign(&mut self, byte: u8) {
        let _ = self.update_by_byte(byte);
    }
}
const BLOCKHASH_CHAR_NIL: u8 = 0xff;
/// Per-block-size working state while generating a fuzzy hash.
#[derive(Debug, Clone, Copy, PartialEq)]
struct BlockHashContext {
/// Next write position inside `blockhash` (always `< block_hash::FULL_SIZE`).
blockhash_index: usize,
/// Block hash characters produced so far; unused slots hold
/// `BLOCKHASH_CHAR_NIL`.
blockhash: [u8; block_hash::FULL_SIZE],
/// Last character captured for the half-size block hash, or
/// `BLOCKHASH_CHAR_NIL` if none.
blockhash_ch_half: u8,
/// FNV state for the full-size block hash.
h_full: PartialFNVHash,
/// FNV state for the half-size block hash.
h_half: PartialFNVHash,
}
impl BlockHashContext {
/// Creates a fresh context: no characters emitted, both FNV states at
/// their initial value, and all slots marked with `BLOCKHASH_CHAR_NIL`.
pub fn new() -> Self {
BlockHashContext {
blockhash_index: 0,
blockhash: [BLOCKHASH_CHAR_NIL; block_hash::FULL_SIZE],
blockhash_ch_half: BLOCKHASH_CHAR_NIL,
h_full: PartialFNVHash::new(),
h_half: PartialFNVHash::new(),
}
}
/// Resets the context for reuse.
///
/// Deliberately clears only the *last* element of `blockhash`:
/// finalization only ever compares `blockhash[block_hash::FULL_SIZE - 1]`
/// against `BLOCKHASH_CHAR_NIL`, and slots below `blockhash_index` are
/// always overwritten before being read, so wiping the whole array would
/// be wasted work.
pub fn reset(&mut self) {
self.blockhash_index = 0;
self.blockhash[block_hash::FULL_SIZE - 1] = BLOCKHASH_CHAR_NIL;
self.blockhash_ch_half = BLOCKHASH_CHAR_NIL;
self.h_full = PartialFNVHash::new();
self.h_half = PartialFNVHash::new();
}
}
/// Internal state of [`Generator`], kept in a separate struct so the update
/// template macro can operate on it uniformly.
#[derive(Debug, Clone, Copy, PartialEq)]
pub(crate) struct GeneratorInnerData {
/// Total number of bytes fed so far (saturating on overflow).
input_size: u64,
/// Expected total input size, if fixed in advance via
/// `set_fixed_input_size`.
fixed_size: Option<u64>,
/// Input-size threshold past which the smallest active block size may be
/// eliminated (doubled each time `bhidx_start` advances).
elim_border: u64,
/// First active index into `bh_context` (inclusive).
bhidx_start: usize,
/// One past the last active index into `bh_context` (exclusive).
bhidx_end: usize,
/// Upper limit `bhidx_end` may grow to.
bhidx_end_limit: usize,
/// Mask for fast rejection of rolling-hash trigger checks; gains one low
/// bit each time `bhidx_start` advances.
roll_mask: u32,
/// The rolling hash over the last `ROLLING_WINDOW` bytes.
roll_hash: RollingHash,
/// One context per valid block size.
bh_context: [BlockHashContext; block_size::NUM_VALID],
/// FNV state used for the final character of the largest block size
/// (active only when `is_last` is set).
h_last: PartialFNVHash,
/// Whether `h_last` is being maintained.
is_last: bool,
}
/// Streaming fuzzy hash generator; feed bytes with the `update*` methods and
/// obtain the hash with the `finalize*` methods.
#[derive(Debug, Clone)]
pub struct Generator(GeneratorInnerData);
/// Errors that can occur while configuring or finalizing a [`Generator`].
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GeneratorError {
/// The current state mismatches the fixed size previously set.
FixedSizeMismatch,
/// The fixed size is too large to generate a fuzzy hash.
FixedSizeTooLarge,
/// The input size is too large to generate a fuzzy hash.
InputSizeTooLarge,
/// The output is too large for the specific fuzzy hash variant.
OutputOverflow,
}
impl GeneratorError {
    /// Returns `true` if this error indicates that the input (or declared
    /// fixed size) was too large: [`GeneratorError::FixedSizeTooLarge`] or
    /// [`GeneratorError::InputSizeTooLarge`].
    pub fn is_size_too_large_error(&self) -> bool {
        match self {
            GeneratorError::FixedSizeTooLarge => true,
            GeneratorError::InputSizeTooLarge => true,
            _ => false,
        }
    }
}
impl core::fmt::Display for GeneratorError {
    /// Writes a short human-readable description of the error.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let msg = match self {
            GeneratorError::FixedSizeMismatch => {
                "current state mismatches to the fixed size previously set"
            }
            GeneratorError::FixedSizeTooLarge => {
                "fixed size is too large to generate a fuzzy hash"
            }
            GeneratorError::InputSizeTooLarge => {
                "input size is too large to generate a fuzzy hash"
            }
            GeneratorError::OutputOverflow => {
                "output is too large for specific fuzzy hash variant"
            }
        };
        f.write_str(msg)
    }
}
// Marker impl: `Display`/`Debug` above supply the messages; no extra methods.
#[cfg(feature = "std")]
impl std::error::Error for GeneratorError {}
// no_std equivalent of the impl above (`core::error::Error` is gated behind
// the `unstable` feature here).
#[cfg(all(not(feature = "std"), feature = "unstable"))]
impl core::error::Error for GeneratorError {}
impl Generator {
/// Preferred maximum input size for the given log block size:
/// `block_size * block_hash::FULL_SIZE`.
const fn guessed_preferred_max_input_size_at(log_block_size: u8) -> u64 {
block_size::from_log_internal(log_block_size) as u64 * block_hash::FULL_SIZE as u64
}
/// Maximum supported input size (the preferred maximum at the largest
/// valid block size; 192 GiB, per the `const_assert` at the bottom of
/// this file).
pub const MAX_INPUT_SIZE: u64 =
Self::guessed_preferred_max_input_size_at(block_size::NUM_VALID as u8 - 1);
/// Smallest input size (4097 bytes) not flagged by
/// [`Generator::may_warn_about_small_input_size()`].
pub const MIN_RECOMMENDED_INPUT_SIZE: u64 = 4096 + 1;
/// Creates a generator in its initial state (only block-size index 0
/// active; `bhidx_end == 1`).
pub fn new() -> Self {
Generator(GeneratorInnerData {
input_size: 0,
fixed_size: None,
elim_border: Self::guessed_preferred_max_input_size_at(0),
bhidx_start: 0,
bhidx_end: 1,
bhidx_end_limit: block_size::NUM_VALID - 1,
roll_mask: 0,
roll_hash: RollingHash::new(),
bh_context: [BlockHashContext::new(); block_size::NUM_VALID],
h_last: PartialFNVHash::new(),
is_last: false,
})
}
/// Resets the generator to its initial state.
///
/// Only `bh_context[0]` is reset: contexts above index 0 are reset on
/// demand by the update template before they become active, and `h_last`
/// is overwritten before `is_last` is set.
pub fn reset(&mut self) {
self.0.input_size = 0;
self.0.fixed_size = None;
self.0.elim_border = Self::guessed_preferred_max_input_size_at(0);
self.0.bhidx_start = 0;
self.0.bhidx_end = 1;
self.0.bhidx_end_limit = block_size::NUM_VALID - 1;
self.0.roll_mask = 0;
self.0.roll_hash = RollingHash::new();
self.0.bh_context[0].reset();
self.0.is_last = false;
}
/// Returns the total number of bytes fed so far.
#[inline(always)]
pub fn input_size(&self) -> u64 {
self.0.input_size
}
/// Returns `true` if the (fixed or current) input size is below
/// [`Generator::MIN_RECOMMENDED_INPUT_SIZE`].
#[inline]
pub fn may_warn_about_small_input_size(&self) -> bool {
self.0.fixed_size.unwrap_or(self.0.input_size) < Self::MIN_RECOMMENDED_INPUT_SIZE
}
/// Estimates the log block size appropriate for `size`, never returning
/// less than `start`.
fn get_log_block_size_from_input_size(size: u64, start: usize) -> usize {
let size_unit = Self::guessed_preferred_max_input_size_at(0);
if size <= size_unit {
return start;
}
// ceil-style division; `high_size > 0` because `size > size_unit`.
let high_size = (size - 1) / size_unit; optionally_unsafe! {
invariant!(high_size > 0);
}
usize::max(start, (crate::utils::u64_ilog2(high_size) + 1) as usize)
}
/// Declares the total input size in advance.
///
/// # Errors
/// * `FixedSizeTooLarge` if `size` exceeds [`Generator::MAX_INPUT_SIZE`].
/// * `FixedSizeMismatch` if a different fixed size was declared earlier.
pub fn set_fixed_input_size(&mut self, size: u64) -> Result<(), GeneratorError> {
if size > Self::MAX_INPUT_SIZE {
return Err(GeneratorError::FixedSizeTooLarge);
}
if let Some(expected_size) = self.0.fixed_size {
if expected_size != size {
return Err(GeneratorError::FixedSizeMismatch);
}
}
self.0.fixed_size = Some(size);
// Knowing the final size bounds how many block sizes can ever be
// needed, letting the hot loop skip the rest.
self.0.bhidx_end_limit = usize::min(
block_size::NUM_VALID - 1,
Self::get_log_block_size_from_input_size(size, 0) + 1,
);
Ok(())
}
/// `usize` convenience wrapper around
/// [`Generator::set_fixed_input_size()`].
#[inline]
pub fn set_fixed_input_size_in_usize(&mut self, size: usize) -> Result<(), GeneratorError> {
if let Ok(size) = u64::try_from(size) {
self.set_fixed_input_size(size)
} else {
Err(GeneratorError::FixedSizeTooLarge)
}
}
}
// Shared hot-loop body for `Generator::update{,_by_iter,_by_byte}`.
//
// `$self` is the `GeneratorInnerData`, `$buffer` an iterable of `u8`, and
// `$proc_per_byte` a block run once per byte (used by `update_by_iter` to
// count input bytes). With the `unsafe` feature, the active context range
// `[bhidx_start, bhidx_end)` is walked via raw pointers; otherwise via safe
// slice iteration. Both paths are kept semantically identical.
macro_rules! generator_update_template {
($self: expr, $buffer: expr, $proc_per_byte: block) => {
optionally_unsafe! {
cfg_if::cfg_if! {
if #[cfg(feature = "unsafe")] {
// Raw-pointer bounds of the active context range; kept in
// sync with bhidx_start/bhidx_end below.
let bh = $self.bh_context.as_mut_ptr();
let mut bhrange0 = bh.add($self.bhidx_start);
let mut bhrange1 = bh.add($self.bhidx_end);
let mut bh: *mut BlockHashContext;
let mut bh_next: *mut BlockHashContext;
}
}
for ch in $buffer {
$proc_per_byte;
// Feed the byte into the rolling hash, the "last" FNV hash (if
// active), and every active block hash context.
$self.roll_hash.update_by_byte(ch);
if $self.is_last {
$self.h_last.update_by_byte(ch);
}
cfg_if::cfg_if! {
if #[cfg(feature = "unsafe")] {
bh = bhrange0;
loop {
(*bh).h_full.update_by_byte(ch);
(*bh).h_half.update_by_byte(ch);
bh = bh.add(1);
if bh == bhrange1 {
break;
}
}
}
else {
for bh1 in &mut $self.bh_context[$self.bhidx_start..$self.bhidx_end] {
bh1.h_full.update_by_byte(ch);
bh1.h_half.update_by_byte(ch);
}
}
}
// Trigger test: a block boundary occurs when (roll + 1) is a
// multiple of the block size. `roll_mask` gives a cheap early
// rejection for the sizes below bhidx_start; h_org == 0 means the
// rolling value wrapped and is never a trigger.
let h_org = $self.roll_hash.value().wrapping_add(1);
let mut h = h_org / block_size::MIN;
if unlikely(h_org == 0) {
continue;
}
if likely(h & $self.roll_mask != 0) {
continue;
}
if h_org % block_size::MIN != 0 {
continue;
}
h >>= $self.bhidx_start;
// bh_loop_2 walks the active contexts starting at bhidx_start;
// bh_curr!/bh_next! abstract over the pointer vs. index access.
cfg_if::cfg_if! {
if #[cfg(feature = "unsafe")] {
macro_rules! bh_loop_2 {
($block: block) => {
bh = bhrange0;
loop {
bh_next = bh.add(1);
$block
bh = bh_next;
if bh >= bhrange1 {
break;
}
}
};
}
macro_rules! bh_curr {() => { *bh }}
macro_rules! bh_next {() => { *bh_next }}
}
else {
let mut i = $self.bhidx_start;
macro_rules! bh_loop_2 {
($block: block) => {
loop {
$block;
i += 1;
if i >= $self.bhidx_end {
break;
}
}
};
}
macro_rules! bh_curr {() => { $self.bh_context[i] }}
macro_rules! bh_next {() => { $self.bh_context[i+1] }}
}
}
bh_loop_2!({
// First trigger for this context: either activate the next
// (larger) block size, or — if at the limit — start tracking
// the "last" FNV hash seeded from this context.
if unlikely(
bh_curr!().blockhash_index == 0 )
{
if $self.bhidx_end > $self.bhidx_end_limit {
if $self.bhidx_end_limit == block_size::NUM_VALID - 1 && !$self.is_last {
$self.h_last = bh_curr!().h_full; $self.is_last = true;
}
}
else {
bh_next!().reset(); bh_next!().h_full = bh_curr!().h_full; bh_next!().h_half = bh_curr!().h_half; $self.bhidx_end += 1;
#[cfg(feature = "unsafe")]
{
bhrange1 = bhrange1.add(1);
}
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "unsafe")] {
macro_rules! bh_curr_reused {() => { *bh }}
}
else {
// Re-borrow once so the safe path avoids repeated
// indexing below.
let bh_curr_reused = &mut $self.bh_context[i]; macro_rules! bh_curr_reused {() => { bh_curr_reused }}
}
}
// Emit a character into this context's block hash (and record
// the half-hash character), then restart the piece hashes.
invariant!(bh_curr_reused!().blockhash_index < block_hash::FULL_SIZE);
bh_curr_reused!().blockhash[bh_curr_reused!().blockhash_index] = bh_curr_reused!().h_full.value(); bh_curr_reused!().blockhash_ch_half = bh_curr_reused!().h_half.value();
if bh_curr_reused!().blockhash_index < block_hash::FULL_SIZE - 1 {
bh_curr_reused!().blockhash_index += 1;
bh_curr_reused!().h_full = PartialFNVHash::new();
if bh_curr_reused!().blockhash_index < block_hash::HALF_SIZE {
bh_curr_reused!().blockhash_ch_half = BLOCKHASH_CHAR_NIL;
bh_curr_reused!().h_half = PartialFNVHash::new();
}
}
// Context is full: if the input is already large enough that
// this block size can no longer be chosen and the next context
// has enough characters, drop the smallest block size.
else if $self.bhidx_end - $self.bhidx_start >= 2
&& $self.elim_border < $self.fixed_size.unwrap_or($self.input_size)
&& bh_next!().blockhash_index >= block_hash::HALF_SIZE {
$self.bhidx_start += 1;
#[cfg(feature = "unsafe")]
{
bhrange0 = bhrange0.add(1);
}
$self.roll_mask = $self.roll_mask.wrapping_mul(2).wrapping_add(1);
$self.elim_border = $self.elim_border.wrapping_mul(2);
}
// `h`'s remaining bits say which larger block sizes also
// triggered; stop at the first block size that did not.
if (h & 1) != 0 {
break;
}
h >>= 1;
});
}
}
};
}
impl Generator {
/// Feeds a byte slice into the generator.
///
/// The size counter saturates; a `usize` that does not fit in `u64`
/// forces it past `MAX_INPUT_SIZE` so finalization fails cleanly.
#[rustfmt::skip]
pub fn update(&mut self, buffer: &[u8]) -> &mut Self {
self.0.input_size = if let Ok(size) = u64::try_from(buffer.len()) { self.0.input_size.saturating_add(size)
} else {
Self::MAX_INPUT_SIZE + 1
};
generator_update_template!(self.0, buffer.iter().cloned(), {});
self
}
/// Feeds every byte produced by `iter` (counting bytes one at a time).
pub fn update_by_iter(&mut self, iter: impl Iterator<Item = u8>) -> &mut Self {
generator_update_template!(self.0, iter, {
self.0.input_size = self.0.input_size.saturating_add(1);
});
self
}
/// Feeds a single byte.
pub fn update_by_byte(&mut self, ch: u8) -> &mut Self {
self.0.input_size = self.0.input_size.saturating_add(1);
generator_update_template!(self.0, [ch; 1], {});
self
}
/// Chooses the log block size for the final hash: the size suggested by
/// the input size, clamped to the active range, then lowered while the
/// corresponding context has fewer than `HALF_SIZE` characters.
#[rustfmt::skip]
fn guess_output_log_block_size(&self) -> usize {
let mut log_block_size =
Self::get_log_block_size_from_input_size(self.0.input_size, self.0.bhidx_start);
log_block_size = usize::min(log_block_size, self.0.bhidx_end - 1);
optionally_unsafe! {
invariant!(log_block_size < self.0.bh_context.len());
}
while log_block_size > self.0.bhidx_start
&& self.0.bh_context[log_block_size].blockhash_index < block_hash::HALF_SIZE {
log_block_size -= 1;
optionally_unsafe! {
invariant!(log_block_size < self.0.bh_context.len());
}
}
log_block_size
}
/// Builds the raw fuzzy hash from the current state.
///
/// `truncate` selects the half-size form of the second block hash.
/// A nonzero rolling-hash value means there is unemitted data, so the
/// in-progress FNV value is appended (or overwrites the last slot when
/// the block hash is full).
///
/// # Errors
/// * `FixedSizeMismatch` if a fixed size was set and does not match.
/// * `InputSizeTooLarge` if more than `MAX_INPUT_SIZE` bytes were fed.
/// * `OutputOverflow` if the second block hash does not fit in `S2`
///   (short-form variants only).
#[allow(clippy::branches_sharing_code)]
#[rustfmt::skip]
#[inline(always)]
fn finalize_raw_internal<const S1: usize, const S2: usize>(
&self,
truncate: bool,
) -> Result<fuzzy_raw_type!(S1, S2), GeneratorError>
where
BlockHashSize<S1>: ConstrainedBlockHashSize,
BlockHashSize<S2>: ConstrainedBlockHashSize,
BlockHashSizes<S1, S2>: ConstrainedBlockHashSizes,
{
if let Some(input_size) = self.0.fixed_size {
if input_size != self.0.input_size {
return Err(GeneratorError::FixedSizeMismatch);
}
}
if Self::MAX_INPUT_SIZE < self.0.input_size {
return Err(GeneratorError::InputSizeTooLarge);
}
let log_block_size = self.guess_output_log_block_size();
let mut fuzzy: fuzzy_raw_type!(S1, S2) = FuzzyHashData::new();
fuzzy.log_blocksize = log_block_size as u8;
let roll_value = self.0.roll_hash.value();
optionally_unsafe! {
invariant!(log_block_size < self.0.bh_context.len());
}
// First block hash: copy the emitted characters, then append the
// in-progress character if there is pending data.
let bh_0 = &self.0.bh_context[log_block_size]; {
let mut sz = bh_0.blockhash_index;
if bh_0.blockhash[block_hash::FULL_SIZE - 1] != BLOCKHASH_CHAR_NIL {
sz += 1;
}
optionally_unsafe! {
invariant!(sz <= fuzzy.blockhash1.len());
invariant!(sz <= bh_0.blockhash.len());
}
fuzzy.blockhash1[0..sz].clone_from_slice(&bh_0.blockhash[0..sz]); fuzzy.len_blockhash1 = sz as u8;
if roll_value != 0 {
if sz == block_hash::FULL_SIZE {
fuzzy.blockhash1[block_hash::FULL_SIZE - 1] = bh_0.h_full.value(); } else {
optionally_unsafe! {
invariant!(sz < block_hash::FULL_SIZE);
}
fuzzy.blockhash1[sz] = bh_0.h_full.value(); fuzzy.len_blockhash1 += 1;
}
}
}
// Second block hash: taken from the next (doubled) block size if one
// is active; otherwise synthesized from a single character below.
if log_block_size < self.0.bhidx_end - 1 {
optionally_unsafe! {
invariant!(log_block_size + 1 < self.0.bh_context.len());
}
let bh_1 = &self.0.bh_context[log_block_size + 1]; if truncate {
// Truncated (half-size) form.
let mut sz = bh_1.blockhash_index;
if bh_1.blockhash_ch_half != BLOCKHASH_CHAR_NIL {
debug_assert!(sz >= block_hash::HALF_SIZE); sz = block_hash::HALF_SIZE;
fuzzy.blockhash2[0..(sz - 1)].clone_from_slice(&bh_1.blockhash[0..(sz - 1)]); fuzzy.blockhash2[sz - 1] = { if roll_value != 0 {
bh_1.h_half.value()
} else {
bh_1.blockhash_ch_half
}
};
} else {
optionally_unsafe! {
invariant!(sz <= fuzzy.blockhash2.len());
invariant!(sz <= bh_1.blockhash.len());
}
fuzzy.blockhash2[0..sz].clone_from_slice(&bh_1.blockhash[0..sz]); if roll_value != 0 {
optionally_unsafe! {
invariant!(sz < fuzzy.blockhash2.len());
}
fuzzy.blockhash2[sz] = bh_1.h_half.value(); sz += 1;
}
}
fuzzy.len_blockhash2 = sz as u8;
} else {
// Non-truncated (full-size) form; short-form output types may
// overflow here.
let mut sz = bh_1.blockhash_index;
if bh_1.blockhash[block_hash::FULL_SIZE - 1] != BLOCKHASH_CHAR_NIL {
sz += 1;
}
#[allow(clippy::collapsible_if)]
if !<fuzzy_raw_type!(S1, S2)>::IS_LONG_FORM {
if sz > S2 {
return Err(GeneratorError::OutputOverflow);
}
}
optionally_unsafe! {
invariant!(sz <= fuzzy.blockhash2.len());
invariant!(sz <= bh_1.blockhash.len());
}
fuzzy.blockhash2[0..sz].clone_from_slice(&bh_1.blockhash[0..sz]); fuzzy.len_blockhash2 = sz as u8;
if roll_value != 0 {
#[allow(clippy::collapsible_else_if)]
if !<fuzzy_raw_type!(S1, S2)>::IS_LONG_FORM {
if sz >= S2 {
return Err(GeneratorError::OutputOverflow);
}
optionally_unsafe! {
invariant!(sz < S2);
}
fuzzy.blockhash2[sz] = bh_1.h_full.value(); fuzzy.len_blockhash2 += 1;
} else {
if sz == block_hash::FULL_SIZE {
fuzzy.blockhash2[block_hash::FULL_SIZE - 1] = bh_1.h_full.value(); } else {
optionally_unsafe! {
invariant!(sz < block_hash::FULL_SIZE);
}
fuzzy.blockhash2[sz] = bh_1.h_full.value(); fuzzy.len_blockhash2 += 1;
}
}
}
}
} else if roll_value != 0 {
// No next context: the second block hash is the single pending
// character (from bh_0 at the smallest size, or h_last at the top).
debug_assert!(log_block_size == 0 || log_block_size == block_size::NUM_VALID - 1);
if log_block_size == 0 {
fuzzy.blockhash2[0] = bh_0.h_full.value(); fuzzy.len_blockhash2 = 1;
} else {
fuzzy.blockhash2[0] = self.0.h_last.value(); fuzzy.len_blockhash2 = 1;
}
} else {
fuzzy.len_blockhash2 = 0;
}
Ok(fuzzy)
}
/// Const-generic front end for [`Generator::finalize_raw_internal()`],
/// with truncation selected by the `TRUNC` parameter.
pub fn finalize_raw<const TRUNC: bool, const S1: usize, const S2: usize>(
&self,
) -> Result<fuzzy_raw_type!(S1, S2), GeneratorError>
where
BlockHashSize<S1>: ConstrainedBlockHashSize,
BlockHashSize<S2>: ConstrainedBlockHashSize,
BlockHashSizes<S1, S2>: ConstrainedBlockHashSizes,
{
self.finalize_raw_internal::<S1, S2>(TRUNC)
}
/// Finalizes into the regular (truncated) raw fuzzy hash.
#[inline]
pub fn finalize(&self) -> Result<RawFuzzyHash, GeneratorError> {
self.finalize_raw::<true, { block_hash::FULL_SIZE }, { block_hash::HALF_SIZE }>()
}
/// Finalizes into the long (non-truncated) raw fuzzy hash.
#[inline]
pub fn finalize_without_truncation(&self) -> Result<LongRawFuzzyHash, GeneratorError> {
self.finalize_raw::<false, { block_hash::FULL_SIZE }, { block_hash::FULL_SIZE }>()
}
}
impl Default for Generator {
fn default() -> Self {
Self::new()
}
}
/// `generator += slice` feeds every byte of the slice into the generator.
impl AddAssign<&[u8]> for Generator {
    #[inline(always)]
    fn add_assign(&mut self, buffer: &[u8]) {
        let _ = self.update(buffer);
    }
}
/// `generator += array_ref` feeds a fixed-size array into the generator.
impl<const N: usize> AddAssign<&[u8; N]> for Generator {
    #[inline(always)]
    fn add_assign(&mut self, buffer: &[u8; N]) {
        // `&[u8; N]` coerces to `&[u8]` at the call site.
        self.update(buffer);
    }
}
/// `generator += byte` feeds a single byte into the generator.
impl AddAssign<u8> for Generator {
    #[inline(always)]
    fn add_assign(&mut self, byte: u8) {
        let _ = self.update_by_byte(byte);
    }
}
// Compile-time checks for the invariants this module's code relies on.
#[doc(hidden)]
mod const_asserts {
use super::*;
use static_assertions::{const_assert, const_assert_eq, const_assert_ne};
const_assert_eq!(ROLLING_WINDOW, 7);
// The reduced FNV initial state must be the documented 0x27.
const_assert_eq!(PartialFNVHash::FNV_HASH_INIT, 0x27);
const_assert_eq!(Generator::MIN_RECOMMENDED_INPUT_SIZE - 1, 4096);
// Alphabet must fit in a u8 and be a power of two (value() masks with
// ALPHABET_SIZE - 1).
const_assert!(0 < block_hash::ALPHABET_SIZE && block_hash::ALPHABET_SIZE <= 256);
const_assert!(block_hash::ALPHABET_SIZE.is_power_of_two());
const_assert!((PartialFNVHash::FNV_HASH_INIT as u16) < (block_hash::ALPHABET_SIZE as u16));
const_assert_eq!(ROLLING_WINDOW, RollingHash::WINDOW_SIZE);
// WINDOW_SIZE * H3_LSHIFT must reach u32::BITS so that a byte's
// contribution to h3 is fully shifted out after WINDOW_SIZE updates
// (runtime test because the check needs non-const methods).
#[cfg(test)]
#[test]
fn rolling_hash_h3_shift_amount() {
assert!(RollingHash::WINDOW_SIZE
.checked_mul(RollingHash::H3_LSHIFT)
.and_then(|x| u32::try_from(x).ok())
.map(|x| x >= u32::BITS)
.unwrap_or(false));
}
// The NIL sentinel must lie outside the valid alphabet.
const_assert!(block_hash::ALPHABET_SIZE <= BLOCKHASH_CHAR_NIL as usize);
const_assert_eq!(Generator::MAX_INPUT_SIZE, 192u64 * 1024 * 1024 * 1024);
const_assert_ne!(Generator::MAX_INPUT_SIZE, u64::MAX);
// NOTE(review): presumably guards the `h_org = roll + 1` wrap-around
// handling in the update template — confirm against upstream.
const_assert_ne!(u32::MAX % block_size::MIN, block_size::MIN - 1);
}
mod tests;