#![no_std]
#[cfg(feature = "std")]
extern crate std;
#[cfg(any(feature = "std"))]
use core::hash::BuildHasherDefault;
use core::{convert::TryInto, hash::Hasher};
#[cfg(any(feature = "std"))]
use std::collections;
/// A `std::collections::HashMap` preconfigured to use [`ZwoHasher`].
// `all(...)` with a single predicate was redundant; plain `feature = "std"`
// is equivalent.
#[cfg(feature = "std")]
pub type HashMap<K, V> = collections::HashMap<K, V, BuildHasherDefault<ZwoHasher>>;
/// A `std::collections::HashSet` preconfigured to use [`ZwoHasher`].
// `all(...)` with a single predicate was redundant; plain `feature = "std"`
// is equivalent.
#[cfg(feature = "std")]
pub type HashSet<V> = collections::HashSet<V, BuildHasherDefault<ZwoHasher>>;
/// Streaming hasher that keeps a single machine word of internal state.
///
/// Constructed via [`Default`] (state starts at zero) and driven through the
/// [`core::hash::Hasher`] implementation in this file.
pub struct ZwoHasher {
    // Accumulator updated by `Hasher::write_usize` and folded into a `u64`
    // by `Hasher::finish`.
    state: usize,
}
impl Default for ZwoHasher {
#[inline]
fn default() -> ZwoHasher {
ZwoHasher { state: 0 }
}
}
/// Odd multiplicative mixing constant used by `Hasher::write_usize` and
/// `Hasher::finish` (64-bit targets).
// NOTE(review): presumably taken from a published multiplicative-hash/PRNG
// constant family -- TODO confirm provenance.
#[cfg(target_pointer_width = "64")]
const M: usize = 0x2545f4914f6cdd1d;
/// 32-bit counterpart of `M`.
#[cfg(target_pointer_width = "32")]
const M: usize = 0x2c9277b5;
/// Rotation distance applied after multiplying by `M`, moving high-order
/// bits back toward the low end of the word (64-bit targets).
#[cfg(target_pointer_width = "64")]
const R: u32 = 41;
/// 32-bit counterpart of `R`.
#[cfg(target_pointer_width = "32")]
const R: u32 = 21;
/// Integer type twice as wide as `usize`; used for the widening multiply in
/// `Hasher::finish`.
#[cfg(target_pointer_width = "64")]
type WideInt = u128;
#[cfg(target_pointer_width = "32")]
type WideInt = u64;
/// Bit width of `usize`, computed in a const-compatible way: `0usize` has
/// every bit zero, so its zero-bit count equals the type's bit width.
const USIZE_BITS: u32 = 0usize.count_zeros();
/// Byte width of `usize`.
const USIZE_BYTES: usize = core::mem::size_of::<usize>();
impl Hasher for ZwoHasher {
    /// Core mixing step: multiply the state by the odd constant `M`, rotate
    /// right by `R`, then xor in the new word. Every other `write_*` method
    /// funnels into this one.
    #[inline]
    fn write_usize(&mut self, i: usize) {
        self.state = self.state.wrapping_mul(M).rotate_right(R) ^ i;
    }
    /// Finalizes the state into a 64-bit hash: a widening multiply by `M`,
    /// then the high half is folded (subtracted) back onto the low half so
    /// every state bit can influence the result.
    #[inline]
    fn finish(&self) -> u64 {
        let wide = (self.state as WideInt) * (M as WideInt);
        (wide as usize).wrapping_sub((wide >> USIZE_BITS) as usize) as u64
    }
    /// Hashes an arbitrary byte slice.
    ///
    /// Slices of at least one word are consumed word by word, finishing with
    /// a word read from the *end* of the slice that may overlap the previous
    /// chunk (avoiding a per-byte tail loop). Shorter slices are packed into
    /// a single word from two overlapping half-width reads (4+4 bytes on
    /// 64-bit targets, otherwise 2+2 bytes), or a single byte read. An empty
    /// slice leaves the state untouched.
    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        // Mix into a local copy and store back once at the end; presumably
        // this helps keep the state in a register across the loop --
        // NOTE(review): confirm that is the codegen intent.
        let mut copy = ZwoHasher { state: self.state };
        // Only the pointer widths covered by the cfg'd constants above.
        assert!(USIZE_BYTES == 8 || USIZE_BYTES == 4);
        if bytes.len() >= USIZE_BYTES {
            // Consume full words while more than one word remains.
            let mut bytes_left = bytes;
            while bytes_left.len() > USIZE_BYTES {
                let full_chunk: [u8; USIZE_BYTES] =
                    bytes_left[..USIZE_BYTES].try_into().unwrap();
                copy.write_usize(usize::from_ne_bytes(full_chunk));
                bytes_left = &bytes_left[USIZE_BYTES..];
            }
            // Mix in the final word, taken from the end of the slice; it may
            // overlap the last full chunk, which is harmless since every
            // byte still reaches the state. The enclosing branch already
            // guarantees `bytes.len() >= USIZE_BYTES`, so the original's
            // redundant re-check (with an unreachable!() else-arm) is gone.
            let last_chunk: [u8; USIZE_BYTES] =
                bytes[bytes.len() - USIZE_BYTES..].try_into().unwrap();
            copy.write_usize(usize::from_ne_bytes(last_chunk));
        } else if USIZE_BYTES == 8 && bytes.len() >= 4 {
            // 4..8 bytes on a 64-bit target: two overlapping 4-byte reads
            // packed into one word.
            #[cfg(target_pointer_width = "64")]
            {
                let chunk_low: [u8; 4] = bytes[..4].try_into().unwrap();
                let chunk_high: [u8; 4] = bytes[bytes.len() - 4..].try_into().unwrap();
                let chunk_value = (u32::from_ne_bytes(chunk_low) as usize)
                    | ((u32::from_ne_bytes(chunk_high) as usize) << 32);
                copy.write_usize(chunk_value);
            }
            // On 32-bit targets `USIZE_BYTES == 8` is false, so this arm is
            // never taken.
            #[cfg(target_pointer_width = "32")]
            core::unreachable!();
        } else if bytes.len() >= 2 {
            // 2..4 bytes: two overlapping 2-byte reads packed into one word.
            let chunk_low: [u8; 2] = bytes[..2].try_into().unwrap();
            let chunk_high: [u8; 2] = bytes[bytes.len() - 2..].try_into().unwrap();
            let chunk_value = (u16::from_ne_bytes(chunk_low) as usize)
                | ((u16::from_ne_bytes(chunk_high) as usize) << 16);
            copy.write_usize(chunk_value);
        } else if !bytes.is_empty() {
            // Exactly one byte. (`!is_empty()` replaces `len() >= 1`, which
            // also makes the former clippy::len_zero allow unnecessary.)
            copy.write_usize(bytes[0] as usize);
        }
        self.state = copy.state;
    }
    // All fixed-width integer writes forward to `write_usize`.
    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.write_usize(i as usize);
    }
    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.write_usize(i as usize);
    }
    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.write_usize(i as usize);
    }
    /// On 64-bit targets a `u64` fits in one word.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.write_usize(i as usize);
    }
    /// On 32-bit targets a `u64` is mixed as two words, low half first.
    #[cfg(target_pointer_width = "32")]
    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.write_usize(i as usize);
        self.write_usize((i >> 32) as usize);
    }
    /// A `u128` is mixed as two `u64` halves, low half first.
    #[inline]
    fn write_u128(&mut self, i: u128) {
        self.write_u64(i as u64);
        self.write_u64((i >> 64) as u64);
    }
    // Signed writes reuse the unsigned paths via bit-preserving casts.
    #[inline]
    fn write_i8(&mut self, i: i8) {
        self.write_u8(i as u8);
    }
    #[inline]
    fn write_i16(&mut self, i: i16) {
        self.write_u16(i as u16);
    }
    #[inline]
    fn write_i32(&mut self, i: i32) {
        self.write_u32(i as u32);
    }
    #[inline]
    fn write_i64(&mut self, i: i64) {
        self.write_u64(i as u64);
    }
    #[inline]
    fn write_i128(&mut self, i: i128) {
        self.write_u128(i as u128);
    }
    #[inline]
    fn write_isize(&mut self, i: isize) {
        self.write_usize(i as usize);
    }
}
#[cfg(all(test, feature = "std"))]
mod tests {
    use super::*;
    use std::{prelude::v1::*, println};

    /// Hash a single `usize` with a fresh, default-initialized hasher.
    fn hash_usize(value: usize) -> usize {
        let mut hasher = ZwoHasher::default();
        hasher.write_usize(value);
        hasher.finish() as usize
    }

    /// For every (input shift, output shift) pair, hash all 256 byte values
    /// placed at that input position and count how many distinct 16-bit
    /// sub-words appear at that output position. The hash is considered
    /// acceptable only if every pair yields at least 255 distinct values.
    #[test]
    fn usize_byte_subbword_collision_rate() {
        // histogram[d] = number of shift pairs producing exactly d distinct
        // 16-bit sub-words out of the 256 hashed inputs.
        let mut histogram = [0; 257];
        for input_shift in 0..USIZE_BITS - 8 {
            for output_shift in 0..USIZE_BITS - 16 {
                let mut subwords: Vec<_> = (0..256)
                    .map(|byte| (hash_usize(byte << input_shift) >> output_shift) as u16)
                    .collect();
                subwords.sort_unstable();
                subwords.dedup();
                histogram[subwords.len()] += 1;
            }
        }
        // Print the distribution for manual inspection on failure.
        for (distinct, &count) in histogram.iter().enumerate() {
            if count > 0 {
                println!("{}: {}", distinct, count);
            }
        }
        // Reject any shift pair that collapsed more than one collision.
        for (distinct, &count) in histogram.iter().enumerate() {
            assert!(distinct >= 255 || count == 0);
        }
    }
}