use crate::containers::FastVec;
use crate::error::{Result, ZiporaError};
use crate::hash_map::cache_locality::{
AccessPattern, CacheMetrics, CacheOptimizedBucket, HotColdSeparator, Prefetcher,
};
use crate::hash_map::simd_string_ops::{get_global_simd_ops, SimdStringOps};
use crate::memory::cache_layout::{CacheOptimizedAllocator, CacheLayoutConfig, PrefetchHint};
use crate::memory::SecureMemoryPool;
use ahash::RandomState;
use std::borrow::Borrow;
use std::fmt;
use std::hash::{BuildHasher, Hash, Hasher};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::sync::Arc;
/// Collision-resolution strategy for the hash table.
#[derive(Debug, Clone)]
pub enum HashStrategy {
    /// Robin Hood hashing: displaces entries to equalize probe distances.
    RobinHood {
        /// Maximum allowed probe distance before a resize is forced.
        max_probe_distance: u16,
        /// Enable variance-reduction heuristics during displacement.
        variance_reduction: bool,
        /// Use backward-shift deletion instead of tombstones.
        backward_shift: bool,
    },
    /// Separate chaining with linked entries.
    Chaining {
        /// Target load factor before growing.
        load_factor: f64,
        /// Cache the full hash alongside each entry to skip key compares.
        hash_cache: bool,
        /// Use compact (index-based) links rather than pointers.
        compact_links: bool,
    },
    /// Hopscotch hashing: entries stay within a neighborhood of their bucket.
    Hopscotch {
        /// Neighborhood size in slots.
        neighborhood_size: u8,
        /// Maximum displacement attempts before giving up.
        displacement_threshold: u16,
    },
    /// Plain linear probing.
    LinearProbing {
        /// Maximum probe distance before a resize is forced.
        max_probe_distance: u16,
        /// Align buckets to cache-line boundaries.
        cache_aligned: bool,
    },
    /// Cuckoo hashing with multiple hash functions.
    Cuckoo {
        /// Number of independent hash functions.
        num_hash_functions: u8,
        /// Maximum eviction chain length before a rehash.
        max_evictions: u16,
    },
}
/// Backing-storage layout strategy for the hash table.
#[derive(Debug, Clone)]
pub enum StorageStrategy {
    /// Flat bucket/entry vectors (the default).
    Standard {
        /// Initial slot count (expected to be a power of two for masking).
        initial_capacity: usize,
        /// Multiplier applied when growing.
        growth_factor: f64,
    },
    /// Small maps stored inline, spilling to a heap table past a threshold.
    SmallInline {
        /// Number of pairs kept inline before falling back.
        inline_capacity: usize,
        /// Size at which the inline storage falls back to a full table.
        fallback_threshold: usize,
    },
    /// Cache-line-aware layout with hot/cold data separation.
    CacheOptimized {
        /// Cache line size in bytes (typically 64).
        cache_line_size: usize,
        /// Allocate with NUMA awareness.
        numa_aware: bool,
        /// Back the table with huge pages.
        huge_pages: bool,
    },
    /// Arena-backed storage specialized for string keys.
    StringOptimized {
        /// Initial arena size in bytes.
        arena_size: usize,
        /// Cache string prefixes for fast early rejection.
        prefix_cache: bool,
        /// Deduplicate identical strings via interning.
        interning: bool,
    },
    /// Allocation through a shared secure memory pool.
    PoolAllocated {
        /// The pool all chunks are drawn from.
        pool: Arc<SecureMemoryPool>,
        /// Allocation chunk size in bytes.
        chunk_size: usize,
    },
}
/// Hardware/algorithmic optimization profile for map operations.
#[derive(Debug, Clone)]
pub enum OptimizationStrategy {
    /// No special optimizations.
    Standard,
    /// SIMD acceleration for selected operations.
    SimdAccelerated {
        /// Vectorized string comparison/search.
        string_ops: bool,
        /// Vectorized bulk insert/lookup.
        bulk_ops: bool,
        /// Vectorized hash computation.
        hash_computation: bool,
    },
    /// Cache-behavior-driven optimizations.
    CacheAware {
        /// How many buckets ahead to prefetch.
        prefetch_distance: usize,
        /// Separate hot (keys) and cold (values) data.
        hot_cold_separation: bool,
        /// Track access patterns to guide placement.
        access_pattern_tracking: bool,
    },
    /// Everything on: SIMD + cache + prefetch + NUMA.
    HighPerformance {
        simd_enabled: bool,
        cache_optimized: bool,
        prefetch_enabled: bool,
        numa_aware: bool,
    },
}
/// Complete configuration for a [`ZiporaHashMap`]: collision strategy,
/// storage layout, optimization profile, and sizing parameters.
#[derive(Debug, Clone)]
pub struct ZiporaHashMapConfig {
    /// How hash collisions are resolved.
    pub hash_strategy: HashStrategy,
    /// How entries are laid out in memory.
    pub storage_strategy: StorageStrategy,
    /// Which hardware optimizations are enabled.
    pub optimization_strategy: OptimizationStrategy,
    /// Initial slot count hint for the chosen storage.
    pub initial_capacity: usize,
    /// Target load factor before growth.
    pub load_factor: f64,
}
impl Default for ZiporaHashMapConfig {
    /// General-purpose defaults: Robin Hood probing over standard flat
    /// storage with the full high-performance optimization profile.
    fn default() -> Self {
        let hash_strategy = HashStrategy::RobinHood {
            max_probe_distance: 64,
            variance_reduction: true,
            backward_shift: true,
        };
        let storage_strategy = StorageStrategy::Standard {
            initial_capacity: 16,
            growth_factor: 2.0,
        };
        let optimization_strategy = OptimizationStrategy::HighPerformance {
            simd_enabled: true,
            cache_optimized: true,
            prefetch_enabled: true,
            numa_aware: true,
        };
        Self {
            hash_strategy,
            storage_strategy,
            optimization_strategy,
            initial_capacity: 16,
            load_factor: 0.75,
        }
    }
}
impl ZiporaHashMapConfig {
    /// Preset tuned for cache locality: shorter probe distances, hot/cold
    /// separation, prefetching, and a lower load factor to reduce probing.
    pub fn cache_optimized() -> Self {
        Self {
            hash_strategy: HashStrategy::RobinHood {
                max_probe_distance: 32,
                variance_reduction: true,
                backward_shift: true,
            },
            storage_strategy: StorageStrategy::CacheOptimized {
                cache_line_size: 64,
                numa_aware: true,
                huge_pages: false,
            },
            optimization_strategy: OptimizationStrategy::CacheAware {
                prefetch_distance: 2,
                hot_cold_separation: true,
                access_pattern_tracking: true,
            },
            initial_capacity: 64,
            load_factor: 0.6,
        }
    }
    /// Preset tuned for string keys: arena storage with interning and
    /// prefix caching, plus SIMD-accelerated string operations.
    pub fn string_optimized() -> Self {
        Self {
            hash_strategy: HashStrategy::RobinHood {
                max_probe_distance: 48,
                variance_reduction: true,
                backward_shift: true,
            },
            storage_strategy: StorageStrategy::StringOptimized {
                arena_size: 4096,
                prefix_cache: true,
                interning: true,
            },
            optimization_strategy: OptimizationStrategy::SimdAccelerated {
                string_ops: true,
                bulk_ops: true,
                hash_computation: true,
            },
            initial_capacity: 32,
            load_factor: 0.7,
        }
    }
    /// Preset for tiny maps: keep up to `inline_capacity` pairs inline and
    /// fall back to heap storage at twice that size.
    /// Load factor 1.0: inline storage is filled completely before fallback.
    pub fn small_inline(inline_capacity: usize) -> Self {
        Self {
            hash_strategy: HashStrategy::LinearProbing {
                max_probe_distance: inline_capacity as u16,
                cache_aligned: true,
            },
            storage_strategy: StorageStrategy::SmallInline {
                inline_capacity,
                fallback_threshold: inline_capacity * 2,
            },
            optimization_strategy: OptimizationStrategy::Standard,
            initial_capacity: inline_capacity,
            load_factor: 1.0,
        }
    }
    /// Preset for pool-backed use: hopscotch hashing with allocations drawn
    /// from the given secure memory pool.
    pub fn concurrent_pool(pool: Arc<SecureMemoryPool>) -> Self {
        Self {
            hash_strategy: HashStrategy::Hopscotch {
                neighborhood_size: 32,
                displacement_threshold: 128,
            },
            storage_strategy: StorageStrategy::PoolAllocated {
                pool,
                chunk_size: 1024,
            },
            optimization_strategy: OptimizationStrategy::HighPerformance {
                simd_enabled: true,
                cache_optimized: true,
                prefetch_enabled: true,
                numa_aware: true,
            },
            initial_capacity: 64,
            load_factor: 0.65,
        }
    }
}
/// A strategy-configurable hash map supporting multiple storage backends
/// (flat open addressing, small-inline, cache-optimized, string-arena).
pub struct ZiporaHashMap<K, V, S = RandomState>
where
    K: Hash + Eq + Clone,
    V: Clone,
    S: BuildHasher,
{
    /// The configuration this map was built from.
    config: ZiporaHashMapConfig,
    /// Hasher factory used for all key hashing.
    hash_builder: S,
    /// Active backing storage, chosen from `config.storage_strategy`.
    storage: HashMapStorage<K, V>,
    /// Operation counters (insertions, rehashes, ...).
    stats: HashMapStats,
    /// Process-wide SIMD string operations table.
    simd_ops: &'static SimdStringOps,
    /// Present only for cache-aware optimization strategies.
    cache_allocator: Option<CacheOptimizedAllocator>,
    /// Cache behavior metrics.
    cache_metrics: CacheMetrics,
}
/// Runtime storage backend, selected from [`StorageStrategy`].
///
/// The `Standard` variant uses sentinel hash values in `HashEntry`:
/// `0` marks an empty slot and `u64::MAX` marks a tombstone (deleted).
enum HashMapStorage<K, V>
where
    K: Clone,
    V: Clone,
{
    /// Flat open-addressed table. `mask` is `capacity - 1` (power-of-two
    /// capacity assumed) for cheap modular indexing.
    Standard {
        buckets: FastVec<StandardBucket<K, V>>,
        entries: FastVec<HashEntry<K, V>>,
        mask: usize,
    },
    /// Up to 16 pairs inline, with an optional heap fallback table.
    SmallInline {
        inline_data: InlineStorage<K, V>,
        fallback: Option<Box<HashMapStorage<K, V>>>,
        len: usize,
    },
    /// Hot keys and cold values in parallel arrays for cache locality.
    CacheOptimized {
        buckets: FastVec<CacheOptimizedBucket<K, V>>,
        hot_data: FastVec<K>,
        cold_data: FastVec<V>,
        prefetcher: Prefetcher,
    },
    /// Arena-interned string keys with a prefix cache.
    StringOptimized {
        arena: StringArena,
        buckets: FastVec<StringBucket>,
        entries: FastVec<StringEntry<V>>,
        prefix_cache: FastVec<PrefixCacheEntry>,
    },
}
/// Cache-line-aligned bucket for the standard backend.
/// NOTE(review): currently allocated but not consulted by the standard
/// insert/get/remove paths, which operate on `HashEntry` directly — confirm.
#[repr(align(64))]
struct StandardBucket<K, V> {
    hash: u64,
    key: K,
    value: V,
    /// Distance from the key's ideal slot (Robin Hood bookkeeping).
    probe_distance: u16,
    is_occupied: bool,
}
/// Fixed-capacity inline storage of up to 16 key/value pairs, used by the
/// `SmallInline` strategy before falling back to a heap table.
struct InlineStorage<K, V> {
    // Slots are uninitialized until their bit in `occupied` is set.
    data: [MaybeUninit<(K, V)>; 16],
    // Bitmask with one bit per slot (bit i set => data[i] initialized).
    occupied: u16,
}
impl<K, V> InlineStorage<K, V> {
    /// Number of occupied slots (popcount of the occupancy bitmask).
    #[inline]
    pub fn len(&self) -> usize {
        self.occupied.count_ones() as usize
    }
}
/// Byte arena for string keys with optional interning.
struct StringArena {
    /// Concatenated string bytes.
    data: FastVec<u8>,
    /// Start offset of each stored string within `data`.
    offsets: FastVec<u32>,
    /// Interning table: string bytes -> string id (index into `offsets`).
    interned: std::collections::HashMap<Vec<u8>, u32>,
}
/// Bucket for the string-optimized backend: refers to an arena string id.
struct StringBucket {
    hash: u64,
    /// Index into the arena's offset table.
    string_id: u32,
    probe_distance: u16,
    /// Cached first bytes of the key for fast early rejection.
    prefix_cache: u32,
}
/// Value entry for the string-optimized backend, chained via `next`.
struct StringEntry<V> {
    value: V,
    /// Index of the next entry in the chain, if any.
    next: Option<u32>,
}
/// Maps a 64-bit string prefix to its arena string id.
struct PrefixCacheEntry {
    prefix: u64,
    string_id: u32,
}
/// Slot in the standard open-addressed table.
///
/// `hash` doubles as the occupancy marker: `0` = empty, `u64::MAX` =
/// tombstone, anything else = live entry with that hash.
#[derive(Clone)]
struct HashEntry<K, V>
where
    K: Clone,
    V: Clone,
{
    key: K,
    value: V,
    hash: u64,
    /// Chain link (reserved for chaining strategies; unused by probing).
    next: Option<u32>,
}
/// Operation counters for profiling map behavior.
/// NOTE(review): only `insertions` and `rehashes` are currently updated by
/// the visible code paths; the other counters stay zero — confirm.
#[derive(Debug, Default, Clone)]
pub struct HashMapStats {
    pub insertions: u64,
    pub lookups: u64,
    pub collisions: u64,
    pub probe_distance_sum: u64,
    pub rehashes: u64,
    pub cache_hits: u64,
    pub cache_misses: u64,
}
impl<K, V, S> ZiporaHashMap<K, V, S>
where
K: Hash + Eq + Clone,
V: Clone,
S: BuildHasher,
{
pub fn new() -> Result<Self>
where
S: Default,
{
Self::with_config(ZiporaHashMapConfig::default())
}
pub fn with_capacity(capacity: usize) -> Result<Self>
where
S: Default,
{
let mut config = ZiporaHashMapConfig::default();
config.initial_capacity = capacity.max(16);
match &mut config.storage_strategy {
StorageStrategy::Standard { initial_capacity, .. } => {
*initial_capacity = capacity.max(16);
}
_ => {}
}
Self::with_config(config)
}
pub fn with_config(config: ZiporaHashMapConfig) -> Result<Self>
where
S: Default,
{
Self::with_config_and_hasher(config, S::default())
}
pub fn with_config_and_hasher(config: ZiporaHashMapConfig, hash_builder: S) -> Result<Self> {
let simd_ops = get_global_simd_ops();
let cache_allocator = match &config.optimization_strategy {
OptimizationStrategy::CacheAware { .. } | OptimizationStrategy::HighPerformance { cache_optimized: true, .. } => {
Some(CacheOptimizedAllocator::new(CacheLayoutConfig::default()))
}
_ => None,
};
let storage = Self::create_storage(&config)?;
Ok(Self {
config,
hash_builder,
storage,
stats: HashMapStats::default(),
simd_ops,
cache_allocator,
cache_metrics: CacheMetrics::new(),
})
}
fn create_storage(config: &ZiporaHashMapConfig) -> Result<HashMapStorage<K, V>> {
match &config.storage_strategy {
StorageStrategy::Standard { initial_capacity, .. } => {
Ok(HashMapStorage::Standard {
buckets: FastVec::with_capacity(*initial_capacity)?,
entries: FastVec::with_capacity(*initial_capacity)?,
mask: initial_capacity.saturating_sub(1),
})
}
StorageStrategy::SmallInline { inline_capacity, .. } => {
Ok(HashMapStorage::SmallInline {
inline_data: InlineStorage {
data: unsafe { MaybeUninit::uninit().assume_init() },
occupied: 0,
},
fallback: None,
len: 0,
})
}
StorageStrategy::CacheOptimized { .. } => {
Ok(HashMapStorage::CacheOptimized {
buckets: FastVec::with_capacity(config.initial_capacity)?,
hot_data: FastVec::with_capacity(config.initial_capacity)?,
cold_data: FastVec::with_capacity(config.initial_capacity)?,
prefetcher: Prefetcher::new(),
})
}
StorageStrategy::StringOptimized { arena_size, .. } => {
Ok(HashMapStorage::StringOptimized {
arena: StringArena {
data: FastVec::with_capacity(*arena_size)?,
offsets: FastVec::with_capacity(256)?,
interned: std::collections::HashMap::new(),
},
buckets: FastVec::with_capacity(config.initial_capacity)?,
entries: FastVec::with_capacity(config.initial_capacity)?,
prefix_cache: FastVec::with_capacity(config.initial_capacity)?,
})
}
StorageStrategy::PoolAllocated { .. } => {
Ok(HashMapStorage::Standard {
buckets: FastVec::with_capacity(config.initial_capacity)?,
entries: FastVec::with_capacity(config.initial_capacity)?,
mask: config.initial_capacity.saturating_sub(1),
})
}
}
}
    /// Inserts `key` -> `value`, returning the previous value if the key
    /// was already present.
    ///
    /// For the standard backend, a full table is grown once and the insert
    /// retried; the `Err((key, value))` escape from `insert_standard`
    /// returns ownership of the pair so the retry can reuse it.
    ///
    /// # Errors
    /// Returns an error if the table is still full after a resize.
    pub fn insert(&mut self, key: K, value: V) -> Result<Option<V>> {
        self.stats.insertions += 1;
        let hash = self.hash_key(&key);
        match &mut self.storage {
            HashMapStorage::Standard { buckets, entries, mask } => {
                match Self::insert_standard(&self.hash_builder, buckets, entries, mask, key, value, hash) {
                    Ok(result) => Ok(result),
                    // Table full: grow, then re-borrow storage and retry once.
                    Err((key, value)) => {
                        self.resize_storage()?;
                        if let HashMapStorage::Standard { buckets, entries, mask } = &mut self.storage {
                            Self::insert_standard(&self.hash_builder, buckets, entries, mask, key, value, hash)
                                .map_err(|_| crate::error::ZiporaError::invalid_state("Hash table full after resize"))
                        } else {
                            Err(crate::error::ZiporaError::invalid_state("Storage type changed during resize"))
                        }
                    }
                }
            }
            HashMapStorage::SmallInline { inline_data, fallback, len } => {
                Self::insert_small_inline(inline_data, fallback, len, key, value)
            }
            HashMapStorage::CacheOptimized { buckets, hot_data, cold_data, prefetcher } => {
                Self::insert_cache_optimized(buckets, hot_data, cold_data, prefetcher, key, value)
            }
            HashMapStorage::StringOptimized { arena, buckets, entries, prefix_cache } => {
                Self::insert_string_optimized(arena, buckets, entries, prefix_cache, key, value)
            }
        }
    }
    /// Returns a reference to the value for `key`, if present.
    ///
    /// Accepts any borrowed form of the key type (e.g. `&str` for `String`
    /// keys), as long as hashing and equality agree with `K`.
    pub fn get<Q>(&self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        let hash = self.hash_key_borrowed(key);
        match &self.storage {
            HashMapStorage::Standard { buckets, entries, mask } => {
                self.get_standard(buckets, entries, mask, key, hash)
            }
            HashMapStorage::SmallInline { inline_data, fallback, len } => {
                self.get_small_inline(inline_data, fallback, len, key)
            }
            HashMapStorage::CacheOptimized { buckets, hot_data, cold_data, prefetcher } => {
                self.get_cache_optimized(buckets, hot_data, cold_data, prefetcher, key)
            }
            HashMapStorage::StringOptimized { arena, buckets, entries, prefix_cache } => {
                self.get_string_optimized(arena, buckets, entries, prefix_cache, key)
            }
        }
    }
    /// Returns the number of live entries.
    ///
    /// NOTE: for the standard backend this is O(capacity) — it scans all
    /// slots and counts those that are neither empty (hash 0) nor
    /// tombstoned (hash u64::MAX).
    #[inline]
    pub fn len(&self) -> usize {
        match &self.storage {
            HashMapStorage::Standard { entries, .. } => {
                entries.iter().filter(|entry| entry.hash != 0 && entry.hash != u64::MAX).count()
            }
            HashMapStorage::SmallInline { len, .. } => *len,
            HashMapStorage::CacheOptimized { hot_data, .. } => hot_data.len(),
            HashMapStorage::StringOptimized { entries, .. } => entries.len(),
        }
    }
    /// Returns `true` if the map holds no live entries.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Returns the operation counters collected so far.
    pub fn stats(&self) -> &HashMapStats {
        &self.stats
    }
    /// Returns the cache-behavior metrics collected so far.
    pub fn cache_metrics(&self) -> &CacheMetrics {
        &self.cache_metrics
    }
    /// Returns a mutable reference to the value for `key`, if present.
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        match &mut self.storage {
            HashMapStorage::Standard { buckets, entries, mask } => {
                Self::get_mut_standard(&self.hash_builder, buckets, entries, mask, key)
            }
            HashMapStorage::SmallInline { inline_data, fallback, len } => {
                Self::get_mut_small_inline(inline_data, fallback, len, key)
            }
            HashMapStorage::CacheOptimized { buckets, hot_data, cold_data, prefetcher } => {
                Self::get_mut_cache_optimized(buckets, hot_data, cold_data, prefetcher, key)
            }
            HashMapStorage::StringOptimized { arena, buckets, entries, prefix_cache } => {
                Self::get_mut_string_optimized(arena, buckets, entries, prefix_cache, key)
            }
        }
    }
    /// Removes `key`, returning its value if it was present.
    ///
    /// The standard backend marks the slot with a tombstone (hash
    /// u64::MAX) rather than shifting entries back.
    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        match &mut self.storage {
            HashMapStorage::Standard { buckets, entries, mask } => {
                Self::remove_standard(&self.hash_builder, buckets, entries, mask, key)
            }
            HashMapStorage::SmallInline { inline_data, fallback, len } => {
                Self::remove_small_inline(inline_data, fallback, len, key)
            }
            HashMapStorage::CacheOptimized { buckets, hot_data, cold_data, prefetcher } => {
                Self::remove_cache_optimized(buckets, hot_data, cold_data, prefetcher, key)
            }
            HashMapStorage::StringOptimized { arena, buckets, entries, prefix_cache } => {
                Self::remove_string_optimized(arena, buckets, entries, prefix_cache, key)
            }
        }
    }
    /// Removes all entries, keeping the map usable afterwards.
    pub fn clear(&mut self) {
        match &mut self.storage {
            HashMapStorage::Standard { buckets, entries, mask } => {
                Self::clear_standard(buckets, entries, mask)
            }
            HashMapStorage::SmallInline { inline_data, fallback, len } => {
                Self::clear_small_inline(inline_data, fallback, len)
            }
            HashMapStorage::CacheOptimized { buckets, hot_data, cold_data, prefetcher } => {
                Self::clear_cache_optimized(buckets, hot_data, cold_data, prefetcher)
            }
            HashMapStorage::StringOptimized { arena, buckets, entries, prefix_cache } => {
                Self::clear_string_optimized(arena, buckets, entries, prefix_cache)
            }
        }
    }
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self.get(key).is_some()
}
    /// Returns the number of entries the map can hold without reallocating.
    pub fn capacity(&self) -> usize {
        match &self.storage {
            HashMapStorage::Standard { entries, .. } => entries.capacity(),
            // 16 inline slots plus whatever the heap fallback provides.
            HashMapStorage::SmallInline { inline_data, fallback, .. } => {
                16 + fallback.as_ref().map_or(0, |f| match f.as_ref() {
                    HashMapStorage::Standard { entries, .. } => entries.capacity(),
                    _ => 0,
                })
            }
            HashMapStorage::CacheOptimized { hot_data, .. } => hot_data.capacity(),
            HashMapStorage::StringOptimized { entries, .. } => entries.capacity(),
        }
    }
    /// Returns an iterator over `(&K, &V)` pairs in unspecified order.
    pub fn iter(&self) -> ZiporaHashMapIterator<'_, K, V> {
        ZiporaHashMapIterator {
            storage: &self.storage,
            index: 0,
        }
    }
fn hash_key(&self, key: &K) -> u64 {
let mut hasher = self.hash_builder.build_hasher();
key.hash(&mut hasher);
hasher.finish()
}
fn hash_key_borrowed<Q>(&self, key: &Q) -> u64
where
K: Borrow<Q>,
Q: Hash + ?Sized,
{
let mut hasher = self.hash_builder.build_hasher();
key.hash(&mut hasher);
hasher.finish()
}
fn resize_storage(&mut self) -> Result<()> {
match &mut self.storage {
HashMapStorage::Standard { buckets, entries, mask } => {
let old_capacity = entries.len();
let new_capacity = (old_capacity * 2).max(32);
let mut existing_entries = Vec::new();
for entry in entries.iter() {
if entry.hash != 0 {
existing_entries.push((entry.key.clone(), entry.value.clone(), entry.hash));
}
}
let mut new_entries = FastVec::with_capacity(new_capacity)?;
if let Some((template_key, template_value, _)) = existing_entries.first() {
new_entries.resize_with(new_capacity, || HashEntry {
key: template_key.clone(), value: template_value.clone(), hash: 0,
next: None,
})?;
} else {
return Ok(()); }
for entry in new_entries.iter_mut() {
entry.hash = 0;
}
let new_mask = new_capacity - 1;
for (key, value, hash) in existing_entries {
let index = (hash as usize) & new_mask;
let mut inserted = false;
for i in 0..new_capacity {
let probe_index = (index + i) & new_mask;
let entry = &mut new_entries[probe_index];
if entry.hash == 0 {
entry.key = key;
entry.value = value;
entry.hash = hash;
inserted = true;
break;
}
}
if !inserted {
return Err(crate::error::ZiporaError::invalid_state("Failed to reinsert during resize"));
}
}
*entries = new_entries;
*mask = new_mask;
self.stats.rehashes += 1;
Ok(())
}
_ => {
Err(crate::error::ZiporaError::invalid_state("Resize not supported for this storage type"))
}
}
}
fn insert_standard(
hash_builder: &S,
buckets: &mut FastVec<StandardBucket<K, V>>,
entries: &mut FastVec<HashEntry<K, V>>,
mask: &mut usize,
key: K,
value: V,
hash: u64,
) -> std::result::Result<Option<V>, (K, V)> {
if entries.is_empty() {
let capacity = entries.capacity().max(16); if let Err(_) = entries.resize_with(capacity, || HashEntry {
key: key.clone(),
value: value.clone(),
hash: 0,
next: None,
}) {
return Err((key, value));
}
*mask = capacity - 1;
for entry in entries.iter_mut() {
entry.hash = 0;
}
}
let capacity = entries.len();
let index = (hash as usize) & *mask;
for i in 0..capacity {
let probe_index = (index + i) & *mask;
let entry = &mut entries[probe_index];
if entry.hash == 0 || entry.hash == u64::MAX {
entry.key = key;
entry.value = value;
entry.hash = hash;
return Ok(None);
} else if entry.hash == hash && entry.key == key {
let old_value = std::mem::replace(&mut entry.value, value);
return Ok(Some(old_value));
}
}
Err((key, value))
}
    /// Inserts into `SmallInline` storage.
    /// TODO: not implemented — currently discards the pair and reports "no
    /// previous value", silently losing data for this strategy.
    fn insert_small_inline(
        inline_data: &mut InlineStorage<K, V>,
        fallback: &mut Option<Box<HashMapStorage<K, V>>>,
        len: &mut usize,
        key: K,
        value: V,
    ) -> Result<Option<V>> {
        Ok(None)
    }
    /// Inserts into `CacheOptimized` storage.
    /// TODO: not implemented — currently discards the pair (see above).
    fn insert_cache_optimized(
        buckets: &mut FastVec<CacheOptimizedBucket<K, V>>,
        hot_data: &mut FastVec<K>,
        cold_data: &mut FastVec<V>,
        prefetcher: &mut Prefetcher,
        key: K,
        value: V,
    ) -> Result<Option<V>> {
        Ok(None)
    }
    /// Inserts into `StringOptimized` storage.
    /// TODO: not implemented — currently discards the pair (see above).
    fn insert_string_optimized(
        arena: &mut StringArena,
        buckets: &mut FastVec<StringBucket>,
        entries: &mut FastVec<StringEntry<V>>,
        prefix_cache: &mut FastVec<PrefixCacheEntry>,
        key: K,
        value: V,
    ) -> Result<Option<V>> {
        Ok(None)
    }
    /// Linear-probe lookup in the standard table.
    ///
    /// Sentinel protocol: hash 0 (empty) terminates the probe chain;
    /// hash u64::MAX (tombstone) is skipped so chains stay connected
    /// across deletions.
    fn get_standard<'a, Q>(
        &self,
        buckets: &FastVec<StandardBucket<K, V>>,
        entries: &'a FastVec<HashEntry<K, V>>,
        mask: &usize,
        key: &Q,
        hash: u64,
    ) -> Option<&'a V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        if entries.is_empty() {
            return None;
        }
        let capacity = entries.len();
        let index = (hash as usize) & *mask;
        for i in 0..capacity {
            let probe_index = (index + i) & *mask;
            let entry = &entries[probe_index];
            if entry.hash == 0 {
                // Empty slot: the key cannot be further along the chain.
                return None;
            } else if entry.hash == u64::MAX {
                // Tombstone: a live entry may still follow.
                continue;
            } else if entry.hash == hash && entry.key.borrow() == key {
                return Some(&entry.value);
            }
        }
        None
    }
    /// Lookup in `SmallInline` storage.
    /// TODO: not implemented — always reports the key as absent.
    fn get_small_inline<Q>(
        &self,
        inline_data: &InlineStorage<K, V>,
        fallback: &Option<Box<HashMapStorage<K, V>>>,
        len: &usize,
        key: &Q,
    ) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        None
    }
    /// Lookup in `CacheOptimized` storage.
    /// TODO: not implemented — always reports the key as absent.
    fn get_cache_optimized<Q>(
        &self,
        buckets: &FastVec<CacheOptimizedBucket<K, V>>,
        hot_data: &FastVec<K>,
        cold_data: &FastVec<V>,
        prefetcher: &Prefetcher,
        key: &Q,
    ) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        None
    }
    /// Lookup in `StringOptimized` storage.
    /// TODO: not implemented — always reports the key as absent.
    fn get_string_optimized<Q>(
        &self,
        arena: &StringArena,
        buckets: &FastVec<StringBucket>,
        entries: &FastVec<StringEntry<V>>,
        prefix_cache: &FastVec<PrefixCacheEntry>,
        key: &Q,
    ) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        None
    }
    /// Mutable linear-probe lookup in the standard table.
    ///
    /// Probes with a shared borrow first and records the hit index, then
    /// re-borrows mutably — avoids holding a mutable borrow across the scan.
    fn get_mut_standard<'a, Q>(
        hash_builder: &S,
        buckets: &'a mut FastVec<StandardBucket<K, V>>,
        entries: &'a mut FastVec<HashEntry<K, V>>,
        mask: &mut usize,
        key: &Q,
    ) -> Option<&'a mut V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        if entries.is_empty() {
            return None;
        }
        let mut hasher = hash_builder.build_hasher();
        key.hash(&mut hasher);
        let hash = hasher.finish();
        let capacity = entries.len();
        let index = (hash as usize) & *mask;
        let mut found_index = None;
        for i in 0..capacity {
            let probe_index = (index + i) & *mask;
            let entry = &entries[probe_index];
            if entry.hash == 0 {
                // Empty slot terminates the chain.
                break;
            } else if entry.hash == u64::MAX {
                // Tombstone: keep scanning.
                continue;
            } else if entry.hash == hash && entry.key.borrow() == key {
                found_index = Some(probe_index);
                break;
            }
        }
        if let Some(idx) = found_index {
            Some(&mut entries[idx].value)
        } else {
            None
        }
    }
    /// Mutable lookup in `SmallInline` storage.
    /// TODO: not implemented — always reports the key as absent.
    fn get_mut_small_inline<'a, Q>(
        inline_data: &'a mut InlineStorage<K, V>,
        fallback: &'a mut Option<Box<HashMapStorage<K, V>>>,
        len: &mut usize,
        key: &Q,
    ) -> Option<&'a mut V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        None
    }
    /// Mutable lookup in `CacheOptimized` storage.
    /// TODO: not implemented — always reports the key as absent.
    fn get_mut_cache_optimized<'a, Q>(
        buckets: &'a mut FastVec<CacheOptimizedBucket<K, V>>,
        hot_data: &mut FastVec<K>,
        cold_data: &'a mut FastVec<V>,
        prefetcher: &mut Prefetcher,
        key: &Q,
    ) -> Option<&'a mut V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        None
    }
    /// Mutable lookup in `StringOptimized` storage.
    /// TODO: not implemented — always reports the key as absent.
    fn get_mut_string_optimized<'a, Q>(
        arena: &mut StringArena,
        buckets: &mut FastVec<StringBucket>,
        entries: &'a mut FastVec<StringEntry<V>>,
        prefix_cache: &mut FastVec<PrefixCacheEntry>,
        key: &Q,
    ) -> Option<&'a mut V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        None
    }
    /// Removes `key` from the standard table by tombstoning its slot
    /// (hash := u64::MAX), so later probe chains stay connected.
    ///
    /// The key and value remain physically in the slot until it is reused;
    /// the returned value is a clone of the stored one.
    fn remove_standard<Q>(
        hash_builder: &S,
        buckets: &mut FastVec<StandardBucket<K, V>>,
        entries: &mut FastVec<HashEntry<K, V>>,
        mask: &mut usize,
        key: &Q,
    ) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        if entries.is_empty() {
            return None;
        }
        let mut hasher = hash_builder.build_hasher();
        key.hash(&mut hasher);
        let hash = hasher.finish();
        let capacity = entries.len();
        let index = (hash as usize) & *mask;
        for i in 0..capacity {
            let probe_index = (index + i) & *mask;
            let entry = &mut entries[probe_index];
            if entry.hash == 0 {
                // Empty slot terminates the chain: key absent.
                return None;
            } else if entry.hash == hash && entry.key.borrow() == key {
                // Tombstones (u64::MAX) fall through this comparison and
                // are implicitly skipped by the loop.
                let old_value = entry.value.clone();
                entry.hash = u64::MAX;
                return Some(old_value);
            }
        }
        None
    }
    /// Backward-shift deletion for Robin Hood probing: after emptying `pos`,
    /// pulls each following entry back one slot while doing so does not move
    /// it before its ideal position.
    ///
    /// NOTE(review): currently unused — `remove_standard` tombstones
    /// instead, even though the default `HashStrategy::RobinHood` config
    /// sets `backward_shift: true`. The wraparound arm of the `can_move`
    /// test looks plausible but is unexercised — verify before wiring in.
    fn backward_shift_delete(
        entries: &mut FastVec<HashEntry<K, V>>,
        mask: usize,
        mut pos: usize,
    )
    where
        K: Clone,
        V: Clone,
    {
        // Mark the removed slot empty, then try to fill it from the right.
        entries[pos].hash = 0;
        loop {
            let next_pos = (pos + 1) & mask;
            let next_entry = &entries[next_pos];
            if next_entry.hash == 0 {
                // Chain ends: nothing further to shift back.
                break;
            }
            let ideal_pos = (next_entry.hash as usize) & mask;
            // An entry may move back only if its ideal slot is at or before
            // the hole (accounting for index wraparound).
            let can_move = if ideal_pos <= pos {
                true
            } else {
                if pos < next_pos {
                    ideal_pos > pos && ideal_pos <= next_pos
                } else {
                    ideal_pos > pos || ideal_pos <= next_pos
                }
            };
            if !can_move {
                break;
            }
            entries[pos] = entries[next_pos].clone();
            entries[next_pos].hash = 0;
            pos = next_pos;
        }
    }
    /// Removes from `SmallInline` storage.
    /// TODO: not implemented — always reports the key as absent.
    fn remove_small_inline<Q>(
        inline_data: &mut InlineStorage<K, V>,
        fallback: &mut Option<Box<HashMapStorage<K, V>>>,
        len: &mut usize,
        key: &Q,
    ) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        None
    }
    /// Removes from `CacheOptimized` storage.
    /// TODO: not implemented — always reports the key as absent.
    fn remove_cache_optimized<Q>(
        buckets: &mut FastVec<CacheOptimizedBucket<K, V>>,
        hot_data: &mut FastVec<K>,
        cold_data: &mut FastVec<V>,
        prefetcher: &mut Prefetcher,
        key: &Q,
    ) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        None
    }
    /// Removes from `StringOptimized` storage.
    /// TODO: not implemented — always reports the key as absent.
    fn remove_string_optimized<Q>(
        arena: &mut StringArena,
        buckets: &mut FastVec<StringBucket>,
        entries: &mut FastVec<StringEntry<V>>,
        prefix_cache: &mut FastVec<PrefixCacheEntry>,
        key: &Q,
    ) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        None
    }
    /// Clears the standard table. Resetting `mask` to 0 makes the next
    /// insert re-run its lazy initialization path.
    fn clear_standard(
        buckets: &mut FastVec<StandardBucket<K, V>>,
        entries: &mut FastVec<HashEntry<K, V>>,
        mask: &mut usize,
    ) {
        buckets.clear();
        entries.clear();
        *mask = 0;
    }
    /// Clears `SmallInline` storage.
    /// NOTE(review): inline slots hold `MaybeUninit<(K, V)>`; resetting
    /// `len` without dropping occupied pairs leaks K/V with owned data —
    /// confirm once inline insertion is implemented. The fallback table is
    /// simply dropped.
    fn clear_small_inline(
        inline_data: &mut InlineStorage<K, V>,
        fallback: &mut Option<Box<HashMapStorage<K, V>>>,
        len: &mut usize,
    ) {
        *len = 0;
        if let Some(fallback) = fallback.take() {
        }
    }
    /// Clears `CacheOptimized` storage. The prefetcher keeps its state.
    fn clear_cache_optimized(
        buckets: &mut FastVec<CacheOptimizedBucket<K, V>>,
        hot_data: &mut FastVec<K>,
        cold_data: &mut FastVec<V>,
        prefetcher: &mut Prefetcher,
    ) {
        buckets.clear();
        hot_data.clear();
        cold_data.clear();
    }
fn clear_string_optimized(
arena: &mut StringArena,
buckets: &mut FastVec<StringBucket>,
entries: &mut FastVec<StringEntry<V>>,
prefix_cache: &mut FastVec<PrefixCacheEntry>,
) {
buckets.clear();
entries.clear();
prefix_cache.clear();
}
}
impl<K, V, S> Default for ZiporaHashMap<K, V, S>
where
    K: Hash + Eq + Clone,
    V: Clone,
    S: BuildHasher + Default,
{
    /// Equivalent to [`ZiporaHashMap::new`].
    ///
    /// # Panics
    /// Panics if initial storage allocation fails (`Default` cannot return
    /// a `Result`).
    fn default() -> Self {
        match Self::new() {
            Ok(map) => map,
            Err(e) => panic!(
                "ZiporaHashMap creation failed in Default: {}. This indicates severe memory pressure.",
                e
            ),
        }
    }
}
impl<K, V, S> fmt::Debug for ZiporaHashMap<K, V, S>
where
    K: Hash + Eq + Clone + fmt::Debug,
    V: Clone + fmt::Debug,
    S: BuildHasher,
{
    /// Summarizes the map (length, configuration, stats) without dumping
    /// individual entries.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("ZiporaHashMap");
        dbg.field("len", &self.len());
        dbg.field("config", &self.config);
        dbg.field("stats", &self.stats);
        dbg.finish()
    }
}
impl<K, V, S> Clone for ZiporaHashMap<K, V, S>
where
    K: Hash + Eq + Clone,
    V: Clone,
    S: BuildHasher + Clone,
{
    /// Clones the configuration, hasher, and all key/value pairs.
    ///
    /// Previously only the empty shell was cloned — the contents were
    /// silently dropped. Pairs are now copied via `iter()` + `insert()`.
    ///
    /// # Panics
    /// Panics if allocation fails while building the clone (`Clone` cannot
    /// return a `Result`).
    fn clone(&self) -> Self {
        let mut new_map = Self::with_config_and_hasher(self.config.clone(), self.hash_builder.clone())
            .unwrap_or_else(|e| {
                panic!("ZiporaHashMap clone failed: {}. \
                This indicates severe memory pressure.", e)
            });
        for (key, value) in self.iter() {
            if let Err(e) = new_map.insert(key.clone(), value.clone()) {
                panic!("ZiporaHashMap clone failed: {}. \
                This indicates severe memory pressure.", e);
            }
        }
        new_map
    }
}
/// Borrowing iterator over a map's live `(&K, &V)` pairs.
pub struct ZiporaHashMapIterator<'a, K, V>
where
    K: Clone,
    V: Clone,
{
    /// Storage being walked.
    storage: &'a HashMapStorage<K, V>,
    /// Next slot index to examine.
    index: usize,
}
impl<'a, K, V> Iterator for ZiporaHashMapIterator<'a, K, V>
where
    K: Clone,
    V: Clone,
{
    type Item = (&'a K, &'a V);
    /// Yields live key/value pairs in unspecified order.
    fn next(&mut self) -> Option<Self::Item> {
        match self.storage {
            HashMapStorage::Standard { entries, .. } => {
                while self.index < entries.len() {
                    let entry = &entries[self.index];
                    self.index += 1;
                    // Skip empty slots (hash 0) AND tombstones (u64::MAX).
                    // The previous `hash != 0` test yielded removed entries.
                    if entry.hash != 0 && entry.hash != u64::MAX {
                        return Some((&entry.key, &entry.value));
                    }
                }
                None
            }
            // TODO: inline-storage iteration not yet implemented.
            HashMapStorage::SmallInline { .. } => {
                None
            }
            HashMapStorage::CacheOptimized { hot_data, cold_data, .. } => {
                // Keys and values live in parallel arrays at the same index.
                if self.index < hot_data.len() && self.index < cold_data.len() {
                    let key = &hot_data[self.index];
                    let value = &cold_data[self.index];
                    self.index += 1;
                    Some((key, value))
                } else {
                    None
                }
            }
            // TODO: string-optimized iteration not yet implemented.
            HashMapStorage::StringOptimized { .. } => {
                None
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_unified_hash_map_creation() {
        let map: ZiporaHashMap<String, i32> = ZiporaHashMap::new().unwrap();
        assert_eq!(map.len(), 0);
        assert!(map.is_empty());
    }
    /// Exercises the default (Standard) storage end to end: insert,
    /// overwrite, lookup, and removal. Previously no test touched any
    /// mutation path.
    #[test]
    fn test_insert_get_remove_roundtrip() {
        let mut map: ZiporaHashMap<String, i32> = ZiporaHashMap::new().unwrap();
        assert_eq!(map.insert("a".to_string(), 1).unwrap(), None);
        assert_eq!(map.insert("a".to_string(), 2).unwrap(), Some(1));
        assert_eq!(map.get("a"), Some(&2));
        assert!(map.contains_key("a"));
        assert_eq!(map.len(), 1);
        assert_eq!(map.remove("a"), Some(2));
        assert!(map.is_empty());
    }
    #[test]
    fn test_cache_optimized_config() {
        let map: ZiporaHashMap<String, i32> =
            ZiporaHashMap::with_config(ZiporaHashMapConfig::cache_optimized()).unwrap();
        assert_eq!(map.len(), 0);
    }
    #[test]
    fn test_string_optimized_config() {
        let map: ZiporaHashMap<String, i32> =
            ZiporaHashMap::with_config(ZiporaHashMapConfig::string_optimized()).unwrap();
        assert_eq!(map.len(), 0);
    }
    #[test]
    fn test_small_inline_config() {
        let map: ZiporaHashMap<i32, String> =
            ZiporaHashMap::with_config(ZiporaHashMapConfig::small_inline(4)).unwrap();
        assert_eq!(map.len(), 0);
    }
}