use crate::error::{Result, ZiporaError};
use crate::memory::{SecureMemoryPool, get_global_pool_for_size};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicU32, AtomicUsize, Ordering};
use std::sync::{Arc, RwLock, Mutex};
use std::hash::{Hash, Hasher};
use std::fmt::Debug;
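/// Default number of entries an LRU map holds before it starts evicting.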
pub const DEFAULT_LRU_CAPACITY: usize = 1024;
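/// Sentinel node index meaning "no neighbor" in the intrusive linked list.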
const INVALID_NODE: u32 = u32::MAX;
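/// Tuning knobs for an [`LruMap`].
///
/// Start from `Default` or one of the `*_optimized` presets, override the
/// fields you care about, and call [`validate`](Self::validate). A minimal
/// sketch (marked `ignore` because the `use` path depends on where this
/// module sits in the crate):
///
/// ```ignore
/// let config = LruMapConfig {
///     capacity: 4096,
///     ..LruMapConfig::performance_optimized()
/// };
/// config.validate()?;
/// ```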
#[derive(Debug, Clone)]
pub struct LruMapConfig {
pub capacity: usize,
pub initial_hash_capacity: usize,
pub enable_statistics: bool,
pub use_secure_memory: bool,
pub load_factor: f64,
pub enable_access_tracking: bool,
pub prefetch_distance: usize,
}
impl Default for LruMapConfig {
fn default() -> Self {
Self {
capacity: DEFAULT_LRU_CAPACITY,
initial_hash_capacity: 128,
enable_statistics: true,
use_secure_memory: true,
load_factor: 0.75,
enable_access_tracking: true,
prefetch_distance: 1,
}
}
}
impl LruMapConfig {
/// Preset tuned for throughput: larger capacity, secure memory disabled,
/// per-entry access tracking off, and more aggressive prefetching.
pub fn performance_optimized() -> Self {
Self {
capacity: 8192,
initial_hash_capacity: 1024,
enable_statistics: true,
use_secure_memory: false,
load_factor: 0.75,
enable_access_tracking: false,
prefetch_distance: 2,
}
}
/// Preset tuned for a small footprint: fewer entries, a higher load factor,
/// and statistics disabled.
pub fn memory_optimized() -> Self {
Self {
capacity: 512,
initial_hash_capacity: 64,
enable_statistics: false,
use_secure_memory: true,
load_factor: 0.85,
enable_access_tracking: false,
prefetch_distance: 0,
}
}
/// Preset tuned for hardening: secure memory, full statistics and access
/// tracking, and a conservative load factor.
pub fn security_optimized() -> Self {
Self {
capacity: 1024,
initial_hash_capacity: 128,
enable_statistics: true,
use_secure_memory: true,
load_factor: 0.7,
enable_access_tracking: true,
prefetch_distance: 1,
}
}
/// Checks that the configuration values are internally consistent.
pub fn validate(&self) -> Result<()> {
if self.capacity == 0 {
return Err(ZiporaError::invalid_parameter("Capacity must be > 0"));
}
if !self.initial_hash_capacity.is_power_of_two() {
return Err(ZiporaError::invalid_parameter("Initial hash capacity must be a power of 2"));
}
if self.load_factor <= 0.0 || self.load_factor >= 1.0 {
return Err(ZiporaError::invalid_parameter("Load factor must be strictly between 0.0 and 1.0"));
}
Ok(())
}
}
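/// Operation counters shared by all clones of the map's statistics handle.
/// Every field is a relaxed atomic, so readings are cheap but represent an
/// eventually consistent view rather than an exact snapshot.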
#[derive(Debug, Default)]
pub struct LruMapStatistics {
pub get_count: AtomicU64,
pub hit_count: AtomicU64,
pub miss_count: AtomicU64,
pub put_count: AtomicU64,
pub eviction_count: AtomicU64,
pub collision_count: AtomicU64,
pub max_probe_distance: AtomicU32,
pub entry_count: AtomicUsize,
pub memory_usage: AtomicUsize,
}
impl LruMapStatistics {
pub fn new() -> Self {
Default::default()
}
pub fn hit_ratio(&self) -> f64 {
let hits = self.hit_count.load(Ordering::Relaxed) as f64;
let total = self.get_count.load(Ordering::Relaxed) as f64;
if total > 0.0 { hits / total } else { 0.0 }
}
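/// Rough probe-cost indicator: the maximum observed probe distance amortized
/// over all lookups. A true per-lookup average is not tracked, so treat this
/// as an upper-bound estimate.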
pub fn avg_probe_distance(&self) -> f64 {
let gets = self.get_count.load(Ordering::Relaxed) as f64;
let max_probe = self.max_probe_distance.load(Ordering::Relaxed) as f64;
if gets > 0.0 { max_probe / gets } else { 0.0 }
}
pub fn record_hit(&self) {
self.get_count.fetch_add(1, Ordering::Relaxed);
self.hit_count.fetch_add(1, Ordering::Relaxed);
}
pub fn record_miss(&self) {
self.get_count.fetch_add(1, Ordering::Relaxed);
self.miss_count.fetch_add(1, Ordering::Relaxed);
}
pub fn record_put(&self) {
self.put_count.fetch_add(1, Ordering::Relaxed);
}
pub fn record_eviction(&self) {
self.eviction_count.fetch_add(1, Ordering::Relaxed);
}
pub fn update_memory_usage(&self, delta: isize) {
if delta >= 0 {
self.memory_usage.fetch_add(delta as usize, Ordering::Relaxed);
} else {
// `unsigned_abs` cannot overflow, unlike `(-delta) as usize` for `isize::MIN`.
self.memory_usage.fetch_sub(delta.unsigned_abs(), Ordering::Relaxed);
}
}
}
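/// Hook invoked when an entry is evicted to make room for a new one. The
/// callback runs synchronously on the inserting thread, so implementations
/// should be cheap. A minimal sketch of a logging callback:
///
/// ```ignore
/// struct LogEvictions;
///
/// impl EvictionCallback<String, u64> for LogEvictions {
///     fn on_evict(&self, key: &String, value: &u64) {
///         eprintln!("evicted {key} -> {value}");
///     }
/// }
/// ```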
pub trait EvictionCallback<K, V>: Send + Sync {
fn on_evict(&self, key: &K, value: &V);
}
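/// Default callback that ignores evictions.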
#[derive(Clone)]
pub struct NoOpEvictionCallback;
impl<K, V> EvictionCallback<K, V> for NoOpEvictionCallback {
fn on_evict(&self, _key: &K, _value: &V) {}
}
/// Cache-line-aligned entry in the preallocated node arena. The non-atomic
/// `is_valid` flag is only read or written while holding the map's `nodes` lock.
#[repr(align(64))]
struct LruNode<K, V> {
key: K,
value: V,
prev: AtomicU32,
next: AtomicU32,
hash: u64,
access_count: AtomicU32,
last_access: AtomicU64,
is_valid: bool,
}
impl<K, V> LruNode<K, V> {
fn new(key: K, value: V, hash: u64) -> Self {
Self {
key,
value,
prev: AtomicU32::new(INVALID_NODE),
next: AtomicU32::new(INVALID_NODE),
hash,
access_count: AtomicU32::new(1),
last_access: AtomicU64::new(Self::current_timestamp()),
is_valid: true,
}
}
fn current_timestamp() -> u64 {
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_nanos() as u64
}
fn update_access(&self) {
self.access_count.fetch_add(1, Ordering::Relaxed);
self.last_access.store(Self::current_timestamp(), Ordering::Relaxed);
}
fn reset(&mut self) {
self.prev.store(INVALID_NODE, Ordering::Relaxed);
self.next.store(INVALID_NODE, Ordering::Relaxed);
self.access_count.store(0, Ordering::Relaxed);
self.last_access.store(0, Ordering::Relaxed);
self.is_valid = false;
}
}
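/// Intrusive doubly linked recency list over indices into the node arena.
/// Despite the atomic fields, this list is not lock-free: every method that
/// takes `&mut [LruNode<K, V>]` must be called while holding the map's
/// `nodes` write lock, which serializes all structural mutation.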
struct LruList {
head: AtomicU32,
tail: AtomicU32,
count: AtomicUsize,
}
impl LruList {
fn new() -> Self {
Self {
head: AtomicU32::new(INVALID_NODE),
tail: AtomicU32::new(INVALID_NODE),
count: AtomicUsize::new(0),
}
}
fn insert_head<K, V>(&self, nodes: &mut [LruNode<K, V>], node_idx: u32) {
let old_head = self.head.swap(node_idx, Ordering::Relaxed);
nodes[node_idx as usize].prev.store(INVALID_NODE, Ordering::Relaxed);
nodes[node_idx as usize].next.store(old_head, Ordering::Relaxed);
if old_head != INVALID_NODE {
nodes[old_head as usize].prev.store(node_idx, Ordering::Relaxed);
} else {
self.tail.store(node_idx, Ordering::Relaxed);
}
self.count.fetch_add(1, Ordering::Relaxed);
}
fn remove<K, V>(&self, nodes: &mut [LruNode<K, V>], node_idx: u32) {
let prev = nodes[node_idx as usize].prev.load(Ordering::Relaxed);
let next = nodes[node_idx as usize].next.load(Ordering::Relaxed);
if prev != INVALID_NODE {
nodes[prev as usize].next.store(next, Ordering::Relaxed);
} else {
self.head.store(next, Ordering::Relaxed);
}
if next != INVALID_NODE {
nodes[next as usize].prev.store(prev, Ordering::Relaxed);
} else {
self.tail.store(prev, Ordering::Relaxed);
}
nodes[node_idx as usize].prev.store(INVALID_NODE, Ordering::Relaxed);
nodes[node_idx as usize].next.store(INVALID_NODE, Ordering::Relaxed);
self.count.fetch_sub(1, Ordering::Relaxed);
}
fn move_to_head<K, V>(&self, nodes: &mut [LruNode<K, V>], node_idx: u32) {
if self.head.load(Ordering::Relaxed) == node_idx {
nodes[node_idx as usize].update_access();
return;
}
self.remove(nodes, node_idx);
self.insert_head(nodes, node_idx);
nodes[node_idx as usize].update_access();
}
fn get_lru_node(&self) -> u32 {
self.tail.load(Ordering::Relaxed)
}
fn len(&self) -> usize {
self.count.load(Ordering::Relaxed)
}
}
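/// A fixed-capacity, thread-safe LRU cache.
///
/// Keys map to indices into a preallocated node arena via a `HashMap`; an
/// intrusive doubly linked list over the same arena tracks recency, with the
/// most-recently-used entry at the head and the eviction victim at the tail.
///
/// A minimal usage sketch (marked `ignore` because the `use` path depends on
/// where this module sits in the crate):
///
/// ```ignore
/// let cache: LruMap<i32, String> = LruMap::new(2)?;
/// cache.put(1, "one".to_string())?;
/// cache.put(2, "two".to_string())?;
/// assert_eq!(cache.get(&1), Some("one".to_string())); // promotes key 1
/// cache.put(3, "three".to_string())?; // evicts key 2, the LRU entry
/// assert_eq!(cache.get(&2), None);
/// ```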
pub struct LruMap<K, V, E = NoOpEvictionCallback>
where
K: Hash + Eq + Clone + Default,
V: Clone + Default,
E: EvictionCallback<K, V>,
{
config: LruMapConfig,
hash_map: RwLock<HashMap<K, u32>>,
nodes: RwLock<Vec<LruNode<K, V>>>,
lru_list: LruList,
free_nodes: Mutex<Vec<u32>>,
stats: Arc<LruMapStatistics>,
eviction_callback: E,
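// Currently unused by the operations below; retained (by assumption) so the
// shared secure pool stays alive for the lifetime of the map.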
memory_pool: Option<Arc<SecureMemoryPool>>,
}
impl<K, V> LruMap<K, V, NoOpEvictionCallback>
where
K: Hash + Eq + Clone + Default,
V: Clone + Default,
{
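/// Creates a map holding at most `capacity` entries, with default config.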
pub fn new(capacity: usize) -> Result<Self> {
let config = LruMapConfig {
capacity,
..Default::default()
};
Self::with_config(config)
}
/// Creates a map from an explicit configuration, using the no-op eviction
/// callback. Delegates to `with_config_and_callback` so the construction
/// logic lives in one place.
pub fn with_config(config: LruMapConfig) -> Result<Self> {
Self::with_config_and_callback(config, NoOpEvictionCallback)
}
}
impl<K, V, E> LruMap<K, V, E>
where
K: Hash + Eq + Clone + Default,
V: Clone + Default,
E: EvictionCallback<K, V>,
{
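/// Creates a map with default configuration and a custom eviction callback.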
pub fn with_eviction_callback(capacity: usize, callback: E) -> Result<Self> {
let config = LruMapConfig {
capacity,
..Default::default()
};
Self::with_config_and_callback(config, callback)
}
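/// Creates a map from an explicit configuration and a custom eviction
/// callback. The node arena and free list are fully preallocated here, so
/// inserts never allocate nodes after construction.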
pub fn with_config_and_callback(config: LruMapConfig, callback: E) -> Result<Self> {
config.validate()?;
let memory_pool = if config.use_secure_memory {
Some(get_global_pool_for_size(4096).clone())
} else {
None
};
let mut nodes = Vec::with_capacity(config.capacity);
let mut free_nodes = Vec::with_capacity(config.capacity);
for i in 0..config.capacity {
nodes.push(LruNode {
key: K::default(),
value: V::default(),
prev: AtomicU32::new(INVALID_NODE),
next: AtomicU32::new(INVALID_NODE),
hash: 0,
access_count: AtomicU32::new(0),
last_access: AtomicU64::new(0),
is_valid: false,
});
free_nodes.push(i as u32);
}
let initial_capacity = config.initial_hash_capacity;
Ok(Self {
config,
hash_map: RwLock::new(HashMap::with_capacity(initial_capacity)),
nodes: RwLock::new(nodes),
lru_list: LruList::new(),
free_nodes: Mutex::new(free_nodes),
stats: Arc::new(LruMapStatistics::new()),
eviction_callback: callback,
memory_pool,
})
}
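/// Looks up `key`, cloning the value and promoting the entry to
/// most-recently-used on a hit. Returns `None` on a miss.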
pub fn get(&self, key: &K) -> Option<V> {
let hash_map = self.hash_map.read().ok()?;
let node_idx = match hash_map.get(key) {
Some(&idx) => idx,
None => {
if self.config.enable_statistics {
self.stats.record_miss();
}
return None;
}
};
drop(hash_map);
let mut nodes = self.nodes.write().ok()?;
if (node_idx as usize) >= nodes.len() || !nodes[node_idx as usize].is_valid {
// A stale index (e.g. the entry was evicted between locks) is a miss too.
if self.config.enable_statistics {
self.stats.record_miss();
}
return None;
}
self.lru_list.move_to_head(&mut nodes, node_idx);
let value = nodes[node_idx as usize].value.clone();
if self.config.enable_statistics {
self.stats.record_hit();
}
Some(value)
}
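/// Inserts `key`/`value`, returning the previous value if the key was
/// already present. When the map is full, the least-recently-used entry is
/// evicted first and the eviction callback is invoked for it.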
pub fn put(&self, key: K, value: V) -> Result<Option<V>> {
let hash = self.hash_key(&key);
{
let hash_map = self.hash_map.read().map_err(|_| ZiporaError::out_of_memory(0))?;
if let Some(&node_idx) = hash_map.get(&key) {
let mut nodes = self.nodes.write().map_err(|_| ZiporaError::out_of_memory(0))?;
if (node_idx as usize) < nodes.len() && nodes[node_idx as usize].is_valid {
let old_value = std::mem::replace(&mut nodes[node_idx as usize].value, value);
self.lru_list.move_to_head(&mut nodes, node_idx);
if self.config.enable_statistics {
self.stats.record_put();
}
return Ok(Some(old_value));
}
}
}
if self.lru_list.len() >= self.config.capacity {
self.evict_lru()?;
}
let node_idx = self.allocate_node()?;
{
let mut nodes = self.nodes.write().map_err(|_| ZiporaError::out_of_memory(0))?;
nodes[node_idx as usize] = LruNode::new(key.clone(), value, hash);
self.lru_list.insert_head(&mut nodes, node_idx);
}
{
let mut hash_map = self.hash_map.write().map_err(|_| ZiporaError::out_of_memory(0))?;
hash_map.insert(key, node_idx);
}
if self.config.enable_statistics {
self.stats.record_put();
self.stats.entry_count.store(self.lru_list.len(), Ordering::Relaxed);
}
Ok(None)
}
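/// Removes `key`, returning its value and recycling its node onto the free
/// list. The eviction callback is not invoked for explicit removals.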
pub fn remove(&self, key: &K) -> Option<V> {
let mut hash_map = self.hash_map.write().ok()?;
let node_idx = hash_map.remove(key)?;
drop(hash_map);
let mut nodes = self.nodes.write().ok()?;
if (node_idx as usize) >= nodes.len() || !nodes[node_idx as usize].is_valid {
return None;
}
self.lru_list.remove(&mut nodes, node_idx);
let value = nodes[node_idx as usize].value.clone();
nodes[node_idx as usize].reset();
if let Ok(mut free_nodes) = self.free_nodes.lock() {
free_nodes.push(node_idx);
}
if self.config.enable_statistics {
self.stats.entry_count.store(self.lru_list.len(), Ordering::Relaxed);
}
Some(value)
}
pub fn contains_key(&self, key: &K) -> bool {
if let Ok(hash_map) = self.hash_map.read() {
hash_map.contains_key(key)
} else {
false
}
}
#[inline]
pub fn len(&self) -> usize {
self.lru_list.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
#[inline]
pub fn capacity(&self) -> usize {
self.config.capacity
}
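/// Removes every entry and returns all nodes to the free list. Eviction
/// callbacks are not invoked for the cleared entries.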
pub fn clear(&self) -> Result<()> {
let mut hash_map = self.hash_map.write().map_err(|_| ZiporaError::out_of_memory(0))?;
let mut nodes = self.nodes.write().map_err(|_| ZiporaError::out_of_memory(0))?;
let mut free_nodes = self.free_nodes.lock().map_err(|_| ZiporaError::out_of_memory(0))?;
hash_map.clear();
free_nodes.clear();
for (i, node) in nodes.iter_mut().enumerate() {
if node.is_valid {
node.reset();
free_nodes.push(i as u32);
}
}
self.lru_list.head.store(INVALID_NODE, Ordering::Relaxed);
self.lru_list.tail.store(INVALID_NODE, Ordering::Relaxed);
self.lru_list.count.store(0, Ordering::Relaxed);
if self.config.enable_statistics {
self.stats.entry_count.store(0, Ordering::Relaxed);
}
Ok(())
}
pub fn stats(&self) -> &LruMapStatistics {
&self.stats
}
pub fn stats_arc(&self) -> Arc<LruMapStatistics> {
self.stats.clone()
}
pub fn config(&self) -> &LruMapConfig {
&self.config
}
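/// Hashes a key with the standard library's default hasher.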
fn hash_key(&self, key: &K) -> u64 {
use std::collections::hash_map::DefaultHasher;
let mut hasher = DefaultHasher::new();
key.hash(&mut hasher);
hasher.finish()
}
fn allocate_node(&self) -> Result<u32> {
let mut free_nodes = self.free_nodes.lock().map_err(|_| ZiporaError::out_of_memory(0))?;
free_nodes.pop().ok_or_else(|| ZiporaError::out_of_memory(0))
}
fn evict_lru(&self) -> Result<()> {
let lru_node_idx = self.lru_list.get_lru_node();
if lru_node_idx == INVALID_NODE {
return Err(ZiporaError::out_of_memory(0));
}
// Copy the entry and unlink it while holding only the nodes lock, then
// release that lock before touching `hash_map`. Taking the locks in the
// opposite order here could deadlock against `put`, which holds a
// `hash_map` read lock while acquiring the `nodes` write lock.
let (key, value) = {
let mut nodes = self.nodes.write().map_err(|_| ZiporaError::out_of_memory(0))?;
if (lru_node_idx as usize) >= nodes.len() || !nodes[lru_node_idx as usize].is_valid {
return Err(ZiporaError::out_of_memory(0));
}
let key = nodes[lru_node_idx as usize].key.clone();
let value = nodes[lru_node_idx as usize].value.clone();
self.lru_list.remove(&mut nodes, lru_node_idx);
nodes[lru_node_idx as usize].reset();
(key, value)
};
self.eviction_callback.on_evict(&key, &value);
{
let mut hash_map = self.hash_map.write().map_err(|_| ZiporaError::out_of_memory(0))?;
hash_map.remove(&key);
}
{
let mut free_nodes = self.free_nodes.lock().map_err(|_| ZiporaError::out_of_memory(0))?;
free_nodes.push(lru_node_idx);
}
if self.config.enable_statistics {
self.stats.record_eviction();
self.stats.entry_count.store(self.lru_list.len(), Ordering::Relaxed);
}
Ok(())
}
}
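// Drop clears all entries; per the `clear` contract, eviction callbacks are
// not invoked for entries released here.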
impl<K, V, E> Drop for LruMap<K, V, E>
where
K: Hash + Eq + Clone + Default,
V: Clone + Default,
E: EvictionCallback<K, V>,
{
fn drop(&mut self) {
let _ = self.clear();
}
}
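// SAFETY: all shared state is behind `RwLock`, `Mutex`, `Arc`, or atomics,
// so this impl only restates the `Send` bound the compiler would infer for
// these field types.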
unsafe impl<K, V, E> Send for LruMap<K, V, E>
where
K: Hash + Eq + Clone + Send + Default,
V: Clone + Send + Default,
E: EvictionCallback<K, V> + Send,
{}
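// SAFETY: as above, interior mutability is fully synchronized; the `Sync`
// bound matches what would be inferred automatically.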
unsafe impl<K, V, E> Sync for LruMap<K, V, E>
where
K: Hash + Eq + Clone + Send + Sync + Default,
V: Clone + Send + Sync + Default,
E: EvictionCallback<K, V> + Send + Sync,
{}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
struct TestEvictionCallback {
eviction_count: Arc<AtomicUsize>,
}
impl EvictionCallback<i32, String> for TestEvictionCallback {
fn on_evict(&self, _key: &i32, _value: &String) {
self.eviction_count.fetch_add(1, Ordering::Relaxed);
}
}
#[test]
fn test_basic_operations() {
let cache = LruMap::new(3).unwrap();
assert_eq!(cache.put(1, "one".to_string()).unwrap(), None);
assert_eq!(cache.put(2, "two".to_string()).unwrap(), None);
assert_eq!(cache.get(&1), Some("one".to_string()));
assert_eq!(cache.get(&2), Some("two".to_string()));
assert_eq!(cache.len(), 2);
assert_eq!(cache.put(1, "ONE".to_string()).unwrap(), Some("one".to_string()));
assert_eq!(cache.get(&1), Some("ONE".to_string()));
assert_eq!(cache.len(), 2);
}
#[test]
fn test_lru_eviction() {
let cache = LruMap::new(2).unwrap();
cache.put(1, "one".to_string()).unwrap();
cache.put(2, "two".to_string()).unwrap();
assert_eq!(cache.get(&1), Some("one".to_string()));
cache.put(3, "three".to_string()).unwrap();
assert_eq!(cache.get(&1), Some("one".to_string()));
assert_eq!(cache.get(&2), None);
assert_eq!(cache.get(&3), Some("three".to_string()));
assert_eq!(cache.len(), 2);
}
#[test]
fn test_remove() {
let cache = LruMap::new(3).unwrap();
cache.put(1, "one".to_string()).unwrap();
cache.put(2, "two".to_string()).unwrap();
assert_eq!(cache.remove(&1), Some("one".to_string()));
assert_eq!(cache.remove(&1), None);
assert_eq!(cache.get(&1), None);
assert_eq!(cache.get(&2), Some("two".to_string()));
assert_eq!(cache.len(), 1);
}
#[test]
fn test_clear() {
let cache = LruMap::new(3).unwrap();
cache.put(1, "one".to_string()).unwrap();
cache.put(2, "two".to_string()).unwrap();
assert_eq!(cache.len(), 2);
cache.clear().unwrap();
assert_eq!(cache.len(), 0);
assert_eq!(cache.get(&1), None);
assert_eq!(cache.get(&2), None);
}
#[test]
fn test_eviction_callback() {
let eviction_count = Arc::new(AtomicUsize::new(0));
let callback = TestEvictionCallback {
eviction_count: eviction_count.clone(),
};
let cache = LruMap::with_eviction_callback(2, callback).unwrap();
cache.put(1, "one".to_string()).unwrap();
cache.put(2, "two".to_string()).unwrap();
cache.put(3, "three".to_string()).unwrap();
assert_eq!(eviction_count.load(Ordering::Relaxed), 1);
}
#[test]
fn test_statistics() {
let cache = LruMap::new(3).unwrap();
cache.put(1, "one".to_string()).unwrap();
cache.put(2, "two".to_string()).unwrap();
cache.get(&1);
cache.get(&3);
let stats = cache.stats();
assert_eq!(stats.hit_count.load(Ordering::Relaxed), 1);
assert_eq!(stats.miss_count.load(Ordering::Relaxed), 1);
assert_eq!(stats.put_count.load(Ordering::Relaxed), 2);
assert!((stats.hit_ratio() - 0.5).abs() < f64::EPSILON);
}
#[test]
fn test_capacity_management() {
let cache = LruMap::new(100).unwrap();
for i in 0..150 {
cache.put(i, format!("value_{}", i)).unwrap();
}
assert!(cache.len() <= cache.capacity());
assert!(cache.get(&149).is_some());
assert!(cache.get(&148).is_some());
assert!(cache.get(&0).is_none());
assert!(cache.get(&1).is_none());
}
}