use async_trait::async_trait;
use indexmap::IndexMap;
use std::hash::{Hash, Hasher};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
/// Tunable settings for a cache instance.
#[derive(Debug, Clone)]
pub struct CacheConfig {
    /// Maximum number of entries held at once; inserting past this evicts
    /// the oldest entry.
    pub max_capacity: usize,
    /// Default time-to-live for entries, in seconds.
    pub default_ttl: u64,
    /// Suggested interval between cleanup passes, in seconds.
    pub cleanup_interval: u64,
    /// Whether statistics collection is enabled.
    pub enable_stats: bool,
}

impl Default for CacheConfig {
    /// 10 000 entries, 5-minute TTL, cleanup every minute, stats enabled.
    fn default() -> Self {
        CacheConfig {
            max_capacity: 10_000,
            default_ttl: 300,
            cleanup_interval: 60,
            enable_stats: true,
        }
    }
}
/// A stored value together with its expiry deadline and access bookkeeping.
#[derive(Debug, Clone)]
struct CacheEntry<T> {
    value: T,
    created_at: Instant,
    expires_at: Instant,
    access_count: usize,
    last_accessed: Instant,
}

impl<T> CacheEntry<T> {
    /// Wraps `value` in an entry that expires `ttl` from now.
    fn new(value: T, ttl: Duration) -> Self {
        let timestamp = Instant::now();
        Self {
            value,
            created_at: timestamp,
            expires_at: timestamp + ttl,
            access_count: 0,
            last_accessed: timestamp,
        }
    }

    /// True once the expiry deadline has been reached.
    fn is_expired(&self) -> bool {
        self.expires_at <= Instant::now()
    }

    /// Records one read: bumps the counter and refreshes the access time.
    fn access(&mut self) {
        self.last_accessed = Instant::now();
        self.access_count += 1;
    }

    /// Time left before expiry; zero if already expired (never underflows).
    fn remaining_ttl(&self) -> Duration {
        let now = Instant::now();
        self.expires_at.saturating_duration_since(now)
    }
}
/// Cache lookup key, stored as a single `"table:id"` (or `"table:<hash>"`) string.
#[derive(Debug, Clone)]
pub struct CacheKey {
    // Pre-joined key string; equality, hashing and pattern matching all
    // operate on this one field.
    key: String,
}
impl CacheKey {
    /// Builds the canonical `"table:id"` key.
    pub fn new(table: &str, id: &str) -> Self {
        Self {
            key: format!("{}:{}", table, id),
        }
    }

    /// Derives a key from any hashable value, as `"table:<16-hex-digit-hash>"`.
    ///
    /// The hasher is seeded with fixed constants so derived keys stay stable
    /// across processes and restarts. (The previous `where String: Hash + Eq`
    /// clause was trivially true and has been dropped.)
    pub fn from_value(table: &str, value: &(impl Hash + ?Sized)) -> Self {
        use ahash::RandomState;
        static STATE: std::sync::LazyLock<RandomState> = std::sync::LazyLock::new(|| {
            RandomState::with_seeds(0xa1b2c3d4, 0xe5f60718, 0xf6e7d8c9, 0xb0a1b2c3)
        });
        let hash = STATE.hash_one(value);
        Self {
            key: format!("{}:{:016x}", table, hash),
        }
    }

    /// Tests this key against `pattern`.
    ///
    /// A pattern containing `*` is treated as a glob: `*` matches any run of
    /// characters and every other character is literal. All regex
    /// metacharacters in the literal segments are escaped (the old code
    /// escaped only `.`, so patterns containing `+`, `(`, `[`, `\` etc.
    /// misbehaved or failed to compile). A pattern without `*` matches on
    /// exact equality or as a `"pattern:"` prefix.
    pub fn matches_pattern(&self, pattern: &str) -> bool {
        let key = &self.key;
        if pattern.contains('*') {
            // Escape literal segments, then join with ".*" so only '*'
            // keeps wildcard meaning.
            let regex_pattern = pattern
                .split('*')
                .map(regex::escape)
                .collect::<Vec<_>>()
                .join(".*");
            if let Ok(re) = regex::Regex::new(&format!("^{}$", regex_pattern)) {
                return re.is_match(key);
            }
            // Should be unreachable once segments are escaped; fall back to
            // the literal checks below if compilation somehow fails.
        }
        key == pattern || key.starts_with(&format!("{}:", pattern))
    }
}
// Hash and Eq both delegate to the joined key string, so the Hash/Eq
// contract (equal keys hash identically) holds trivially.
impl Hash for CacheKey {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        Hash::hash(&self.key, hasher);
    }
}

impl PartialEq for CacheKey {
    fn eq(&self, other: &Self) -> bool {
        self.key.eq(&other.key)
    }
}

impl Eq for CacheKey {}
/// Pluggable cache policy: supplies the default TTL for new entries and
/// receives hit/miss/update notifications from the cache manager.
#[async_trait]
pub trait CacheStrategy: Send + Sync {
/// Short identifier for the strategy (e.g. "lru").
fn name(&self) -> &'static str;
/// Time-to-live applied to entries stored without an explicit TTL.
fn ttl(&self) -> Duration;
/// Called after a successful lookup of `key`.
fn on_hit(&self, key: &CacheKey);
/// Called when `key` is absent or has expired.
fn on_miss(&self, key: &CacheKey);
/// Called after `key` is inserted or overwritten.
fn on_update(&self, key: &CacheKey);
}
/// Baseline strategy: a fixed TTL and no bookkeeping on cache events.
#[derive(Debug, Default)]
pub(crate) struct LruStrategy {
    ttl: Duration,
}

impl LruStrategy {
    /// Builds a strategy whose entries live for `ttl_seconds`.
    pub(crate) fn new(ttl_seconds: u64) -> Self {
        LruStrategy {
            ttl: Duration::from_secs(ttl_seconds),
        }
    }
}

#[async_trait]
impl CacheStrategy for LruStrategy {
    fn name(&self) -> &'static str {
        "lru"
    }

    fn ttl(&self) -> Duration {
        self.ttl
    }

    // The event hooks are intentionally no-ops for the plain LRU policy.
    fn on_hit(&self, _key: &CacheKey) {}

    fn on_miss(&self, _key: &CacheKey) {}

    fn on_update(&self, _key: &CacheKey) {}
}
/// Wraps another strategy, overriding its TTL with a fixed default while
/// forwarding all event callbacks to the wrapped strategy unchanged.
#[derive(Debug)]
pub(crate) struct TtlAwareStrategy<S: CacheStrategy> {
    inner: S,
    default_ttl: Duration,
}

impl<S: CacheStrategy> TtlAwareStrategy<S> {
    /// Wraps `inner`, forcing a TTL of `ttl_seconds`.
    pub(crate) fn new(inner: S, ttl_seconds: u64) -> Self {
        Self {
            inner,
            default_ttl: Duration::from_secs(ttl_seconds),
        }
    }
}

#[async_trait]
impl<S: CacheStrategy> CacheStrategy for TtlAwareStrategy<S> {
    fn name(&self) -> &'static str {
        self.inner.name()
    }

    /// Returns the wrapper's own TTL, ignoring the inner strategy's value.
    fn ttl(&self) -> Duration {
        self.default_ttl
    }

    // Plain delegation. The previous `#[inline(never)]` attributes and
    // `let inner = &self.inner;` bindings added nothing and only inhibited
    // inlining of these trivial forwarders.
    fn on_hit(&self, key: &CacheKey) {
        self.inner.on_hit(key);
    }

    fn on_miss(&self, key: &CacheKey) {
        self.inner.on_miss(key);
    }

    fn on_update(&self, key: &CacheKey) {
        self.inner.on_update(key);
    }
}
/// Lock-free operation counters for the cache.
///
/// Counters use relaxed atomics: only eventual totals matter, not
/// cross-thread ordering. The former `Arc` wrapper around each counter was
/// removed — the Arcs were never cloned or shared, so they added a heap
/// allocation and a pointer indirection per counter for nothing. All access
/// goes through `&CacheStats`, which works identically on bare atomics.
#[derive(Debug, Default)]
pub struct CacheStats {
    hits: std::sync::atomic::AtomicU64,
    misses: std::sync::atomic::AtomicU64,
    sets: std::sync::atomic::AtomicU64,
    deletes: std::sync::atomic::AtomicU64,
    expirations: std::sync::atomic::AtomicU64,
}

impl CacheStats {
    /// Creates a zeroed counter set.
    pub fn new() -> Self {
        Self::default()
    }

    /// Fraction of lookups that hit, in `[0, 1]`; 0.0 before any lookup.
    pub fn hit_rate(&self) -> f64 {
        let hits = self.hits.load(std::sync::atomic::Ordering::Relaxed);
        let misses = self.misses.load(std::sync::atomic::Ordering::Relaxed);
        let total = hits + misses;
        if total == 0 { 0.0 } else { hits as f64 / total as f64 }
    }

    /// Counts a successful lookup.
    pub fn record_hit(&self) {
        self.hits.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    /// Counts a failed or expired lookup.
    pub fn record_miss(&self) {
        self.misses.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    /// Counts an insert or overwrite.
    pub fn record_set(&self) {
        self.sets.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    /// Counts an explicit removal.
    pub fn record_delete(&self) {
        self.deletes.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    /// Counts a TTL-based eviction.
    pub fn record_expiration(&self) {
        self.expirations.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }
}
/// Async, capacity-bounded key/value cache with TTL expiry, guarded by a
/// tokio `RwLock` around an insertion-ordered `IndexMap`.
pub struct CacheManager<T>
where
T: Clone + Send + Sync + 'static,
{
// Entries in insertion/recency order; index 0 is evicted first when full.
cache: RwLock<IndexMap<CacheKey, CacheEntry<T>>>,
// Full construction config. NOTE(review): only max_capacity is read back
// (copied below); default_ttl/cleanup_interval/enable_stats appear unused
// by this struct after construction — confirm before removing.
config: CacheConfig,
// Supplies the default TTL and receives hit/miss/update callbacks.
strategy: Box<dyn CacheStrategy>,
// Hit/miss/set/delete/expiration counters.
stats: CacheStats,
// Copied from config.max_capacity at construction.
max_capacity: usize,
}
impl<T> CacheManager<T>
where
    T: Clone + Send + Sync + 'static,
{
    /// Creates a manager using the default LRU strategy with `config.default_ttl`.
    pub fn new(config: CacheConfig) -> Self {
        Self::with_strategy(config.clone(), Box::new(LruStrategy::new(config.default_ttl)))
    }

    /// Creates a manager with an explicit strategy.
    pub fn with_strategy(config: CacheConfig, strategy: Box<dyn CacheStrategy>) -> Self {
        Self {
            cache: RwLock::new(IndexMap::new()),
            // Read max_capacity before moving config in (avoids the old clone).
            max_capacity: config.max_capacity,
            config,
            strategy,
            stats: CacheStats::new(),
        }
    }

    /// Looks up `key`, returning a clone of the value if present and unexpired.
    ///
    /// A hit moves the entry to the back of the map (most-recently-used) and
    /// bumps its access counter; an expired entry is removed and counted as a
    /// miss. A write lock is required because every lookup mutates the map.
    pub async fn get(&self, key: &CacheKey) -> Option<T> {
        let mut cache = self.cache.write().await;
        // Remove-then-reinsert performs the LRU reorder with a single lookup
        // (the old code did up to four lookups per hit).
        match cache.shift_remove(key) {
            Some(entry) if entry.is_expired() => {
                // Expired: the entry stays removed; count as a miss.
                self.stats.record_miss();
                self.strategy.on_miss(key);
                None
            }
            Some(mut entry) => {
                entry.access();
                let value = entry.value.clone();
                // Reinsert at the back so the entry becomes most-recently-used.
                cache.insert(key.clone(), entry);
                self.stats.record_hit();
                self.strategy.on_hit(key);
                Some(value)
            }
            None => {
                self.stats.record_miss();
                self.strategy.on_miss(key);
                None
            }
        }
    }

    /// Stores `value` under `key` with the strategy's default TTL.
    pub async fn set(&self, key: CacheKey, value: T) {
        self.set_with_ttl(key, value, self.strategy.ttl()).await;
    }

    /// Stores `value` under `key` with an explicit TTL.
    ///
    /// When the cache is full and `key` is new, the entry at index 0 (the
    /// oldest / least recently used) is evicted first.
    pub async fn set_with_ttl(&self, key: CacheKey, value: T, ttl: Duration) {
        let mut cache = self.cache.write().await;
        if cache.len() >= self.max_capacity && !cache.contains_key(&key) {
            cache.shift_remove_index(0);
        }
        cache.insert(key.clone(), CacheEntry::new(value, ttl));
        self.stats.record_set();
        self.strategy.on_update(&key);
    }

    /// Removes `key`; records a delete only if it was present.
    pub async fn delete(&self, key: &CacheKey) {
        let mut cache = self.cache.write().await;
        if cache.shift_remove(key).is_some() {
            self.stats.record_delete();
        }
    }

    /// Alias for [`Self::delete`].
    pub async fn invalidate(&self, key: &CacheKey) {
        self.delete(key).await;
    }

    /// Removes every entry whose key matches `pattern` (see
    /// `CacheKey::matches_pattern`); returns the number removed.
    pub async fn invalidate_pattern(&self, pattern: &str) -> usize {
        let mut cache = self.cache.write().await;
        // Collect first: we cannot remove while iterating the map.
        let matching: Vec<CacheKey> = cache
            .keys()
            .filter(|k| k.matches_pattern(pattern))
            .cloned()
            .collect();
        let mut removed_count = 0;
        for key in &matching {
            if cache.shift_remove(key).is_some() {
                removed_count += 1;
                self.stats.record_delete();
            }
        }
        removed_count
    }

    /// Empties the cache and resets all statistics.
    pub async fn clear(&mut self) {
        self.cache.write().await.clear();
        self.stats = CacheStats::new();
    }

    /// Current entry count (may include expired, not-yet-cleaned entries).
    pub async fn len(&self) -> usize {
        self.cache.read().await.len()
    }

    /// True when no entries are stored.
    pub async fn is_empty(&self) -> bool {
        self.cache.read().await.is_empty()
    }

    /// Access to the hit/miss/set/delete/expiration counters.
    pub fn stats(&self) -> &CacheStats {
        &self.stats
    }

    /// Removes expired entries, in batches for large caches so the write
    /// lock is released between passes. Returns the number removed.
    ///
    /// Gives up (with a warning) after `MAX_BATCHES` passes to bound the
    /// work done by a single call.
    pub async fn cleanup(&self) -> usize {
        const BATCH_SIZE: usize = 100;
        const MAX_BATCHES: usize = 100;
        let mut total_removed = 0;
        let mut batches = 0;
        loop {
            if batches >= MAX_BATCHES {
                tracing::warn!(
                    "Cache cleanup stopped after {} batches ({} items removed)",
                    batches,
                    total_removed
                );
                break;
            }
            // Lock is re-acquired each pass and dropped at the end of the
            // iteration, letting other tasks interleave.
            let mut cache = self.cache.write().await;
            if cache.len() <= BATCH_SIZE {
                // Small cache: one retain pass finishes the job.
                let before = cache.len();
                cache.retain(|_key, entry| {
                    if entry.is_expired() {
                        self.stats.record_expiration();
                        false
                    } else {
                        true
                    }
                });
                total_removed += before - cache.len();
                return total_removed;
            }
            // Large cache: remove up to BATCH_SIZE expired entries, then
            // release the lock before the next pass.
            let keys_to_remove: Vec<_> = cache
                .iter()
                .filter(|(_, entry)| entry.is_expired())
                .take(BATCH_SIZE)
                .map(|(k, _)| k.clone())
                .collect();
            if keys_to_remove.is_empty() {
                return total_removed;
            }
            for key in &keys_to_remove {
                cache.shift_remove(key);
                self.stats.record_expiration();
            }
            total_removed += keys_to_remove.len();
            batches += 1;
        }
        total_removed
    }

    /// Bulk-loads entries until the cache is full; returns how many loaded.
    /// Items beyond capacity are silently dropped.
    pub async fn warmup(&self, data: Vec<(CacheKey, T, Duration)>) -> usize {
        let mut cache = self.cache.write().await;
        let mut loaded = 0;
        for (key, value, ttl) in data {
            if cache.len() >= self.max_capacity {
                break;
            }
            cache.insert(key, CacheEntry::new(value, ttl));
            loaded += 1;
            self.stats.record_set();
        }
        loaded
    }

    /// [`Self::warmup`] with the strategy's default TTL applied to every item.
    pub async fn warmup_with_default_ttl(&self, data: Vec<(CacheKey, T)>) -> usize {
        let ttl = self.strategy.ttl();
        let data_with_ttl: Vec<_> = data
            .into_iter()
            .map(|(key, value)| (key, value, ttl))
            .collect();
        self.warmup(data_with_ttl).await
    }

    /// Looks up many keys under a single lock acquisition; the result order
    /// matches `keys`, with `None` for misses and expired entries.
    pub async fn batch_get(&self, keys: &[CacheKey]) -> Vec<Option<T>> {
        let mut cache = self.cache.write().await;
        let mut results = Vec::with_capacity(keys.len());
        for key in keys {
            match cache.shift_remove(key) {
                Some(entry) if entry.is_expired() => {
                    self.stats.record_miss();
                    self.strategy.on_miss(key);
                    results.push(None);
                }
                Some(mut entry) => {
                    // Move the original entry to the back (MRU) instead of
                    // rebuilding it. The old code constructed a fresh
                    // CacheEntry here, silently resetting created_at and
                    // access_count on every batch hit — inconsistent with
                    // `get`, which preserves them.
                    entry.access();
                    let value = entry.value.clone();
                    cache.insert(key.clone(), entry);
                    self.stats.record_hit();
                    self.strategy.on_hit(key);
                    results.push(Some(value));
                }
                None => {
                    self.stats.record_miss();
                    self.strategy.on_miss(key);
                    results.push(None);
                }
            }
        }
        results
    }

    /// Stores many entries under a single lock acquisition, applying the
    /// same capacity eviction as [`Self::set_with_ttl`].
    pub async fn batch_set(&self, items: Vec<(CacheKey, T)>) {
        let mut cache = self.cache.write().await;
        for (key, value) in items {
            if cache.len() >= self.max_capacity && !cache.contains_key(&key) {
                cache.shift_remove_index(0);
            }
            cache.insert(key.clone(), CacheEntry::new(value, self.strategy.ttl()));
            self.stats.record_set();
            self.strategy.on_update(&key);
        }
    }

    /// Removes many keys under a single lock; returns how many were present.
    pub async fn batch_delete(&self, keys: &[CacheKey]) -> usize {
        let mut cache = self.cache.write().await;
        let mut removed = 0;
        for key in keys {
            if cache.shift_remove(key).is_some() {
                removed += 1;
                self.stats.record_delete();
            }
        }
        removed
    }
}
/// Free-function convenience wrapper around `CacheKey::new`.
pub fn make_cache_key(table_name: &str, id: &str) -> CacheKey {
CacheKey::new(table_name, id)
}
// Unit, concurrency and performance tests for the cache manager.
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Mutex;

    // Miss, set, hit round-trip with stats counters verified.
    #[tokio::test]
    async fn test_cache_basic_operations() {
        let config = CacheConfig {
            max_capacity: 100,
            default_ttl: 60,
            cleanup_interval: 10,
            enable_stats: true,
        };
        let cache = CacheManager::<String>::new(config);
        let key = CacheKey::new("users", "1");
        assert!(cache.get(&key).await.is_none());
        cache.set(key.clone(), "test_value".to_string()).await;
        let value = cache.get(&key).await;
        assert_eq!(value, Some("test_value".to_string()));
        assert_eq!(cache.stats().hits.load(std::sync::atomic::Ordering::Relaxed), 1);
        assert_eq!(cache.stats().misses.load(std::sync::atomic::Ordering::Relaxed), 1);
    }

    // Entry with 1s TTL disappears after sleeping past expiry (slow test: 2s sleep).
    #[tokio::test]
    async fn test_cache_ttl() {
        let config = CacheConfig {
            max_capacity: 100,
            default_ttl: 1,
            cleanup_interval: 10,
            enable_stats: true,
        };
        let cache = CacheManager::<String>::new(config);
        let key = CacheKey::new("users", "1");
        cache.set(key.clone(), "test_value".to_string()).await;
        assert!(cache.get(&key).await.is_some());
        tokio::time::sleep(Duration::from_secs(2)).await;
        assert!(cache.get(&key).await.is_none());
    }

    // Inserting past max_capacity keeps the size bounded.
    #[tokio::test]
    async fn test_cache_eviction() {
        let config = CacheConfig {
            max_capacity: 3,
            default_ttl: 60,
            cleanup_interval: 10,
            enable_stats: true,
        };
        let cache = CacheManager::<String>::new(config);
        for i in 0..3 {
            let key = CacheKey::new("users", &i.to_string());
            cache.set(key, format!("value_{}", i)).await;
        }
        assert_eq!(cache.len().await, 3);
        let key = CacheKey::new("users", "3");
        cache.set(key.clone(), "value_3".to_string()).await;
        assert_eq!(cache.len().await, 3);
    }

    // clear() empties the cache.
    #[tokio::test]
    async fn test_cache_clear() {
        let config = CacheConfig::default();
        let mut cache = CacheManager::<String>::new(config);
        let key = CacheKey::new("users", "1");
        cache.set(key.clone(), "test".to_string()).await;
        assert!(!cache.is_empty().await);
        cache.clear().await;
        assert!(cache.is_empty().await);
    }

    // Each operation bumps exactly the matching counter; hit_rate is 0.5.
    #[tokio::test]
    async fn test_cache_stats() {
        let config = CacheConfig::default();
        let cache = CacheManager::<String>::new(config);
        let key = CacheKey::new("users", "1");
        cache.get(&key).await;
        assert_eq!(cache.stats().misses.load(std::sync::atomic::Ordering::Relaxed), 1);
        cache.set(key.clone(), "value".to_string()).await;
        assert_eq!(cache.stats().sets.load(std::sync::atomic::Ordering::Relaxed), 1);
        cache.get(&key).await;
        assert_eq!(cache.stats().hits.load(std::sync::atomic::Ordering::Relaxed), 1);
        cache.delete(&key).await;
        assert_eq!(cache.stats().deletes.load(std::sync::atomic::Ordering::Relaxed), 1);
        assert!((cache.stats().hit_rate() - 0.5).abs() < 0.01);
    }

    // from_value is deterministic; equal keys hash equally.
    #[test]
    fn test_cache_key_from_value_and_hash() {
        let k1 = CacheKey::from_value("users", &"abc");
        let k2 = CacheKey::from_value("users", &"abc");
        let k3 = CacheKey::from_value("users", &"def");
        assert_eq!(k1, k2);
        assert_ne!(k1, k3);
        let mut h1 = std::collections::hash_map::DefaultHasher::new();
        k1.hash(&mut h1);
        let mut h2 = std::collections::hash_map::DefaultHasher::new();
        k2.hash(&mut h2);
        assert_eq!(h1.finish(), h2.finish());
        let k4 = make_cache_key("t", "1");
        assert_eq!(k4, CacheKey::new("t", "1"));
    }

    // remaining_ttl never exceeds the original TTL.
    #[test]
    fn test_cache_entry_remaining_ttl() {
        let entry = CacheEntry::new("v".to_string(), Duration::from_secs(1));
        let remaining = entry.remaining_ttl();
        assert!(remaining <= Duration::from_secs(1));
    }

    // cleanup handles both the <=BATCH_SIZE retain path and the batched path.
    #[tokio::test]
    async fn test_cache_cleanup_small_and_large_batches() {
        let config = CacheConfig {
            max_capacity: 1000,
            default_ttl: 1,
            cleanup_interval: 10,
            enable_stats: true,
        };
        let cache = CacheManager::<String>::new(config);
        cache
            .set_with_ttl(CacheKey::new("t", "1"), "v1".to_string(), Duration::from_millis(0))
            .await;
        cache
            .set_with_ttl(CacheKey::new("t", "2"), "v2".to_string(), Duration::from_secs(60))
            .await;
        let removed_small = cache.cleanup().await;
        assert_eq!(removed_small, 1);
        for i in 0..150 {
            cache
                .set_with_ttl(
                    CacheKey::new("batch", &i.to_string()),
                    i.to_string(),
                    Duration::from_millis(0),
                )
                .await;
        }
        let removed_large = cache.cleanup().await;
        assert!(removed_large > 0);
        assert_eq!(cache.len().await, 1);
    }

    // warmup stops at capacity; batch_get/set/delete behave per-key.
    #[tokio::test]
    async fn test_cache_warmup_and_batch_ops() {
        let config = CacheConfig {
            max_capacity: 2,
            default_ttl: 60,
            cleanup_interval: 10,
            enable_stats: true,
        };
        let cache = CacheManager::<String>::new(config);
        let loaded = cache
            .warmup(vec![
                (CacheKey::new("w", "1"), "a".to_string(), Duration::from_secs(60)),
                (CacheKey::new("w", "2"), "b".to_string(), Duration::from_secs(60)),
                (CacheKey::new("w", "3"), "c".to_string(), Duration::from_secs(60)),
            ])
            .await;
        assert_eq!(loaded, 2);
        let loaded2 = cache
            .warmup_with_default_ttl(vec![(CacheKey::new("w", "4"), "d".to_string())])
            .await;
        assert_eq!(loaded2, 0);
        let k_hit = CacheKey::new("b", "hit");
        let k_expired = CacheKey::new("b", "expired");
        let k_miss = CacheKey::new("b", "miss");
        cache
            .set_with_ttl(k_hit.clone(), "vh".to_string(), Duration::from_secs(60))
            .await;
        cache
            .set_with_ttl(k_expired.clone(), "ve".to_string(), Duration::from_millis(0))
            .await;
        let got = cache
            .batch_get(&[k_hit.clone(), k_expired.clone(), k_miss.clone()])
            .await;
        assert_eq!(got.len(), 3);
        assert_eq!(got[0], Some("vh".to_string()));
        assert_eq!(got[1], None);
        assert_eq!(got[2], None);
        cache
            .batch_set(vec![
                (CacheKey::new("s", "1"), "v1".to_string()),
                (CacheKey::new("s", "2"), "v2".to_string()),
            ])
            .await;
        let removed = cache
            .batch_delete(&[CacheKey::new("s", "1"), CacheKey::new("s", "nope")])
            .await;
        assert_eq!(removed, 1);
    }

    // warmup under capacity loads everything.
    #[tokio::test]
    async fn test_cache_warmup_full_load_returns_loaded() {
        let config = CacheConfig {
            max_capacity: 10,
            default_ttl: 60,
            cleanup_interval: 10,
            enable_stats: true,
        };
        let cache = CacheManager::<String>::new(config);
        let loaded = cache
            .warmup(vec![
                (CacheKey::new("wf", "1"), "a".to_string(), Duration::from_secs(60)),
                (CacheKey::new("wf", "2"), "b".to_string(), Duration::from_secs(60)),
            ])
            .await;
        assert_eq!(loaded, 2);
        assert_eq!(cache.len().await, 2);
    }

    // LruStrategy reports its name and configured TTL.
    #[test]
    fn test_lru_strategy_name_and_ttl() {
        let strategy = LruStrategy::new(7);
        assert_eq!(strategy.name(), "lru");
        assert_eq!(strategy.ttl(), Duration::from_secs(7));
    }

    // TtlAwareStrategy overrides ttl() but forwards every callback.
    #[tokio::test]
    async fn test_ttl_aware_strategy_delegation() {
        use std::sync::atomic::{AtomicUsize, Ordering};
        #[derive(Debug, Clone)]
        struct CountingStrategy {
            hits: Arc<AtomicUsize>,
            misses: Arc<AtomicUsize>,
            updates: Arc<AtomicUsize>,
        }
        #[async_trait]
        impl CacheStrategy for CountingStrategy {
            fn name(&self) -> &'static str {
                "counting"
            }
            fn ttl(&self) -> Duration {
                Duration::from_secs(1)
            }
            fn on_hit(&self, _key: &CacheKey) {
                self.hits.fetch_add(1, Ordering::Relaxed);
            }
            fn on_miss(&self, _key: &CacheKey) {
                self.misses.fetch_add(1, Ordering::Relaxed);
            }
            fn on_update(&self, _key: &CacheKey) {
                self.updates.fetch_add(1, Ordering::Relaxed);
            }
        }
        let hits = Arc::new(AtomicUsize::new(0));
        let misses = Arc::new(AtomicUsize::new(0));
        let updates = Arc::new(AtomicUsize::new(0));
        let inner = CountingStrategy {
            hits: hits.clone(),
            misses: misses.clone(),
            updates: updates.clone(),
        };
        let wrapped = TtlAwareStrategy::new(inner, 123);
        assert_eq!(wrapped.name(), "counting");
        assert_eq!(wrapped.ttl(), Duration::from_secs(123));
        assert_eq!(wrapped.default_ttl, Duration::from_secs(123));
        let key = CacheKey::new("t", "1");
        wrapped.on_hit(&key);
        wrapped.on_miss(&key);
        wrapped.on_update(&key);
        assert_eq!(hits.load(Ordering::Relaxed), 1);
        assert_eq!(misses.load(Ordering::Relaxed), 1);
        assert_eq!(updates.load(Ordering::Relaxed), 1);
    }

    // Large cache with no expired entries: cleanup removes nothing and exits.
    #[tokio::test]
    async fn test_cache_cleanup_large_batch_no_expired_breaks() {
        let config = CacheConfig {
            max_capacity: 1000,
            default_ttl: 3600,
            cleanup_interval: 10,
            enable_stats: true,
        };
        let cache = CacheManager::<String>::new(config);
        for i in 0..101 {
            cache
                .set_with_ttl(
                    CacheKey::new("keep", &i.to_string()),
                    format!("v{}", i),
                    Duration::from_secs(3600),
                )
                .await;
        }
        assert!(cache.len().await > 100);
        let removed = cache.cleanup().await;
        assert_eq!(removed, 0);
        assert!(cache.len().await > 100);
    }

    // 210 entries are manually expired in place; cleanup needs >1 batch pass
    // and must remove exactly the expired ones.
    #[tokio::test]
    async fn test_cache_cleanup_large_batch_multiple_iterations() {
        let config = CacheConfig {
            max_capacity: 1000,
            default_ttl: 3600,
            cleanup_interval: 10,
            enable_stats: true,
        };
        let cache = CacheManager::<String>::new(config);
        for i in 0..210 {
            cache
                .set_with_ttl(
                    CacheKey::new("exp", &i.to_string()),
                    format!("v{}", i),
                    Duration::from_secs(3600),
                )
                .await;
        }
        for i in 0..10 {
            cache
                .set_with_ttl(
                    CacheKey::new("keep", &i.to_string()),
                    format!("k{}", i),
                    Duration::from_secs(3600),
                )
                .await;
        }
        assert_eq!(cache.len().await, 220);
        let (exp_key_count, keep_key_count) = {
            let cache_guard = cache.cache.read().await;
            let exp_count = cache_guard.keys().filter(|k| k.key.starts_with("exp:")).count();
            let keep_count = cache_guard.keys().filter(|k| k.key.starts_with("keep:")).count();
            (exp_count, keep_count)
        };
        assert_eq!(exp_key_count, 210);
        assert_eq!(keep_key_count, 10);
        {
            // Force-expire the "exp:" entries by rewriting their deadlines.
            let now = Instant::now();
            let mut cache_guard = cache.cache.write().await;
            for (key, entry) in cache_guard.iter_mut() {
                if key.key.starts_with("exp:") {
                    entry.expires_at = now - Duration::from_secs(1);
                }
            }
        }
        let expired_after = {
            let cache_guard = cache.cache.read().await;
            cache_guard.iter().filter(|(_, entry)| entry.is_expired()).count()
        };
        assert_eq!(expired_after, 210);
        let removed = cache.cleanup().await;
        assert_eq!(removed, 210);
        assert_eq!(cache.len().await, 10);
    }

    // Documents current behavior: capacity 0 still stores the inserted item.
    #[tokio::test]
    async fn test_cache_zero_capacity() {
        let config = CacheConfig {
            max_capacity: 0,
            default_ttl: 300,
            cleanup_interval: 60,
            enable_stats: true,
        };
        let cache = CacheManager::with_strategy(config, Box::new(LruStrategy::new(300)));
        let key = CacheKey::new("users", "1");
        cache.set(key.clone(), "value".to_string()).await;
        let retrieved = cache.get(&key).await;
        assert!(retrieved.is_some(), "Cache with 0 capacity - value stored successfully");
    }

    // TTL of 0 means the entry is expired by the time it is read.
    #[tokio::test]
    async fn test_cache_zero_ttl() {
        let config = CacheConfig {
            max_capacity: 100,
            default_ttl: 0,
            cleanup_interval: 60,
            enable_stats: true,
        };
        let cache = CacheManager::with_strategy(config, Box::new(LruStrategy::new(0)));
        let key = CacheKey::new("users", "1");
        let test_value = "value_with_zero_ttl".to_string();
        cache.set(key.clone(), test_value.clone()).await;
        let retrieved = cache.get(&key).await;
        assert!(retrieved.is_none(), "Cache with 0 TTL should expire immediately");
    }

    // cleanup_interval is advisory only; cleanup() itself still works.
    #[tokio::test]
    async fn test_cache_zero_cleanup_interval() {
        let config = CacheConfig {
            max_capacity: 100,
            default_ttl: 1,
            cleanup_interval: 0,
            enable_stats: true,
        };
        let cache = CacheManager::with_strategy(config, Box::new(LruStrategy::new(1)));
        let key = CacheKey::new("users", "1");
        cache.set(key.clone(), "value".to_string()).await;
        let cleanup_count = cache.cleanup().await;
        assert_eq!(cleanup_count, 0, "Cleanup should handle zero interval gracefully");
    }

    // Mixed set/get/cleanup sequence with an explicit strategy.
    #[tokio::test]
    async fn test_cache_strategy_combo_operations() {
        let lru = LruStrategy::new(300);
        let config = CacheConfig {
            max_capacity: 50,
            default_ttl: 120,
            cleanup_interval: 30,
            enable_stats: true,
        };
        let cache = CacheManager::with_strategy(config, Box::new(lru));
        for i in 0..30 {
            let key = CacheKey::new("products", &i.to_string());
            cache.set(key.clone(), format!("product_{}", i)).await;
        }
        for i in 0..30 {
            let key = CacheKey::new("products", &i.to_string());
            let retrieved: Option<String> = cache.get(&key).await;
            assert!(retrieved.is_some(), "Should retrieve product_{}", i);
        }
        for i in 0..30 {
            let key = CacheKey::new("products", &i.to_string());
            let _ = cache.get(&key).await;
        }
        let _ = cache.cleanup().await;
    }

    // 10 tasks reading concurrently must not deadlock or panic.
    #[tokio::test]
    async fn test_cache_concurrent_reads() {
        let config = CacheConfig {
            max_capacity: 1000,
            default_ttl: 300,
            cleanup_interval: 60,
            enable_stats: true,
        };
        let cache = CacheManager::new(config);
        let cache = Arc::new(cache);
        for i in 0..100 {
            let key = CacheKey::new("users", &i.to_string());
            cache.set(key.clone(), format!("user_data_{}", i)).await;
        }
        let mut handles = Vec::new();
        for _ in 0..10 {
            let cache = cache.clone();
            let handle = tokio::spawn(async move {
                for i in 0..100 {
                    let key = CacheKey::new("users", &i.to_string());
                    let _ = cache.get(&key).await;
                }
            });
            handles.push(handle);
        }
        futures::future::join_all(handles).await;
    }

    // 10 tasks x 100 disjoint writes: the set counter must reach 1000.
    #[tokio::test]
    async fn test_cache_concurrent_writes() {
        let config = CacheConfig {
            max_capacity: 10000,
            default_ttl: 300,
            cleanup_interval: 60,
            enable_stats: true,
        };
        let cache = CacheManager::new(config);
        let cache = Arc::new(cache);
        let mut handles = Vec::new();
        for t in 0..10 {
            let cache = cache.clone();
            let handle = tokio::spawn(async move {
                for i in 0..100 {
                    let key = CacheKey::new("concurrent", &format!("{}_{}", t, i));
                    cache.set(key.clone(), format!("value_{}_{}", t, i)).await;
                }
            });
            handles.push(handle);
        }
        futures::future::join_all(handles).await;
        let stats = cache.stats();
        assert!(
            stats.sets.load(std::sync::atomic::Ordering::SeqCst) == 1000,
            "All 1000 writes should complete"
        );
    }

    // 50 writer + 50 reader tasks interleaved on shared keys.
    #[tokio::test]
    async fn test_cache_concurrent_read_write() {
        let config = CacheConfig {
            max_capacity: 1000,
            default_ttl: 300,
            cleanup_interval: 60,
            enable_stats: true,
        };
        let cache = CacheManager::new(config);
        let cache = Arc::new(cache);
        let write_count = Arc::new(Mutex::new(0));
        let read_count = Arc::new(Mutex::new(0));
        let mut handles = Vec::new();
        for i in 0..50 {
            let cache = cache.clone();
            let write_count = write_count.clone();
            let handle = tokio::spawn(async move {
                for j in 0..20 {
                    let key = CacheKey::new("shared", &j.to_string());
                    cache.set(key.clone(), format!("writer_{}_{}", i, j)).await;
                    let mut count = write_count.lock().unwrap();
                    *count += 1;
                }
            });
            handles.push(handle);
        }
        for _ in 0..50 {
            let cache = cache.clone();
            let read_count = read_count.clone();
            let handle = tokio::spawn(async move {
                for j in 0..20 {
                    let key = CacheKey::new("shared", &j.to_string());
                    let _ = cache.get(&key).await;
                    let mut count = read_count.lock().unwrap();
                    *count += 1;
                }
            });
            handles.push(handle);
        }
        futures::future::join_all(handles).await;
        let writes = *write_count.lock().unwrap();
        let reads = *read_count.lock().unwrap();
        assert_eq!(writes, 1000, "All 1000 writes should complete");
        assert_eq!(reads, 1000, "All 1000 reads should complete");
    }

    // Exactly at capacity (100 keys, capacity 100): nothing should be lost.
    #[tokio::test]
    async fn test_cache_concurrent_eviction() {
        let config = CacheConfig {
            max_capacity: 100,
            default_ttl: 300,
            cleanup_interval: 60,
            enable_stats: true,
        };
        let cache = CacheManager::new(config);
        let cache = Arc::new(cache);
        let mut handles = Vec::new();
        for t in 0..10 {
            let cache = cache.clone();
            let handle = tokio::spawn(async move {
                for i in 0..10 {
                    let key = CacheKey::new("evict", &format!("{}_{}", t, i));
                    cache.set(key.clone(), format!("evict_value_{}_{}", t, i)).await;
                }
            });
            handles.push(handle);
        }
        futures::future::join_all(handles).await;
        let key = CacheKey::new("evict", "9_9");
        let retrieved: Option<String> = cache.get(&key).await;
        assert!(
            retrieved.is_some(),
            "Should retrieve the last written value after concurrent eviction"
        );
        assert_eq!(retrieved, Some("evict_value_9_9".to_string()));
        let stats = cache.stats();
        assert!(
            stats.sets.load(std::sync::atomic::Ordering::SeqCst) == 100,
            "All 100 sets should complete"
        );
    }

    // Coarse timing sanity check over 10k writes and 10k reads.
    #[tokio::test]
    async fn test_cache_large_dataset_performance() {
        let config = CacheConfig {
            max_capacity: 10000,
            default_ttl: 300,
            cleanup_interval: 60,
            enable_stats: true,
        };
        let cache = CacheManager::new(config);
        let start = std::time::Instant::now();
        for i in 0..10000 {
            let key = CacheKey::new("large_dataset", &i.to_string());
            cache.set(key.clone(), format!("data_{}", i)).await;
        }
        let write_time = start.elapsed();
        let read_start = std::time::Instant::now();
        for i in 0..10000 {
            let key = CacheKey::new("large_dataset", &i.to_string());
            let _ = cache.get(&key).await;
        }
        let read_time = read_start.elapsed();
        println!("Write time for 10000 items: {:?}", write_time);
        println!("Read time for 10000 items: {:?}", read_time);
        assert!(
            write_time < Duration::from_secs(30),
            "Write should complete in reasonable time"
        );
        assert!(
            read_time < Duration::from_secs(30),
            "Read should complete in reasonable time"
        );
    }

    // Coarse throughput check: 200k mixed set/get operations under 60s.
    #[tokio::test]
    async fn test_cache_throughput_benchmark() {
        let config = CacheConfig {
            max_capacity: 5000,
            default_ttl: 60,
            cleanup_interval: 30,
            enable_stats: true,
        };
        let cache = CacheManager::new(config);
        let iterations = 1000;
        let batch_size = 100;
        for i in 0..100 {
            let key = CacheKey::new("benchmark", &i.to_string());
            cache.set(key.clone(), format!("bench_{}", i)).await;
        }
        let start = std::time::Instant::now();
        for _ in 0..iterations {
            for i in 0..batch_size {
                let key = CacheKey::new("benchmark", &i.to_string());
                cache.set(key.clone(), format!("updated_{}", i)).await;
                let _ = cache.get(&key).await;
            }
        }
        let elapsed = start.elapsed();
        let total_ops = iterations * batch_size * 2;
        println!("Total operations: {}", total_ops);
        println!("Total time: {:?}", elapsed);
        println!("Operations per second: {:.2}", total_ops as f64 / elapsed.as_secs_f64());
        assert!(
            elapsed < Duration::from_secs(60),
            "Should complete throughput test in under 60 seconds"
        );
    }

    // Manual cleanup after TTL elapses removes expired entries.
    #[tokio::test]
    async fn test_cache_cleanup_manual() {
        let config = CacheConfig {
            max_capacity: 100,
            default_ttl: 1,
            cleanup_interval: 3600,
            enable_stats: true,
        };
        let cache = CacheManager::new(config);
        for i in 0..50 {
            let key = CacheKey::new("temp", &i.to_string());
            cache.set(key.clone(), format!("temp_{}", i)).await;
        }
        tokio::time::sleep(Duration::from_millis(1500)).await;
        let cleaned = cache.cleanup().await;
        assert!(cleaned <= 50, "Should not clean more than 50 entries");
        let key = CacheKey::new("temp", "25");
        let retrieved: Option<String> = cache.get(&key).await;
        assert!(retrieved.is_none(), "Expired entries should be cleaned");
    }

    // Two equal CacheKeys resolve to the same slot; a different key misses.
    #[tokio::test]
    async fn test_cache_key_hash_consistency() {
        let key1 = CacheKey::new("users", "123");
        let key2 = CacheKey::new("users", "123");
        let key3 = CacheKey::new("users", "456");
        assert_eq!(key1, key2, "Same values should be equal");
        assert_ne!(key1, key3, "Different values should not be equal");
        let config = CacheConfig::default();
        let cache = CacheManager::new(config);
        cache.set(key1.clone(), "value1".to_string()).await;
        let retrieved: Option<String> = cache.get(&key2).await;
        assert!(retrieved.is_some(), "Should find value using equal key");
        assert_eq!(retrieved.unwrap(), "value1");
        let retrieved: Option<String> = cache.get(&key3).await;
        assert!(retrieved.is_none(), "Should not find value for different key");
    }

    // from_value keys work for both integer and string inputs.
    #[tokio::test]
    async fn test_cache_from_value_different_types() {
        let config = CacheConfig::default();
        let cache = CacheManager::new(config);
        let key1 = CacheKey::from_value("users", &123);
        let key2 = CacheKey::from_value("users", &123);
        let key3 = CacheKey::from_value("users", &456);
        let key4 = CacheKey::from_value("users", &"test");
        let key5 = CacheKey::from_value("users", &"test");
        cache.set(key1.clone(), "int_value".to_string()).await;
        cache.set(key4.clone(), "str_value".to_string()).await;
        let retrieved1: Option<String> = cache.get(&key2).await;
        let retrieved2: Option<String> = cache.get(&key3).await;
        let retrieved3: Option<String> = cache.get(&key5).await;
        assert!(
            retrieved1.is_some() && retrieved1.unwrap() == "int_value",
            "Integer key should work"
        );
        assert!(retrieved2.is_none(), "Different integer value should not match");
        assert!(
            retrieved3.is_some() && retrieved3.unwrap() == "str_value",
            "String key should work"
        );
    }

    // 10 sets, 5 hits, 5 misses -> exact counter totals.
    #[tokio::test]
    async fn test_cache_stats_verification() {
        let config = CacheConfig {
            max_capacity: 100,
            default_ttl: 300,
            cleanup_interval: 60,
            enable_stats: true,
        };
        let cache = CacheManager::new(config);
        for i in 0..10 {
            let key = CacheKey::new("stats", &i.to_string());
            cache.set(key.clone(), format!("value_{}", i)).await;
        }
        for i in 0..5 {
            let key = CacheKey::new("stats", &i.to_string());
            let _ = cache.get(&key).await;
        }
        for i in 10..15 {
            let key = CacheKey::new("stats", &i.to_string());
            let _ = cache.get(&key).await;
        }
        let stats = cache.stats();
        assert_eq!(
            stats.sets.load(std::sync::atomic::Ordering::SeqCst),
            10,
            "Should have 10 sets"
        );
        assert_eq!(
            stats.hits.load(std::sync::atomic::Ordering::SeqCst),
            5,
            "Should have 5 hits"
        );
        assert_eq!(
            stats.misses.load(std::sync::atomic::Ordering::SeqCst),
            5,
            "Should have 5 misses"
        );
    }

    // hit_rate stays within (0, 1] after an all-hit workload.
    #[tokio::test]
    async fn test_cache_hit_rate() {
        let config = CacheConfig {
            max_capacity: 100,
            default_ttl: 300,
            cleanup_interval: 60,
            enable_stats: true,
        };
        let cache = CacheManager::new(config);
        for i in 0..10 {
            let key = CacheKey::new("hit_rate_test", &i.to_string());
            cache.set(key.clone(), format!("value_{}", i)).await;
        }
        for _ in 0..100 {
            let key = CacheKey::new("hit_rate_test", "5");
            let _ = cache.get(&key).await;
        }
        let stats = cache.stats();
        let hit_rate = stats.hit_rate();
        assert!(hit_rate > 0.0, "Hit rate should be greater than 0");
        assert!(hit_rate <= 1.0, "Hit rate should not exceed 1.0");
    }
}