Skip to main content

reinhardt_utils/cache/
in_memory.rs

1//! In-memory cache implementation
2
3use super::cache_trait::Cache;
4use super::entry::CacheEntry;
5use super::layered::LayeredCacheStore;
6use super::statistics::{CacheEntryInfo, CacheStatistics};
7use async_trait::async_trait;
8use reinhardt_core::exception::{Error, Result};
9use serde::{Deserialize, Serialize};
10use std::collections::HashMap;
11use std::sync::Arc;
12use std::sync::atomic::{AtomicU64, Ordering};
13use std::time::{Duration, SystemTime};
14use tokio::sync::RwLock;
15use tokio::task::AbortHandle;
16
/// Cleanup strategy for in-memory cache
///
/// The strategy is selected by the `InMemoryCache` constructors; no setter
/// is exposed in this file, so it appears fixed for the cache's lifetime.
#[derive(Clone, Copy, Debug)]
pub enum CleanupStrategy {
	/// Traditional O(n) full scan cleanup
	///
	/// Scans all entries to find and remove expired ones.
	/// Simple but slow for large caches.
	Naive,
	/// Layered O(1) amortized cleanup (Redis-style)
	///
	/// Uses three layers:
	/// - Passive expiration on access
	/// - Active random sampling
	/// - TTL index for batch cleanup
	///
	/// Much faster for large caches (100-1000x improvement).
	Layered,
}
35
/// In-memory cache backend
///
/// The naive store, hit/miss counters, and cleanup-task handle are held
/// behind `Arc`s, so clones of this struct observe the same underlying
/// naive-store state and statistics.
#[derive(Clone)]
pub struct InMemoryCache {
	// Key/value store used by the naive strategy (serialized bytes per entry).
	store: Arc<RwLock<HashMap<String, CacheEntry>>>,
	// Present only when the layered (Redis-style) strategy was selected.
	layered_store: Option<LayeredCacheStore>,
	// Selects which store and cleanup path the methods use.
	cleanup_strategy: CleanupStrategy,
	// TTL applied by `set` when the caller passes `None`.
	default_ttl: Option<Duration>,
	// Cumulative hit counter, updated with relaxed ordering.
	hits: Arc<AtomicU64>,
	// Cumulative miss counter, updated with relaxed ordering.
	misses: Arc<AtomicU64>,
	// Interval recorded by `with_auto_cleanup`; not read elsewhere in this file.
	cleanup_interval: Option<Duration>,
	/// Handle for cancelling the background cleanup task
	cleanup_handle: Arc<std::sync::Mutex<Option<AbortHandle>>>,
}
49
50impl InMemoryCache {
51	/// Create a new in-memory cache with naive cleanup strategy
52	///
53	/// Uses traditional O(n) full scan for cleanup.
54	/// Suitable for small caches or when simplicity is preferred.
55	///
56	/// # Examples
57	///
58	/// ```
59	/// use reinhardt_utils::cache::InMemoryCache;
60	///
61	/// let cache = InMemoryCache::new();
62	/// // Cache is ready to use with no default TTL
63	/// ```
64	pub fn new() -> Self {
65		Self {
66			store: Arc::new(RwLock::new(HashMap::new())),
67			layered_store: None,
68			cleanup_strategy: CleanupStrategy::Naive,
69			default_ttl: None,
70			hits: Arc::new(AtomicU64::new(0)),
71			misses: Arc::new(AtomicU64::new(0)),
72			cleanup_interval: None,
73			cleanup_handle: Arc::new(std::sync::Mutex::new(None)),
74		}
75	}
76
77	/// Create a new in-memory cache with layered cleanup strategy
78	///
79	/// Uses Redis-style layered cleanup with O(1) amortized complexity:
80	/// - Passive expiration on access
81	/// - Active random sampling (default: 20 keys, 25% threshold)
82	/// - TTL index for batch cleanup
83	///
84	/// Recommended for caches with many entries or frequent TTL usage.
85	///
86	/// # Performance
87	///
88	/// For caches with 100,000+ entries, layered cleanup is 100-1000x faster than naive cleanup.
89	///
90	/// # Examples
91	///
92	/// ```
93	/// use reinhardt_utils::cache::InMemoryCache;
94	///
95	/// let cache = InMemoryCache::with_layered_cleanup();
96	/// // Use layered cleanup for better performance
97	/// ```
98	pub fn with_layered_cleanup() -> Self {
99		Self {
100			store: Arc::new(RwLock::new(HashMap::new())),
101			layered_store: Some(LayeredCacheStore::new()),
102			cleanup_strategy: CleanupStrategy::Layered,
103			default_ttl: None,
104			hits: Arc::new(AtomicU64::new(0)),
105			misses: Arc::new(AtomicU64::new(0)),
106			cleanup_interval: None,
107			cleanup_handle: Arc::new(std::sync::Mutex::new(None)),
108		}
109	}
110
111	/// Create a new in-memory cache with custom layered cleanup parameters
112	///
113	/// # Arguments
114	///
115	/// * `sample_size` - Number of keys to sample per cleanup round (default: 20)
116	/// * `threshold` - Threshold for expired entries to trigger another round (default: 0.25)
117	///
118	/// # Examples
119	///
120	/// ```
121	/// use reinhardt_utils::cache::InMemoryCache;
122	///
123	/// // Sample 50 keys per round, trigger next round if >30% expired
124	/// let cache = InMemoryCache::with_custom_layered_cleanup(50, 0.30);
125	/// ```
126	pub fn with_custom_layered_cleanup(sample_size: usize, threshold: f32) -> Self {
127		Self {
128			store: Arc::new(RwLock::new(HashMap::new())),
129			layered_store: Some(LayeredCacheStore::with_sampler(sample_size, threshold)),
130			cleanup_strategy: CleanupStrategy::Layered,
131			default_ttl: None,
132			hits: Arc::new(AtomicU64::new(0)),
133			misses: Arc::new(AtomicU64::new(0)),
134			cleanup_interval: None,
135			cleanup_handle: Arc::new(std::sync::Mutex::new(None)),
136		}
137	}
138	/// Set a default TTL for all cache entries
139	///
140	/// # Examples
141	///
142	/// ```
143	/// use reinhardt_utils::cache::{InMemoryCache, Cache};
144	/// use std::time::Duration;
145	///
146	/// # async fn example() {
147	/// let cache = InMemoryCache::new()
148	///     .with_default_ttl(Duration::from_secs(1));
149	///
150	/// // Set a value without explicit TTL
151	/// cache.set("key", &"value", None).await.unwrap();
152	///
153	/// // Wait for default TTL to expire
154	///
155	/// // Value should be expired
156	/// let value: Option<String> = cache.get("key").await.unwrap();
157	/// assert_eq!(value, None);
158	/// # }
159	/// ```
160	pub fn with_default_ttl(mut self, ttl: Duration) -> Self {
161		self.default_ttl = Some(ttl);
162		self
163	}
164	/// Clean up expired entries
165	///
166	/// The cleanup strategy depends on how the cache was created:
167	/// - Naive strategy: O(n) full scan (simple but slow for large caches)
168	/// - Layered strategy: O(1) amortized (Redis-style, much faster)
169	///
170	/// # Examples
171	///
172	/// ```
173	/// use reinhardt_utils::cache::{InMemoryCache, Cache};
174	/// use std::time::Duration;
175	///
176	/// # async fn example() {
177	/// // Naive cleanup
178	/// let cache = InMemoryCache::new();
179	///
180	/// // Set a value with short TTL
181	/// cache.set("key1", &"value", Some(Duration::from_millis(10))).await.unwrap();
182	///
183	/// // Wait for expiration
184	///
185	/// // Clean up expired entries (O(n) scan)
186	/// cache.cleanup_expired().await;
187	///
188	/// // Verify the key is gone
189	/// assert!(!cache.has_key("key1").await.unwrap());
190	///
191	/// // Layered cleanup (faster for large caches)
192	/// let cache = InMemoryCache::with_layered_cleanup();
193	/// cache.set("key2", &"value", Some(Duration::from_millis(10))).await.unwrap();
194	/// cache.cleanup_expired().await; // O(1) amortized
195	/// # }
196	/// ```
197	pub async fn cleanup_expired(&self) {
198		match self.cleanup_strategy {
199			CleanupStrategy::Naive => {
200				let mut store = self.store.write().await;
201				store.retain(|_, entry| !entry.is_expired());
202			}
203			CleanupStrategy::Layered => {
204				if let Some(ref layered_store) = self.layered_store {
205					layered_store.cleanup().await;
206				}
207			}
208		}
209	}
210
211	/// Get cache statistics
212	///
213	/// # Examples
214	///
215	/// ```
216	/// use reinhardt_utils::cache::{InMemoryCache, Cache};
217	///
218	/// # async fn example() {
219	/// let cache = InMemoryCache::new();
220	///
221	/// // Set and get some values
222	/// cache.set("key1", &"value1", None).await.unwrap();
223	/// cache.set("key2", &"value2", None).await.unwrap();
224	///
225	/// let _: Option<String> = cache.get("key1").await.unwrap(); // Hit
226	/// let _: Option<String> = cache.get("key2").await.unwrap(); // Hit
227	/// let _: Option<String> = cache.get("key3").await.unwrap(); // Miss
228	///
229	/// let stats = cache.get_statistics().await;
230	/// assert_eq!(stats.hits, 2);
231	/// assert_eq!(stats.misses, 1);
232	/// assert_eq!(stats.total_requests, 3);
233	/// assert_eq!(stats.entry_count, 2);
234	/// assert_eq!(stats.hit_rate(), 2.0 / 3.0);
235	/// # }
236	/// ```
237	pub async fn get_statistics(&self) -> CacheStatistics {
238		let hits = self.hits.load(Ordering::Relaxed);
239		let misses = self.misses.load(Ordering::Relaxed);
240
241		let (entry_count, memory_usage) = match self.cleanup_strategy {
242			CleanupStrategy::Naive => {
243				let store = self.store.read().await;
244				let entry_count = store.len() as u64;
245				let memory_usage = store
246					.values()
247					.map(|entry| entry.value.len() as u64)
248					.sum::<u64>();
249				(entry_count, memory_usage)
250			}
251			CleanupStrategy::Layered => {
252				if let Some(ref layered_store) = self.layered_store {
253					let store_clone = layered_store.get_store_clone().await;
254					let entry_count = store_clone.len() as u64;
255					let memory_usage = store_clone
256						.values()
257						.map(|entry| entry.value.len() as u64)
258						.sum::<u64>();
259					(entry_count, memory_usage)
260				} else {
261					(0, 0)
262				}
263			}
264		};
265
266		CacheStatistics {
267			hits,
268			misses,
269			total_requests: hits + misses,
270			entry_count,
271			memory_usage,
272		}
273	}
274
275	/// List all keys in the cache
276	///
277	/// Returns a vector of all keys currently stored in the cache,
278	/// including expired entries that haven't been cleaned up yet.
279	///
280	/// # Examples
281	///
282	/// ```
283	/// use reinhardt_utils::cache::{InMemoryCache, Cache};
284	///
285	/// # async fn example() {
286	/// let cache = InMemoryCache::new();
287	///
288	/// cache.set("key1", &"value1", None).await.unwrap();
289	/// cache.set("key2", &"value2", None).await.unwrap();
290	/// cache.set("key3", &"value3", None).await.unwrap();
291	///
292	/// let keys = cache.list_keys().await;
293	/// assert_eq!(keys.len(), 3);
294	/// assert!(keys.contains(&"key1".to_string()));
295	/// assert!(keys.contains(&"key2".to_string()));
296	/// assert!(keys.contains(&"key3".to_string()));
297	/// # }
298	/// ```
299	pub async fn list_keys(&self) -> Vec<String> {
300		match self.cleanup_strategy {
301			CleanupStrategy::Naive => {
302				let store = self.store.read().await;
303				store.keys().cloned().collect()
304			}
305			CleanupStrategy::Layered => {
306				if let Some(ref layered_store) = self.layered_store {
307					layered_store.keys().await
308				} else {
309					Vec::new()
310				}
311			}
312		}
313	}
314
315	/// Inspect cache entry timestamps without deserializing the value
316	///
317	/// Returns the creation and last access timestamps for a cache entry.
318	/// This is useful for session metadata retrieval without deserializing the full session data.
319	///
320	/// # Arguments
321	///
322	/// * `key` - The cache key to inspect
323	///
324	/// # Returns
325	///
326	/// * `Ok(Some((created_at, accessed_at)))` - Entry found with timestamps
327	/// * `Ok(None)` - Entry not found or expired
328	/// * `Err(Error)` - Error occurred during inspection
329	///
330	/// # Examples
331	///
332	/// ```
333	/// use reinhardt_utils::cache::{Cache, InMemoryCache};
334	///
335	/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
336	/// let cache = InMemoryCache::new();
337	/// cache.set("session_123", &"session_data", None).await?;
338	///
339	/// if let Some((created, accessed)) = cache.inspect_entry_with_timestamps("session_123").await? {
340	///     println!("Created at: {:?}", created);
341	///     println!("Last accessed: {:?}", accessed);
342	/// }
343	/// # Ok(())
344	/// # }
345	/// ```
346	pub async fn inspect_entry_with_timestamps(
347		&self,
348		key: &str,
349	) -> Result<Option<(SystemTime, Option<SystemTime>)>> {
350		match self.cleanup_strategy {
351			CleanupStrategy::Naive => {
352				let store = self.store.read().await;
353
354				if let Some(entry) = store.get(key) {
355					if entry.is_expired() {
356						return Ok(None);
357					}
358
359					Ok(Some((entry.created_at, entry.accessed_at)))
360				} else {
361					Ok(None)
362				}
363			}
364			CleanupStrategy::Layered => {
365				if let Some(ref layered_store) = self.layered_store {
366					Ok(layered_store.get_entry_timestamps(key).await)
367				} else {
368					Ok(None)
369				}
370			}
371		}
372	}
373
374	/// Inspect a cache entry
375	///
376	/// Returns detailed information about a specific cache entry,
377	/// or None if the entry doesn't exist.
378	///
379	/// # Examples
380	///
381	/// ```
382	/// use reinhardt_utils::cache::{InMemoryCache, Cache};
383	/// use std::time::Duration;
384	///
385	/// # async fn example() {
386	/// let cache = InMemoryCache::new();
387	///
388	/// // Set a value with TTL
389	/// cache.set("key1", &"value1", Some(Duration::from_secs(300))).await.unwrap();
390	///
391	/// // Inspect the entry
392	/// let info = cache.inspect_entry("key1").await;
393	/// assert!(info.is_some());
394	///
395	/// let info = info.unwrap();
396	/// assert_eq!(info.key, "key1");
397	/// assert!(info.has_expiry);
398	/// assert!(info.ttl_seconds.is_some());
399	/// assert!(info.ttl_seconds.unwrap() <= 300);
400	///
401	/// // Non-existent key
402	/// let info = cache.inspect_entry("nonexistent").await;
403	/// assert!(info.is_none());
404	/// # }
405	/// ```
406	pub async fn inspect_entry(&self, key: &str) -> Option<CacheEntryInfo> {
407		let entry = match self.cleanup_strategy {
408			CleanupStrategy::Naive => {
409				let store = self.store.read().await;
410				store.get(key).cloned()
411			}
412			CleanupStrategy::Layered => {
413				if let Some(ref layered_store) = self.layered_store {
414					layered_store.get_entry(key).await
415				} else {
416					None
417				}
418			}
419		};
420
421		entry.map(|entry| {
422			let ttl_seconds = entry.expires_at.and_then(|expires_at| {
423				expires_at
424					.duration_since(SystemTime::now())
425					.ok()
426					.map(|d| d.as_secs())
427			});
428
429			CacheEntryInfo {
430				key: key.to_string(),
431				size: entry.value.len(),
432				has_expiry: entry.expires_at.is_some(),
433				ttl_seconds,
434			}
435		})
436	}
437
438	/// Start automatic cleanup of expired entries
439	///
440	/// Spawns a background task that periodically removes expired entries
441	/// from the cache. The task runs at the specified interval.
442	///
443	/// # Examples
444	///
445	/// ```
446	/// use reinhardt_utils::cache::InMemoryCache;
447	/// use std::time::Duration;
448	///
449	/// # async fn example() {
450	/// let cache = InMemoryCache::new();
451	///
452	/// // Start cleanup every 60 seconds
453	/// cache.start_auto_cleanup(Duration::from_secs(60));
454	///
455	/// // Cache will now automatically clean up expired entries
456	/// # }
457	/// ```
458	pub fn start_auto_cleanup(&self, interval: Duration) {
459		let mut handle_guard = self
460			.cleanup_handle
461			.lock()
462			.unwrap_or_else(|e| e.into_inner());
463
464		// Abort any previously running cleanup task to prevent duplicates
465		if let Some(existing) = handle_guard.take() {
466			existing.abort();
467		}
468
469		let cache = self.clone();
470		let abort_handle = tokio::spawn(async move {
471			let mut interval_timer = tokio::time::interval(interval);
472			loop {
473				interval_timer.tick().await;
474				cache.cleanup_expired().await;
475			}
476		})
477		.abort_handle();
478
479		*handle_guard = Some(abort_handle);
480	}
481
482	/// Stop the background auto-cleanup task if one is running.
483	///
484	/// After calling this method, no further automatic cleanup will occur
485	/// until `start_auto_cleanup` is called again.
486	pub fn stop_auto_cleanup(&self) {
487		let mut handle_guard = self
488			.cleanup_handle
489			.lock()
490			.unwrap_or_else(|e| e.into_inner());
491		if let Some(handle) = handle_guard.take() {
492			handle.abort();
493		}
494	}
495
496	/// Set cleanup interval and start automatic cleanup
497	///
498	/// This is a builder method that sets the cleanup interval
499	/// and immediately starts the background cleanup task.
500	///
501	/// # Examples
502	///
503	/// ```
504	/// use reinhardt_utils::cache::InMemoryCache;
505	/// use std::time::Duration;
506	///
507	/// # async fn example() {
508	/// let cache = InMemoryCache::new()
509	///     .with_auto_cleanup(Duration::from_secs(60));
510	///
511	/// // Cache will automatically clean up expired entries every 60 seconds
512	/// # }
513	/// ```
514	pub fn with_auto_cleanup(mut self, interval: Duration) -> Self {
515		self.cleanup_interval = Some(interval);
516		self.start_auto_cleanup(interval);
517		self
518	}
519}
520
impl Default for InMemoryCache {
	/// Equivalent to [`InMemoryCache::new`]: naive cleanup strategy, no
	/// default TTL, no background cleanup task.
	fn default() -> Self {
		Self::new()
	}
}
526
527#[async_trait]
528impl Cache for InMemoryCache {
529	async fn get<T>(&self, key: &str) -> Result<Option<T>>
530	where
531		T: for<'de> Deserialize<'de> + Send,
532	{
533		match self.cleanup_strategy {
534			CleanupStrategy::Naive => {
535				// Use write lock to update accessed timestamp
536				let mut store = self.store.write().await;
537
538				if let Some(entry) = store.get_mut(key) {
539					if entry.is_expired() {
540						// Entry expired, count as miss
541						self.misses.fetch_add(1, Ordering::Relaxed);
542						return Ok(None);
543					}
544
545					// Cache hit - update access timestamp
546					entry.touch();
547					self.hits.fetch_add(1, Ordering::Relaxed);
548
549					let value = serde_json::from_slice(&entry.value)
550						.map_err(|e| Error::Serialization(e.to_string()))?;
551					Ok(Some(value))
552				} else {
553					// Cache miss
554					self.misses.fetch_add(1, Ordering::Relaxed);
555					Ok(None)
556				}
557			}
558			CleanupStrategy::Layered => {
559				if let Some(ref layered_store) = self.layered_store {
560					if let Some(data) = layered_store.get(key).await {
561						// Cache hit
562						self.hits.fetch_add(1, Ordering::Relaxed);
563						let value = serde_json::from_slice(&data)
564							.map_err(|e| Error::Serialization(e.to_string()))?;
565						Ok(Some(value))
566					} else {
567						// Cache miss
568						self.misses.fetch_add(1, Ordering::Relaxed);
569						Ok(None)
570					}
571				} else {
572					self.misses.fetch_add(1, Ordering::Relaxed);
573					Ok(None)
574				}
575			}
576		}
577	}
578
579	async fn set<T>(&self, key: &str, value: &T, ttl: Option<Duration>) -> Result<()>
580	where
581		T: Serialize + Send + Sync,
582	{
583		let serialized =
584			serde_json::to_vec(value).map_err(|e| Error::Serialization(e.to_string()))?;
585
586		let ttl = ttl.or(self.default_ttl);
587
588		match self.cleanup_strategy {
589			CleanupStrategy::Naive => {
590				let entry = CacheEntry::new(serialized, ttl);
591				let mut store = self.store.write().await;
592				store.insert(key.to_string(), entry);
593			}
594			CleanupStrategy::Layered => {
595				if let Some(ref layered_store) = self.layered_store {
596					layered_store.set(key.to_string(), serialized, ttl).await;
597				}
598			}
599		}
600
601		Ok(())
602	}
603
604	async fn delete(&self, key: &str) -> Result<()> {
605		match self.cleanup_strategy {
606			CleanupStrategy::Naive => {
607				let mut store = self.store.write().await;
608				store.remove(key);
609			}
610			CleanupStrategy::Layered => {
611				if let Some(ref layered_store) = self.layered_store {
612					layered_store.delete(key).await;
613				}
614			}
615		}
616		Ok(())
617	}
618
619	async fn has_key(&self, key: &str) -> Result<bool> {
620		match self.cleanup_strategy {
621			CleanupStrategy::Naive => {
622				let store = self.store.read().await;
623
624				if let Some(entry) = store.get(key) {
625					Ok(!entry.is_expired())
626				} else {
627					Ok(false)
628				}
629			}
630			CleanupStrategy::Layered => {
631				if let Some(ref layered_store) = self.layered_store {
632					Ok(layered_store.has_key(key).await)
633				} else {
634					Ok(false)
635				}
636			}
637		}
638	}
639
640	async fn clear(&self) -> Result<()> {
641		match self.cleanup_strategy {
642			CleanupStrategy::Naive => {
643				let mut store = self.store.write().await;
644				store.clear();
645			}
646			CleanupStrategy::Layered => {
647				if let Some(ref layered_store) = self.layered_store {
648					layered_store.clear().await;
649				}
650			}
651		}
652		Ok(())
653	}
654}
655
656#[cfg(test)]
657mod tests {
658	use super::*;
659
660	/// Polls a condition until it returns true or timeout is reached.
661	async fn poll_until<F, Fut>(
662		timeout: std::time::Duration,
663		interval: std::time::Duration,
664		mut condition: F,
665	) -> std::result::Result<(), String>
666	where
667		F: FnMut() -> Fut,
668		Fut: std::future::Future<Output = bool>,
669	{
670		let start = std::time::Instant::now();
671		while start.elapsed() < timeout {
672			if condition().await {
673				return Ok(());
674			}
675			tokio::time::sleep(interval).await;
676		}
677		Err(format!("Timeout after {:?} waiting for condition", timeout))
678	}
679
680	#[tokio::test]
681	async fn test_in_memory_cache_basic() {
682		let cache = InMemoryCache::new();
683
684		// Set and get
685		cache.set("key1", &"value1", None).await.unwrap();
686		let value: Option<String> = cache.get("key1").await.unwrap();
687		assert_eq!(value, Some("value1".to_string()));
688
689		// Has key
690		assert!(cache.has_key("key1").await.unwrap());
691		assert!(!cache.has_key("key2").await.unwrap());
692
693		// Delete
694		cache.delete("key1").await.unwrap();
695		let value: Option<String> = cache.get("key1").await.unwrap();
696		assert_eq!(value, None);
697	}
698
	#[tokio::test]
	async fn test_in_memory_cache_ttl() {
		// Timing-sensitive: polls for expiry instead of sleeping a fixed
		// amount, to reduce flakiness on slow machines.
		let cache = InMemoryCache::new();

		// Set with short TTL
		cache
			.set("key1", &"value1", Some(Duration::from_millis(100)))
			.await
			.unwrap();

		// Should exist immediately
		let value: Option<String> = cache.get("key1").await.unwrap();
		assert_eq!(value, Some("value1".to_string()));

		// Poll until key expires
		poll_until(
			Duration::from_millis(200),
			Duration::from_millis(10),
			|| async {
				let value: Option<String> = cache.get("key1").await.unwrap();
				value.is_none()
			},
		)
		.await
		.expect("Key should expire within 200ms");
	}
725
726	#[tokio::test]
727	async fn test_in_memory_cache_many() {
728		let cache = InMemoryCache::new();
729
730		// Set many
731		let mut values = HashMap::new();
732		values.insert("key1".to_string(), "value1".to_string());
733		values.insert("key2".to_string(), "value2".to_string());
734		cache.set_many(values, None).await.unwrap();
735
736		// Get many
737		let results: HashMap<String, String> =
738			cache.get_many(&["key1", "key2", "key3"]).await.unwrap();
739		assert_eq!(results.len(), 2);
740		assert_eq!(results.get("key1"), Some(&"value1".to_string()));
741		assert_eq!(results.get("key2"), Some(&"value2".to_string()));
742
743		// Delete many
744		cache.delete_many(&["key1", "key2"]).await.unwrap();
745		assert!(!cache.has_key("key1").await.unwrap());
746		assert!(!cache.has_key("key2").await.unwrap());
747	}
748
749	#[tokio::test]
750	async fn test_in_memory_cache_incr_decr() {
751		let cache = InMemoryCache::new();
752
753		// Increment from zero
754		let value = cache.incr("counter", 5).await.unwrap();
755		assert_eq!(value, 5);
756
757		// Increment again
758		let value = cache.incr("counter", 3).await.unwrap();
759		assert_eq!(value, 8);
760
761		// Decrement
762		let value = cache.decr("counter", 2).await.unwrap();
763		assert_eq!(value, 6);
764	}
765
766	#[tokio::test]
767	async fn test_in_memory_cache_clear() {
768		let cache = InMemoryCache::new();
769
770		cache.set("key1", &"value1", None).await.unwrap();
771		cache.set("key2", &"value2", None).await.unwrap();
772
773		cache.clear().await.unwrap();
774
775		assert!(!cache.has_key("key1").await.unwrap());
776		assert!(!cache.has_key("key2").await.unwrap());
777	}
778
	#[tokio::test]
	async fn test_cache_cleanup_expired() {
		// Timing-sensitive: waits (via polling) for one TTL'd key to expire
		// while a no-TTL key must survive cleanup.
		let cache = InMemoryCache::new();

		// Set some values with different TTLs
		cache
			.set("key1", &"value1", Some(Duration::from_millis(100)))
			.await
			.unwrap();
		cache.set("key2", &"value2", None).await.unwrap();

		// Poll until first key expires
		poll_until(
			Duration::from_millis(200),
			Duration::from_millis(10),
			|| async {
				let value: Option<String> = cache.get("key1").await.unwrap();
				value.is_none()
			},
		)
		.await
		.expect("Key1 should expire within 200ms");

		// Cleanup expired entries
		cache.cleanup_expired().await;

		// key1 should be gone, key2 should remain
		assert!(!cache.has_key("key1").await.unwrap());
		assert!(cache.has_key("key2").await.unwrap());
	}
809
	#[tokio::test]
	async fn test_cache_statistics_basic() {
		// NOTE: hit/miss counters are cumulative across the whole test, so
		// the order of the get() calls below matters to every assertion.
		let cache = InMemoryCache::new();

		// Initially, stats should be zero
		let stats = cache.get_statistics().await;
		assert_eq!(stats.hits, 0);
		assert_eq!(stats.misses, 0);
		assert_eq!(stats.total_requests, 0);
		assert_eq!(stats.entry_count, 0);
		assert_eq!(stats.memory_usage, 0);

		// Set some values
		cache.set("key1", &"value1", None).await.unwrap();
		cache.set("key2", &"value2", None).await.unwrap();

		// Check entry count (set() alone must not touch hit/miss counters)
		let stats = cache.get_statistics().await;
		assert_eq!(stats.entry_count, 2);
		assert!(stats.memory_usage > 0);

		// Get existing keys (hits)
		let _: Option<String> = cache.get("key1").await.unwrap();
		let _: Option<String> = cache.get("key2").await.unwrap();

		let stats = cache.get_statistics().await;
		assert_eq!(stats.hits, 2);
		assert_eq!(stats.misses, 0);
		assert_eq!(stats.total_requests, 2);

		// Get non-existing key (miss)
		let _: Option<String> = cache.get("key3").await.unwrap();

		let stats = cache.get_statistics().await;
		assert_eq!(stats.hits, 2);
		assert_eq!(stats.misses, 1);
		assert_eq!(stats.total_requests, 3);
	}
848
	#[tokio::test]
	async fn test_cache_statistics_hit_miss_rate() {
		let cache = InMemoryCache::new();

		cache.set("key1", &"value1", None).await.unwrap();
		cache.set("key2", &"value2", None).await.unwrap();

		// 2 hits
		let _: Option<String> = cache.get("key1").await.unwrap();
		let _: Option<String> = cache.get("key2").await.unwrap();

		// 1 miss
		let _: Option<String> = cache.get("key3").await.unwrap();

		// NOTE(review): exact float equality assumes hit_rate()/miss_rate()
		// compute the same hits/total division as these literals — confirm in
		// CacheStatistics if this ever flakes.
		let stats = cache.get_statistics().await;
		assert_eq!(stats.hit_rate(), 2.0 / 3.0);
		assert_eq!(stats.miss_rate(), 1.0 / 3.0);
	}
867
	#[tokio::test]
	async fn test_cache_statistics_expired_counts_as_miss() {
		// Timing-sensitive: uses a fixed sleep that must exceed the 10ms TTL.
		let cache = InMemoryCache::new();

		// Set with short TTL
		cache
			.set("key1", &"value1", Some(Duration::from_millis(10)))
			.await
			.unwrap();

		// Wait for key to expire (10ms TTL + 5ms buffer)
		tokio::time::sleep(Duration::from_millis(15)).await;

		// Access expired key - should count as miss
		let value: Option<String> = cache.get("key1").await.unwrap();
		assert!(value.is_none(), "Key should have expired");

		// Statistics should show exactly 1 miss and 0 hits
		let stats = cache.get_statistics().await;
		assert_eq!(stats.hits, 0, "Expected 0 hits, got {}", stats.hits);
		assert_eq!(stats.misses, 1, "Expected 1 miss, got {}", stats.misses);
	}
890
	#[tokio::test]
	async fn test_cache_statistics_memory_usage() {
		let cache = InMemoryCache::new();

		// Set some values
		cache.set("key1", &"short", None).await.unwrap();
		cache.set("key2", &"a longer value", None).await.unwrap();

		// Usage counts serialized value bytes, so it must be non-zero here.
		let stats = cache.get_statistics().await;
		assert!(stats.memory_usage > 0);

		// Memory usage should increase with more data
		let initial_usage = stats.memory_usage;

		cache
			.set("key3", &"even longer value here", None)
			.await
			.unwrap();

		// Adding a third entry strictly increases reported usage.
		let stats = cache.get_statistics().await;
		assert!(stats.memory_usage > initial_usage);
	}
913
914	#[tokio::test]
915	async fn test_list_keys() {
916		let cache = InMemoryCache::new();
917
918		// Initially empty
919		let keys = cache.list_keys().await;
920		assert_eq!(keys.len(), 0);
921
922		// Add some keys
923		cache.set("key1", &"value1", None).await.unwrap();
924		cache.set("key2", &"value2", None).await.unwrap();
925		cache.set("key3", &"value3", None).await.unwrap();
926
927		let keys = cache.list_keys().await;
928		assert_eq!(keys.len(), 3);
929		assert!(keys.contains(&"key1".to_string()));
930		assert!(keys.contains(&"key2".to_string()));
931		assert!(keys.contains(&"key3".to_string()));
932
933		// Delete a key
934		cache.delete("key2").await.unwrap();
935
936		let keys = cache.list_keys().await;
937		assert_eq!(keys.len(), 2);
938		assert!(keys.contains(&"key1".to_string()));
939		assert!(!keys.contains(&"key2".to_string()));
940		assert!(keys.contains(&"key3".to_string()));
941	}
942
	#[tokio::test]
	async fn test_list_keys_includes_expired() {
		// Relies on naive-mode get() NOT evicting expired entries: the
		// expired key must still be listed until cleanup_expired() runs.
		let cache = InMemoryCache::new();

		// Set a key with short TTL
		cache
			.set("expired_key", &"value", Some(Duration::from_millis(10)))
			.await
			.unwrap();

		// Set a key without TTL
		cache.set("valid_key", &"value", None).await.unwrap();

		// Poll until first key expires
		poll_until(
			Duration::from_millis(50),
			Duration::from_millis(5),
			|| async {
				let value: Option<String> = cache.get("expired_key").await.unwrap();
				value.is_none()
			},
		)
		.await
		.expect("Expired key should expire within 50ms");

		// list_keys should include expired keys (until cleanup)
		let keys = cache.list_keys().await;
		assert_eq!(keys.len(), 2);
		assert!(keys.contains(&"expired_key".to_string()));
		assert!(keys.contains(&"valid_key".to_string()));

		// After cleanup, expired key should be gone
		cache.cleanup_expired().await;
		let keys = cache.list_keys().await;
		assert_eq!(keys.len(), 1);
		assert!(!keys.contains(&"expired_key".to_string()));
		assert!(keys.contains(&"valid_key".to_string()));
	}
981
982	#[tokio::test]
983	async fn test_inspect_entry_basic() {
984		let cache = InMemoryCache::new();
985
986		// Non-existent key
987		let info = cache.inspect_entry("nonexistent").await;
988		assert!(info.is_none());
989
990		// Add a key without expiry
991		cache.set("key1", &"value1", None).await.unwrap();
992
993		let info = cache.inspect_entry("key1").await;
994		let info = info.unwrap();
995		assert_eq!(info.key, "key1");
996		assert!(!info.has_expiry);
997		assert!(info.ttl_seconds.is_none());
998		assert!(info.size > 0);
999	}
1000
1001	#[tokio::test]
1002	async fn test_inspect_entry_with_ttl() {
1003		let cache = InMemoryCache::new();
1004
1005		// Set a value with TTL
1006		cache
1007			.set("key1", &"value1", Some(Duration::from_secs(300)))
1008			.await
1009			.unwrap();
1010
1011		let info = cache.inspect_entry("key1").await;
1012		let info = info.unwrap();
1013		assert_eq!(info.key, "key1");
1014		assert!(info.has_expiry);
1015		assert!(info.ttl_seconds.is_some());
1016
1017		// TTL should be <= 300 seconds
1018		let ttl = info.ttl_seconds.unwrap();
1019		assert!(ttl <= 300);
1020		assert!(ttl > 0);
1021	}
1022
1023	#[tokio::test]
1024	async fn test_inspect_entry_size() {
1025		let cache = InMemoryCache::new();
1026
1027		// Set values of different sizes
1028		cache.set("small", &"x", None).await.unwrap();
1029		cache.set("large", &"x".repeat(1000), None).await.unwrap();
1030
1031		let small_info = cache.inspect_entry("small").await.unwrap();
1032		let large_info = cache.inspect_entry("large").await.unwrap();
1033
1034		assert!(large_info.size > small_info.size);
1035	}
1036
	#[tokio::test]
	async fn test_inspect_entry_expired() {
		// Timing-sensitive; also relies on inspect_entry still returning
		// expired-but-not-cleaned entries (with ttl_seconds == None).
		let cache = InMemoryCache::new();

		// Set with short TTL
		cache
			.set("key1", &"value1", Some(Duration::from_millis(10)))
			.await
			.unwrap();

		// Inspect before expiration
		let info = cache.inspect_entry("key1").await;
		assert!(info.is_some());

		// Poll until key expires
		poll_until(
			Duration::from_millis(50),
			Duration::from_millis(5),
			|| async {
				let value: Option<String> = cache.get("key1").await.unwrap();
				value.is_none()
			},
		)
		.await
		.expect("Key should expire within 50ms");

		// Entry still exists (not cleaned up)
		let info = cache.inspect_entry("key1").await;
		let info = info.unwrap();
		assert!(info.has_expiry);
		// TTL should be None because it's expired
		assert!(info.ttl_seconds.is_none());

		// After cleanup, entry should be gone
		cache.cleanup_expired().await;
		let info = cache.inspect_entry("key1").await;
		assert!(info.is_none());
	}
1075
1076	#[tokio::test]
1077	async fn test_start_auto_cleanup() {
1078		let cache = InMemoryCache::new();
1079
1080		// Set some values with short TTL
1081		cache
1082			.set("key1", &"value1", Some(Duration::from_millis(50)))
1083			.await
1084			.unwrap();
1085		cache
1086			.set("key2", &"value2", Some(Duration::from_millis(50)))
1087			.await
1088			.unwrap();
1089
1090		// Start auto cleanup with short interval
1091		cache.start_auto_cleanup(Duration::from_millis(30));
1092
1093		// Keys should exist initially
1094		assert!(cache.has_key("key1").await.unwrap());
1095		assert!(cache.has_key("key2").await.unwrap());
1096
1097		// Poll until auto-cleanup removes expired keys
1098		poll_until(
1099			Duration::from_millis(200),
1100			Duration::from_millis(10),
1101			|| async {
1102				!cache.has_key("key1").await.unwrap() && !cache.has_key("key2").await.unwrap()
1103			},
1104		)
1105		.await
1106		.expect("Keys should be auto-cleaned within 200ms");
1107
1108		// Keys should be cleaned up automatically
1109		assert!(!cache.has_key("key1").await.unwrap());
1110		assert!(!cache.has_key("key2").await.unwrap());
1111	}
1112
1113	#[tokio::test]
1114	async fn test_with_auto_cleanup() {
1115		let cache = InMemoryCache::new().with_auto_cleanup(Duration::from_millis(30));
1116
1117		// Set values with short TTL
1118		cache
1119			.set("key1", &"value1", Some(Duration::from_millis(50)))
1120			.await
1121			.unwrap();
1122		cache
1123			.set("key2", &"value2", Some(Duration::from_millis(50)))
1124			.await
1125			.unwrap();
1126
1127		// Keys should exist initially
1128		assert!(cache.has_key("key1").await.unwrap());
1129		assert!(cache.has_key("key2").await.unwrap());
1130
1131		// Poll until auto-cleanup removes expired keys
1132		poll_until(
1133			Duration::from_millis(200),
1134			Duration::from_millis(10),
1135			|| async {
1136				!cache.has_key("key1").await.unwrap() && !cache.has_key("key2").await.unwrap()
1137			},
1138		)
1139		.await
1140		.expect("Keys should be auto-cleaned within 200ms");
1141	}
1142
	/// After `stop_auto_cleanup`, the background task no longer evicts expired
	/// entries; they only become invisible via passive expiration on access.
	#[tokio::test]
	async fn test_stop_auto_cleanup() {
		let cache = InMemoryCache::new();

		// Start auto cleanup with a 30ms interval
		cache.start_auto_cleanup(Duration::from_millis(30));

		// Set a value with short TTL (50ms)
		cache
			.set("key1", &"value1", Some(Duration::from_millis(50)))
			.await
			.unwrap();

		// Stop cleanup before it can run
		// NOTE(review): the first 30ms tick could race this stop call; the
		// assertion below holds either way, since it only requires expiry.
		cache.stop_auto_cleanup();

		// Wait long enough for cleanup to have run if it were still active
		tokio::time::sleep(Duration::from_millis(150)).await;

		// Key should be expired but not cleaned up from store (only passive
		// expiration) — `get` hides it regardless of physical removal
		let value: Option<String> = cache.get("key1").await.unwrap();
		assert!(value.is_none(), "Key should be expired");
	}
1166
	/// Calling `start_auto_cleanup` twice must replace (abort) the first task
	/// rather than leak a second one; cleanup still works afterwards.
	#[tokio::test]
	async fn test_start_auto_cleanup_replaces_previous() {
		let cache = InMemoryCache::new();

		// Start cleanup twice - should not spawn duplicate tasks
		cache.start_auto_cleanup(Duration::from_millis(30));
		cache.start_auto_cleanup(Duration::from_millis(30));

		// Set a value with short TTL
		cache
			.set("key1", &"value1", Some(Duration::from_millis(50)))
			.await
			.unwrap();

		// Wait for the surviving (second) cleanup task to evict the key
		poll_until(
			Duration::from_millis(200),
			Duration::from_millis(10),
			|| async { !cache.has_key("key1").await.unwrap() },
		)
		.await
		.expect("Key should be cleaned up");
	}
1190
1191	#[tokio::test]
1192	async fn test_auto_cleanup_preserves_non_expired() {
1193		let cache = InMemoryCache::new();
1194
1195		// Start auto cleanup
1196		cache.start_auto_cleanup(Duration::from_millis(30));
1197
1198		// Set one key with short TTL and one without
1199		cache
1200			.set("short_lived", &"value1", Some(Duration::from_millis(50)))
1201			.await
1202			.unwrap();
1203		cache.set("long_lived", &"value2", None).await.unwrap();
1204
1205		// Both should exist initially
1206		assert!(cache.has_key("short_lived").await.unwrap());
1207		assert!(cache.has_key("long_lived").await.unwrap());
1208
1209		// Poll until auto-cleanup removes short_lived key
1210		poll_until(
1211			Duration::from_millis(200),
1212			Duration::from_millis(10),
1213			|| async {
1214				!cache.has_key("short_lived").await.unwrap()
1215					&& cache.has_key("long_lived").await.unwrap()
1216			},
1217		)
1218		.await
1219		.expect("Short-lived key should be cleaned, long-lived should remain");
1220	}
1221
1222	// Layered cleanup strategy tests
1223
1224	#[tokio::test]
1225	async fn test_layered_cache_basic() {
1226		let cache = InMemoryCache::with_layered_cleanup();
1227
1228		// Set and get
1229		cache.set("key1", &"value1", None).await.unwrap();
1230		let value: Option<String> = cache.get("key1").await.unwrap();
1231		assert_eq!(value, Some("value1".to_string()));
1232
1233		// Has key
1234		assert!(cache.has_key("key1").await.unwrap());
1235		assert!(!cache.has_key("key2").await.unwrap());
1236
1237		// Delete
1238		cache.delete("key1").await.unwrap();
1239		let value: Option<String> = cache.get("key1").await.unwrap();
1240		assert_eq!(value, None);
1241	}
1242
1243	#[tokio::test]
1244	async fn test_layered_cache_ttl() {
1245		let cache = InMemoryCache::with_layered_cleanup();
1246
1247		// Set with short TTL
1248		cache
1249			.set("key1", &"value1", Some(Duration::from_millis(100)))
1250			.await
1251			.unwrap();
1252
1253		// Should exist immediately
1254		let value: Option<String> = cache.get("key1").await.unwrap();
1255		assert_eq!(value, Some("value1".to_string()));
1256
1257		// Poll until key expires (passive expiration on get)
1258		poll_until(
1259			Duration::from_millis(200),
1260			Duration::from_millis(10),
1261			|| async {
1262				let value: Option<String> = cache.get("key1").await.unwrap();
1263				value.is_none()
1264			},
1265		)
1266		.await
1267		.expect("Key should expire within 200ms");
1268	}
1269
1270	#[tokio::test]
1271	async fn test_layered_cleanup_expired() {
1272		let cache = InMemoryCache::with_layered_cleanup();
1273
1274		// Set some values with different TTLs
1275		cache
1276			.set("key1", &"value1", Some(Duration::from_millis(50)))
1277			.await
1278			.unwrap();
1279		cache.set("key2", &"value2", None).await.unwrap();
1280
1281		// Wait for first key to expire
1282		tokio::time::sleep(Duration::from_millis(100)).await;
1283
1284		// Cleanup expired entries
1285		cache.cleanup_expired().await;
1286
1287		// key1 should be gone, key2 should remain
1288		assert!(!cache.has_key("key1").await.unwrap());
1289		assert!(cache.has_key("key2").await.unwrap());
1290	}
1291
1292	#[tokio::test]
1293	async fn test_layered_cache_statistics() {
1294		let cache = InMemoryCache::with_layered_cleanup();
1295
1296		// Set some values
1297		cache.set("key1", &"value1", None).await.unwrap();
1298		cache.set("key2", &"value2", None).await.unwrap();
1299
1300		let stats = cache.get_statistics().await;
1301		assert_eq!(stats.entry_count, 2);
1302
1303		// Get existing keys (hits)
1304		let _: Option<String> = cache.get("key1").await.unwrap();
1305		let _: Option<String> = cache.get("key2").await.unwrap();
1306
1307		let stats = cache.get_statistics().await;
1308		assert_eq!(stats.hits, 2);
1309		assert_eq!(stats.misses, 0);
1310
1311		// Get non-existing key (miss)
1312		let _: Option<String> = cache.get("key3").await.unwrap();
1313
1314		let stats = cache.get_statistics().await;
1315		assert_eq!(stats.hits, 2);
1316		assert_eq!(stats.misses, 1);
1317	}
1318
1319	#[tokio::test]
1320	async fn test_layered_list_keys() {
1321		let cache = InMemoryCache::with_layered_cleanup();
1322
1323		// Initially empty
1324		let keys = cache.list_keys().await;
1325		assert_eq!(keys.len(), 0);
1326
1327		// Add some keys
1328		cache.set("key1", &"value1", None).await.unwrap();
1329		cache.set("key2", &"value2", None).await.unwrap();
1330		cache.set("key3", &"value3", None).await.unwrap();
1331
1332		let keys = cache.list_keys().await;
1333		assert_eq!(keys.len(), 3);
1334		assert!(keys.contains(&"key1".to_string()));
1335		assert!(keys.contains(&"key2".to_string()));
1336		assert!(keys.contains(&"key3".to_string()));
1337	}
1338
1339	#[tokio::test]
1340	async fn test_layered_inspect_entry() {
1341		let cache = InMemoryCache::with_layered_cleanup();
1342
1343		// Non-existent key
1344		let info = cache.inspect_entry("nonexistent").await;
1345		assert!(info.is_none());
1346
1347		// Add a key with TTL
1348		cache
1349			.set("key1", &"value1", Some(Duration::from_secs(300)))
1350			.await
1351			.unwrap();
1352
1353		let info = cache.inspect_entry("key1").await;
1354		let info = info.unwrap();
1355		assert_eq!(info.key, "key1");
1356		assert!(info.has_expiry);
1357		assert!(info.ttl_seconds.is_some());
1358		assert!(info.ttl_seconds.unwrap() <= 300);
1359	}
1360
1361	#[tokio::test]
1362	async fn test_layered_large_dataset() {
1363		let cache = InMemoryCache::with_layered_cleanup();
1364
1365		// Set many keys with short TTL
1366		let num_keys = 1000;
1367		for i in 0..num_keys {
1368			cache
1369				.set(
1370					&format!("key{}", i),
1371					&format!("value{}", i),
1372					Some(Duration::from_millis(50)),
1373				)
1374				.await
1375				.unwrap();
1376		}
1377
1378		// All keys should exist
1379		let stats = cache.get_statistics().await;
1380		assert_eq!(stats.entry_count, num_keys);
1381
1382		// Wait for expiration
1383		tokio::time::sleep(Duration::from_millis(60)).await;
1384
1385		// Cleanup (should be fast with layered strategy)
1386		cache.cleanup_expired().await;
1387
1388		// All keys should be gone
1389		let stats = cache.get_statistics().await;
1390		assert_eq!(stats.entry_count, 0);
1391	}
1392
1393	#[tokio::test]
1394	async fn test_custom_layered_cleanup() {
1395		// Create cache with custom sampler (sample 50 keys, 30% threshold)
1396		let cache = InMemoryCache::with_custom_layered_cleanup(50, 0.30);
1397
1398		// Set many keys
1399		for i in 0..100 {
1400			cache
1401				.set(
1402					&format!("key{}", i),
1403					&format!("value{}", i),
1404					Some(Duration::from_millis(50)),
1405				)
1406				.await
1407				.unwrap();
1408		}
1409
1410		// Wait for expiration
1411		tokio::time::sleep(Duration::from_millis(100)).await;
1412
1413		// Cleanup with custom sampler
1414		cache.cleanup_expired().await;
1415
1416		// All keys should be cleaned up
1417		let stats = cache.get_statistics().await;
1418		assert_eq!(stats.entry_count, 0);
1419	}
1420}