// crates_docs/cache/memory.rs

1//! Memory cache implementation
2//!
3//! Memory cache using `moka::sync::Cache` with `TinyLFU` eviction policy.
4//! This provides better performance and hit rate than simple LRU.
5
6use std::time::Duration;
7
/// Cache entry with optional TTL
#[derive(Clone, Debug)]
struct CacheEntry {
    /// The cached payload, stored as an owned string.
    value: String,
    /// Per-entry time-to-live; `None` means the entry never expires on its
    /// own (it is only removed by invalidation or capacity-based eviction).
    ttl: Option<Duration>,
}
14
/// Expiry implementation for per-entry TTL support
///
/// Zero-sized marker type handed to the moka builder; moka consults it on
/// every insert to decide how long the new entry may live.
#[derive(Debug, Clone, Default)]
struct CacheExpiry;
19impl moka::Expiry<String, CacheEntry> for CacheExpiry {
20    fn expire_after_create(
21        &self,
22        _key: &String,
23        value: &CacheEntry,
24        _created_at: std::time::Instant,
25    ) -> Option<Duration> {
26        value.ttl
27    }
28}
29
/// Memory cache implementation using `moka::sync::Cache`
///
/// Features:
/// - Lock-free concurrent access
/// - `TinyLFU` eviction policy (better hit rate than LRU)
/// - Per-entry TTL support via Expiry trait
/// - Automatic expiration cleanup
pub struct MemoryCache {
    /// Underlying moka cache; each stored `CacheEntry` carries its own
    /// optional TTL, which `CacheExpiry` turns into a per-entry expiration.
    cache: moka::sync::Cache<String, CacheEntry>,
}
40
41impl MemoryCache {
42    /// Create a new memory cache
43    ///
44    /// # Arguments
45    /// * `max_size` - Maximum number of cache entries
46    #[must_use]
47    pub fn new(max_size: usize) -> Self {
48        Self {
49            cache: moka::sync::Cache::builder()
50                .max_capacity(max_size as u64)
51                .expire_after(CacheExpiry)
52                .build(),
53        }
54    }
55}
56
57#[async_trait::async_trait]
58impl super::Cache for MemoryCache {
59    async fn get(&self, key: &str) -> Option<String> {
60        self.cache.get(key).map(|entry| entry.value.clone())
61    }
62
63    async fn set(
64        &self,
65        key: String,
66        value: String,
67        ttl: Option<Duration>,
68    ) -> crate::error::Result<()> {
69        let entry = CacheEntry { value, ttl };
70        self.cache.insert(key, entry);
71        Ok(())
72    }
73
74    async fn delete(&self, key: &str) -> crate::error::Result<()> {
75        self.cache.invalidate(key);
76        Ok(())
77    }
78
79    async fn clear(&self) -> crate::error::Result<()> {
80        self.cache.invalidate_all();
81        Ok(())
82    }
83
84    async fn exists(&self, key: &str) -> bool {
85        self.cache.contains_key(key)
86    }
87}
88
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cache::Cache;
    use tokio::time::sleep;

    /// Round-trips a value through set/get, then exercises delete and clear.
    #[tokio::test]
    async fn test_memory_cache_basic() {
        let cache = MemoryCache::new(10);

        // Test set and get
        cache
            .set("key1".to_string(), "value1".to_string(), None)
            .await
            .expect("set should succeed");
        assert_eq!(cache.get("key1").await, Some("value1".to_string()));

        // Test delete
        cache.delete("key1").await.expect("delete should succeed");
        assert_eq!(cache.get("key1").await, None);

        // Test clear
        cache
            .set("key2".to_string(), "value2".to_string(), None)
            .await
            .expect("set should succeed");
        cache.clear().await.expect("clear should succeed");
        // Wait for async invalidation to complete
        cache.cache.run_pending_tasks();
        assert_eq!(cache.get("key2").await, None);
    }

    /// An entry created with a 100 ms TTL must be gone after 150 ms.
    #[tokio::test]
    async fn test_memory_cache_ttl() {
        let cache = MemoryCache::new(10);

        // Test cache with TTL
        cache
            .set(
                "key1".to_string(),
                "value1".to_string(),
                Some(Duration::from_millis(100)),
            )
            .await
            .expect("set should succeed");
        assert_eq!(cache.get("key1").await, Some("value1".to_string()));

        // Wait for expiration
        sleep(Duration::from_millis(150)).await;
        // Run pending tasks to ensure expiration is processed
        cache.cache.run_pending_tasks();
        assert_eq!(cache.get("key1").await, None);
    }

    /// Overfilling the cache must not leave it above its configured capacity.
    #[tokio::test]
    async fn test_memory_cache_eviction() {
        // moka's TinyLFU policy may admit or reject individual entries based
        // on frequency, but after pending maintenance runs the cache must
        // not hold more entries than its configured capacity.
        let cache = MemoryCache::new(3);

        // Fill cache with more entries than capacity
        for i in 0..5 {
            cache
                .set(format!("key{i}"), format!("value{i}"), None)
                .await
                .expect("set should succeed");
        }

        // Run pending tasks to ensure eviction is processed
        cache.cache.run_pending_tasks();

        // The previous assertion (`entry_count <= 5`) could never fail after
        // inserting exactly 5 entries; assert the real capacity bound.
        let entry_count = cache.cache.entry_count();
        assert!(
            entry_count <= 3,
            "Entry count should be at most the capacity of 3, got {entry_count}"
        );
    }

    /// `exists` reports presence without requiring a `get`.
    #[tokio::test]
    async fn test_memory_cache_exists() {
        let cache = MemoryCache::new(10);

        cache
            .set("key1".to_string(), "value1".to_string(), None)
            .await
            .expect("set should succeed");
        assert!(cache.exists("key1").await);
        assert!(!cache.exists("key2").await);
    }
}