// crates_docs/cache/memory.rs
//! Memory cache implementation
2//!
3//! Memory cache using `moka::sync::Cache` with `TinyLFU` eviction policy.
4//! This provides better performance and hit rate than simple LRU.
5
6use std::time::Duration;
7
/// Cache entry with optional TTL
#[derive(Clone, Debug)]
struct CacheEntry {
    /// Cached payload, stored as an owned string.
    value: String,
    /// Per-entry time-to-live chosen at `set` time; `None` means no
    /// per-entry expiration is applied (see `CacheExpiry`).
    ttl: Option<Duration>,
}
14
/// Expiry implementation for per-entry TTL support
///
/// Stateless marker type: it carries no configuration of its own; the
/// actual expiration policy lives in its `moka::Expiry` implementation.
#[derive(Debug, Clone, Default)]
struct CacheExpiry;
18
impl moka::Expiry<String, CacheEntry> for CacheExpiry {
    /// Called by moka when an entry is inserted; the returned duration
    /// becomes that entry's expiration. Returning `None` leaves the entry
    /// unaffected by this policy (moka's default of no per-entry expiry).
    fn expire_after_create(
        &self,
        _key: &String,
        value: &CacheEntry,
        _created_at: std::time::Instant,
    ) -> Option<Duration> {
        // Propagate whatever TTL the caller supplied at `set` time.
        value.ttl
    }
}
29
/// Memory cache implementation using `moka::sync::Cache`
///
/// Features:
/// - Lock-free concurrent access
/// - `TinyLFU` eviction policy (better hit rate than LRU)
/// - Per-entry TTL support via Expiry trait
/// - Automatic expiration cleanup
pub struct MemoryCache {
    /// Underlying moka cache; each stored entry carries its own optional TTL.
    cache: moka::sync::Cache<String, CacheEntry>,
}
40
41impl MemoryCache {
42    /// Create a new memory cache
43    ///
44    /// # Arguments
45    /// * `max_size` - Maximum number of cache entries
46    #[must_use]
47    pub fn new(max_size: usize) -> Self {
48        Self {
49            cache: moka::sync::Cache::builder()
50                .max_capacity(max_size as u64)
51                .expire_after(CacheExpiry)
52                .build(),
53        }
54    }
55}
56
57#[async_trait::async_trait]
58impl super::Cache for MemoryCache {
59    #[tracing::instrument(skip(self), level = "trace")]
60    async fn get(&self, key: &str) -> Option<String> {
61        let result = self.cache.get(key).map(|entry| entry.value.clone());
62        if result.is_some() {
63            tracing::trace!(cache_type = "memory", key = %key, "Cache hit");
64        } else {
65            tracing::trace!(cache_type = "memory", key = %key, "Cache miss");
66        }
67        result
68    }
69
70    #[tracing::instrument(skip(self), level = "trace")]
71    async fn set(
72        &self,
73        key: String,
74        value: String,
75        ttl: Option<Duration>,
76    ) -> crate::error::Result<()> {
77        let entry = CacheEntry { value, ttl };
78        tracing::trace!(cache_type = "memory", key = %key, "Setting cache entry");
79        self.cache.insert(key, entry);
80        Ok(())
81    }
82
83    #[tracing::instrument(skip(self), level = "trace")]
84    async fn delete(&self, key: &str) -> crate::error::Result<()> {
85        tracing::trace!(cache_type = "memory", key = %key, "Deleting cache entry");
86        self.cache.invalidate(key);
87        Ok(())
88    }
89
90    #[tracing::instrument(skip(self), level = "trace")]
91    async fn clear(&self) -> crate::error::Result<()> {
92        tracing::trace!(cache_type = "memory", "Clearing all cache entries");
93        self.cache.invalidate_all();
94        Ok(())
95    }
96
97    #[tracing::instrument(skip(self), level = "trace")]
98    async fn exists(&self, key: &str) -> bool {
99        let result = self.cache.contains_key(key);
100        tracing::trace!(cache_type = "memory", key = %key, exists = result, "Checking cache entry existence");
101        result
102    }
103}
104
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cache::Cache;
    use tokio::time::sleep;

    /// Default cache capacity for tests
    const DEFAULT_TEST_CACHE_CAPACITY: usize = 10;

    /// Test TTL duration in milliseconds
    const TEST_TTL_MS: u64 = 100;

    /// Test TTL wait duration in milliseconds
    const TEST_TTL_WAIT_MS: u64 = 150;

    /// Eviction-test capacity, deliberately smaller than the insert count.
    const EVICTION_TEST_CAPACITY: usize = 3;

    #[tokio::test]
    async fn test_memory_cache_basic() {
        let cache = MemoryCache::new(DEFAULT_TEST_CACHE_CAPACITY);

        // Test set and get
        cache
            .set("key1".to_string(), "value1".to_string(), None)
            .await
            .expect("set should succeed");
        assert_eq!(cache.get("key1").await, Some("value1".to_string()));

        // Test delete
        cache.delete("key1").await.expect("delete should succeed");
        assert_eq!(cache.get("key1").await, None);

        // Test clear
        cache
            .set("key2".to_string(), "value2".to_string(), None)
            .await
            .expect("set should succeed");
        cache.clear().await.expect("clear should succeed");
        // Wait for async invalidation to complete
        cache.cache.run_pending_tasks();
        assert_eq!(cache.get("key2").await, None);
    }

    #[tokio::test]
    async fn test_memory_cache_ttl() {
        let cache = MemoryCache::new(DEFAULT_TEST_CACHE_CAPACITY);

        // Test cache with TTL
        cache
            .set(
                "key1".to_string(),
                "value1".to_string(),
                Some(Duration::from_millis(TEST_TTL_MS)),
            )
            .await
            .expect("set should succeed");
        assert_eq!(cache.get("key1").await, Some("value1".to_string()));

        // Wait for expiration
        sleep(Duration::from_millis(TEST_TTL_WAIT_MS)).await;
        // Run pending tasks to ensure expiration is processed
        cache.cache.run_pending_tasks();
        assert_eq!(cache.get("key1").await, None);
    }

    #[tokio::test]
    async fn test_memory_cache_eviction() {
        // Test that the cache respects max capacity after maintenance runs.
        let cache = MemoryCache::new(EVICTION_TEST_CAPACITY);

        // Insert more entries than the cache can hold.
        for i in 0..5 {
            cache
                .set(format!("key{i}"), format!("value{i}"), None)
                .await
                .expect("set should succeed");
        }

        // `entry_count` is only an estimate until pending maintenance
        // (including evictions) has been processed.
        cache.cache.run_pending_tasks();

        // The previous assertion (`<= 5`) was vacuous — exactly five entries
        // were ever inserted. After maintenance, moka must honor the
        // configured max capacity, so assert against that instead.
        let entry_count = cache.cache.entry_count();
        assert!(
            entry_count <= EVICTION_TEST_CAPACITY as u64,
            "Entry count should be at most {EVICTION_TEST_CAPACITY}, got {entry_count}"
        );
    }

    #[tokio::test]
    async fn test_memory_cache_exists() {
        let cache = MemoryCache::new(DEFAULT_TEST_CACHE_CAPACITY);

        cache
            .set("key1".to_string(), "value1".to_string(), None)
            .await
            .expect("set should succeed");
        assert!(cache.exists("key1").await);
        assert!(!cache.exists("key2").await);
    }
}
205}