// crates_docs/cache/memory.rs

//! Memory cache implementation
//!
//! Memory cache using `moka::sync::Cache` with `TinyLFU` eviction policy.
//! This provides better performance and hit rate than simple LRU.

use std::sync::Arc;
use std::time::Duration;

/// A single cached value together with its optional expiration.
///
/// The payload is stored as `Arc<str>` so a cache hit hands out a cheap
/// reference-counted clone instead of copying the string.
#[derive(Debug, Clone)]
struct CacheEntry {
    /// The cached string payload.
    value: Arc<str>,
    /// Per-entry time-to-live; `None` means no time-based expiration.
    ttl: Option<Duration>,
}

/// Zero-sized expiration policy handed to the moka builder.
///
/// Rather than a single cache-wide TTL, the expiry hook reads the TTL
/// stored on each entry, enabling per-entry expiration.
#[derive(Default, Debug, Clone)]
struct CacheExpiry;

20impl moka::Expiry<String, CacheEntry> for CacheExpiry {
21    fn expire_after_create(
22        &self,
23        _key: &String,
24        value: &CacheEntry,
25        _created_at: std::time::Instant,
26    ) -> Option<Duration> {
27        value.ttl
28    }
29}
30
31/// Memory cache implementation using `moka::sync::Cache`
32///
33/// Features:
34/// - Lock-free concurrent access
35/// - `TinyLFU` eviction policy (better hit rate than LRU)
36/// - Per-entry TTL support via Expiry trait
37/// - Automatic expiration cleanup
38pub struct MemoryCache {
39    cache: moka::sync::Cache<String, CacheEntry>,
40}
41
42impl MemoryCache {
43    /// Create a new memory cache
44    ///
45    /// # Arguments
46    /// * `max_size` - Maximum number of cache entries
47    #[must_use]
48    pub fn new(max_size: usize) -> Self {
49        Self {
50            cache: moka::sync::Cache::builder()
51                .max_capacity(max_size as u64)
52                .expire_after(CacheExpiry)
53                .build(),
54        }
55    }
56
57    /// Run pending maintenance tasks on the cache.
58    /// This is primarily used in tests to ensure TTL expiration is processed.
59    ///
60    /// # Note
61    /// This method is only available in test builds via `#[cfg(test)]`.
62    #[cfg(test)]
63    pub fn run_pending_tasks(&self) {
64        self.cache.run_pending_tasks();
65    }
66
67    /// Get the number of entries in the cache.
68    /// This is primarily used in tests to verify cache state.
69    ///
70    /// # Note
71    /// This method is only available in test builds via `#[cfg(test)]`.
72    #[cfg(test)]
73    #[must_use]
74    pub fn entry_count(&self) -> usize {
75        usize::try_from(self.cache.entry_count()).expect("cache entry count should fit in usize")
76    }
77}
78
79#[async_trait::async_trait]
80impl super::Cache for MemoryCache {
81    #[tracing::instrument(skip(self), level = "trace")]
82    async fn get(&self, key: &str) -> Option<Arc<str>> {
83        let result = self.cache.get(key).map(|entry| Arc::clone(&entry.value));
84        if result.is_some() {
85            tracing::trace!(cache_type = "memory", key = %key, "Cache hit");
86        } else {
87            tracing::trace!(cache_type = "memory", key = %key, "Cache miss");
88        }
89        result
90    }
91
92    #[tracing::instrument(skip(self), level = "trace")]
93    async fn set(
94        &self,
95        key: String,
96        value: String,
97        ttl: Option<Duration>,
98    ) -> crate::error::Result<()> {
99        let entry = CacheEntry {
100            value: Arc::from(value.into_boxed_str()),
101            ttl,
102        };
103        tracing::trace!(cache_type = "memory", key = %key, "Setting cache entry");
104        self.cache.insert(key, entry);
105        Ok(())
106    }
107
108    #[tracing::instrument(skip(self), level = "trace")]
109    async fn delete(&self, key: &str) -> crate::error::Result<()> {
110        tracing::trace!(cache_type = "memory", key = %key, "Deleting cache entry");
111        self.cache.invalidate(key);
112        Ok(())
113    }
114
115    #[tracing::instrument(skip(self), level = "trace")]
116    async fn clear(&self) -> crate::error::Result<()> {
117        tracing::trace!(cache_type = "memory", "Clearing all cache entries");
118        self.cache.invalidate_all();
119        Ok(())
120    }
121
122    #[tracing::instrument(skip(self), level = "trace")]
123    async fn exists(&self, key: &str) -> bool {
124        let result = self.cache.contains_key(key);
125        tracing::trace!(cache_type = "memory", key = %key, exists = result, "Checking cache entry existence");
126        result
127    }
128}
129
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cache::Cache;
    use tokio::time::sleep;

    /// Default cache capacity for tests
    const DEFAULT_TEST_CACHE_CAPACITY: usize = 10;

    /// Test TTL duration in milliseconds
    const TEST_TTL_MS: u64 = 100;

    /// Test TTL wait duration in milliseconds
    const TEST_TTL_WAIT_MS: u64 = 150;

    #[tokio::test]
    async fn test_memory_cache_basic() {
        let cache = MemoryCache::new(DEFAULT_TEST_CACHE_CAPACITY);

        // Store a value and read it back.
        cache
            .set("key1".to_string(), "value1".to_string(), None)
            .await
            .expect("set should succeed");
        let hit = cache.get("key1").await;
        assert!(hit.is_some());
        assert_eq!(hit.unwrap().as_ref(), "value1");

        // A deleted key must no longer be readable.
        cache.delete("key1").await.expect("delete should succeed");
        assert_eq!(cache.get("key1").await, None);

        // Clearing removes every remaining entry.
        cache
            .set("key2".to_string(), "value2".to_string(), None)
            .await
            .expect("set should succeed");
        cache.clear().await.expect("clear should succeed");
        // Wait for async invalidation to complete
        cache.run_pending_tasks();
        assert_eq!(cache.get("key2").await, None);
    }

    #[tokio::test]
    async fn test_memory_cache_ttl() {
        let cache = MemoryCache::new(DEFAULT_TEST_CACHE_CAPACITY);

        // An entry with a TTL is readable before it expires...
        cache
            .set(
                "key1".to_string(),
                "value1".to_string(),
                Some(Duration::from_millis(TEST_TTL_MS)),
            )
            .await
            .expect("set should succeed");
        let hit = cache.get("key1").await;
        assert!(hit.is_some());
        assert_eq!(hit.unwrap().as_ref(), "value1");

        // ...and gone once the TTL has elapsed.
        sleep(Duration::from_millis(TEST_TTL_WAIT_MS)).await;
        // Run pending tasks to ensure expiration is processed
        cache.run_pending_tasks();
        assert_eq!(cache.get("key1").await, None);
    }

    #[tokio::test]
    async fn test_memory_cache_eviction() {
        // Test that cache respects max capacity
        // Note: moka uses TinyLFU algorithm which may reject new entries
        // based on frequency, so we test capacity constraint differently
        let cache = MemoryCache::new(3);

        // Insert more entries than the cache can hold.
        for idx in 0..5 {
            cache
                .set(format!("key{idx}"), format!("value{idx}"), None)
                .await
                .expect("set should succeed");
        }

        // Run pending tasks to ensure eviction is processed
        cache.run_pending_tasks();

        // Cache should not exceed max capacity significantly
        let entry_count = cache.entry_count();
        assert!(
            entry_count <= 5,
            "Entry count should be at most 5, got {entry_count}"
        );
    }

    #[tokio::test]
    async fn test_memory_cache_exists() {
        let cache = MemoryCache::new(DEFAULT_TEST_CACHE_CAPACITY);

        cache
            .set("key1".to_string(), "value1".to_string(), None)
            .await
            .expect("set should succeed");
        assert!(cache.exists("key1").await);
        assert!(!cache.exists("key2").await);
    }
}