nt_memory/cache/hot.rs

//! High-performance hot cache using DashMap
//!
//! Sharded concurrent hashmap with TTL expiration and LRU eviction
//!
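//! # Example
//!
//! A minimal usage sketch; the crate/module path below is assumed from the
//! file location (`nt_memory/cache/hot.rs`), not confirmed by the source.
//!
//! ```ignore
//! use nt_memory::cache::hot::{CacheConfig, HotCache};
//!
//! let cache = HotCache::new(CacheConfig::default());
//! cache.insert("session:42", vec![1, 2, 3]);
//! assert!(cache.get("session:42").is_some());
//! println!("hit rate: {:.2}", cache.hit_rate());
//! ```
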
use dashmap::DashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};

/// Cache configuration
#[derive(Debug, Clone)]
pub struct CacheConfig {
    /// Maximum number of entries
    pub max_entries: usize,

    /// Entry time-to-live
    pub ttl: Duration,

    /// Enable access time tracking
    pub track_access: bool,
}

impl Default for CacheConfig {
    fn default() -> Self {
        Self {
            max_entries: 100_000,
            ttl: Duration::from_secs(3600), // 1 hour
            track_access: true,
        }
    }
}

/// Cache entry with metadata
#[derive(Debug, Clone)]
pub struct CacheEntry {
    /// Entry data
    pub data: Vec<u8>,

    /// Creation timestamp
    pub created_at: Instant,

    /// Last access timestamp
    pub accessed_at: Instant,

    /// Access count
    pub access_count: u32,
}

impl CacheEntry {
    fn new(data: Vec<u8>) -> Self {
        let now = Instant::now();
        Self {
            data,
            created_at: now,
            accessed_at: now,
            access_count: 1,
        }
    }

    fn access(&mut self) {
        self.accessed_at = Instant::now();
        self.access_count = self.access_count.saturating_add(1);
    }

    fn is_expired(&self, ttl: Duration) -> bool {
        self.created_at.elapsed() > ttl
    }
}

/// High-performance hot cache
pub struct HotCache {
    /// Sharded concurrent map
    map: Arc<DashMap<String, CacheEntry>>,

    /// Configuration
    config: CacheConfig,

    /// Statistics
    hits: AtomicU64,
    misses: AtomicU64,
    evictions: AtomicU64,
    size: AtomicUsize,
}

impl HotCache {
    /// Create new hot cache
    pub fn new(config: CacheConfig) -> Self {
        Self {
            map: Arc::new(DashMap::with_capacity(config.max_entries)),
            config,
            hits: AtomicU64::new(0),
            misses: AtomicU64::new(0),
            evictions: AtomicU64::new(0),
            size: AtomicUsize::new(0),
        }
    }

    /// Get entry from cache (<1μs)
    pub fn get(&self, key: &str) -> Option<CacheEntry> {
        if let Some(mut entry) = self.map.get_mut(key) {
            // Check expiration
            if entry.is_expired(self.config.ttl) {
                drop(entry); // Release lock before removal
                self.map.remove(key);
                self.misses.fetch_add(1, Ordering::Relaxed);
                self.size.fetch_sub(1, Ordering::Relaxed);
                return None;
            }

            // Update access metadata
            if self.config.track_access {
                entry.access();
            }

            self.hits.fetch_add(1, Ordering::Relaxed);
            Some(entry.clone())
        } else {
            self.misses.fetch_add(1, Ordering::Relaxed);
            None
        }
    }

    /// Insert entry into cache (<2μs)
    pub fn insert(&self, key: &str, data: Vec<u8>) {
        // Check if we need to evict
        if self.size.load(Ordering::Relaxed) >= self.config.max_entries {
            self.evict_lru();
        }

        let entry = CacheEntry::new(data);

        if self.map.insert(key.to_string(), entry).is_none() {
            self.size.fetch_add(1, Ordering::Relaxed);
        }
    }

    /// Remove entry from cache
    pub fn remove(&self, key: &str) -> Option<CacheEntry> {
        let result = self.map.remove(key).map(|(_, v)| v);
        if result.is_some() {
            self.size.fetch_sub(1, Ordering::Relaxed);
        }
        result
    }

    /// Clear all entries
    pub fn clear(&self) {
        // Record the number of dropped entries before resetting the counter,
        // so the eviction statistic reflects what was actually cleared
        let cleared = self.size.swap(0, Ordering::Relaxed);
        self.map.clear();
        self.evictions.fetch_add(cleared as u64, Ordering::Relaxed);
    }

    /// Number of entries
    pub fn len(&self) -> usize {
        self.size.load(Ordering::Relaxed)
    }

    /// Check if cache is empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Cache hit rate
    pub fn hit_rate(&self) -> f64 {
        let hits = self.hits.load(Ordering::Relaxed);
        let misses = self.misses.load(Ordering::Relaxed);
        let total = hits + misses;

        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64
        }
    }

    /// Evict least recently used entry
    fn evict_lru(&self) {
        // Find oldest accessed entry
        let mut oldest_key: Option<String> = None;
        let mut oldest_time = Instant::now();

        for entry in self.map.iter() {
            if entry.accessed_at < oldest_time {
                oldest_time = entry.accessed_at;
                oldest_key = Some(entry.key().clone());
            }
        }

        // Evict if found
        if let Some(key) = oldest_key {
            self.map.remove(&key);
            self.evictions.fetch_add(1, Ordering::Relaxed);
            self.size.fetch_sub(1, Ordering::Relaxed);
        }
    }

    /// Get cache statistics
    pub fn stats(&self) -> CacheStats {
        CacheStats {
            entries: self.len(),
            hits: self.hits.load(Ordering::Relaxed),
            misses: self.misses.load(Ordering::Relaxed),
            evictions: self.evictions.load(Ordering::Relaxed),
            hit_rate: self.hit_rate(),
        }
    }
}

/// Cache statistics
#[derive(Debug, Clone)]
pub struct CacheStats {
    pub entries: usize,
    pub hits: u64,
    pub misses: u64,
    pub evictions: u64,
    pub hit_rate: f64,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cache_basic_operations() {
        let cache = HotCache::new(CacheConfig::default());

        // Insert
        cache.insert("key1", vec![1, 2, 3]);
        assert_eq!(cache.len(), 1);

        // Get
        let entry = cache.get("key1");
        assert!(entry.is_some());
        assert_eq!(entry.unwrap().data, vec![1, 2, 3]);

        // Remove
        let removed = cache.remove("key1");
        assert!(removed.is_some());
        assert_eq!(cache.len(), 0);
    }

    #[test]
    fn test_cache_expiration() {
        let config = CacheConfig {
            ttl: Duration::from_millis(100),
            ..Default::default()
        };
        let cache = HotCache::new(config);

        cache.insert("key1", vec![1, 2, 3]);
        assert!(cache.get("key1").is_some());

        // Wait for expiration
        std::thread::sleep(Duration::from_millis(150));

        assert!(cache.get("key1").is_none());
    }

    #[test]
    fn test_cache_lru_eviction() {
        let config = CacheConfig {
            max_entries: 2,
            ..Default::default()
        };
        let cache = HotCache::new(config);

        cache.insert("key1", vec![1]);
        cache.insert("key2", vec![2]);

        // Access key1 to make it more recent
        cache.get("key1");

        // Insert key3, should evict key2
        cache.insert("key3", vec![3]);

        assert!(cache.get("key1").is_some());
        assert!(cache.get("key2").is_none());
        assert!(cache.get("key3").is_some());
    }

    #[test]
    fn test_cache_concurrent_access() {
        use std::sync::Arc;
        use std::thread;

        let cache = Arc::new(HotCache::new(CacheConfig::default()));
        let mut handles = vec![];

        // Spawn 10 threads doing concurrent operations
        for i in 0..10 {
            let cache = cache.clone();
            let handle = thread::spawn(move || {
                for j in 0..100 {
                    let key = format!("key_{}_{}", i, j);
                    cache.insert(&key, vec![i as u8, j as u8]);
                    cache.get(&key);
                }
            });
            handles.push(handle);
        }

        // Wait for all threads
        for handle in handles {
            handle.join().unwrap();
        }

        // Verify no data corruption
        assert!(cache.len() > 0);
        assert!(cache.hit_rate() > 0.0);
    }

    #[test]
    fn test_cache_hit_rate() {
        let cache = HotCache::new(CacheConfig::default());

        cache.insert("key1", vec![1]);

        // Generate hits
        for _ in 0..9 {
            cache.get("key1");
        }

        // Generate miss
        cache.get("key2");

        // Hit rate should be 90%
        let hit_rate = cache.hit_rate();
        assert!((hit_rate - 0.9).abs() < 0.01);
    }
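
    // Sketch of a regression test for clear(): assumes the eviction counter
    // should include entries dropped by clear(), matching the counter ordering
    // used in clear() above.
    #[test]
    fn test_cache_clear_resets_size_and_counts_evictions() {
        let cache = HotCache::new(CacheConfig::default());

        cache.insert("key1", vec![1]);
        cache.insert("key2", vec![2]);
        assert_eq!(cache.len(), 2);

        cache.clear();

        assert!(cache.is_empty());
        assert_eq!(cache.stats().evictions, 2);
    }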
}