multi_tier_cache/
l1_cache.rs

//! L1 Cache - Moka In-Memory Cache
//!
//! High-performance in-memory cache using Moka for hot data storage.
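//!
//! Per-key TTL is implemented on top of Moka by storing an `expires_at` instant in
//! each `CacheEntry` and checking it on `get`; the builder-level TTL/TTI settings in
//! `L1Cache::new` act only as a cache-wide safety net.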

use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};

use anyhow::Result;
use moka::future::Cache;

/// Cache entry with TTL information
#[derive(Debug, Clone)]
struct CacheEntry {
    value: serde_json::Value,
    expires_at: Instant,
}

impl CacheEntry {
    fn new(value: serde_json::Value, ttl: Duration) -> Self {
        Self {
            value,
            expires_at: Instant::now() + ttl,
        }
    }

    fn is_expired(&self) -> bool {
        Instant::now() > self.expires_at
    }
}

/// L1 Cache using Moka with per-key TTL support
pub struct L1Cache {
    /// Moka cache instance
    cache: Cache<String, CacheEntry>,
    /// Hit counter
    hits: Arc<AtomicU64>,
    /// Miss counter
    misses: Arc<AtomicU64>,
    /// Set counter
    sets: Arc<AtomicU64>,
    /// Coalesced requests counter (requests that waited for an ongoing computation)
    #[allow(dead_code)]
    coalesced_requests: Arc<AtomicU64>,
}

impl L1Cache {
    /// Create new L1 cache
    pub async fn new() -> Result<Self> {
        println!("  🚀 Initializing L1 Cache (Moka)...");

        let cache = Cache::builder()
            .max_capacity(2000) // 2000 entries max
            .time_to_live(Duration::from_secs(3600)) // 1 hour max TTL as safety net
            .time_to_idle(Duration::from_secs(120)) // 2 minutes idle time
            .build();

        println!("  ✅ L1 Cache initialized with 2000 capacity, per-key TTL support");

        Ok(Self {
            cache,
            hits: Arc::new(AtomicU64::new(0)),
            misses: Arc::new(AtomicU64::new(0)),
            sets: Arc::new(AtomicU64::new(0)),
            coalesced_requests: Arc::new(AtomicU64::new(0)),
        })
    }

    /// Get value from L1 cache
    pub async fn get(&self, key: &str) -> Option<serde_json::Value> {
        match self.cache.get(key).await {
            Some(entry) => {
                if entry.is_expired() {
                    // Remove expired entry
                    let _ = self.cache.remove(key).await;
                    self.misses.fetch_add(1, Ordering::Relaxed);
                    None
                } else {
                    self.hits.fetch_add(1, Ordering::Relaxed);
                    Some(entry.value)
                }
            }
            None => {
                self.misses.fetch_add(1, Ordering::Relaxed);
                None
            }
        }
    }

    /// Set a value with a per-key TTL
    pub async fn set_with_ttl(&self, key: &str, value: serde_json::Value, ttl: Duration) -> Result<()> {
        let entry = CacheEntry::new(value, ttl);
        self.cache.insert(key.to_string(), entry).await;
        self.sets.fetch_add(1, Ordering::Relaxed);
        println!("💾 [L1] Cached '{}' with TTL {:?}", key, ttl);
        Ok(())
    }

    /// Remove value from cache
    pub async fn remove(&self, key: &str) -> Result<()> {
        let _ = self.cache.remove(key).await;
        Ok(())
    }

    /// Health check
    pub async fn health_check(&self) -> bool {
        // Test basic functionality with custom TTL
        let test_key = "health_check_l1";
        let test_value = serde_json::json!({"test": true});

        match self.set_with_ttl(test_key, test_value.clone(), Duration::from_secs(60)).await {
            Ok(_) => {
                match self.get(test_key).await {
                    Some(retrieved) => {
                        let _ = self.remove(test_key).await;
                        retrieved == test_value
                    }
                    None => false
                }
            }
            Err(_) => false
        }
    }
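
    /// Snapshot of the cache counters. This accessor is a sketch added for
    /// illustration (not in the original file); it assumes `CacheStats` below is
    /// the intended return type and that Moka's approximate `entry_count()` is an
    /// acceptable value for `size`.
    #[allow(dead_code)]
    pub fn stats(&self) -> CacheStats {
        CacheStats {
            hits: self.hits.load(Ordering::Relaxed),
            misses: self.misses.load(Ordering::Relaxed),
            sets: self.sets.load(Ordering::Relaxed),
            coalesced_requests: self.coalesced_requests.load(Ordering::Relaxed),
            size: self.cache.entry_count(),
        }
    }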
124
125    
126
127
128}

/// Cache statistics
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct CacheStats {
    pub hits: u64,
    pub misses: u64,
    pub sets: u64,
    pub coalesced_requests: u64,
    pub size: u64,
}
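
// A minimal usage sketch, added for illustration (not part of the original file).
// It assumes the crate uses Tokio for its async runtime (with the `macros`, `rt`,
// and `time` features available in tests); the key and JSON payload below are
// made-up examples.
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn set_get_and_expire() {
        let cache = L1Cache::new().await.expect("cache init");

        // Store a value with a short per-key TTL, then read it back while it is fresh.
        cache
            .set_with_ttl("user:42", serde_json::json!({"name": "demo"}), Duration::from_millis(50))
            .await
            .expect("set");
        assert!(cache.get("user:42").await.is_some());

        // Once the per-key TTL elapses, `get` treats the entry as expired and reports a miss.
        tokio::time::sleep(Duration::from_millis(60)).await;
        assert!(cache.get("user:42").await.is_none());
    }
}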