multi_tier_cache/backends/
moka_cache.rs

1//! Moka Cache - In-Memory Cache Backend
2//!
3//! High-performance in-memory cache using Moka for hot data storage.
4
5use std::sync::Arc;
6use std::time::{Duration, Instant};
7use anyhow::Result;
8use serde_json;
9use moka::future::Cache;
10use std::sync::atomic::{AtomicU64, Ordering};
11
/// Cache entry pairing a JSON value with its absolute expiry deadline.
///
/// Moka itself only has a cache-wide TTL/TTI; storing `expires_at` inside the
/// entry is what gives this backend *per-key* TTL semantics (checked on read
/// by `MokaCache::get`).
#[derive(Debug, Clone)]
struct CacheEntry {
    // The cached payload, kept as dynamic JSON so any serializable type fits.
    value: serde_json::Value,
    // Absolute deadline (creation instant + requested TTL); compared against
    // `Instant::now()` in `is_expired`.
    expires_at: Instant,
}
18
19impl CacheEntry {
20    fn new(value: serde_json::Value, ttl: Duration) -> Self {
21        Self {
22            value,
23            expires_at: Instant::now() + ttl,
24        }
25    }
26
27    fn is_expired(&self) -> bool {
28        Instant::now() > self.expires_at
29    }
30}
31
/// Moka in-memory cache with per-key TTL support
///
/// This is the default L1 (hot tier) cache backend, providing:
/// - Fast in-memory access (< 1ms latency)
/// - Automatic eviction via LRU
/// - Per-key TTL support
/// - Statistics tracking
pub struct MokaCache {
    /// Moka cache instance; values carry their own expiry deadline so TTL can
    /// differ per key (Moka's builder-level TTL is only a safety net).
    cache: Cache<String, CacheEntry>,
    /// Hit counter (incremented in `get` on a live entry).
    // NOTE(review): the `Arc` wrappers around these counters are only needed
    // if they are cloned out of this struct; nothing visible here does that —
    // plain `AtomicU64` fields may suffice. Verify against external callers.
    hits: Arc<AtomicU64>,
    /// Miss counter (incremented on absent *or* expired entries).
    misses: Arc<AtomicU64>,
    /// Set counter (incremented in `set_with_ttl`).
    sets: Arc<AtomicU64>,
    /// Coalesced requests counter (requests that waited for an ongoing computation)
    // Not yet incremented anywhere in this file — presumably reserved for a
    // future request-coalescing path; hence the dead_code allowance.
    #[allow(dead_code)]
    coalesced_requests: Arc<AtomicU64>,
}
52
53impl MokaCache {
54    /// Create new Moka cache
55    pub async fn new() -> Result<Self> {
56        println!("  🚀 Initializing Moka Cache...");
57
58        let cache = Cache::builder()
59            .max_capacity(2000) // 2000 entries max
60            .time_to_live(Duration::from_secs(3600)) // 1 hour max TTL as safety net
61            .time_to_idle(Duration::from_secs(120)) // 2 minutes idle time
62            .build();
63
64        println!("  ✅ Moka Cache initialized with 2000 capacity, per-key TTL support");
65
66        Ok(Self {
67            cache,
68            hits: Arc::new(AtomicU64::new(0)),
69            misses: Arc::new(AtomicU64::new(0)),
70            sets: Arc::new(AtomicU64::new(0)),
71            coalesced_requests: Arc::new(AtomicU64::new(0)),
72        })
73    }
74
75    /// Get value from Moka cache
76    pub async fn get(&self, key: &str) -> Option<serde_json::Value> {
77        match self.cache.get(key).await {
78            Some(entry) => {
79                if entry.is_expired() {
80                    // Remove expired entry
81                    let _ = self.cache.remove(key).await;
82                    self.misses.fetch_add(1, Ordering::Relaxed);
83                    None
84                } else {
85                    self.hits.fetch_add(1, Ordering::Relaxed);
86                    Some(entry.value)
87                }
88            }
89            None => {
90                self.misses.fetch_add(1, Ordering::Relaxed);
91                None
92            }
93        }
94    }
95
96    /// Set value with custom TTL
97    pub async fn set_with_ttl(&self, key: &str, value: serde_json::Value, ttl: Duration) -> Result<()> {
98        let entry = CacheEntry::new(value, ttl);
99        self.cache.insert(key.to_string(), entry).await;
100        self.sets.fetch_add(1, Ordering::Relaxed);
101        println!("💾 [Moka] Cached '{}' with TTL {:?}", key, ttl);
102        Ok(())
103    }
104
105    /// Remove value from cache
106    pub async fn remove(&self, key: &str) -> Result<()> {
107        self.cache.remove(key).await;
108        Ok(())
109    }
110
111    /// Health check
112    pub async fn health_check(&self) -> bool {
113        // Test basic functionality with custom TTL
114        let test_key = "health_check_moka";
115        let test_value = serde_json::json!({"test": true});
116
117        match self.set_with_ttl(test_key, test_value.clone(), Duration::from_secs(60)).await {
118            Ok(_) => {
119                match self.get(test_key).await {
120                    Some(retrieved) => {
121                        let _ = self.remove(test_key).await;
122                        retrieved == test_value
123                    }
124                    None => false
125                }
126            }
127            Err(_) => false
128        }
129    }
130}
131
132// ===== Trait Implementations =====
133
134use crate::traits::CacheBackend;
135use async_trait::async_trait;
136
137/// Implement CacheBackend trait for MokaCache
138///
139/// This allows MokaCache to be used as a pluggable backend in the multi-tier cache system.
140#[async_trait]
141impl CacheBackend for MokaCache {
142    async fn get(&self, key: &str) -> Option<serde_json::Value> {
143        MokaCache::get(self, key).await
144    }
145
146    async fn set_with_ttl(
147        &self,
148        key: &str,
149        value: serde_json::Value,
150        ttl: Duration,
151    ) -> Result<()> {
152        MokaCache::set_with_ttl(self, key, value, ttl).await
153    }
154
155    async fn remove(&self, key: &str) -> Result<()> {
156        MokaCache::remove(self, key).await
157    }
158
159    async fn health_check(&self) -> bool {
160        MokaCache::health_check(self).await
161    }
162
163    fn name(&self) -> &str {
164        "Moka"
165    }
166}
167
/// Cache statistics snapshot.
// NOTE(review): declared but never constructed in this file (hence the
// dead_code allowance) — presumably intended to be returned by a stats
// accessor on `MokaCache`; confirm against the rest of the crate.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Successful lookups of live (non-expired) entries.
    pub hits: u64,
    /// Lookups that found nothing, or found an expired entry.
    pub misses: u64,
    /// Number of insertions performed.
    pub sets: u64,
    /// Requests that waited on an in-flight computation for the same key.
    pub coalesced_requests: u64,
    /// Current number of entries resident in the cache.
    pub size: u64,
}
177}