// ai_lib_rust/cache/manager.rs
use serde::{de::DeserializeOwned, Serialize};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::Duration;
use super::backend::CacheBackend;
use super::key::CacheKey;
use crate::Result;
10
/// Configuration for the cache layer.
#[derive(Debug, Clone)]
pub struct CacheConfig {
    /// TTL applied by `set` calls that do not pass an explicit TTL.
    pub default_ttl: Duration,
    /// When `false`, all cache operations become no-ops.
    pub enabled: bool,
    /// Serialized entries larger than this (in bytes) are skipped on `set`.
    pub max_entry_size: usize,
    /// Optional namespace prepended to every cache key.
    pub key_prefix: Option<String>,
}

impl Default for CacheConfig {
    /// One-hour TTL, caching enabled, 10 MiB entry cap, no key prefix.
    fn default() -> Self {
        Self {
            default_ttl: Duration::from_secs(3600),
            enabled: true,
            max_entry_size: 10 * 1024 * 1024,
            key_prefix: None,
        }
    }
}

impl CacheConfig {
    /// Equivalent to [`CacheConfig::default`].
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the TTL used when no explicit TTL is supplied.
    pub fn with_ttl(mut self, ttl: Duration) -> Self {
        self.default_ttl = ttl;
        self
    }

    /// Enables or disables the cache as a whole.
    pub fn with_enabled(mut self, enabled: bool) -> Self {
        self.enabled = enabled;
        self
    }

    /// Caps the serialized size of a single entry, in bytes.
    ///
    /// Added for consistency: every other public field already has a
    /// builder method; `max_entry_size` was previously only settable by
    /// constructing the struct directly.
    pub fn with_max_entry_size(mut self, max_entry_size: usize) -> Self {
        self.max_entry_size = max_entry_size;
        self
    }

    /// Namespaces every cache key under `prefix`.
    pub fn with_key_prefix(mut self, prefix: impl Into<String>) -> Self {
        self.key_prefix = Some(prefix.into());
        self
    }
}
24
/// Point-in-time snapshot of the cache's operation counters.
#[derive(Debug, Clone, Default)]
pub struct CacheStats {
    /// Lookups that found a usable entry.
    pub hits: u64,
    /// Lookups that found nothing.
    pub misses: u64,
    /// Successful writes.
    pub sets: u64,
    /// Deletions that removed an existing entry.
    pub deletes: u64,
    /// Backend or (de)serialization failures.
    pub errors: u64,
}

impl CacheStats {
    /// Fraction of lookups that were hits; `0.0` when nothing was looked up.
    pub fn hit_ratio(&self) -> f64 {
        match self.hits + self.misses {
            0 => 0.0,
            total => self.hits as f64 / total as f64,
        }
    }
}
31
32struct AtomicStats { hits: AtomicU64, misses: AtomicU64, sets: AtomicU64, deletes: AtomicU64, errors: AtomicU64 }
33impl AtomicStats {
34 fn new() -> Self { Self { hits: AtomicU64::new(0), misses: AtomicU64::new(0), sets: AtomicU64::new(0), deletes: AtomicU64::new(0), errors: AtomicU64::new(0) } }
35 fn to_stats(&self) -> CacheStats { CacheStats { hits: self.hits.load(Ordering::Relaxed), misses: self.misses.load(Ordering::Relaxed), sets: self.sets.load(Ordering::Relaxed), deletes: self.deletes.load(Ordering::Relaxed), errors: self.errors.load(Ordering::Relaxed) } }
36}
37
38pub struct CacheManager { config: CacheConfig, backend: Box<dyn CacheBackend>, stats: Arc<AtomicStats> }
39
40impl CacheManager {
41 pub fn new(config: CacheConfig, backend: Box<dyn CacheBackend>) -> Self { Self { config, backend, stats: Arc::new(AtomicStats::new()) } }
42
43 pub async fn get<T: DeserializeOwned>(&self, key: &CacheKey) -> Result<Option<T>> {
44 if !self.config.enabled { return Ok(None); }
45 let prefixed = self.prefix_key(key);
46 match self.backend.get(&prefixed).await {
47 Ok(Some(data)) => {
48 self.stats.hits.fetch_add(1, Ordering::Relaxed);
49 match serde_json::from_slice(&data) {
50 Ok(val) => Ok(Some(val)),
51 Err(_) => { self.stats.errors.fetch_add(1, Ordering::Relaxed); Ok(None) }
52 }
53 }
54 Ok(None) => { self.stats.misses.fetch_add(1, Ordering::Relaxed); Ok(None) }
55 Err(e) => { self.stats.errors.fetch_add(1, Ordering::Relaxed); Err(e) }
56 }
57 }
58
59 pub async fn set<T: Serialize>(&self, key: &CacheKey, value: &T) -> Result<()> { self.set_with_ttl(key, value, self.config.default_ttl).await }
60
61 pub async fn set_with_ttl<T: Serialize>(&self, key: &CacheKey, value: &T, ttl: Duration) -> Result<()> {
62 if !self.config.enabled { return Ok(()); }
63 let data = serde_json::to_vec(value)?;
64 if data.len() > self.config.max_entry_size { return Ok(()); }
65 let prefixed = self.prefix_key(key);
66 match self.backend.set(&prefixed, &data, ttl).await { Ok(()) => { self.stats.sets.fetch_add(1, Ordering::Relaxed); Ok(()) } Err(e) => { self.stats.errors.fetch_add(1, Ordering::Relaxed); Err(e) } }
67 }
68
69 pub async fn delete(&self, key: &CacheKey) -> Result<bool> {
70 if !self.config.enabled { return Ok(false); }
71 let prefixed = self.prefix_key(key);
72 match self.backend.delete(&prefixed).await { Ok(d) => { if d { self.stats.deletes.fetch_add(1, Ordering::Relaxed); } Ok(d) } Err(e) => { self.stats.errors.fetch_add(1, Ordering::Relaxed); Err(e) } }
73 }
74
75 pub fn stats(&self) -> CacheStats { self.stats.to_stats() }
76 pub fn backend_name(&self) -> &'static str { self.backend.name() }
77
78 fn prefix_key(&self, key: &CacheKey) -> CacheKey {
79 if let Some(ref p) = self.config.key_prefix { CacheKey::new(format!("{}:{}", p, key.hash)) } else { key.clone() }
80 }
81}