//! Cache manager: applies config policy, serializes values, and tracks stats
//! over a pluggable backend. (ai_lib_rust/cache/manager.rs)

use super::backend::CacheBackend;
use super::key::CacheKey;
use crate::Result;
use serde::{de::DeserializeOwned, Serialize};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::Duration;

/// Tunable policy for a cache manager.
#[derive(Debug, Clone)]
pub struct CacheConfig {
    /// TTL applied by `CacheManager::set` when no explicit TTL is given.
    pub default_ttl: Duration,
    /// When `false`, get/set/delete become no-ops (get returns `None`).
    pub enabled: bool,
    /// Serialized entries larger than this many bytes are silently not stored.
    pub max_entry_size: usize,
    /// Optional namespace prepended (with `:`) to every key's hash.
    pub key_prefix: Option<String>,
}
18
19impl Default for CacheConfig {
20 fn default() -> Self {
21 Self {
22 default_ttl: Duration::from_secs(3600),
23 enabled: true,
24 max_entry_size: 10 * 1024 * 1024,
25 key_prefix: None,
26 }
27 }
28}
29
30impl CacheConfig {
31 pub fn new() -> Self {
32 Self::default()
33 }
34 pub fn with_ttl(mut self, ttl: Duration) -> Self {
35 self.default_ttl = ttl;
36 self
37 }
38 pub fn with_enabled(mut self, enabled: bool) -> Self {
39 self.enabled = enabled;
40 self
41 }
42 pub fn with_key_prefix(mut self, prefix: impl Into<String>) -> Self {
43 self.key_prefix = Some(prefix.into());
44 self
45 }
46}
47
/// Point-in-time snapshot of the cache's operation counters.
#[derive(Debug, Clone, Default)]
pub struct CacheStats {
    // Lookups that returned stored data.
    pub hits: u64,
    // Lookups that found no entry.
    pub misses: u64,
    // Successful writes.
    pub sets: u64,
    // Deletions that actually removed an existing entry.
    pub deletes: u64,
    // Backend failures plus deserialization failures on read.
    pub errors: u64,
}
56
57impl CacheStats {
58 pub fn hit_ratio(&self) -> f64 {
59 let total = self.hits + self.misses;
60 if total == 0 {
61 0.0
62 } else {
63 self.hits as f64 / total as f64
64 }
65 }
66}
67
// Lock-free counters behind the manager's `Arc`; snapshotted into a plain
// `CacheStats` by `to_stats`. All access uses `Ordering::Relaxed` — the
// counters are independent tallies with no cross-field invariant.
struct AtomicStats {
    hits: AtomicU64,
    misses: AtomicU64,
    sets: AtomicU64,
    deletes: AtomicU64,
    errors: AtomicU64,
}
75impl AtomicStats {
76 fn new() -> Self {
77 Self {
78 hits: AtomicU64::new(0),
79 misses: AtomicU64::new(0),
80 sets: AtomicU64::new(0),
81 deletes: AtomicU64::new(0),
82 errors: AtomicU64::new(0),
83 }
84 }
85 fn to_stats(&self) -> CacheStats {
86 CacheStats {
87 hits: self.hits.load(Ordering::Relaxed),
88 misses: self.misses.load(Ordering::Relaxed),
89 sets: self.sets.load(Ordering::Relaxed),
90 deletes: self.deletes.load(Ordering::Relaxed),
91 errors: self.errors.load(Ordering::Relaxed),
92 }
93 }
94}
95
/// Facade over a pluggable `CacheBackend`: applies `CacheConfig` policy,
/// JSON-(de)serializes values, and tallies hit/miss/error statistics.
pub struct CacheManager {
    config: CacheConfig,
    // Trait object so the backend is chosen at runtime.
    backend: Box<dyn CacheBackend>,
    // NOTE(review): the Arc is only useful if stats are shared outside this
    // struct — nothing in this file clones it; confirm or simplify.
    stats: Arc<AtomicStats>,
}
101
102impl CacheManager {
103 pub fn new(config: CacheConfig, backend: Box<dyn CacheBackend>) -> Self {
104 Self {
105 config,
106 backend,
107 stats: Arc::new(AtomicStats::new()),
108 }
109 }
110
111 pub async fn get<T: DeserializeOwned>(&self, key: &CacheKey) -> Result<Option<T>> {
112 if !self.config.enabled {
113 return Ok(None);
114 }
115 let prefixed = self.prefix_key(key);
116 match self.backend.get(&prefixed).await {
117 Ok(Some(data)) => {
118 self.stats.hits.fetch_add(1, Ordering::Relaxed);
119 match serde_json::from_slice(&data) {
120 Ok(val) => Ok(Some(val)),
121 Err(_) => {
122 self.stats.errors.fetch_add(1, Ordering::Relaxed);
123 Ok(None)
124 }
125 }
126 }
127 Ok(None) => {
128 self.stats.misses.fetch_add(1, Ordering::Relaxed);
129 Ok(None)
130 }
131 Err(e) => {
132 self.stats.errors.fetch_add(1, Ordering::Relaxed);
133 Err(e)
134 }
135 }
136 }
137
138 pub async fn set<T: Serialize>(&self, key: &CacheKey, value: &T) -> Result<()> {
139 self.set_with_ttl(key, value, self.config.default_ttl).await
140 }
141
142 pub async fn set_with_ttl<T: Serialize>(
143 &self,
144 key: &CacheKey,
145 value: &T,
146 ttl: Duration,
147 ) -> Result<()> {
148 if !self.config.enabled {
149 return Ok(());
150 }
151 let data = serde_json::to_vec(value)?;
152 if data.len() > self.config.max_entry_size {
153 return Ok(());
154 }
155 let prefixed = self.prefix_key(key);
156 match self.backend.set(&prefixed, &data, ttl).await {
157 Ok(()) => {
158 self.stats.sets.fetch_add(1, Ordering::Relaxed);
159 Ok(())
160 }
161 Err(e) => {
162 self.stats.errors.fetch_add(1, Ordering::Relaxed);
163 Err(e)
164 }
165 }
166 }
167
168 pub async fn delete(&self, key: &CacheKey) -> Result<bool> {
169 if !self.config.enabled {
170 return Ok(false);
171 }
172 let prefixed = self.prefix_key(key);
173 match self.backend.delete(&prefixed).await {
174 Ok(d) => {
175 if d {
176 self.stats.deletes.fetch_add(1, Ordering::Relaxed);
177 }
178 Ok(d)
179 }
180 Err(e) => {
181 self.stats.errors.fetch_add(1, Ordering::Relaxed);
182 Err(e)
183 }
184 }
185 }
186
187 pub fn stats(&self) -> CacheStats {
188 self.stats.to_stats()
189 }
190 pub fn backend_name(&self) -> &'static str {
191 self.backend.name()
192 }
193
194 fn prefix_key(&self, key: &CacheKey) -> CacheKey {
195 if let Some(ref p) = self.config.key_prefix {
196 CacheKey::new(format!("{}:{}", p, key.hash))
197 } else {
198 key.clone()
199 }
200 }
201}