use crate::{HybridRebacEngine, RelationTuple, Result};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::Semaphore;

/// Strategy used to select which relation tuples to preload into the cache.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum WarmingStrategy {
    /// Warm the most frequently checked paths (currently approximated by recent tuples).
    HotPaths,
    /// Warm tuples belonging to a configured set of critical tenants.
    CriticalTenants,
    /// Warm tuples from a configured set of namespaces.
    Namespaces,
    /// Warm tuples written within the last `recent_days` days.
    Recent,
    /// Caller-driven warming; no tuples are selected automatically.
    Custom,
}

/// Configuration for a cache warming run.
#[derive(Debug, Clone)]
pub struct WarmingConfig {
    /// Strategy used to select tuples for preloading.
    pub strategy: WarmingStrategy,
    /// Maximum number of entries to preload.
    pub max_entries: usize,
    /// Maximum number of concurrent preload checks.
    pub concurrency: usize,
    /// Overall time budget for the warming run, in seconds (not yet enforced in this module).
    pub timeout_sec: u64,
    /// Skip warming when the cache already looks warm.
    pub skip_if_warm: bool,
    /// Namespaces to warm (used by `WarmingStrategy::Namespaces`).
    pub namespaces: Vec<String>,
    /// Tenant IDs to warm (used by `WarmingStrategy::CriticalTenants`).
    pub tenant_ids: Vec<String>,
    /// Look-back window in days (used by `WarmingStrategy::Recent`).
    pub recent_days: u32,
}

impl Default for WarmingConfig {
    fn default() -> Self {
        Self {
            strategy: WarmingStrategy::HotPaths,
            max_entries: 1000,
            concurrency: 10,
            timeout_sec: 60,
            skip_if_warm: true,
            namespaces: Vec::new(),
            tenant_ids: Vec::new(),
            recent_days: 7,
        }
    }
}

/// Statistics reported after a warming run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WarmingStats {
    /// Number of entries successfully preloaded.
    pub entries_loaded: usize,
    /// Number of entries that failed to preload.
    pub entries_failed: usize,
    /// Wall-clock duration of the warming run.
    pub duration: Duration,
    /// Fraction of attempted entries that were preloaded successfully (1.0 when warming was skipped).
    pub cache_hit_rate: f64,
}

/// Preloads relation tuples into the engine's check cache.
pub struct CacheWarmer {
    engine: Arc<HybridRebacEngine>,
}

impl CacheWarmer {
    /// Creates a warmer backed by the given engine.
    pub fn new(engine: Arc<HybridRebacEngine>) -> Self {
        Self { engine }
    }

    /// Runs cache warming according to `config` and returns run statistics.
    pub async fn warm(&self, config: WarmingConfig) -> Result<WarmingStats> {
        let start = Instant::now();

        // Optionally skip the run when the cache already looks warm.
        if config.skip_if_warm && self.is_cache_warm().await {
            tracing::info!("Cache already warm, skipping warming");
            return Ok(WarmingStats {
                entries_loaded: 0,
                entries_failed: 0,
                duration: start.elapsed(),
                cache_hit_rate: 1.0,
            });
        }

        // Select the tuples to preload based on the configured strategy.
        let tuples = match config.strategy {
            WarmingStrategy::HotPaths => self.load_hot_paths(config.max_entries).await?,
            WarmingStrategy::CriticalTenants => {
                self.load_critical_tenants(&config.tenant_ids, config.max_entries)
                    .await?
            }
            WarmingStrategy::Namespaces => {
                self.load_namespaces(&config.namespaces, config.max_entries)
                    .await?
            }
            WarmingStrategy::Recent => {
                self.load_recent_tuples(config.recent_days, config.max_entries)
                    .await?
            }
            WarmingStrategy::Custom => {
                // Custom warming selects nothing here; callers drive preloading themselves.
                Vec::new()
            }
        };

        let stats = self.preload_tuples(tuples, config.concurrency).await?;

        tracing::info!(
            "Cache warming complete: {} entries loaded, {} failed in {:?}",
            stats.entries_loaded,
            stats.entries_failed,
            stats.duration
        );

        Ok(stats)
    }

    /// Warms up to `limit` hot-path entries using default settings.
    pub async fn warm_hot_paths(&self, limit: usize) -> Result<WarmingStats> {
        let config = WarmingConfig {
            strategy: WarmingStrategy::HotPaths,
            max_entries: limit,
            ..Default::default()
        };
        self.warm(config).await
    }

    /// Warms cache entries for a single tenant.
    pub async fn warm_tenant(&self, tenant_id: &str) -> Result<WarmingStats> {
        let config = WarmingConfig {
            strategy: WarmingStrategy::CriticalTenants,
            tenant_ids: vec![tenant_id.to_string()],
            max_entries: 10_000,
            ..Default::default()
        };
        self.warm(config).await
    }

    /// Warms cache entries for a single namespace.
    pub async fn warm_namespace(&self, namespace: &str) -> Result<WarmingStats> {
        let config = WarmingConfig {
            strategy: WarmingStrategy::Namespaces,
            namespaces: vec![namespace.to_string()],
            max_entries: 10_000,
            ..Default::default()
        };
        self.warm(config).await
    }
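
    // Usage sketch for the convenience warmers above. The `engine` handle is an
    // assumed `Arc<HybridRebacEngine>` obtained elsewhere in the application;
    // exact construction depends on the surrounding crate.
    //
    //     let warmer = CacheWarmer::new(engine.clone());
    //     let stats = warmer.warm_tenant("tenant-123").await?;
    //     tracing::info!("warmed {} tenant entries", stats.entries_loaded);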

    async fn load_hot_paths(&self, limit: usize) -> Result<Vec<RelationTuple>> {
        // Dedicated hot-path tracking is not implemented yet; approximate hot
        // paths with recently written tuples.
        self.load_recent_tuples(7, limit).await
    }

    async fn load_critical_tenants(
        &self,
        tenant_ids: &[String],
        limit: usize,
    ) -> Result<Vec<RelationTuple>> {
        let mut all_tuples = Vec::new();

        for tenant_id in tenant_ids {
            tracing::info!("Loading tuples for tenant: {}", tenant_id);
            // Placeholder: tuple loading from storage is not implemented yet,
            // so no tuples are collected for this tenant.
        }

        all_tuples.truncate(limit);
        Ok(all_tuples)
    }

    async fn load_namespaces(
        &self,
        _namespaces: &[String],
        _limit: usize,
    ) -> Result<Vec<RelationTuple>> {
        // Placeholder: namespace-scoped tuple loading is not implemented yet.
        Ok(Vec::new())
    }

    async fn load_recent_tuples(&self, _days: u32, _limit: usize) -> Result<Vec<RelationTuple>> {
        // Placeholder: recency-based tuple loading is not implemented yet.
        Ok(Vec::new())
    }

    /// Issues a `check` for every tuple so the results land in the engine's
    /// cache, bounded by `concurrency` in-flight checks.
    async fn preload_tuples(
        &self,
        tuples: Vec<RelationTuple>,
        concurrency: usize,
    ) -> Result<WarmingStats> {
        let start = Instant::now();
        let total = tuples.len();
        let semaphore = Arc::new(Semaphore::new(concurrency));
        let mut tasks = Vec::new();

        let mut loaded = 0;
        let mut failed = 0;

        for tuple in tuples {
            let engine = self.engine.clone();
            let sem = semaphore.clone();

            let task = tokio::spawn(async move {
                // Hold a permit for the duration of the check to cap concurrency.
                let _permit = sem.acquire().await.expect("semaphore closed");
                engine
                    .check(crate::CheckRequest {
                        namespace: tuple.namespace.clone(),
                        object_id: tuple.object_id.clone(),
                        relation: tuple.relation.clone(),
                        subject: tuple.subject.clone(),
                        context: None,
                    })
                    .await
            });

            tasks.push(task);
        }

        for task in tasks {
            match task.await {
                Ok(Ok(_)) => loaded += 1,
                Ok(Err(e)) => {
                    tracing::warn!("Failed to preload tuple: {}", e);
                    failed += 1;
                }
                Err(e) => {
                    tracing::error!("Task panicked: {}", e);
                    failed += 1;
                }
            }
        }

        Ok(WarmingStats {
            entries_loaded: loaded,
            entries_failed: failed,
            duration: start.elapsed(),
            // Guard against division by zero when there was nothing to preload.
            cache_hit_rate: if total == 0 {
                1.0
            } else {
                loaded as f64 / total as f64
            },
        })
    }

    async fn is_cache_warm(&self) -> bool {
        // Placeholder heuristic: always report cold so warming proceeds.
        false
    }
}

/// Periodically re-runs cache warming on a fixed interval.
pub struct BackgroundWarmer {
    warmer: Arc<CacheWarmer>,
    config: WarmingConfig,
    interval: Duration,
}

impl BackgroundWarmer {
    /// Creates a background warmer that runs `config` every `interval_sec` seconds.
    pub fn new(engine: Arc<HybridRebacEngine>, config: WarmingConfig, interval_sec: u64) -> Self {
        Self {
            warmer: Arc::new(CacheWarmer::new(engine)),
            config,
            interval: Duration::from_secs(interval_sec),
        }
    }

    /// Spawns a background task that warms the cache on every interval tick.
    pub fn start(&self) {
        let warmer = self.warmer.clone();
        let config = self.config.clone();
        let interval = self.interval;

        tokio::spawn(async move {
            loop {
                // Wait for the interval to elapse before each warming pass.
                tokio::time::sleep(interval).await;
                tracing::info!("Starting background cache warming");

                match warmer.warm(config.clone()).await {
                    Ok(stats) => {
                        tracing::info!(
                            "Background warming complete: {} entries loaded in {:?}",
                            stats.entries_loaded,
                            stats.duration
                        );
                    }
                    Err(e) => {
                        tracing::error!("Background warming failed: {}", e);
                    }
                }
            }
        });
    }
}
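
// Usage sketch for `BackgroundWarmer`, assuming a Tokio runtime is already
// running and `engine` is an `Arc<HybridRebacEngine>` obtained elsewhere
// (both are assumptions about the surrounding application):
//
//     let warmer = BackgroundWarmer::new(engine, WarmingConfig::default(), 300);
//     warmer.start(); // re-warms the cache every 5 minutes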

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_warming_config_default() {
        let config = WarmingConfig::default();
        assert_eq!(config.strategy, WarmingStrategy::HotPaths);
        assert_eq!(config.max_entries, 1000);
        assert_eq!(config.concurrency, 10);
        assert!(config.skip_if_warm);
    }

    #[test]
    fn test_warming_stats() {
        let stats = WarmingStats {
            entries_loaded: 950,
            entries_failed: 50,
            duration: Duration::from_secs(10),
            cache_hit_rate: 0.95,
        };

        assert_eq!(stats.entries_loaded, 950);
        assert_eq!(stats.entries_failed, 50);
        assert_eq!(stats.cache_hit_rate, 0.95);
    }
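
    // Sketch test: checks that the struct-update pattern used by the
    // convenience warmers overrides only the intended fields while keeping
    // the remaining defaults. Uses only types defined in this module.
    #[test]
    fn test_warming_config_override() {
        let config = WarmingConfig {
            strategy: WarmingStrategy::Namespaces,
            namespaces: vec!["document".to_string()],
            max_entries: 10_000,
            ..Default::default()
        };

        assert_eq!(config.strategy, WarmingStrategy::Namespaces);
        assert_eq!(config.namespaces, vec!["document".to_string()]);
        assert_eq!(config.max_entries, 10_000);
        // Fields not overridden keep their defaults.
        assert_eq!(config.concurrency, 10);
        assert!(config.skip_if_warm);
    }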
}