// things3_core/cache.rs
1//! Caching layer for frequently accessed Things 3 data
2
3use crate::models::{Area, Project, Task};
4use anyhow::Result;
5use chrono::{DateTime, Utc};
6use moka::future::Cache;
7use parking_lot::RwLock;
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10use std::sync::Arc;
11use std::time::Duration;
12use uuid::Uuid;
13
/// Cache invalidation strategy.
///
/// NOTE(review): this is stored in `CacheConfig::invalidation_strategy` but
/// never branched on anywhere in this file — confirm it is consumed elsewhere.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum InvalidationStrategy {
    /// Time-based invalidation (TTL)
    TimeBased,
    /// Event-based invalidation (manual triggers)
    EventBased,
    /// Dependency-based invalidation (related data changes)
    DependencyBased,
    /// Hybrid approach combining multiple strategies
    Hybrid,
}
26
/// Cache dependency tracking for intelligent invalidation.
///
/// Each cached entry carries a list of these; `ThingsCache::invalidate_by_entity`
/// and `invalidate_by_operation` evict entries whose dependencies match.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheDependency {
    /// The entity type this cache entry depends on ("task", "project", "area").
    pub entity_type: String,
    /// The specific entity ID this cache entry depends on; `None` acts as a
    /// wildcard (see [`CacheDependency::matches`]).
    pub entity_id: Option<Uuid>,
    /// The operations that would invalidate this cache entry
    /// (e.g. "task_updated", "project_deleted").
    pub invalidating_operations: Vec<String>,
}
37
38impl CacheDependency {
39    /// Test whether this dependency matches a mutation on `(entity_type, entity_id)`.
40    ///
41    /// `entity_id == None` on either side acts as a wildcard: a dependency with
42    /// no specific id matches any concrete mutation of the same type, and a
43    /// caller passing `None` matches every dependency of that type.
44    #[must_use]
45    pub fn matches(&self, entity_type: &str, entity_id: Option<&Uuid>) -> bool {
46        if self.entity_type != entity_type {
47            return false;
48        }
49        match (self.entity_id, entity_id) {
50            (Some(dep_id), Some(req_id)) => dep_id == *req_id,
51            _ => true,
52        }
53    }
54
55    /// Test whether this dependency lists `operation` as one of its invalidators.
56    #[must_use]
57    pub fn matches_operation(&self, operation: &str) -> bool {
58        self.invalidating_operations
59            .iter()
60            .any(|op| op == operation)
61    }
62}
63
/// Enhanced cache configuration with intelligent invalidation.
///
/// See [`CacheConfig::default`] for the default values.
#[derive(Debug, Clone)]
pub struct CacheConfig {
    /// Maximum number of entries in each underlying cache
    pub max_capacity: u64,
    /// Time to live for cache entries
    pub ttl: Duration,
    /// Time to idle for cache entries
    pub tti: Duration,
    /// Invalidation strategy to use
    /// (NOTE(review): stored but not read in this file — confirm usage elsewhere)
    pub invalidation_strategy: InvalidationStrategy,
    /// Enable cache warming for frequently accessed data
    pub enable_cache_warming: bool,
    /// Cache warming interval
    pub warming_interval: Duration,
    /// Maximum cache warming entries dispatched per warming tick
    pub max_warming_entries: usize,
}
82
83impl Default for CacheConfig {
84    fn default() -> Self {
85        Self {
86            max_capacity: 1000,
87            ttl: Duration::from_secs(300), // 5 minutes
88            tti: Duration::from_secs(60),  // 1 minute
89            invalidation_strategy: InvalidationStrategy::Hybrid,
90            enable_cache_warming: true,
91            warming_interval: Duration::from_secs(60), // 1 minute
92            max_warming_entries: 50,
93        }
94    }
95}
96
/// Enhanced cached data wrapper with dependency tracking.
///
/// Stored as the value type in every `ThingsCache` moka cache. The wrapper
/// keeps its own logical expiry (`expires_at`) and idle tracking on top of
/// moka's TTL/TTI so entries can be validated on read.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachedData<T> {
    pub data: T,
    pub cached_at: DateTime<Utc>,
    pub expires_at: DateTime<Utc>,
    /// Dependencies for intelligent invalidation
    pub dependencies: Vec<CacheDependency>,
    /// Access count for cache warming
    pub access_count: u64,
    /// Last access time for TTI calculation
    pub last_accessed: DateTime<Utc>,
    /// Cache warming priority (higher = more likely to be warmed)
    pub warming_priority: u32,
}
112
113impl<T> CachedData<T> {
114    pub fn new(data: T, ttl: Duration) -> Self {
115        let now = Utc::now();
116        Self {
117            data,
118            cached_at: now,
119            expires_at: now + chrono::Duration::from_std(ttl).unwrap_or_default(),
120            dependencies: Vec::new(),
121            access_count: 0,
122            last_accessed: now,
123            warming_priority: 0,
124        }
125    }
126
127    pub fn new_with_dependencies(
128        data: T,
129        ttl: Duration,
130        dependencies: Vec<CacheDependency>,
131    ) -> Self {
132        let now = Utc::now();
133        Self {
134            data,
135            cached_at: now,
136            expires_at: now + chrono::Duration::from_std(ttl).unwrap_or_default(),
137            dependencies,
138            access_count: 0,
139            last_accessed: now,
140            warming_priority: 0,
141        }
142    }
143
144    pub fn is_expired(&self) -> bool {
145        Utc::now() > self.expires_at
146    }
147
148    pub fn is_idle(&self, tti: Duration) -> bool {
149        let now = Utc::now();
150        let idle_duration = now - self.last_accessed;
151        idle_duration > chrono::Duration::from_std(tti).unwrap_or_default()
152    }
153
154    pub fn record_access(&mut self) {
155        self.access_count += 1;
156        self.last_accessed = Utc::now();
157    }
158
159    pub fn update_warming_priority(&mut self, priority: u32) {
160        self.warming_priority = priority;
161    }
162
163    pub fn add_dependency(&mut self, dependency: CacheDependency) {
164        self.dependencies.push(dependency);
165    }
166
167    pub fn has_dependency(&self, entity_type: &str, entity_id: Option<&Uuid>) -> bool {
168        self.dependencies
169            .iter()
170            .any(|dep| dep.matches(entity_type, entity_id))
171    }
172}
173
/// Cache statistics.
///
/// `hits`/`misses` are updated on every `get_*`; `entries` and `hit_rate`
/// are recomputed on demand by [`ThingsCache::get_stats`].
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CacheStats {
    pub hits: u64,
    pub misses: u64,
    pub entries: u64,
    pub hit_rate: f64,
    /// Total number of times the warming loop has called `preloader.warm(key)`.
    pub warmed_keys: u64,
    /// Total number of warming loop ticks that dispatched at least one key to the registered preloader.
    pub warming_runs: u64,
}
186
/// Hook for predictive cache preloading.
///
/// `ThingsCache` calls [`CachePreloader::predict`] after every `get_*` access
/// (hit or miss) to ask "given that key X was just accessed, what should we
/// queue for background warming?" The returned `(key, priority)` pairs are
/// pushed into the cache's priority queue via [`ThingsCache::add_to_warming`].
///
/// On each warming-loop tick, the cache picks the top-priority queued keys
/// and calls [`CachePreloader::warm`] for each. The implementor is expected
/// to fetch the data and populate the cache (typically by `tokio::spawn`ing
/// a task that calls back into `cache.get_*(key, fetcher)`). `warm` is
/// fire-and-forget — errors must be handled internally.
///
/// The trait is synchronous to stay dyn-compatible without `async-trait`.
/// Implementors that need async work should spawn it inside `warm`.
pub trait CachePreloader: Send + Sync + 'static {
    /// Called after a cache access. Returns `(key, priority)` pairs to enqueue
    /// for background warming. Return `vec![]` to opt out for this access.
    fn predict(&self, accessed_key: &str) -> Vec<(String, u32)>;

    /// Called by the warming loop for each top-priority queued key.
    /// Implementor fetches and populates the cache, typically via `tokio::spawn`.
    fn warm(&self, key: &str);
}
211
212impl CacheStats {
213    pub fn calculate_hit_rate(&mut self) {
214        let total = self.hits + self.misses;
215        self.hit_rate = if total > 0 {
216            #[allow(clippy::cast_precision_loss)]
217            {
218                self.hits as f64 / total as f64
219            }
220        } else {
221            0.0
222        };
223    }
224}
225
/// Main cache manager for Things 3 data with intelligent invalidation.
///
/// Wraps four independent moka caches (tasks, projects, areas, search
/// results) sharing one [`CacheConfig`], plus shared statistics and a
/// background warming task spawned when `enable_cache_warming` is set.
pub struct ThingsCache {
    /// Tasks cache
    tasks: Cache<String, CachedData<Vec<Task>>>,
    /// Projects cache
    projects: Cache<String, CachedData<Vec<Project>>>,
    /// Areas cache
    areas: Cache<String, CachedData<Vec<Area>>>,
    /// Search results cache
    search_results: Cache<String, CachedData<Vec<Task>>>,
    /// Statistics (shared with the warming task)
    stats: Arc<RwLock<CacheStats>>,
    /// Configuration
    config: CacheConfig,
    /// Cache warming entries (key -> priority)
    warming_entries: Arc<RwLock<HashMap<String, u32>>>,
    /// Optional preloader consulted on every `get_*` access and on every
    /// warming-loop tick. `None` means no predictive preloading.
    preloader: Arc<RwLock<Option<Arc<dyn CachePreloader>>>>,
    /// Cache warming task handle; aborted by `stop_cache_warming`
    warming_task: Option<tokio::task::JoinHandle<()>>,
}
248
249impl ThingsCache {
250    /// Create a new cache with the given configuration
251    #[must_use]
252    pub fn new(config: &CacheConfig) -> Self {
253        let tasks = Cache::builder()
254            .max_capacity(config.max_capacity)
255            .time_to_live(config.ttl)
256            .time_to_idle(config.tti)
257            .build();
258
259        let projects = Cache::builder()
260            .max_capacity(config.max_capacity)
261            .time_to_live(config.ttl)
262            .time_to_idle(config.tti)
263            .build();
264
265        let areas = Cache::builder()
266            .max_capacity(config.max_capacity)
267            .time_to_live(config.ttl)
268            .time_to_idle(config.tti)
269            .build();
270
271        let search_results = Cache::builder()
272            .max_capacity(config.max_capacity)
273            .time_to_live(config.ttl)
274            .time_to_idle(config.tti)
275            .build();
276
277        let mut cache = Self {
278            tasks,
279            projects,
280            areas,
281            search_results,
282            stats: Arc::new(RwLock::new(CacheStats::default())),
283            config: config.clone(),
284            warming_entries: Arc::new(RwLock::new(HashMap::new())),
285            preloader: Arc::new(RwLock::new(None)),
286            warming_task: None,
287        };
288
289        // Start cache warming task if enabled
290        if config.enable_cache_warming {
291            cache.start_cache_warming();
292        }
293
294        cache
295    }
296
297    /// Create a new cache with default configuration
298    #[must_use]
299    pub fn new_default() -> Self {
300        Self::new(&CacheConfig::default())
301    }
302
303    /// Get tasks from cache or execute the provided function
304    /// Get tasks from cache or fetch if not cached
305    ///
306    /// # Errors
307    ///
308    /// Returns an error if the fetcher function fails.
309    pub async fn get_tasks<F, Fut>(&self, key: &str, fetcher: F) -> Result<Vec<Task>>
310    where
311        F: FnOnce() -> Fut,
312        Fut: std::future::Future<Output = Result<Vec<Task>>>,
313    {
314        if let Some(mut cached) = self.tasks.get(key).await {
315            if !cached.is_expired() && !cached.is_idle(self.config.tti) {
316                cached.record_access();
317                self.record_hit();
318
319                // Add to warming if frequently accessed
320                if cached.access_count > 3 {
321                    self.add_to_warming(key.to_string(), cached.warming_priority + 1);
322                }
323
324                self.notify_preloader(key);
325                return Ok(cached.data);
326            }
327        }
328
329        self.record_miss();
330        let data = fetcher().await?;
331
332        // Create dependencies for intelligent invalidation
333        let dependencies = Self::create_task_dependencies(&data);
334        let mut cached_data =
335            CachedData::new_with_dependencies(data.clone(), self.config.ttl, dependencies);
336
337        // Set initial warming priority based on key type
338        let priority = if key.starts_with("inbox:") {
339            10
340        } else if key.starts_with("today:") {
341            8
342        } else {
343            5
344        };
345        cached_data.update_warming_priority(priority);
346
347        self.tasks.insert(key.to_string(), cached_data).await;
348        self.notify_preloader(key);
349        Ok(data)
350    }
351
352    /// Get projects from cache or execute the provided function
353    /// Get projects from cache or fetch if not cached
354    ///
355    /// # Errors
356    ///
357    /// Returns an error if the fetcher function fails.
358    pub async fn get_projects<F, Fut>(&self, key: &str, fetcher: F) -> Result<Vec<Project>>
359    where
360        F: FnOnce() -> Fut,
361        Fut: std::future::Future<Output = Result<Vec<Project>>>,
362    {
363        if let Some(mut cached) = self.projects.get(key).await {
364            if !cached.is_expired() && !cached.is_idle(self.config.tti) {
365                cached.record_access();
366                self.record_hit();
367
368                // Add to warming if frequently accessed
369                if cached.access_count > 3 {
370                    self.add_to_warming(key.to_string(), cached.warming_priority + 1);
371                }
372
373                self.notify_preloader(key);
374                return Ok(cached.data);
375            }
376        }
377
378        self.record_miss();
379        let data = fetcher().await?;
380
381        // Create dependencies for intelligent invalidation
382        let dependencies = Self::create_project_dependencies(&data);
383        let mut cached_data =
384            CachedData::new_with_dependencies(data.clone(), self.config.ttl, dependencies);
385
386        // Set initial warming priority
387        let priority = if key.starts_with("projects:") { 7 } else { 5 };
388        cached_data.update_warming_priority(priority);
389
390        self.projects.insert(key.to_string(), cached_data).await;
391        self.notify_preloader(key);
392        Ok(data)
393    }
394
395    /// Get areas from cache or execute the provided function
396    /// Get areas from cache or fetch if not cached
397    ///
398    /// # Errors
399    ///
400    /// Returns an error if the fetcher function fails.
401    pub async fn get_areas<F, Fut>(&self, key: &str, fetcher: F) -> Result<Vec<Area>>
402    where
403        F: FnOnce() -> Fut,
404        Fut: std::future::Future<Output = Result<Vec<Area>>>,
405    {
406        if let Some(mut cached) = self.areas.get(key).await {
407            if !cached.is_expired() && !cached.is_idle(self.config.tti) {
408                cached.record_access();
409                self.record_hit();
410
411                // Add to warming if frequently accessed
412                if cached.access_count > 3 {
413                    self.add_to_warming(key.to_string(), cached.warming_priority + 1);
414                }
415
416                self.notify_preloader(key);
417                return Ok(cached.data);
418            }
419        }
420
421        self.record_miss();
422        let data = fetcher().await?;
423
424        // Create dependencies for intelligent invalidation
425        let dependencies = Self::create_area_dependencies(&data);
426        let mut cached_data =
427            CachedData::new_with_dependencies(data.clone(), self.config.ttl, dependencies);
428
429        // Set initial warming priority
430        let priority = if key.starts_with("areas:") { 6 } else { 5 };
431        cached_data.update_warming_priority(priority);
432
433        self.areas.insert(key.to_string(), cached_data).await;
434        self.notify_preloader(key);
435        Ok(data)
436    }
437
438    /// Get search results from cache or execute the provided function
439    /// Get search results from cache or fetch if not cached
440    ///
441    /// # Errors
442    ///
443    /// Returns an error if the fetcher function fails.
444    pub async fn get_search_results<F, Fut>(&self, key: &str, fetcher: F) -> Result<Vec<Task>>
445    where
446        F: FnOnce() -> Fut,
447        Fut: std::future::Future<Output = Result<Vec<Task>>>,
448    {
449        if let Some(mut cached) = self.search_results.get(key).await {
450            if !cached.is_expired() && !cached.is_idle(self.config.tti) {
451                cached.record_access();
452                self.record_hit();
453
454                // Add to warming if frequently accessed
455                if cached.access_count > 3 {
456                    self.add_to_warming(key.to_string(), cached.warming_priority + 1);
457                }
458
459                self.notify_preloader(key);
460                return Ok(cached.data);
461            }
462        }
463
464        self.record_miss();
465        let data = fetcher().await?;
466
467        // Create dependencies for intelligent invalidation
468        let dependencies = Self::create_task_dependencies(&data);
469        let mut cached_data =
470            CachedData::new_with_dependencies(data.clone(), self.config.ttl, dependencies);
471
472        // Set initial warming priority for search results
473        let priority = if key.starts_with("search:") { 4 } else { 3 };
474        cached_data.update_warming_priority(priority);
475
476        self.search_results
477            .insert(key.to_string(), cached_data)
478            .await;
479        self.notify_preloader(key);
480        Ok(data)
481    }
482
    /// Invalidate all caches.
    ///
    /// Submits every entry in all four underlying caches for eviction.
    /// Moka applies eviction asynchronously, so entries may linger briefly.
    pub fn invalidate_all(&self) {
        self.tasks.invalidate_all();
        self.projects.invalidate_all();
        self.areas.invalidate_all();
        self.search_results.invalidate_all();
    }
490
    /// Invalidate a specific cache entry.
    ///
    /// Removes `key` from all four caches; removing a key that is absent
    /// from a cache is a no-op.
    pub async fn invalidate(&self, key: &str) {
        self.tasks.remove(key).await;
        self.projects.remove(key).await;
        self.areas.remove(key).await;
        self.search_results.remove(key).await;
    }
498
499    /// Get cache statistics
500    #[must_use]
501    pub fn get_stats(&self) -> CacheStats {
502        let mut stats = self.stats.read().clone();
503        stats.entries = self.tasks.entry_count()
504            + self.projects.entry_count()
505            + self.areas.entry_count()
506            + self.search_results.entry_count();
507        stats.calculate_hit_rate();
508        stats
509    }
510
511    /// Reset cache statistics
512    pub fn reset_stats(&self) {
513        let mut stats = self.stats.write();
514        *stats = CacheStats::default();
515    }
516
517    /// Record a cache hit
518    fn record_hit(&self) {
519        let mut stats = self.stats.write();
520        stats.hits += 1;
521    }
522
523    /// Record a cache miss
524    fn record_miss(&self) {
525        let mut stats = self.stats.write();
526        stats.misses += 1;
527    }
528
529    /// Create dependencies for task data
530    fn create_task_dependencies(tasks: &[Task]) -> Vec<CacheDependency> {
531        let mut dependencies = Vec::new();
532
533        // Add dependencies for each task
534        for task in tasks {
535            dependencies.push(CacheDependency {
536                entity_type: "task".to_string(),
537                entity_id: Some(task.uuid),
538                invalidating_operations: vec![
539                    "task_updated".to_string(),
540                    "task_deleted".to_string(),
541                    "task_completed".to_string(),
542                ],
543            });
544
545            // Add project dependency if task belongs to a project
546            if let Some(project_uuid) = task.project_uuid {
547                dependencies.push(CacheDependency {
548                    entity_type: "project".to_string(),
549                    entity_id: Some(project_uuid),
550                    invalidating_operations: vec![
551                        "project_updated".to_string(),
552                        "project_deleted".to_string(),
553                    ],
554                });
555            }
556
557            // Add area dependency if task belongs to an area
558            if let Some(area_uuid) = task.area_uuid {
559                dependencies.push(CacheDependency {
560                    entity_type: "area".to_string(),
561                    entity_id: Some(area_uuid),
562                    invalidating_operations: vec![
563                        "area_updated".to_string(),
564                        "area_deleted".to_string(),
565                    ],
566                });
567            }
568        }
569
570        dependencies
571    }
572
573    /// Create dependencies for project data
574    fn create_project_dependencies(projects: &[Project]) -> Vec<CacheDependency> {
575        let mut dependencies = Vec::new();
576
577        for project in projects {
578            dependencies.push(CacheDependency {
579                entity_type: "project".to_string(),
580                entity_id: Some(project.uuid),
581                invalidating_operations: vec![
582                    "project_updated".to_string(),
583                    "project_deleted".to_string(),
584                ],
585            });
586
587            if let Some(area_uuid) = project.area_uuid {
588                dependencies.push(CacheDependency {
589                    entity_type: "area".to_string(),
590                    entity_id: Some(area_uuid),
591                    invalidating_operations: vec![
592                        "area_updated".to_string(),
593                        "area_deleted".to_string(),
594                    ],
595                });
596            }
597        }
598
599        dependencies
600    }
601
602    /// Create dependencies for area data
603    fn create_area_dependencies(areas: &[Area]) -> Vec<CacheDependency> {
604        let mut dependencies = Vec::new();
605
606        for area in areas {
607            dependencies.push(CacheDependency {
608                entity_type: "area".to_string(),
609                entity_id: Some(area.uuid),
610                invalidating_operations: vec![
611                    "area_updated".to_string(),
612                    "area_deleted".to_string(),
613                ],
614            });
615        }
616
617        dependencies
618    }
619
    /// Start cache warming background task.
    ///
    /// Each tick, drains the top-priority queued keys and dispatches each to
    /// the registered [`CachePreloader`] (if any). Keys are removed from the
    /// queue after dispatch — the preloader's own `predict` calls re-add them
    /// later if they remain hot.
    fn start_cache_warming(&mut self) {
        // Clone the shared handles the background task needs; the spawned
        // task must not borrow `self` because it outlives this call.
        let warming_entries = Arc::clone(&self.warming_entries);
        let preloader = Arc::clone(&self.preloader);
        let stats = Arc::clone(&self.stats);
        let warming_interval = self.config.warming_interval;
        let max_entries = self.config.max_warming_entries;

        let handle = tokio::spawn(async move {
            let mut interval = tokio::time::interval(warming_interval);
            loop {
                interval.tick().await;

                // Snapshot the top-priority keys inside a block so the read
                // guard is dropped before anything below can `.await`.
                let entries_to_warm = {
                    let entries = warming_entries.read();
                    let mut sorted_entries: Vec<_> = entries.iter().collect();
                    // Highest priority first.
                    sorted_entries.sort_by(|a, b| b.1.cmp(a.1));
                    sorted_entries
                        .into_iter()
                        .take(max_entries)
                        .map(|(key, _)| key.clone())
                        .collect::<Vec<_>>()
                };

                if entries_to_warm.is_empty() {
                    continue;
                }

                // Snapshot the preloader so its lock isn't held while warming.
                let p_snapshot = preloader.read().clone();
                if let Some(p) = p_snapshot {
                    for key in &entries_to_warm {
                        // Fire-and-forget: implementors handle errors internally.
                        p.warm(key);
                    }
                    let mut s = stats.write();
                    s.warming_runs += 1;
                    s.warmed_keys += entries_to_warm.len() as u64;
                } else {
                    tracing::debug!(
                        "Cache warming {} entries (no preloader registered)",
                        entries_to_warm.len()
                    );
                }

                // Dequeue dispatched keys even when no preloader is registered,
                // so stale entries don't accumulate; `predict` re-adds hot keys.
                let mut entries = warming_entries.write();
                for key in &entries_to_warm {
                    entries.remove(key);
                }
            }
        });

        self.warming_task = Some(handle);
    }
677
678    /// Register a preloader. Replaces any previously-registered preloader.
679    ///
680    /// The preloader's `predict` will be invoked after every `get_*` call,
681    /// and `warm` will be invoked by the warming-loop tick for queued keys.
682    pub fn set_preloader(&self, preloader: Arc<dyn CachePreloader>) {
683        *self.preloader.write() = Some(preloader);
684    }
685
686    /// Remove the registered preloader. Subsequent `get_*` calls and warming
687    /// ticks become no-ops with respect to predictive preloading.
688    pub fn clear_preloader(&self) {
689        *self.preloader.write() = None;
690    }
691
692    /// Returns `true` if `key` is present in any of the four underlying caches.
693    fn contains_cached_key(&self, key: &str) -> bool {
694        self.tasks.contains_key(key)
695            || self.projects.contains_key(key)
696            || self.areas.contains_key(key)
697            || self.search_results.contains_key(key)
698    }
699
700    /// Snapshot the registered preloader and call its `predict`, pushing any
701    /// returned `(key, priority)` pairs into `warming_entries`.
702    /// Keys already present in the cache are skipped — this prevents a
703    /// self-reinforcing loop where warming a key triggers predict on its
704    /// counterpart, which re-enqueues the original key indefinitely.
705    fn notify_preloader(&self, accessed_key: &str) {
706        let p_snapshot = self.preloader.read().clone();
707        let Some(p) = p_snapshot else { return };
708        for (k, prio) in p.predict(accessed_key) {
709            if !self.contains_cached_key(&k) {
710                self.add_to_warming(k, prio);
711            }
712        }
713    }
714
715    /// Add entry to cache warming list
716    pub fn add_to_warming(&self, key: String, priority: u32) {
717        let mut entries = self.warming_entries.write();
718        entries.insert(key, priority);
719    }
720
721    /// Remove entry from cache warming list
722    pub fn remove_from_warming(&self, key: &str) {
723        let mut entries = self.warming_entries.write();
724        entries.remove(key);
725    }
726
    /// Selectively invalidate cache entries whose dependencies match
    /// `(entity_type, entity_id)`. Returns the number of keys submitted for
    /// eviction (moka eviction may complete asynchronously).
    ///
    /// `entity_id == None` is a wildcard that matches any cached entry
    /// depending on `entity_type`. Entries that do not depend on the mutated
    /// entity are left untouched.
    pub async fn invalidate_by_entity(&self, entity_type: &str, entity_id: Option<&Uuid>) -> usize {
        // Collect matching keys inside a block so the non-`Send` `&dyn Fn`
        // predicate is dropped before the first `.await`, keeping this
        // future `Send` (see `collect_matching_keys`).
        let (task_keys, project_keys, area_keys, search_keys) = {
            let pred = |dep: &CacheDependency| dep.matches(entity_type, entity_id);
            (
                collect_matching_keys(&self.tasks, &pred),
                collect_matching_keys(&self.projects, &pred),
                collect_matching_keys(&self.areas, &pred),
                collect_matching_keys(&self.search_results, &pred),
            )
        };
        let removed = evict_keys(&self.tasks, &task_keys).await
            + evict_keys(&self.projects, &project_keys).await
            + evict_keys(&self.areas, &area_keys).await
            + evict_keys(&self.search_results, &search_keys).await;

        tracing::debug!(
            "Invalidated {} cache entries depending on {} {:?}",
            removed,
            entity_type,
            entity_id
        );
        removed
    }
757
    /// Selectively invalidate cache entries whose dependencies list `operation`
    /// among their invalidating operations. Returns the number of keys submitted
    /// for eviction (moka eviction may complete asynchronously).
    pub async fn invalidate_by_operation(&self, operation: &str) -> usize {
        // Same Send-safety pattern as `invalidate_by_entity`: the non-`Send`
        // predicate is dropped before any `.await`.
        let (task_keys, project_keys, area_keys, search_keys) = {
            let pred = |dep: &CacheDependency| dep.matches_operation(operation);
            (
                collect_matching_keys(&self.tasks, &pred),
                collect_matching_keys(&self.projects, &pred),
                collect_matching_keys(&self.areas, &pred),
                collect_matching_keys(&self.search_results, &pred),
            )
        };
        let removed = evict_keys(&self.tasks, &task_keys).await
            + evict_keys(&self.projects, &project_keys).await
            + evict_keys(&self.areas, &area_keys).await
            + evict_keys(&self.search_results, &search_keys).await;

        tracing::debug!(
            "Invalidated {} cache entries due to operation {}",
            removed,
            operation
        );
        removed
    }
783
784    /// Get cache warming statistics
785    #[must_use]
786    pub fn get_warming_stats(&self) -> (usize, u32) {
787        let entries = self.warming_entries.read();
788        let count = entries.len();
789        let max_priority = entries.values().max().copied().unwrap_or(0);
790        (count, max_priority)
791    }
792
793    /// Stop cache warming
794    pub fn stop_cache_warming(&mut self) {
795        if let Some(handle) = self.warming_task.take() {
796            handle.abort();
797        }
798    }
799}
800
801/// Walk a moka cache synchronously and collect keys whose dependency list
802/// satisfies `pred`. Split from [`evict_keys`] so the (non-`Send`) predicate is
803/// dropped before any `.await`, keeping the surrounding async fn `Send`.
804fn collect_matching_keys<V>(
805    cache: &Cache<String, CachedData<V>>,
806    pred: &dyn Fn(&CacheDependency) -> bool,
807) -> Vec<String>
808where
809    V: Clone + Send + Sync + 'static,
810{
811    cache
812        .iter()
813        .filter_map(|(k, v)| {
814            if v.dependencies.iter().any(pred) {
815                Some((*k).clone())
816            } else {
817                None
818            }
819        })
820        .collect()
821}
822
823/// Evict the given keys from a moka cache.
824///
825/// Returns the number of keys submitted for eviction. Moka's `invalidate` is
826/// async but the actual removal may lag slightly; callers that need to observe
827/// the post-eviction state should `await` a short yield or sleep.
828async fn evict_keys<V>(cache: &Cache<String, CachedData<V>>, keys: &[String]) -> usize
829where
830    V: Clone + Send + Sync + 'static,
831{
832    for k in keys {
833        cache.invalidate(k).await;
834    }
835    keys.len()
836}
837
/// Default [`CachePreloader`] with a small set of hardcoded heuristics over
/// the existing top-level cache keys.
///
/// Holds a [`Weak`] reference to the cache to avoid the obvious
/// `Arc<ThingsCache>` ↔ `Arc<dyn CachePreloader>` reference cycle. Once the
/// last strong reference to the cache is dropped, [`CachePreloader::warm`]
/// becomes a no-op.
///
/// Heuristics:
/// - Accessing `inbox:all` predicts `today:all` (priority 8).
/// - Accessing `today:all` predicts `inbox:all` (priority 10).
/// - Accessing `areas:all` predicts `projects:all` (priority 7).
///
/// Other keys produce no predictions. Future preloaders (per-project tasks,
/// search-history-driven) plug in via the same trait.
///
/// # Warm-loop behaviour
///
/// The `inbox:all` ↔ `today:all` pair is mutually predictive, which would
/// ordinarily create a perpetual warming loop. [`ThingsCache::notify_preloader`]
/// guards against this: a predicted key is only enqueued when it is *not*
/// already present in the cache. Once both keys are warm, no further
/// enqueuing occurs until one of them expires or is invalidated.
pub struct DefaultPreloader {
    /// Weak back-reference to the cache being warmed (breaks the Arc cycle).
    cache: std::sync::Weak<ThingsCache>,
    /// Database handle used by `warm` to fetch data.
    db: Arc<crate::database::ThingsDatabase>,
}
865
866impl DefaultPreloader {
867    /// Construct a preloader that holds a [`Weak`] handle to `cache` and a
868    /// strong handle to `db`. Wrap in [`Arc`] before registering with
869    /// [`ThingsCache::set_preloader`].
870    #[must_use]
871    pub fn new(cache: &Arc<ThingsCache>, db: Arc<crate::database::ThingsDatabase>) -> Arc<Self> {
872        Arc::new(Self {
873            cache: Arc::downgrade(cache),
874            db,
875        })
876    }
877}
878
879impl CachePreloader for DefaultPreloader {
880    fn predict(&self, accessed_key: &str) -> Vec<(String, u32)> {
881        match accessed_key {
882            "inbox:all" => vec![("today:all".to_string(), 8)],
883            "today:all" => vec![("inbox:all".to_string(), 10)],
884            "areas:all" => vec![("projects:all".to_string(), 7)],
885            _ => vec![],
886        }
887    }
888
889    fn warm(&self, key: &str) {
890        let Some(cache) = self.cache.upgrade() else {
891            return;
892        };
893        let db = Arc::clone(&self.db);
894        let key = key.to_string();
895        tokio::spawn(async move {
896            let result: Result<()> = match key.as_str() {
897                "inbox:all" => cache
898                    .get_tasks(&key, || async {
899                        db.get_inbox(None).await.map_err(anyhow::Error::from)
900                    })
901                    .await
902                    .map(|_| ()),
903                "today:all" => cache
904                    .get_tasks(&key, || async {
905                        db.get_today(None).await.map_err(anyhow::Error::from)
906                    })
907                    .await
908                    .map(|_| ()),
909                "areas:all" => cache
910                    .get_areas(&key, || async {
911                        db.get_areas().await.map_err(anyhow::Error::from)
912                    })
913                    .await
914                    .map(|_| ()),
915                "projects:all" => cache
916                    .get_projects(&key, || async {
917                        db.get_projects(None).await.map_err(anyhow::Error::from)
918                    })
919                    .await
920                    .map(|_| ()),
921                _ => Ok(()),
922            };
923            if let Err(e) = result {
924                tracing::warn!("DefaultPreloader::warm({key}) failed: {e}");
925            }
926        });
927    }
928}
929
/// Cache key generators.
///
/// Each function renders a stable, human-readable key for one of the
/// top-level caches. An absent limit/filter is rendered as `"all"`, so that
/// e.g. `inbox(None)` and `inbox(Some(10))` occupy distinct cache entries.
pub mod keys {
    /// Render an optional limit as its key segment: the number itself, or
    /// `"all"` when no limit was given.
    ///
    /// Uses `map_or_else` (not `map_or`) so the `"all"` default String is
    /// only allocated when actually needed; `map_or` evaluates its default
    /// argument eagerly, even on the `Some` path.
    fn limit_segment(limit: Option<usize>) -> String {
        limit.map_or_else(|| "all".to_string(), |l| l.to_string())
    }

    /// Generate cache key for inbox tasks
    #[must_use]
    pub fn inbox(limit: Option<usize>) -> String {
        format!("inbox:{}", limit_segment(limit))
    }

    /// Generate cache key for today's tasks
    #[must_use]
    pub fn today(limit: Option<usize>) -> String {
        format!("today:{}", limit_segment(limit))
    }

    /// Generate cache key for projects, scoped to an area when one is given.
    #[must_use]
    pub fn projects(area_uuid: Option<&str>) -> String {
        format!("projects:{}", area_uuid.unwrap_or("all"))
    }

    /// Generate cache key for areas (a single, unparameterized entry).
    #[must_use]
    pub fn areas() -> String {
        "areas:all".to_string()
    }

    /// Generate cache key for search results.
    ///
    /// NOTE(review): a query containing `:` is embedded verbatim, so keys
    /// are not guaranteed unambiguous — acceptable for a cache, where the
    /// worst case is a spurious extra entry, not wrong data.
    #[must_use]
    pub fn search(query: &str, limit: Option<usize>) -> String {
        format!("search:{}:{}", query, limit_segment(limit))
    }
}
972
973#[cfg(test)]
974mod tests {
975    use super::*;
976    use crate::test_utils::{create_mock_areas, create_mock_projects, create_mock_tasks};
977    use std::time::Duration;
978
979    #[test]
980    fn test_cache_config_default() {
981        let config = CacheConfig::default();
982
983        assert_eq!(config.max_capacity, 1000);
984        assert_eq!(config.ttl, Duration::from_secs(300));
985        assert_eq!(config.tti, Duration::from_secs(60));
986    }
987
988    #[test]
989    fn test_cache_config_custom() {
990        let config = CacheConfig {
991            max_capacity: 500,
992            ttl: Duration::from_secs(600),
993            tti: Duration::from_secs(120),
994            invalidation_strategy: InvalidationStrategy::Hybrid,
995            enable_cache_warming: true,
996            warming_interval: Duration::from_secs(60),
997            max_warming_entries: 50,
998        };
999
1000        assert_eq!(config.max_capacity, 500);
1001        assert_eq!(config.ttl, Duration::from_secs(600));
1002        assert_eq!(config.tti, Duration::from_secs(120));
1003    }
1004
1005    #[test]
1006    fn test_cached_data_creation() {
1007        let data = vec![1, 2, 3];
1008        let ttl = Duration::from_secs(60);
1009        let cached = CachedData::new(data.clone(), ttl);
1010
1011        assert_eq!(cached.data, data);
1012        assert!(cached.cached_at <= Utc::now());
1013        assert!(cached.expires_at > cached.cached_at);
1014        assert!(!cached.is_expired());
1015    }
1016
1017    #[test]
1018    fn test_cached_data_expiration() {
1019        let data = vec![1, 2, 3];
1020        let ttl = Duration::from_millis(1);
1021        let cached = CachedData::new(data, ttl);
1022
1023        // Should not be expired immediately
1024        assert!(!cached.is_expired());
1025
1026        // Wait a bit and check again
1027        std::thread::sleep(Duration::from_millis(10));
1028        // Note: This test might be flaky due to timing, but it's testing the logic
1029    }
1030
1031    #[test]
1032    fn test_cached_data_serialization() {
1033        let data = vec![1, 2, 3];
1034        let ttl = Duration::from_secs(60);
1035        let cached = CachedData::new(data, ttl);
1036
1037        // Test serialization
1038        let json = serde_json::to_string(&cached).unwrap();
1039        assert!(json.contains("data"));
1040        assert!(json.contains("cached_at"));
1041        assert!(json.contains("expires_at"));
1042
1043        // Test deserialization
1044        let deserialized: CachedData<Vec<i32>> = serde_json::from_str(&json).unwrap();
1045        assert_eq!(deserialized.data, cached.data);
1046    }
1047
1048    #[test]
1049    fn test_cache_stats_default() {
1050        let stats = CacheStats::default();
1051
1052        assert_eq!(stats.hits, 0);
1053        assert_eq!(stats.misses, 0);
1054        assert_eq!(stats.entries, 0);
1055        assert!((stats.hit_rate - 0.0).abs() < f64::EPSILON);
1056    }
1057
1058    #[test]
1059    fn test_cache_stats_calculation() {
1060        let mut stats = CacheStats {
1061            hits: 8,
1062            misses: 2,
1063            entries: 5,
1064            hit_rate: 0.0,
1065            ..Default::default()
1066        };
1067
1068        stats.calculate_hit_rate();
1069        assert!((stats.hit_rate - 0.8).abs() < f64::EPSILON);
1070    }
1071
1072    #[test]
1073    fn test_cache_stats_zero_total() {
1074        let mut stats = CacheStats {
1075            hits: 0,
1076            misses: 0,
1077            entries: 0,
1078            hit_rate: 0.0,
1079            ..Default::default()
1080        };
1081
1082        stats.calculate_hit_rate();
1083        assert!((stats.hit_rate - 0.0).abs() < f64::EPSILON);
1084    }
1085
1086    #[test]
1087    fn test_cache_stats_serialization() {
1088        let stats = CacheStats {
1089            hits: 10,
1090            misses: 5,
1091            entries: 3,
1092            hit_rate: 0.67,
1093            ..Default::default()
1094        };
1095
1096        // Test serialization
1097        let json = serde_json::to_string(&stats).unwrap();
1098        assert!(json.contains("hits"));
1099        assert!(json.contains("misses"));
1100        assert!(json.contains("entries"));
1101        assert!(json.contains("hit_rate"));
1102
1103        // Test deserialization
1104        let deserialized: CacheStats = serde_json::from_str(&json).unwrap();
1105        assert_eq!(deserialized.hits, stats.hits);
1106        assert_eq!(deserialized.misses, stats.misses);
1107        assert_eq!(deserialized.entries, stats.entries);
1108        assert!((deserialized.hit_rate - stats.hit_rate).abs() < f64::EPSILON);
1109    }
1110
1111    #[test]
1112    fn test_cache_stats_clone() {
1113        let stats = CacheStats {
1114            hits: 5,
1115            misses: 3,
1116            entries: 2,
1117            hit_rate: 0.625,
1118            ..Default::default()
1119        };
1120
1121        let cloned = stats.clone();
1122        assert_eq!(cloned.hits, stats.hits);
1123        assert_eq!(cloned.misses, stats.misses);
1124        assert_eq!(cloned.entries, stats.entries);
1125        assert!((cloned.hit_rate - stats.hit_rate).abs() < f64::EPSILON);
1126    }
1127
1128    #[test]
1129    fn test_cache_stats_debug() {
1130        let stats = CacheStats {
1131            hits: 1,
1132            misses: 1,
1133            entries: 1,
1134            hit_rate: 0.5,
1135            ..Default::default()
1136        };
1137
1138        let debug_str = format!("{stats:?}");
1139        assert!(debug_str.contains("CacheStats"));
1140        assert!(debug_str.contains("hits"));
1141        assert!(debug_str.contains("misses"));
1142    }
1143
1144    #[tokio::test]
1145    async fn test_cache_new() {
1146        let config = CacheConfig::default();
1147        let _cache = ThingsCache::new(&config);
1148
1149        // Just test that it can be created
1150        // Test passes if we reach this point
1151    }
1152
1153    #[tokio::test]
1154    async fn test_cache_new_default() {
1155        let _cache = ThingsCache::new_default();
1156
1157        // Just test that it can be created
1158        // Test passes if we reach this point
1159    }
1160
1161    #[tokio::test]
1162    async fn test_cache_basic_operations() {
1163        let cache = ThingsCache::new_default();
1164
1165        // Test cache miss
1166        let result = cache.get_tasks("test", || async { Ok(vec![]) }).await;
1167        assert!(result.is_ok());
1168
1169        // Test cache hit
1170        let result = cache.get_tasks("test", || async { Ok(vec![]) }).await;
1171        assert!(result.is_ok());
1172
1173        let stats = cache.get_stats();
1174        assert_eq!(stats.hits, 1);
1175        assert_eq!(stats.misses, 1);
1176    }
1177
1178    #[tokio::test]
1179    async fn test_cache_tasks_with_data() {
1180        let cache = ThingsCache::new_default();
1181        let mock_tasks = create_mock_tasks();
1182
1183        // Test cache miss with data
1184        let result = cache
1185            .get_tasks("tasks", || async { Ok(mock_tasks.clone()) })
1186            .await;
1187        assert!(result.is_ok());
1188        assert_eq!(result.unwrap().len(), mock_tasks.len());
1189
1190        // Test cache hit
1191        let result = cache.get_tasks("tasks", || async { Ok(vec![]) }).await;
1192        assert!(result.is_ok());
1193        assert_eq!(result.unwrap().len(), mock_tasks.len());
1194
1195        let stats = cache.get_stats();
1196        assert_eq!(stats.hits, 1);
1197        assert_eq!(stats.misses, 1);
1198    }
1199
1200    #[tokio::test]
1201    async fn test_cache_projects() {
1202        let cache = ThingsCache::new_default();
1203        let mock_projects = create_mock_projects();
1204
1205        // Test cache miss
1206        let result = cache
1207            .get_projects("projects", || async { Ok(mock_projects.clone()) })
1208            .await;
1209        assert!(result.is_ok());
1210
1211        // Test cache hit
1212        let result = cache
1213            .get_projects("projects", || async { Ok(vec![]) })
1214            .await;
1215        assert!(result.is_ok());
1216
1217        let stats = cache.get_stats();
1218        assert_eq!(stats.hits, 1);
1219        assert_eq!(stats.misses, 1);
1220    }
1221
1222    #[tokio::test]
1223    async fn test_cache_areas() {
1224        let cache = ThingsCache::new_default();
1225        let mock_areas = create_mock_areas();
1226
1227        // Test cache miss
1228        let result = cache
1229            .get_areas("areas", || async { Ok(mock_areas.clone()) })
1230            .await;
1231        assert!(result.is_ok());
1232
1233        // Test cache hit
1234        let result = cache.get_areas("areas", || async { Ok(vec![]) }).await;
1235        assert!(result.is_ok());
1236
1237        let stats = cache.get_stats();
1238        assert_eq!(stats.hits, 1);
1239        assert_eq!(stats.misses, 1);
1240    }
1241
1242    #[tokio::test]
1243    async fn test_cache_search_results() {
1244        let cache = ThingsCache::new_default();
1245        let mock_tasks = create_mock_tasks();
1246
1247        // Test cache miss
1248        let result = cache
1249            .get_search_results("search:test", || async { Ok(mock_tasks.clone()) })
1250            .await;
1251        assert!(result.is_ok());
1252
1253        // Test cache hit
1254        let result = cache
1255            .get_search_results("search:test", || async { Ok(vec![]) })
1256            .await;
1257        assert!(result.is_ok());
1258
1259        let stats = cache.get_stats();
1260        assert_eq!(stats.hits, 1);
1261        assert_eq!(stats.misses, 1);
1262    }
1263
1264    #[tokio::test]
1265    async fn test_cache_fetcher_error() {
1266        let cache = ThingsCache::new_default();
1267
1268        // Test that fetcher errors are propagated
1269        let result = cache
1270            .get_tasks("error", || async { Err(anyhow::anyhow!("Test error")) })
1271            .await;
1272
1273        assert!(result.is_err());
1274        assert!(result.unwrap_err().to_string().contains("Test error"));
1275
1276        let stats = cache.get_stats();
1277        assert_eq!(stats.hits, 0);
1278        assert_eq!(stats.misses, 1);
1279    }
1280
1281    #[tokio::test]
1282    async fn test_cache_expiration() {
1283        let config = CacheConfig {
1284            max_capacity: 100,
1285            ttl: Duration::from_millis(10),
1286            tti: Duration::from_millis(5),
1287            invalidation_strategy: InvalidationStrategy::Hybrid,
1288            enable_cache_warming: true,
1289            warming_interval: Duration::from_secs(60),
1290            max_warming_entries: 50,
1291        };
1292        let cache = ThingsCache::new(&config);
1293
1294        // Insert data
1295        let _ = cache.get_tasks("test", || async { Ok(vec![]) }).await;
1296
1297        // Wait for expiration
1298        tokio::time::sleep(Duration::from_millis(20)).await;
1299
1300        // Should be a miss due to expiration
1301        let _ = cache.get_tasks("test", || async { Ok(vec![]) }).await;
1302
1303        let stats = cache.get_stats();
1304        assert_eq!(stats.misses, 2);
1305    }
1306
1307    #[tokio::test]
1308    async fn test_cache_invalidate_all() {
1309        let cache = ThingsCache::new_default();
1310
1311        // Insert data into all caches
1312        let _ = cache.get_tasks("tasks", || async { Ok(vec![]) }).await;
1313        let _ = cache
1314            .get_projects("projects", || async { Ok(vec![]) })
1315            .await;
1316        let _ = cache.get_areas("areas", || async { Ok(vec![]) }).await;
1317        let _ = cache
1318            .get_search_results("search", || async { Ok(vec![]) })
1319            .await;
1320
1321        // Invalidate all
1322        cache.invalidate_all();
1323
1324        // All should be misses now
1325        let _ = cache.get_tasks("tasks", || async { Ok(vec![]) }).await;
1326        let _ = cache
1327            .get_projects("projects", || async { Ok(vec![]) })
1328            .await;
1329        let _ = cache.get_areas("areas", || async { Ok(vec![]) }).await;
1330        let _ = cache
1331            .get_search_results("search", || async { Ok(vec![]) })
1332            .await;
1333
1334        let stats = cache.get_stats();
1335        assert_eq!(stats.misses, 8); // 4 initial + 4 after invalidation
1336    }
1337
1338    #[tokio::test]
1339    async fn test_cache_invalidate_specific() {
1340        let cache = ThingsCache::new_default();
1341
1342        // Insert data
1343        let _ = cache.get_tasks("key1", || async { Ok(vec![]) }).await;
1344        let _ = cache.get_tasks("key2", || async { Ok(vec![]) }).await;
1345
1346        // Invalidate specific key
1347        cache.invalidate("key1").await;
1348
1349        // key1 should be a miss, key2 should be a hit
1350        let _ = cache.get_tasks("key1", || async { Ok(vec![]) }).await;
1351        let _ = cache.get_tasks("key2", || async { Ok(vec![]) }).await;
1352
1353        let stats = cache.get_stats();
1354        assert_eq!(stats.hits, 1); // key2 hit
1355        assert_eq!(stats.misses, 3); // key1 initial + key1 after invalidation + key2 initial
1356    }
1357
1358    #[tokio::test]
1359    async fn test_cache_reset_stats() {
1360        let cache = ThingsCache::new_default();
1361
1362        // Generate some stats
1363        let _ = cache.get_tasks("test", || async { Ok(vec![]) }).await;
1364        let _ = cache.get_tasks("test", || async { Ok(vec![]) }).await;
1365
1366        let stats_before = cache.get_stats();
1367        assert!(stats_before.hits > 0 || stats_before.misses > 0);
1368
1369        // Reset stats
1370        cache.reset_stats();
1371
1372        let stats_after = cache.get_stats();
1373        assert_eq!(stats_after.hits, 0);
1374        assert_eq!(stats_after.misses, 0);
1375        assert!((stats_after.hit_rate - 0.0).abs() < f64::EPSILON);
1376    }
1377
1378    #[test]
1379    fn test_cache_keys_inbox() {
1380        assert_eq!(keys::inbox(None), "inbox:all");
1381        assert_eq!(keys::inbox(Some(10)), "inbox:10");
1382        assert_eq!(keys::inbox(Some(0)), "inbox:0");
1383    }
1384
1385    #[test]
1386    fn test_cache_keys_today() {
1387        assert_eq!(keys::today(None), "today:all");
1388        assert_eq!(keys::today(Some(5)), "today:5");
1389        assert_eq!(keys::today(Some(100)), "today:100");
1390    }
1391
1392    #[test]
1393    fn test_cache_keys_projects() {
1394        assert_eq!(keys::projects(None), "projects:all");
1395        assert_eq!(keys::projects(Some("uuid-123")), "projects:uuid-123");
1396        assert_eq!(keys::projects(Some("")), "projects:");
1397    }
1398
1399    #[test]
1400    fn test_cache_keys_areas() {
1401        assert_eq!(keys::areas(), "areas:all");
1402    }
1403
1404    #[test]
1405    fn test_cache_keys_search() {
1406        assert_eq!(keys::search("test query", None), "search:test query:all");
1407        assert_eq!(keys::search("test query", Some(10)), "search:test query:10");
1408        assert_eq!(keys::search("", Some(5)), "search::5");
1409    }
1410
1411    #[tokio::test]
1412    async fn test_cache_multiple_keys() {
1413        let cache = ThingsCache::new_default();
1414        let mock_tasks1 = create_mock_tasks();
1415        let mock_tasks2 = create_mock_tasks();
1416
1417        // Test different keys don't interfere
1418        let _ = cache
1419            .get_tasks("key1", || async { Ok(mock_tasks1.clone()) })
1420            .await;
1421        let _ = cache
1422            .get_tasks("key2", || async { Ok(mock_tasks2.clone()) })
1423            .await;
1424
1425        // Both should be hits
1426        let result1 = cache
1427            .get_tasks("key1", || async { Ok(vec![]) })
1428            .await
1429            .unwrap();
1430        let result2 = cache
1431            .get_tasks("key2", || async { Ok(vec![]) })
1432            .await
1433            .unwrap();
1434
1435        assert_eq!(result1.len(), mock_tasks1.len());
1436        assert_eq!(result2.len(), mock_tasks2.len());
1437
1438        let stats = cache.get_stats();
1439        assert_eq!(stats.hits, 2);
1440        assert_eq!(stats.misses, 2);
1441    }
1442
1443    #[tokio::test]
1444    async fn test_cache_entry_count() {
1445        let cache = ThingsCache::new_default();
1446
1447        // Initially no entries
1448        let stats = cache.get_stats();
1449        assert_eq!(stats.entries, 0);
1450
1451        // Add some entries
1452        let _ = cache.get_tasks("tasks", || async { Ok(vec![]) }).await;
1453        let _ = cache
1454            .get_projects("projects", || async { Ok(vec![]) })
1455            .await;
1456        let _ = cache.get_areas("areas", || async { Ok(vec![]) }).await;
1457        let _ = cache
1458            .get_search_results("search", || async { Ok(vec![]) })
1459            .await;
1460
1461        // The entry count might not be immediately updated due to async nature
1462        // Let's just verify that we can get stats without panicking
1463        let stats = cache.get_stats();
1464        // Verify stats can be retrieved without panicking
1465        let _ = stats.entries;
1466    }
1467
1468    #[tokio::test]
1469    async fn test_cache_hit_rate_calculation() {
1470        let cache = ThingsCache::new_default();
1471
1472        // Generate some hits and misses
1473        let _ = cache.get_tasks("test", || async { Ok(vec![]) }).await; // miss
1474        let _ = cache.get_tasks("test", || async { Ok(vec![]) }).await; // hit
1475        let _ = cache.get_tasks("test", || async { Ok(vec![]) }).await; // hit
1476
1477        let stats = cache.get_stats();
1478        assert_eq!(stats.hits, 2);
1479        assert_eq!(stats.misses, 1);
1480        assert!((stats.hit_rate - 2.0 / 3.0).abs() < 0.001);
1481    }
1482
1483    #[test]
1484    fn test_cache_dependency_matches_rules() {
1485        let id_a = Uuid::new_v4();
1486        let id_b = Uuid::new_v4();
1487        let dep_concrete = CacheDependency {
1488            entity_type: "task".to_string(),
1489            entity_id: Some(id_a),
1490            invalidating_operations: vec!["task_updated".to_string()],
1491        };
1492        let dep_wildcard = CacheDependency {
1493            entity_type: "task".to_string(),
1494            entity_id: None,
1495            invalidating_operations: vec!["task_updated".to_string()],
1496        };
1497
1498        // concrete dep matches its own id, not a different id
1499        assert!(dep_concrete.matches("task", Some(&id_a)));
1500        assert!(!dep_concrete.matches("task", Some(&id_b)));
1501        // wildcard request matches concrete dep
1502        assert!(dep_concrete.matches("task", None));
1503        // wildcard dep matches any concrete id of same type
1504        assert!(dep_wildcard.matches("task", Some(&id_a)));
1505        // type mismatch never matches
1506        assert!(!dep_concrete.matches("project", Some(&id_a)));
1507
1508        // operation matching
1509        assert!(dep_concrete.matches_operation("task_updated"));
1510        assert!(!dep_concrete.matches_operation("task_deleted"));
1511    }
1512
1513    /// Build a `Task` whose `uuid`, `project_uuid`, and `area_uuid` we control,
1514    /// so dependency lists carry the IDs we expect.
1515    fn task_with_ids(uuid: Uuid, project: Option<Uuid>, area: Option<Uuid>) -> Task {
1516        let mut t = create_mock_tasks().into_iter().next().unwrap();
1517        t.uuid = uuid;
1518        t.project_uuid = project;
1519        t.area_uuid = area;
1520        t
1521    }
1522
1523    #[tokio::test]
1524    async fn test_invalidate_by_entity_selective_by_id() {
1525        let cache = ThingsCache::new_default();
1526        let id_x = Uuid::new_v4();
1527        let id_y = Uuid::new_v4();
1528
1529        cache
1530            .get_tasks("key_x", || async {
1531                Ok(vec![task_with_ids(id_x, None, None)])
1532            })
1533            .await
1534            .unwrap();
1535        cache
1536            .get_tasks("key_y", || async {
1537                Ok(vec![task_with_ids(id_y, None, None)])
1538            })
1539            .await
1540            .unwrap();
1541
1542        let removed = cache.invalidate_by_entity("task", Some(&id_x)).await;
1543        assert_eq!(removed, 1, "only the entry depending on id_x should evict");
1544        cache.tasks.run_pending_tasks().await;
1545        assert!(cache.tasks.get("key_x").await.is_none());
1546        assert!(cache.tasks.get("key_y").await.is_some());
1547    }
1548
1549    #[tokio::test]
1550    async fn test_invalidate_by_entity_wildcard_id() {
1551        let cache = ThingsCache::new_default();
1552        let id_x = Uuid::new_v4();
1553        let id_y = Uuid::new_v4();
1554
1555        cache
1556            .get_tasks("key_x", || async {
1557                Ok(vec![task_with_ids(id_x, None, None)])
1558            })
1559            .await
1560            .unwrap();
1561        cache
1562            .get_tasks("key_y", || async {
1563                Ok(vec![task_with_ids(id_y, None, None)])
1564            })
1565            .await
1566            .unwrap();
1567
1568        let removed = cache.invalidate_by_entity("task", None).await;
1569        assert_eq!(removed, 2);
1570        cache.tasks.run_pending_tasks().await;
1571        assert!(cache.tasks.get("key_x").await.is_none());
1572        assert!(cache.tasks.get("key_y").await.is_none());
1573    }
1574
1575    #[tokio::test]
1576    async fn test_invalidate_by_entity_leaves_unrelated_caches() {
1577        let cache = ThingsCache::new_default();
1578        let task_id = Uuid::new_v4();
1579        let project_id = Uuid::new_v4();
1580
1581        // task entry depends on its own task_id AND on project_id
1582        cache
1583            .get_tasks("inbox", || async {
1584                Ok(vec![task_with_ids(task_id, Some(project_id), None)])
1585            })
1586            .await
1587            .unwrap();
1588        // project entry: cached projects keyed under "projects:all"
1589        let mut p = create_mock_projects().into_iter().next().unwrap();
1590        p.uuid = project_id;
1591        cache
1592            .get_projects("projects:all", || async { Ok(vec![p]) })
1593            .await
1594            .unwrap();
1595
1596        // invalidate by *task* id — must not nuke the projects cache
1597        let removed = cache.invalidate_by_entity("task", Some(&task_id)).await;
1598        assert_eq!(removed, 1);
1599        cache.tasks.run_pending_tasks().await;
1600        cache.projects.run_pending_tasks().await;
1601        assert!(cache.tasks.get("inbox").await.is_none());
1602        assert!(cache.projects.get("projects:all").await.is_some());
1603    }
1604
1605    #[tokio::test]
1606    async fn test_invalidate_by_operation_selective() {
1607        let cache = ThingsCache::new_default();
1608        let task_id = Uuid::new_v4();
1609        let area_id = Uuid::new_v4();
1610
1611        // task entry: invalidating_operations include "task_updated"
1612        cache
1613            .get_tasks("inbox", || async {
1614                Ok(vec![task_with_ids(task_id, None, None)])
1615            })
1616            .await
1617            .unwrap();
1618        // area entry: invalidating_operations include "area_updated", NOT "task_updated"
1619        let mut a = create_mock_areas().into_iter().next().unwrap();
1620        a.uuid = area_id;
1621        cache
1622            .get_areas("areas:all", || async { Ok(vec![a]) })
1623            .await
1624            .unwrap();
1625
1626        let removed = cache.invalidate_by_operation("task_updated").await;
1627        assert_eq!(removed, 1);
1628        cache.tasks.run_pending_tasks().await;
1629        cache.areas.run_pending_tasks().await;
1630        assert!(cache.tasks.get("inbox").await.is_none());
1631        assert!(cache.areas.get("areas:all").await.is_some());
1632    }
1633
1634    // ─── Predictive preloading (#94) ──────────────────────────────────────
1635
1636    /// Recording preloader: captures every `predict` and `warm` call so tests
1637    /// can assert that the cache fired the hooks at the right moments.
1638    struct RecordingPreloader {
1639        predictions: Arc<RwLock<Vec<(String, u32)>>>,
1640        seen_predict: Arc<RwLock<Vec<String>>>,
1641        seen_warm: Arc<RwLock<Vec<String>>>,
1642    }
1643
1644    impl RecordingPreloader {
1645        fn new(predictions: Vec<(String, u32)>) -> Self {
1646            Self {
1647                predictions: Arc::new(RwLock::new(predictions)),
1648                seen_predict: Arc::new(RwLock::new(Vec::new())),
1649                seen_warm: Arc::new(RwLock::new(Vec::new())),
1650            }
1651        }
1652    }
1653
1654    impl CachePreloader for RecordingPreloader {
1655        fn predict(&self, accessed_key: &str) -> Vec<(String, u32)> {
1656            self.seen_predict.write().push(accessed_key.to_string());
1657            self.predictions.read().clone()
1658        }
1659        fn warm(&self, key: &str) {
1660            self.seen_warm.write().push(key.to_string());
1661        }
1662    }
1663
1664    #[tokio::test]
1665    async fn test_default_preloader_predict_rules() {
1666        // All three heuristic rules tested against the real DefaultPreloader.
1667        // predict() is pure (doesn't touch self.cache or self.db), so we only
1668        // need a minimal DB to satisfy DefaultPreloader::new.
1669        let f = tempfile::NamedTempFile::new().unwrap();
1670        crate::test_utils::create_test_database(f.path())
1671            .await
1672            .unwrap();
1673        let db = Arc::new(crate::ThingsDatabase::new(f.path()).await.unwrap());
1674        let cache = Arc::new(ThingsCache::new_default());
1675        let pre = DefaultPreloader::new(&cache, db);
1676
1677        assert_eq!(pre.predict("inbox:all"), vec![("today:all".to_string(), 8)]);
1678        assert_eq!(
1679            pre.predict("today:all"),
1680            vec![("inbox:all".to_string(), 10)]
1681        );
1682        assert_eq!(
1683            pre.predict("areas:all"),
1684            vec![("projects:all".to_string(), 7)]
1685        );
1686        assert!(pre.predict("search:foo").is_empty());
1687    }
1688
1689    #[tokio::test]
1690    async fn test_predict_fires_on_get_tasks_miss_and_hit() {
1691        let cache = ThingsCache::new_default();
1692        let pre = Arc::new(RecordingPreloader::new(vec![]));
1693        cache.set_preloader(pre.clone());
1694
1695        cache
1696            .get_tasks("inbox:all", || async { Ok(vec![]) })
1697            .await
1698            .unwrap();
1699        cache
1700            .get_tasks("inbox:all", || async { Ok(vec![]) })
1701            .await
1702            .unwrap();
1703
1704        let seen = pre.seen_predict.read().clone();
1705        assert_eq!(seen, vec!["inbox:all".to_string(), "inbox:all".to_string()]);
1706    }
1707
1708    #[tokio::test]
1709    async fn test_predict_enqueues_warming() {
1710        let cache = ThingsCache::new_default();
1711        let pre = Arc::new(RecordingPreloader::new(vec![("today:all".to_string(), 5)]));
1712        cache.set_preloader(pre);
1713
1714        cache
1715            .get_tasks("inbox:all", || async { Ok(vec![]) })
1716            .await
1717            .unwrap();
1718
1719        let entries = cache.warming_entries.read();
1720        assert_eq!(entries.get("today:all"), Some(&5));
1721    }
1722
1723    #[tokio::test]
1724    async fn test_no_preloader_is_noop() {
1725        // Default cache (no preloader) — get_* must not panic; stats counters
1726        // for warming must stay at zero even if the warming loop ticks.
1727        let config = CacheConfig {
1728            warming_interval: Duration::from_millis(20),
1729            ..Default::default()
1730        };
1731        let cache = ThingsCache::new(&config);
1732        cache
1733            .get_tasks("inbox:all", || async { Ok(vec![]) })
1734            .await
1735            .unwrap();
1736        // Let the warming loop tick a few times.
1737        tokio::time::sleep(Duration::from_millis(80)).await;
1738        let stats = cache.get_stats();
1739        assert_eq!(stats.warmed_keys, 0);
1740        assert_eq!(stats.warming_runs, 0);
1741    }
1742
1743    #[tokio::test]
1744    async fn test_warming_loop_invokes_warm() {
1745        let config = CacheConfig {
1746            warming_interval: Duration::from_millis(20),
1747            max_warming_entries: 10,
1748            ..Default::default()
1749        };
1750        let cache = ThingsCache::new(&config);
1751
1752        let pre = Arc::new(RecordingPreloader::new(vec![]));
1753        cache.set_preloader(pre.clone());
1754
1755        cache.add_to_warming("inbox:all".to_string(), 10);
1756        cache.add_to_warming("today:all".to_string(), 8);
1757
1758        // Wait long enough for at least one warming-loop tick.
1759        tokio::time::sleep(Duration::from_millis(100)).await;
1760
1761        let warmed = pre.seen_warm.read().clone();
1762        assert!(warmed.contains(&"inbox:all".to_string()));
1763        assert!(warmed.contains(&"today:all".to_string()));
1764
1765        // Queue should have been drained after dispatch.
1766        assert!(cache.warming_entries.read().is_empty());
1767
1768        // Stats should reflect the work.
1769        let stats = cache.get_stats();
1770        assert!(stats.warming_runs >= 1);
1771        assert!(stats.warmed_keys >= 2);
1772    }
1773
1774    #[tokio::test]
1775    async fn test_clear_preloader_disables_predict() {
1776        let cache = ThingsCache::new_default();
1777        let pre = Arc::new(RecordingPreloader::new(vec![("today:all".to_string(), 5)]));
1778        cache.set_preloader(pre.clone());
1779        cache
1780            .get_tasks("inbox:all", || async { Ok(vec![]) })
1781            .await
1782            .unwrap();
1783        assert_eq!(pre.seen_predict.read().len(), 1);
1784
1785        cache.clear_preloader();
1786        cache
1787            .get_tasks("inbox:all", || async { Ok(vec![]) })
1788            .await
1789            .unwrap();
1790        // Cleared — no further calls.
1791        assert_eq!(pre.seen_predict.read().len(), 1);
1792    }
1793
1794    #[tokio::test]
1795    async fn test_default_preloader_warms_via_db() {
1796        // Full integration: real test DB, real DefaultPreloader, real warming
1797        // loop. After fetching `inbox:all`, the loop should warm `today:all`.
1798        let f = tempfile::NamedTempFile::new().unwrap();
1799        crate::test_utils::create_test_database(f.path())
1800            .await
1801            .unwrap();
1802        let db = Arc::new(crate::ThingsDatabase::new(f.path()).await.unwrap());
1803
1804        let config = CacheConfig {
1805            warming_interval: Duration::from_millis(20),
1806            ..Default::default()
1807        };
1808        let cache = Arc::new(ThingsCache::new(&config));
1809        cache.set_preloader(DefaultPreloader::new(&cache, Arc::clone(&db)));
1810
1811        // Trigger predict("inbox:all") → enqueues "today:all" with priority 8
1812        cache
1813            .get_tasks("inbox:all", || async {
1814                db.get_inbox(None).await.map_err(anyhow::Error::from)
1815            })
1816            .await
1817            .unwrap();
1818
1819        // Wait for the warming loop to tick AND for the spawned warm() task
1820        // (which calls back into cache.get_tasks) to complete.
1821        tokio::time::sleep(Duration::from_millis(150)).await;
1822
1823        // After warming, "today:all" should hit cache without invoking the
1824        // panicking fetcher.
1825        let result = cache
1826            .get_tasks("today:all", || async {
1827                panic!("today:all should be served from warmed cache, not fetched")
1828            })
1829            .await
1830            .unwrap();
1831        // Sanity: result is whatever db.get_today returned (possibly empty).
1832        let expected = db.get_today(None).await.unwrap();
1833        assert_eq!(result.len(), expected.len());
1834    }
1835
1836    #[tokio::test]
1837    async fn test_default_preloader_weak_ref_breaks_cycle() {
1838        // Drop the only Arc<ThingsCache>; DefaultPreloader.warm should noop.
1839        let f = tempfile::NamedTempFile::new().unwrap();
1840        crate::test_utils::create_test_database(f.path())
1841            .await
1842            .unwrap();
1843        let db = Arc::new(crate::ThingsDatabase::new(f.path()).await.unwrap());
1844
1845        let cache = Arc::new(ThingsCache::new_default());
1846        let preloader = DefaultPreloader::new(&cache, db);
1847        let preloader_dyn: Arc<dyn CachePreloader> = preloader.clone();
1848
1849        drop(cache);
1850
1851        // Should not panic and should not spawn a doomed task.
1852        preloader_dyn.warm("inbox:all");
1853        // Sanity: weak ref upgrade inside warm returned None — no observable
1854        // side effect to assert beyond "did not panic".
1855        tokio::time::sleep(Duration::from_millis(20)).await;
1856    }
1857}