//! things3_core/cache/stats.rs — cached-entry wrapper with dependency
//! tracking, cache hit/miss statistics, and the predictive-preloading hook
//! used by `ThingsCache`.
1use crate::models::ThingsId;
2use chrono::{DateTime, Utc};
3use serde::{Deserialize, Serialize};
4use std::time::Duration;
5
6use super::config::CacheDependency;
7
/// Enhanced cached data wrapper with dependency tracking.
///
/// Wraps a cached value `T` together with the bookkeeping the cache needs
/// for TTL expiry, TTI (idle) expiry, dependency-based invalidation, and
/// priority-based cache warming.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachedData<T> {
    /// The cached payload itself.
    pub data: T,
    /// When this entry was created (set once in the constructors).
    pub cached_at: DateTime<Utc>,
    /// Absolute expiry instant; `is_expired` compares `Utc::now()` against it.
    pub expires_at: DateTime<Utc>,
    /// Dependencies for intelligent invalidation (matched by entity type/id).
    pub dependencies: Vec<CacheDependency>,
    /// Access count for cache warming (incremented by `record_access`).
    pub access_count: u64,
    /// Last access time for TTI calculation (refreshed by `record_access`).
    pub last_accessed: DateTime<Utc>,
    /// Cache warming priority (higher = more likely to be warmed).
    pub warming_priority: u32,
}
23
24impl<T> CachedData<T> {
25    pub fn new(data: T, ttl: Duration) -> Self {
26        let now = Utc::now();
27        Self {
28            data,
29            cached_at: now,
30            expires_at: now + chrono::Duration::from_std(ttl).unwrap_or_default(),
31            dependencies: Vec::new(),
32            access_count: 0,
33            last_accessed: now,
34            warming_priority: 0,
35        }
36    }
37
38    pub fn new_with_dependencies(
39        data: T,
40        ttl: Duration,
41        dependencies: Vec<CacheDependency>,
42    ) -> Self {
43        let now = Utc::now();
44        Self {
45            data,
46            cached_at: now,
47            expires_at: now + chrono::Duration::from_std(ttl).unwrap_or_default(),
48            dependencies,
49            access_count: 0,
50            last_accessed: now,
51            warming_priority: 0,
52        }
53    }
54
55    pub fn is_expired(&self) -> bool {
56        Utc::now() > self.expires_at
57    }
58
59    pub fn is_idle(&self, tti: Duration) -> bool {
60        let now = Utc::now();
61        let idle_duration = now - self.last_accessed;
62        idle_duration > chrono::Duration::from_std(tti).unwrap_or_default()
63    }
64
65    pub fn record_access(&mut self) {
66        self.access_count += 1;
67        self.last_accessed = Utc::now();
68    }
69
70    pub fn update_warming_priority(&mut self, priority: u32) {
71        self.warming_priority = priority;
72    }
73
74    pub fn add_dependency(&mut self, dependency: CacheDependency) {
75        self.dependencies.push(dependency);
76    }
77
78    pub fn has_dependency(&self, entity_type: &str, entity_id: Option<&ThingsId>) -> bool {
79        self.dependencies
80            .iter()
81            .any(|dep| dep.matches(entity_type, entity_id))
82    }
83}
84
/// Cache statistics.
///
/// Plain counters plus a derived ratio. Note that `hit_rate` is NOT kept in
/// sync automatically — it only reflects reality after a call to
/// `calculate_hit_rate`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CacheStats {
    /// Number of cache lookups served from the cache.
    pub hits: u64,
    /// Number of cache lookups that missed and required a fetch.
    pub misses: u64,
    /// Current number of entries resident in the cache.
    pub entries: u64,
    /// `hits / (hits + misses)`; stale until `calculate_hit_rate` is called.
    pub hit_rate: f64,
    /// Total number of times the warming loop has called `preloader.warm(key)`.
    pub warmed_keys: u64,
    /// Total number of warming loop ticks that dispatched at least one key to the registered preloader.
    pub warming_runs: u64,
}
97
98impl CacheStats {
99    pub fn calculate_hit_rate(&mut self) {
100        let total = self.hits + self.misses;
101        self.hit_rate = if total > 0 {
102            #[allow(clippy::cast_precision_loss)]
103            {
104                self.hits as f64 / total as f64
105            }
106        } else {
107            0.0
108        };
109    }
110}
111
/// Hook for predictive cache preloading.
///
/// `ThingsCache` calls [`CachePreloader::predict`] after every `get_*` access
/// (hit or miss) to ask "given that key X was just accessed, what should we
/// queue for background warming?" The returned `(key, priority)` pairs are
/// pushed into the cache's priority queue via [`ThingsCache::add_to_warming`].
///
/// On each warming-loop tick, the cache picks the top-priority queued keys
/// and calls [`CachePreloader::warm`] for each. The implementor is expected
/// to fetch the data and populate the cache (typically by `tokio::spawn`ing
/// a task that calls back into `cache.get_*(key, fetcher)`). `warm` is
/// fire-and-forget — it returns no result, so errors must be handled
/// internally by the implementor.
///
/// The trait is synchronous to stay dyn-compatible without `async-trait`.
/// Implementors that need async work should spawn it inside `warm`.
pub trait CachePreloader: Send + Sync + 'static {
    /// Called after a cache access. Returns `(key, priority)` pairs to enqueue
    /// for background warming (higher priority = warmed sooner). Return
    /// `vec![]` to opt out for this access.
    fn predict(&self, accessed_key: &str) -> Vec<(String, u32)>;

    /// Called by the warming loop for each top-priority queued key.
    /// Implementor fetches and populates the cache, typically via `tokio::spawn`.
    fn warm(&self, key: &str);
}
135}