// prax_query/data_cache/mod.rs
1//! High-performance data caching layer for Prax ORM.
2//!
3//! This module provides a flexible, multi-tier caching system for query results
4//! with support for:
5//!
6//! - **In-memory caching** using [moka](https://github.com/moka-rs/moka) for
7//!   high-performance concurrent access
8//! - **Redis caching** for distributed cache across multiple instances
9//! - **Tiered caching** combining L1 (memory) and L2 (Redis) for optimal performance
10//! - **Automatic invalidation** based on TTL, entity changes, or custom patterns
11//! - **Cache-aside pattern** with transparent integration into queries
12//!
13//! # Architecture
14//!
15//! ```text
16//! ┌─────────────────────────────────────────────────────────────────┐
17//! │                        Application                               │
18//! └─────────────────────────────────────────────────────────────────┘
19//!                                │
20//!                                ▼
21//! ┌─────────────────────────────────────────────────────────────────┐
22//! │                     Prax Query Builder                          │
23//! │                  .cache(CacheOptions::new())                    │
24//! └─────────────────────────────────────────────────────────────────┘
25//!                                │
26//!                                ▼
27//! ┌─────────────────────────────────────────────────────────────────┐
28//! │                      Cache Manager                               │
29//! │  ┌─────────────┐    ┌─────────────┐    ┌─────────────────────┐ │
30//! │  │ L1: Memory  │ -> │ L2: Redis   │ -> │   Database          │ │
31//! │  │ (< 1ms)     │    │ (1-5ms)     │    │   (10-100ms)        │ │
32//! │  └─────────────┘    └─────────────┘    └─────────────────────┘ │
33//! └─────────────────────────────────────────────────────────────────┘
34//! ```
35//!
36//! # Quick Start
37//!
38//! ```rust,ignore
39//! use prax_query::data_cache::{CacheManager, MemoryCache, RedisCache, TieredCache};
40//! use std::time::Duration;
41//!
42//! // In-memory only (single instance)
43//! let cache = MemoryCache::builder()
44//!     .max_capacity(10_000)
45//!     .time_to_live(Duration::from_secs(300))
46//!     .build();
47//!
48//! // Redis only (distributed)
49//! let redis = RedisCache::new("redis://localhost:6379").await?;
50//!
51//! // Tiered: Memory (L1) + Redis (L2)
52//! let tiered = TieredCache::new(cache, redis);
53//!
54//! // Use with queries
55//! let users = client
56//!     .user()
57//!     .find_many()
58//!     .cache(CacheOptions::ttl(Duration::from_secs(60)))
59//!     .exec()
60//!     .await?;
61//! ```
62//!
63//! # Cache Invalidation
64//!
65//! ```rust,ignore
66//! use prax_query::data_cache::{InvalidationStrategy, EntityTag};
67//!
68//! // Invalidate by entity type
69//! cache.invalidate_entity("User").await?;
70//!
71//! // Invalidate by specific record
72//! cache.invalidate_record("User", &user_id).await?;
73//!
74//! // Invalidate by pattern
75//! cache.invalidate_pattern("user:*:profile").await?;
76//!
77//! // Tag-based invalidation
78//! cache.invalidate_tags(&[EntityTag::new("User"), EntityTag::new("tenant:123")]).await?;
79//! ```
80//!
81//! # Performance Characteristics
82//!
83//! | Backend | Latency | Capacity | Distribution | Best For |
84//! |---------|---------|----------|--------------|----------|
85//! | Memory | < 1ms | Limited by RAM | Single instance | Hot data, sessions |
86//! | Redis | 1-5ms | Large | Multi-instance | Shared state, large datasets |
87//! | Tiered | < 1ms (L1 hit) | Both | Multi-instance | Production systems |
88
89mod backend;
90mod invalidation;
91mod key;
92mod memory;
93mod options;
94mod redis;
95mod stats;
96mod tiered;
97
98pub use backend::{CacheBackend, CacheEntry, CacheError, CacheResult};
99pub use invalidation::{EntityTag, InvalidationEvent, InvalidationStrategy};
100pub use key::{CacheKey, CacheKeyBuilder, KeyPattern};
101pub use memory::{MemoryCache, MemoryCacheBuilder, MemoryCacheConfig};
102pub use options::{CacheOptions, CachePolicy, WritePolicy};
103pub use redis::{RedisCache, RedisCacheConfig, RedisConnection};
104pub use stats::{CacheMetrics, CacheStats};
105pub use tiered::{TieredCache, TieredCacheConfig};
106
107use std::sync::Arc;
108
109/// The main cache manager that coordinates caching operations.
110///
111/// This is the primary entry point for the caching system. It wraps any
112/// `CacheBackend` implementation and provides a unified API.
113#[derive(Clone)]
114pub struct CacheManager<B: CacheBackend> {
115    backend: Arc<B>,
116    default_options: CacheOptions,
117    metrics: Arc<CacheMetrics>,
118}
119
120impl<B: CacheBackend> CacheManager<B> {
121    /// Create a new cache manager with the given backend.
122    pub fn new(backend: B) -> Self {
123        Self {
124            backend: Arc::new(backend),
125            default_options: CacheOptions::default(),
126            metrics: Arc::new(CacheMetrics::new()),
127        }
128    }
129
130    /// Create with custom default options.
131    pub fn with_options(backend: B, options: CacheOptions) -> Self {
132        Self {
133            backend: Arc::new(backend),
134            default_options: options,
135            metrics: Arc::new(CacheMetrics::new()),
136        }
137    }
138
139    /// Get the cache backend.
140    pub fn backend(&self) -> &B {
141        &self.backend
142    }
143
144    /// Get the metrics collector.
145    pub fn metrics(&self) -> &CacheMetrics {
146        &self.metrics
147    }
148
149    /// Get a value from the cache.
150    pub async fn get<T>(&self, key: &CacheKey) -> CacheResult<Option<T>>
151    where
152        T: serde::de::DeserializeOwned,
153    {
154        let start = std::time::Instant::now();
155        let result = self.backend.get(key).await;
156        let duration = start.elapsed();
157
158        match &result {
159            Ok(Some(_)) => self.metrics.record_hit(duration),
160            Ok(None) => self.metrics.record_miss(duration),
161            Err(_) => self.metrics.record_error(),
162        }
163
164        result
165    }
166
167    /// Set a value in the cache.
168    pub async fn set<T>(
169        &self,
170        key: &CacheKey,
171        value: &T,
172        options: Option<&CacheOptions>,
173    ) -> CacheResult<()>
174    where
175        T: serde::Serialize + Sync,
176    {
177        let opts = options.unwrap_or(&self.default_options);
178        let start = std::time::Instant::now();
179        let result = self.backend.set(key, value, opts.ttl).await;
180        let duration = start.elapsed();
181
182        if result.is_ok() {
183            self.metrics.record_write(duration);
184        } else {
185            self.metrics.record_error();
186        }
187
188        result
189    }
190
191    /// Get or compute a value.
192    ///
193    /// If the value exists in cache, returns it. Otherwise, calls the
194    /// provided function to compute the value, caches it, and returns it.
195    pub async fn get_or_set<T, F, Fut>(
196        &self,
197        key: &CacheKey,
198        f: F,
199        options: Option<&CacheOptions>,
200    ) -> CacheResult<T>
201    where
202        T: serde::Serialize + serde::de::DeserializeOwned + Sync,
203        F: FnOnce() -> Fut,
204        Fut: std::future::Future<Output = CacheResult<T>>,
205    {
206        // Try to get from cache first
207        if let Some(value) = self.get::<T>(key).await? {
208            return Ok(value);
209        }
210
211        // Compute the value
212        let value = f().await?;
213
214        // Store in cache (ignore errors - cache is best-effort)
215        let _ = self.set(key, &value, options).await;
216
217        Ok(value)
218    }
219
220    /// Delete a value from the cache.
221    pub async fn delete(&self, key: &CacheKey) -> CacheResult<bool> {
222        self.backend.delete(key).await
223    }
224
225    /// Check if a key exists in the cache.
226    pub async fn exists(&self, key: &CacheKey) -> CacheResult<bool> {
227        self.backend.exists(key).await
228    }
229
230    /// Invalidate cache entries by pattern.
231    pub async fn invalidate_pattern(&self, pattern: &KeyPattern) -> CacheResult<u64> {
232        self.backend.invalidate_pattern(pattern).await
233    }
234
235    /// Invalidate all entries for an entity type.
236    pub async fn invalidate_entity(&self, entity: &str) -> CacheResult<u64> {
237        let pattern = KeyPattern::entity(entity);
238        self.invalidate_pattern(&pattern).await
239    }
240
241    /// Invalidate a specific record.
242    pub async fn invalidate_record<I: std::fmt::Display>(
243        &self,
244        entity: &str,
245        id: I,
246    ) -> CacheResult<u64> {
247        let pattern = KeyPattern::record(entity, id);
248        self.invalidate_pattern(&pattern).await
249    }
250
251    /// Invalidate entries by tags.
252    pub async fn invalidate_tags(&self, tags: &[EntityTag]) -> CacheResult<u64> {
253        self.backend.invalidate_tags(tags).await
254    }
255
256    /// Clear all entries from the cache.
257    pub async fn clear(&self) -> CacheResult<()> {
258        self.backend.clear().await
259    }
260
261    /// Get cache statistics.
262    pub fn stats(&self) -> CacheStats {
263        self.metrics.snapshot()
264    }
265}
266
/// Builder for creating cache managers with different configurations.
///
/// Collects shared settings (currently just the default [`CacheOptions`])
/// and then constructs a [`CacheManager`] over a memory, Redis, or tiered
/// backend via the corresponding finisher method.
pub struct CacheManagerBuilder {
    // Options handed to every manager this builder produces.
    default_options: CacheOptions,
}
271
272impl Default for CacheManagerBuilder {
273    fn default() -> Self {
274        Self::new()
275    }
276}
277
278impl CacheManagerBuilder {
279    /// Create a new builder.
280    pub fn new() -> Self {
281        Self {
282            default_options: CacheOptions::default(),
283        }
284    }
285
286    /// Set default cache options.
287    pub fn default_options(mut self, options: CacheOptions) -> Self {
288        self.default_options = options;
289        self
290    }
291
292    /// Build a cache manager with an in-memory backend.
293    pub fn memory(self, config: MemoryCacheConfig) -> CacheManager<MemoryCache> {
294        let backend = MemoryCache::new(config);
295        CacheManager::with_options(backend, self.default_options)
296    }
297
298    /// Build a cache manager with a Redis backend.
299    pub async fn redis(self, config: RedisCacheConfig) -> CacheResult<CacheManager<RedisCache>> {
300        let backend = RedisCache::new(config).await?;
301        Ok(CacheManager::with_options(backend, self.default_options))
302    }
303
304    /// Build a cache manager with a tiered backend.
305    pub async fn tiered(
306        self,
307        memory_config: MemoryCacheConfig,
308        redis_config: RedisCacheConfig,
309    ) -> CacheResult<CacheManager<TieredCache<MemoryCache, RedisCache>>> {
310        let memory = MemoryCache::new(memory_config);
311        let redis = RedisCache::new(redis_config).await?;
312        let backend = TieredCache::new(memory, redis);
313        Ok(CacheManager::with_options(backend, self.default_options))
314    }
315}
316
#[cfg(test)]
mod tests {
    // NOTE: the previous `use std::time::Duration;` was unused in this
    // module and produced a compiler warning; it has been removed.
    use super::*;

    /// End-to-end set / get / delete round-trip against the memory backend.
    #[tokio::test]
    async fn test_memory_cache_basic() {
        let cache = CacheManager::new(MemoryCache::new(MemoryCacheConfig::default()));

        let key = CacheKey::new("test", "key1");

        // Set a value
        cache.set(&key, &"hello world", None).await.unwrap();

        // Get it back
        let value: Option<String> = cache.get(&key).await.unwrap();
        assert_eq!(value, Some("hello world".to_string()));

        // Delete it
        cache.delete(&key).await.unwrap();

        // Should be gone
        let value: Option<String> = cache.get(&key).await.unwrap();
        assert!(value.is_none());
    }

    /// `get_or_set` must invoke the compute closure exactly once: on the
    /// first (miss) call, and never again while the entry is cached.
    #[tokio::test]
    async fn test_get_or_set() {
        let cache = CacheManager::new(MemoryCache::new(MemoryCacheConfig::default()));

        let key = CacheKey::new("test", "computed");
        let mut call_count = 0;

        // First call should compute
        let value: String = cache
            .get_or_set(
                &key,
                || {
                    call_count += 1;
                    async { Ok("computed value".to_string()) }
                },
                None,
            )
            .await
            .unwrap();

        assert_eq!(value, "computed value");
        assert_eq!(call_count, 1);

        // Second call should use cache
        let value: String = cache
            .get_or_set(
                &key,
                || {
                    call_count += 1;
                    async { Ok("should not be called".to_string()) }
                },
                None,
            )
            .await
            .unwrap();

        assert_eq!(value, "computed value");
        assert_eq!(call_count, 1); // Not incremented
    }
}