prax_query/data_cache/
mod.rs

1//! High-performance data caching layer for Prax ORM.
2//!
3//! This module provides a flexible, multi-tier caching system for query results
4//! with support for:
5//!
6//! - **In-memory caching** using [moka](https://github.com/moka-rs/moka) for
7//!   high-performance concurrent access
8//! - **Redis caching** for distributed cache across multiple instances
9//! - **Tiered caching** combining L1 (memory) and L2 (Redis) for optimal performance
10//! - **Automatic invalidation** based on TTL, entity changes, or custom patterns
11//! - **Cache-aside pattern** with transparent integration into queries
12//!
13//! # Architecture
14//!
15//! ```text
16//! ┌─────────────────────────────────────────────────────────────────┐
17//! │                        Application                               │
18//! └─────────────────────────────────────────────────────────────────┘
19//!                                │
20//!                                ▼
21//! ┌─────────────────────────────────────────────────────────────────┐
22//! │                     Prax Query Builder                          │
23//! │                  .cache(CacheOptions::new())                    │
24//! └─────────────────────────────────────────────────────────────────┘
25//!                                │
26//!                                ▼
27//! ┌─────────────────────────────────────────────────────────────────┐
28//! │                      Cache Manager                               │
29//! │  ┌─────────────┐    ┌─────────────┐    ┌─────────────────────┐ │
30//! │  │ L1: Memory  │ -> │ L2: Redis   │ -> │   Database          │ │
31//! │  │ (< 1ms)     │    │ (1-5ms)     │    │   (10-100ms)        │ │
32//! │  └─────────────┘    └─────────────┘    └─────────────────────┘ │
33//! └─────────────────────────────────────────────────────────────────┘
34//! ```
35//!
36//! # Quick Start
37//!
38//! ```rust,ignore
39//! use prax_query::data_cache::{CacheManager, MemoryCache, RedisCache, TieredCache};
40//! use std::time::Duration;
41//!
42//! // In-memory only (single instance)
43//! let cache = MemoryCache::builder()
44//!     .max_capacity(10_000)
45//!     .time_to_live(Duration::from_secs(300))
46//!     .build();
47//!
48//! // Redis only (distributed)
49//! let redis = RedisCache::new("redis://localhost:6379").await?;
50//!
51//! // Tiered: Memory (L1) + Redis (L2)
52//! let tiered = TieredCache::new(cache, redis);
53//!
54//! // Use with queries
55//! let users = client
56//!     .user()
57//!     .find_many()
58//!     .cache(CacheOptions::ttl(Duration::from_secs(60)))
59//!     .exec()
60//!     .await?;
61//! ```
62//!
63//! # Cache Invalidation
64//!
65//! ```rust,ignore
66//! use prax_query::data_cache::{InvalidationStrategy, EntityTag};
67//!
68//! // Invalidate by entity type
69//! cache.invalidate_entity("User").await?;
70//!
71//! // Invalidate by specific record
72//! cache.invalidate_record("User", &user_id).await?;
73//!
74//! // Invalidate by pattern
75//! cache.invalidate_pattern("user:*:profile").await?;
76//!
77//! // Tag-based invalidation
78//! cache.invalidate_tags(&[EntityTag::new("User"), EntityTag::new("tenant:123")]).await?;
79//! ```
80//!
81//! # Performance Characteristics
82//!
83//! | Backend | Latency | Capacity | Distribution | Best For |
84//! |---------|---------|----------|--------------|----------|
85//! | Memory | < 1ms | Limited by RAM | Single instance | Hot data, sessions |
86//! | Redis | 1-5ms | Large | Multi-instance | Shared state, large datasets |
87//! | Tiered | < 1ms (L1 hit) | Both | Multi-instance | Production systems |
88
89mod backend;
90mod invalidation;
91mod key;
92mod memory;
93mod options;
94mod redis;
95mod stats;
96mod tiered;
97
98pub use backend::{CacheBackend, CacheEntry, CacheError, CacheResult};
99pub use invalidation::{EntityTag, InvalidationEvent, InvalidationStrategy};
100pub use key::{CacheKey, CacheKeyBuilder, KeyPattern};
101pub use memory::{MemoryCache, MemoryCacheBuilder, MemoryCacheConfig};
102pub use options::{CacheOptions, CachePolicy, WritePolicy};
103pub use redis::{RedisCache, RedisCacheConfig, RedisConnection};
104pub use stats::{CacheMetrics, CacheStats};
105pub use tiered::{TieredCache, TieredCacheConfig};
106
107use std::sync::Arc;
108
/// The main cache manager that coordinates caching operations.
///
/// This is the primary entry point for the caching system. It wraps any
/// `CacheBackend` implementation and provides a unified API.
///
/// Cloning is cheap: the backend and metrics are shared via `Arc`, so all
/// clones observe the same cache contents and the same statistics.
#[derive(Clone)]
pub struct CacheManager<B: CacheBackend> {
    // Storage backend (memory, Redis, or tiered); shared across clones.
    backend: Arc<B>,
    // Options applied when a call site passes `None` for its options.
    default_options: CacheOptions,
    // Hit/miss/write/error counters and latencies; shared across clones.
    metrics: Arc<CacheMetrics>,
}
119
120impl<B: CacheBackend> CacheManager<B> {
121    /// Create a new cache manager with the given backend.
122    pub fn new(backend: B) -> Self {
123        Self {
124            backend: Arc::new(backend),
125            default_options: CacheOptions::default(),
126            metrics: Arc::new(CacheMetrics::new()),
127        }
128    }
129
130    /// Create with custom default options.
131    pub fn with_options(backend: B, options: CacheOptions) -> Self {
132        Self {
133            backend: Arc::new(backend),
134            default_options: options,
135            metrics: Arc::new(CacheMetrics::new()),
136        }
137    }
138
139    /// Get the cache backend.
140    pub fn backend(&self) -> &B {
141        &self.backend
142    }
143
144    /// Get the metrics collector.
145    pub fn metrics(&self) -> &CacheMetrics {
146        &self.metrics
147    }
148
149    /// Get a value from the cache.
150    pub async fn get<T>(&self, key: &CacheKey) -> CacheResult<Option<T>>
151    where
152        T: serde::de::DeserializeOwned,
153    {
154        let start = std::time::Instant::now();
155        let result = self.backend.get(key).await;
156        let duration = start.elapsed();
157
158        match &result {
159            Ok(Some(_)) => self.metrics.record_hit(duration),
160            Ok(None) => self.metrics.record_miss(duration),
161            Err(_) => self.metrics.record_error(),
162        }
163
164        result
165    }
166
167    /// Set a value in the cache.
168    pub async fn set<T>(&self, key: &CacheKey, value: &T, options: Option<&CacheOptions>) -> CacheResult<()>
169    where
170        T: serde::Serialize + Sync,
171    {
172        let opts = options.unwrap_or(&self.default_options);
173        let start = std::time::Instant::now();
174        let result = self.backend.set(key, value, opts.ttl).await;
175        let duration = start.elapsed();
176
177        if result.is_ok() {
178            self.metrics.record_write(duration);
179        } else {
180            self.metrics.record_error();
181        }
182
183        result
184    }
185
186    /// Get or compute a value.
187    ///
188    /// If the value exists in cache, returns it. Otherwise, calls the
189    /// provided function to compute the value, caches it, and returns it.
190    pub async fn get_or_set<T, F, Fut>(
191        &self,
192        key: &CacheKey,
193        f: F,
194        options: Option<&CacheOptions>,
195    ) -> CacheResult<T>
196    where
197        T: serde::Serialize + serde::de::DeserializeOwned + Sync,
198        F: FnOnce() -> Fut,
199        Fut: std::future::Future<Output = CacheResult<T>>,
200    {
201        // Try to get from cache first
202        if let Some(value) = self.get::<T>(key).await? {
203            return Ok(value);
204        }
205
206        // Compute the value
207        let value = f().await?;
208
209        // Store in cache (ignore errors - cache is best-effort)
210        let _ = self.set(key, &value, options).await;
211
212        Ok(value)
213    }
214
215    /// Delete a value from the cache.
216    pub async fn delete(&self, key: &CacheKey) -> CacheResult<bool> {
217        self.backend.delete(key).await
218    }
219
220    /// Check if a key exists in the cache.
221    pub async fn exists(&self, key: &CacheKey) -> CacheResult<bool> {
222        self.backend.exists(key).await
223    }
224
225    /// Invalidate cache entries by pattern.
226    pub async fn invalidate_pattern(&self, pattern: &KeyPattern) -> CacheResult<u64> {
227        self.backend.invalidate_pattern(pattern).await
228    }
229
230    /// Invalidate all entries for an entity type.
231    pub async fn invalidate_entity(&self, entity: &str) -> CacheResult<u64> {
232        let pattern = KeyPattern::entity(entity);
233        self.invalidate_pattern(&pattern).await
234    }
235
236    /// Invalidate a specific record.
237    pub async fn invalidate_record<I: std::fmt::Display>(
238        &self,
239        entity: &str,
240        id: I,
241    ) -> CacheResult<u64> {
242        let pattern = KeyPattern::record(entity, id);
243        self.invalidate_pattern(&pattern).await
244    }
245
246    /// Invalidate entries by tags.
247    pub async fn invalidate_tags(&self, tags: &[EntityTag]) -> CacheResult<u64> {
248        self.backend.invalidate_tags(tags).await
249    }
250
251    /// Clear all entries from the cache.
252    pub async fn clear(&self) -> CacheResult<()> {
253        self.backend.clear().await
254    }
255
256    /// Get cache statistics.
257    pub fn stats(&self) -> CacheStats {
258        self.metrics.snapshot()
259    }
260}
261
/// Builder for creating cache managers with different configurations.
///
/// Configure shared settings first (e.g. [`CacheManagerBuilder::default_options`]),
/// then finish with one of the backend-specific build methods.
pub struct CacheManagerBuilder {
    // Default options handed to the constructed `CacheManager`.
    default_options: CacheOptions,
}
266
267impl Default for CacheManagerBuilder {
268    fn default() -> Self {
269        Self::new()
270    }
271}
272
273impl CacheManagerBuilder {
274    /// Create a new builder.
275    pub fn new() -> Self {
276        Self {
277            default_options: CacheOptions::default(),
278        }
279    }
280
281    /// Set default cache options.
282    pub fn default_options(mut self, options: CacheOptions) -> Self {
283        self.default_options = options;
284        self
285    }
286
287    /// Build a cache manager with an in-memory backend.
288    pub fn memory(self, config: MemoryCacheConfig) -> CacheManager<MemoryCache> {
289        let backend = MemoryCache::new(config);
290        CacheManager::with_options(backend, self.default_options)
291    }
292
293    /// Build a cache manager with a Redis backend.
294    pub async fn redis(self, config: RedisCacheConfig) -> CacheResult<CacheManager<RedisCache>> {
295        let backend = RedisCache::new(config).await?;
296        Ok(CacheManager::with_options(backend, self.default_options))
297    }
298
299    /// Build a cache manager with a tiered backend.
300    pub async fn tiered(
301        self,
302        memory_config: MemoryCacheConfig,
303        redis_config: RedisCacheConfig,
304    ) -> CacheResult<CacheManager<TieredCache<MemoryCache, RedisCache>>> {
305        let memory = MemoryCache::new(memory_config);
306        let redis = RedisCache::new(redis_config).await?;
307        let backend = TieredCache::new(memory, redis);
308        Ok(CacheManager::with_options(backend, self.default_options))
309    }
310}
311
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trips a value through set / get / delete on a memory-backed
    /// manager.
    #[tokio::test]
    async fn test_memory_cache_basic() {
        let cache = CacheManager::new(MemoryCache::new(MemoryCacheConfig::default()));

        let key = CacheKey::new("test", "key1");

        // Set a value
        cache.set(&key, &"hello world", None).await.unwrap();

        // Get it back
        let value: Option<String> = cache.get(&key).await.unwrap();
        assert_eq!(value, Some("hello world".to_string()));

        // Delete it
        cache.delete(&key).await.unwrap();

        // Should be gone
        let value: Option<String> = cache.get(&key).await.unwrap();
        assert!(value.is_none());
    }

    /// Verifies the cache-aside path: the compute closure runs exactly once
    /// on a miss and is skipped on the subsequent hit.
    #[tokio::test]
    async fn test_get_or_set() {
        let cache = CacheManager::new(MemoryCache::new(MemoryCacheConfig::default()));

        let key = CacheKey::new("test", "computed");
        let mut call_count = 0;

        // First call should compute
        let value: String = cache
            .get_or_set(
                &key,
                || {
                    call_count += 1;
                    async { Ok("computed value".to_string()) }
                },
                None,
            )
            .await
            .unwrap();

        assert_eq!(value, "computed value");
        assert_eq!(call_count, 1);

        // Second call should use cache
        let value: String = cache
            .get_or_set(
                &key,
                || {
                    call_count += 1;
                    async { Ok("should not be called".to_string()) }
                },
                None,
            )
            .await
            .unwrap();

        assert_eq!(value, "computed value");
        assert_eq!(call_count, 1); // Not incremented
    }
}
378
379