// fraiseql_core/cache/result.rs
1//! Query result caching with LRU eviction and TTL expiry.
2//!
3//! This module provides thread-safe in-memory caching for GraphQL query results
4//! with automatic least-recently-used (LRU) eviction and time-to-live (TTL) expiry.
5
use std::{
    num::NonZeroUsize,
    sync::{
        Arc, Mutex, MutexGuard,
        atomic::{AtomicU64, AtomicUsize, Ordering},
    },
    time::{SystemTime, UNIX_EPOCH},
};
14
15use lru::LruCache;
16use serde::{Deserialize, Serialize};
17
18use super::config::CacheConfig;
19use crate::{
20 db::types::JsonbValue,
21 error::{FraiseQLError, Result},
22};
23
/// Cached query result with metadata.
///
/// Stores the query result along with tracking information for
/// TTL expiry, view-based invalidation, and monitoring.
#[derive(Debug, Clone)]
pub struct CachedResult {
    /// The actual query result (JSONB array from database).
    ///
    /// Wrapped in `Arc` for cheap cloning on cache hits (zero-copy).
    pub result: Arc<Vec<JsonbValue>>,

    /// Which views/tables this query accesses.
    ///
    /// Format: `vec!["v_user", "v_post"]`
    ///
    /// Used for view-based invalidation when mutations modify these views.
    pub accessed_views: Vec<String>,

    /// When this entry was cached (Unix timestamp in seconds).
    ///
    /// Used for TTL expiry check on access.
    pub cached_at: u64,

    /// Number of cache hits for this entry.
    ///
    /// Used for monitoring and optimization. Incremented on each `get()`.
    ///
    /// NOTE: this counter is per-entry — re-inserting the same key via `put()`
    /// creates a fresh entry with `hit_count = 0`.
    pub hit_count: u64,
}
52
/// Thread-safe LRU cache for query results.
///
/// # Thread Safety
///
/// The LRU structure uses a single `Mutex` for correctness. Metrics counters
/// use `AtomicU64` / `AtomicUsize` so no second lock is acquired in the hot path.
/// Under high concurrency this eliminates the double-lock contention that caused
/// cache hits to be slower than cache misses.
///
/// # Memory Safety
///
/// - **Hard LRU limit**: Configured via `max_entries`, automatically evicts least-recently-used
///   entries when limit is reached
/// - **TTL expiry**: Entries older than `ttl_seconds` are considered expired and removed on next
///   access
/// - **Memory tracking**: Metrics include estimated memory usage
///
/// # Example
///
/// ```rust
/// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
/// use fraiseql_core::db::types::JsonbValue;
/// use serde_json::json;
///
/// let cache = QueryResultCache::new(CacheConfig::default());
///
/// // Cache a result
/// let result = vec![JsonbValue::new(json!({"id": 1, "name": "Alice"}))];
/// cache.put(
///     "cache_key_123".to_string(),
///     result.clone(),
///     vec!["v_user".to_string()]
/// ).unwrap();
///
/// // Retrieve from cache
/// if let Some(cached) = cache.get("cache_key_123").unwrap() {
///     println!("Cache hit! {} results", cached.len());
/// }
/// ```
pub struct QueryResultCache {
    /// LRU cache: key -> cached result.
    ///
    /// Automatically evicts least-recently-used entries above `max_entries`.
    cache: Arc<Mutex<LruCache<String, CachedResult>>>,

    /// Configuration (immutable after creation).
    config: CacheConfig,

    // Metrics counters — atomic so the hot `get()` path acquires only ONE lock
    // (the LRU), not two. `Relaxed` ordering is sufficient: these counters are
    // independent and used only for monitoring, not for correctness.
    hits: AtomicU64,
    misses: AtomicU64,
    total_cached: AtomicU64,
    invalidations: AtomicU64,
    size: AtomicUsize,
    memory_bytes: AtomicUsize,
}
111
/// Cache metrics for monitoring.
///
/// Exposed via API for observability and debugging.
///
/// Fields are populated from independent atomic counters, so a snapshot is
/// internally consistent only in a best-effort sense (fine for monitoring).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheMetrics {
    /// Number of cache hits (returned cached result).
    pub hits: u64,

    /// Number of cache misses (executed query).
    pub misses: u64,

    /// Total entries cached across all time.
    pub total_cached: u64,

    /// Number of invalidations triggered.
    pub invalidations: u64,

    /// Current size of cache (number of entries).
    pub size: usize,

    /// Estimated memory usage in bytes.
    ///
    /// This is a rough estimate based on cache key lengths and entry counts.
    /// Actual memory usage may vary based on result sizes.
    pub memory_bytes: usize,
}
138
139impl QueryResultCache {
140 /// Create new cache with configuration.
141 ///
142 /// # Panics
143 ///
144 /// Panics if `config.max_entries` is 0 (invalid configuration).
145 ///
146 /// # Example
147 ///
148 /// ```rust
149 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
150 ///
151 /// let cache = QueryResultCache::new(CacheConfig::default());
152 /// ```
153 #[must_use]
154 pub fn new(config: CacheConfig) -> Self {
155 let max = NonZeroUsize::new(config.max_entries).expect("max_entries must be > 0");
156
157 Self {
158 cache: Arc::new(Mutex::new(LruCache::new(max))),
159 config,
160 hits: AtomicU64::new(0),
161 misses: AtomicU64::new(0),
162 total_cached: AtomicU64::new(0),
163 invalidations: AtomicU64::new(0),
164 size: AtomicUsize::new(0),
165 memory_bytes: AtomicUsize::new(0),
166 }
167 }
168
169 /// Get cached result by key.
170 ///
171 /// Returns `None` if:
172 /// - Caching is disabled (`config.enabled = false`)
173 /// - Entry not in cache (cache miss)
174 /// - Entry expired (TTL exceeded)
175 ///
176 /// # Errors
177 ///
178 /// Returns error if cache mutex is poisoned (should never happen).
179 ///
180 /// # Example
181 ///
182 /// ```rust
183 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
184 ///
185 /// let cache = QueryResultCache::new(CacheConfig::default());
186 ///
187 /// if let Some(result) = cache.get("cache_key_abc123")? {
188 /// // Cache hit - use result
189 /// println!("Found {} results in cache", result.len());
190 /// } else {
191 /// // Cache miss - execute query
192 /// println!("Cache miss, executing query");
193 /// }
194 /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
195 /// ```
196 /// Returns whether caching is enabled.
197 ///
198 /// Used by `CachedDatabaseAdapter` to short-circuit the SHA-256 key generation
199 /// and result clone overhead when caching is disabled.
200 #[must_use]
201 pub fn is_enabled(&self) -> bool {
202 self.config.enabled
203 }
204
205 pub fn get(&self, cache_key: &str) -> Result<Option<Arc<Vec<JsonbValue>>>> {
206 if !self.config.enabled {
207 return Ok(None);
208 }
209
210 let mut cache = self.cache.lock().map_err(|e| FraiseQLError::Internal {
211 message: format!("Cache lock poisoned: {e}"),
212 source: None,
213 })?;
214
215 if let Some(cached) = cache.get_mut(cache_key) {
216 // Check TTL
217 let now = current_timestamp();
218 if now - cached.cached_at > self.config.ttl_seconds {
219 // Expired: remove and count as miss
220 cache.pop(cache_key);
221 let new_size = cache.len();
222 drop(cache); // Release LRU lock before atomic updates
223 self.size.store(new_size, Ordering::Relaxed);
224 self.misses.fetch_add(1, Ordering::Relaxed);
225 return Ok(None);
226 }
227
228 // Cache hit: clone the Arc (zero-copy) while still holding the LRU lock
229 cached.hit_count += 1;
230 let result = cached.result.clone();
231 drop(cache); // Release LRU lock before atomic update
232 self.hits.fetch_add(1, Ordering::Relaxed);
233 Ok(Some(result))
234 } else {
235 drop(cache); // Release LRU lock before atomic update
236 self.misses.fetch_add(1, Ordering::Relaxed);
237 Ok(None)
238 }
239 }
240
241 /// Store query result in cache.
242 ///
243 /// If caching is disabled, this is a no-op.
244 ///
245 /// # Arguments
246 ///
247 /// * `cache_key` - Cache key (from `generate_cache_key()`)
248 /// * `result` - Query result to cache
249 /// * `accessed_views` - List of views accessed by this query
250 ///
251 /// # Errors
252 ///
253 /// Returns error if cache mutex is poisoned.
254 ///
255 /// # Example
256 ///
257 /// ```rust
258 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
259 /// use fraiseql_core::db::types::JsonbValue;
260 /// use serde_json::json;
261 ///
262 /// let cache = QueryResultCache::new(CacheConfig::default());
263 ///
264 /// let result = vec![JsonbValue::new(json!({"id": 1}))];
265 /// cache.put(
266 /// "cache_key_abc123".to_string(),
267 /// result,
268 /// vec!["v_user".to_string()]
269 /// )?;
270 /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
271 /// ```
272 pub fn put(
273 &self,
274 cache_key: String,
275 result: Vec<JsonbValue>,
276 accessed_views: Vec<String>,
277 ) -> Result<()> {
278 if !self.config.enabled {
279 return Ok(());
280 }
281
282 let now = current_timestamp();
283 let memory_size = std::mem::size_of::<CachedResult>() + cache_key.len() * 2;
284
285 let cached = CachedResult {
286 result: Arc::new(result),
287 accessed_views,
288 cached_at: now,
289 hit_count: 0,
290 };
291
292 let mut cache = self.cache.lock().map_err(|e| FraiseQLError::Internal {
293 message: format!("Cache lock poisoned: {e}"),
294 source: None,
295 })?;
296 cache.put(cache_key, cached);
297 let new_size = cache.len();
298 drop(cache); // Release LRU lock before atomic updates
299
300 self.total_cached.fetch_add(1, Ordering::Relaxed);
301 self.size.store(new_size, Ordering::Relaxed);
302 self.memory_bytes.fetch_add(memory_size, Ordering::Relaxed);
303
304 Ok(())
305 }
306
307 /// Invalidate entries accessing specified views.
308 ///
309 /// Called after mutations to invalidate affected cache entries.
310 ///
311 /// # Arguments
312 ///
313 /// * `views` - List of view/table names modified by mutation
314 ///
315 /// # Returns
316 ///
317 /// Number of cache entries invalidated
318 ///
319 /// # Errors
320 ///
321 /// Returns error if cache mutex is poisoned.
322 ///
323 /// # Example
324 ///
325 /// ```rust
326 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
327 ///
328 /// let cache = QueryResultCache::new(CacheConfig::default());
329 ///
330 /// // After createUser mutation
331 /// let invalidated = cache.invalidate_views(&["v_user".to_string()])?;
332 /// println!("Invalidated {} cache entries", invalidated);
333 /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
334 /// ```
335 pub fn invalidate_views(&self, views: &[String]) -> Result<u64> {
336 let mut cache = self.cache.lock().map_err(|e| FraiseQLError::Internal {
337 message: format!("Cache lock poisoned: {e}"),
338 source: None,
339 })?;
340
341 // Collect keys to remove (can't modify during iteration)
342 let keys_to_remove: Vec<String> = cache
343 .iter()
344 .filter(|(_, cached)| cached.accessed_views.iter().any(|v| views.contains(v)))
345 .map(|(k, _)| k.clone())
346 .collect();
347
348 for key in &keys_to_remove {
349 cache.pop(key);
350 }
351
352 let new_size = cache.len();
353 let invalidated_count = keys_to_remove.len() as u64;
354 drop(cache); // Release LRU lock before atomic updates
355
356 self.invalidations.fetch_add(invalidated_count, Ordering::Relaxed);
357 self.size.store(new_size, Ordering::Relaxed);
358
359 Ok(invalidated_count)
360 }
361
362 /// Get cache metrics snapshot.
363 ///
364 /// Returns a consistent snapshot of current counters. Individual fields may
365 /// be updated independently (atomics), so the snapshot is not a single
366 /// atomic transaction, but is accurate enough for monitoring.
367 ///
368 /// # Errors
369 ///
370 /// Always returns `Ok`. The `Result` return type is kept for API compatibility.
371 ///
372 /// # Example
373 ///
374 /// ```rust
375 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
376 ///
377 /// let cache = QueryResultCache::new(CacheConfig::default());
378 /// let metrics = cache.metrics()?;
379 ///
380 /// println!("Hit rate: {:.1}%", metrics.hit_rate() * 100.0);
381 /// println!("Size: {} / {} entries", metrics.size, 10_000);
382 /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
383 /// ```
384 pub fn metrics(&self) -> Result<CacheMetrics> {
385 Ok(CacheMetrics {
386 hits: self.hits.load(Ordering::Relaxed),
387 misses: self.misses.load(Ordering::Relaxed),
388 total_cached: self.total_cached.load(Ordering::Relaxed),
389 invalidations: self.invalidations.load(Ordering::Relaxed),
390 size: self.size.load(Ordering::Relaxed),
391 memory_bytes: self.memory_bytes.load(Ordering::Relaxed),
392 })
393 }
394
395 /// Clear all cache entries.
396 ///
397 /// Used for testing and manual cache flush.
398 ///
399 /// # Errors
400 ///
401 /// Returns error if cache mutex is poisoned.
402 ///
403 /// # Example
404 ///
405 /// ```rust
406 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
407 ///
408 /// let cache = QueryResultCache::new(CacheConfig::default());
409 /// cache.clear()?;
410 /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
411 /// ```
412 pub fn clear(&self) -> Result<()> {
413 self.cache
414 .lock()
415 .map_err(|e| FraiseQLError::Internal {
416 message: format!("Cache lock poisoned: {e}"),
417 source: None,
418 })?
419 .clear();
420
421 self.size.store(0, Ordering::Relaxed);
422 self.memory_bytes.store(0, Ordering::Relaxed);
423
424 Ok(())
425 }
426}
427
428impl CacheMetrics {
429 /// Calculate cache hit rate.
430 ///
431 /// Returns ratio of hits to total requests (0.0 to 1.0).
432 ///
433 /// # Returns
434 ///
435 /// - `1.0` if all requests were hits
436 /// - `0.0` if all requests were misses
437 /// - `0.0` if no requests yet
438 ///
439 /// # Example
440 ///
441 /// ```rust
442 /// use fraiseql_core::cache::CacheMetrics;
443 ///
444 /// let metrics = CacheMetrics {
445 /// hits: 80,
446 /// misses: 20,
447 /// total_cached: 100,
448 /// invalidations: 5,
449 /// size: 95,
450 /// memory_bytes: 1_000_000,
451 /// };
452 ///
453 /// assert_eq!(metrics.hit_rate(), 0.8); // 80% hit rate
454 /// ```
455 #[must_use]
456 pub fn hit_rate(&self) -> f64 {
457 let total = self.hits + self.misses;
458 if total == 0 {
459 return 0.0;
460 }
461 self.hits as f64 / total as f64
462 }
463
464 /// Check if cache is performing well.
465 ///
466 /// Returns `true` if hit rate is above 60% (reasonable threshold).
467 ///
468 /// # Example
469 ///
470 /// ```rust
471 /// use fraiseql_core::cache::CacheMetrics;
472 ///
473 /// let good_metrics = CacheMetrics {
474 /// hits: 80,
475 /// misses: 20,
476 /// total_cached: 100,
477 /// invalidations: 5,
478 /// size: 95,
479 /// memory_bytes: 1_000_000,
480 /// };
481 ///
482 /// assert!(good_metrics.is_healthy()); // 80% > 60%
483 /// ```
484 #[must_use]
485 pub fn is_healthy(&self) -> bool {
486 self.hit_rate() > 0.6
487 }
488}
489
/// Get current Unix timestamp in seconds.
///
/// If the system clock reports a time before the Unix epoch, falls back to 0
/// rather than panicking.
fn current_timestamp() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
494
#[cfg(test)]
mod tests {
    use serde_json::json;

    use super::*;

    // Helper to create test result: a single-row JSONB payload.
    fn test_result() -> Vec<JsonbValue> {
        vec![JsonbValue::new(json!({"id": 1, "name": "test"}))]
    }

    // ========================================================================
    // Cache Hit/Miss Tests
    // ========================================================================

    #[test]
    fn test_cache_miss() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        let result = cache.get("nonexistent_key").unwrap();
        assert!(result.is_none(), "Should be cache miss");

        // A miss increments `misses` only; `hits` stays untouched.
        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.misses, 1);
        assert_eq!(metrics.hits, 0);
    }

    #[test]
    fn test_cache_put_and_get() {
        let cache = QueryResultCache::new(CacheConfig::enabled());
        let result = test_result();

        // Put
        cache
            .put("key1".to_string(), result.clone(), vec!["v_user".to_string()])
            .unwrap();

        // Get
        let cached = cache.get("key1").unwrap();
        assert!(cached.is_some(), "Should be cache hit");
        assert_eq!(cached.unwrap().len(), 1);

        // Exactly one hit, zero misses, one lifetime insertion.
        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.hits, 1);
        assert_eq!(metrics.misses, 0);
        assert_eq!(metrics.total_cached, 1);
    }

    #[test]
    fn test_cache_hit_updates_hit_count() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();

        // First hit
        cache.get("key1").unwrap();
        // Second hit
        cache.get("key1").unwrap();

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.hits, 2);
    }

    // ========================================================================
    // TTL Expiry Tests
    // ========================================================================

    #[test]
    fn test_ttl_expiry() {
        let config = CacheConfig {
            ttl_seconds: 1, // 1 second TTL
            enabled: true,
            ..Default::default()
        };

        let cache = QueryResultCache::new(config);

        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();

        // Wait for expiry.
        // NOTE(review): a real 2s sleep makes this the slowest test in the
        // module; an injectable clock would let it run instantly.
        std::thread::sleep(std::time::Duration::from_secs(2));

        // Should be expired
        let result = cache.get("key1").unwrap();
        assert!(result.is_none(), "Entry should be expired");

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.misses, 1); // Expired counts as miss
    }

    #[test]
    fn test_ttl_not_expired() {
        let config = CacheConfig {
            ttl_seconds: 3600, // 1 hour TTL
            enabled: true,
            ..Default::default()
        };

        let cache = QueryResultCache::new(config);

        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();

        // Should still be valid
        let result = cache.get("key1").unwrap();
        assert!(result.is_some(), "Entry should not be expired");
    }

    // ========================================================================
    // LRU Eviction Tests
    // ========================================================================

    #[test]
    fn test_lru_eviction() {
        let config = CacheConfig {
            max_entries: 2, // Only 2 entries
            enabled: true,
            ..Default::default()
        };

        let cache = QueryResultCache::new(config);

        // Add 3 entries (max is 2)
        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();
        cache
            .put("key2".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();
        cache
            .put("key3".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();

        // key1 should be evicted (LRU)
        assert!(cache.get("key1").unwrap().is_none(), "Oldest entry should be evicted");
        assert!(cache.get("key2").unwrap().is_some());
        assert!(cache.get("key3").unwrap().is_some());

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.size, 2, "Cache size should be at max");
    }

    #[test]
    fn test_lru_updates_on_access() {
        let config = CacheConfig {
            max_entries: 2,
            enabled: true,
            ..Default::default()
        };

        let cache = QueryResultCache::new(config);

        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();
        cache
            .put("key2".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();

        // Access key1 (makes it recently used)
        cache.get("key1").unwrap();

        // Add key3 (should evict key2, not key1)
        cache
            .put("key3".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();

        assert!(cache.get("key1").unwrap().is_some(), "key1 should remain (recently used)");
        assert!(cache.get("key2").unwrap().is_none(), "key2 should be evicted (LRU)");
        assert!(cache.get("key3").unwrap().is_some());
    }

    // ========================================================================
    // Cache Disabled Tests
    // ========================================================================

    #[test]
    fn test_cache_disabled() {
        let config = CacheConfig::disabled();
        let cache = QueryResultCache::new(config);

        // Put should be no-op
        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();

        // Get should return None
        assert!(cache.get("key1").unwrap().is_none(), "Cache disabled should always miss");

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.total_cached, 0);
    }

    // ========================================================================
    // Invalidation Tests
    // ========================================================================

    #[test]
    fn test_invalidate_single_view() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();
        cache
            .put("key2".to_string(), test_result(), vec!["v_post".to_string()])
            .unwrap();

        // Invalidate v_user
        let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
        assert_eq!(invalidated, 1);

        // v_user entry gone, v_post remains
        assert!(cache.get("key1").unwrap().is_none());
        assert!(cache.get("key2").unwrap().is_some());
    }

    #[test]
    fn test_invalidate_multiple_views() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();
        cache
            .put("key2".to_string(), test_result(), vec!["v_post".to_string()])
            .unwrap();
        cache
            .put("key3".to_string(), test_result(), vec!["v_product".to_string()])
            .unwrap();

        // Invalidate v_user and v_post
        let invalidated =
            cache.invalidate_views(&["v_user".to_string(), "v_post".to_string()]).unwrap();
        assert_eq!(invalidated, 2);

        assert!(cache.get("key1").unwrap().is_none());
        assert!(cache.get("key2").unwrap().is_none());
        assert!(cache.get("key3").unwrap().is_some());
    }

    #[test]
    fn test_invalidate_entry_with_multiple_views() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        // Entry accesses both v_user and v_post
        cache
            .put(
                "key1".to_string(),
                test_result(),
                vec!["v_user".to_string(), "v_post".to_string()],
            )
            .unwrap();

        // Invalidating either view should remove the entry
        let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
        assert_eq!(invalidated, 1);

        assert!(cache.get("key1").unwrap().is_none());
    }

    #[test]
    fn test_invalidate_nonexistent_view() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();

        // Invalidate view that doesn't exist
        let invalidated = cache.invalidate_views(&["v_nonexistent".to_string()]).unwrap();
        assert_eq!(invalidated, 0);

        // Entry should remain
        assert!(cache.get("key1").unwrap().is_some());
    }

    // ========================================================================
    // Clear Tests
    // ========================================================================

    #[test]
    fn test_clear() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();
        cache
            .put("key2".to_string(), test_result(), vec!["v_post".to_string()])
            .unwrap();

        cache.clear().unwrap();

        assert!(cache.get("key1").unwrap().is_none());
        assert!(cache.get("key2").unwrap().is_none());

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.size, 0);
    }

    // ========================================================================
    // Metrics Tests
    // ========================================================================

    #[test]
    fn test_metrics_tracking() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        // Miss
        cache.get("NotThere").unwrap();

        // Put
        cache
            .put("key1".to_string(), test_result(), vec!["v_user".to_string()])
            .unwrap();

        // Hit
        cache.get("key1").unwrap();

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.hits, 1);
        assert_eq!(metrics.misses, 1);
        assert_eq!(metrics.size, 1);
        assert_eq!(metrics.total_cached, 1);
    }

    #[test]
    fn test_metrics_hit_rate() {
        let metrics = CacheMetrics {
            hits: 80,
            misses: 20,
            total_cached: 100,
            invalidations: 5,
            size: 95,
            memory_bytes: 1_000_000,
        };

        // 80 hits out of 100 requests = 0.8 exactly (representable in f64).
        assert!((metrics.hit_rate() - 0.8).abs() < f64::EPSILON);
        assert!(metrics.is_healthy());
    }

    #[test]
    fn test_metrics_hit_rate_zero_requests() {
        let metrics = CacheMetrics {
            hits: 0,
            misses: 0,
            total_cached: 0,
            invalidations: 0,
            size: 0,
            memory_bytes: 0,
        };

        // Zero requests: rate is defined as 0.0, and an idle cache is unhealthy.
        assert!((metrics.hit_rate() - 0.0).abs() < f64::EPSILON);
        assert!(!metrics.is_healthy());
    }

    #[test]
    fn test_metrics_is_healthy() {
        let good = CacheMetrics {
            hits: 70,
            misses: 30,
            total_cached: 100,
            invalidations: 5,
            size: 95,
            memory_bytes: 1_000_000,
        };
        assert!(good.is_healthy()); // 70% > 60%

        let bad = CacheMetrics {
            hits: 50,
            misses: 50,
            total_cached: 100,
            invalidations: 5,
            size: 95,
            memory_bytes: 1_000_000,
        };
        assert!(!bad.is_healthy()); // 50% < 60%
    }

    // ========================================================================
    // Thread Safety Tests
    // ========================================================================

    #[test]
    fn test_concurrent_access() {
        use std::{sync::Arc, thread};

        let cache = Arc::new(QueryResultCache::new(CacheConfig::enabled()));

        // Spawn multiple threads accessing cache; each thread writes and then
        // reads its own distinct key, so every get() is a hit.
        let handles: Vec<_> = (0..10)
            .map(|i| {
                let cache_clone = cache.clone();
                thread::spawn(move || {
                    let key = format!("key{}", i);
                    cache_clone
                        .put(key.clone(), test_result(), vec!["v_user".to_string()])
                        .unwrap();
                    cache_clone.get(&key).unwrap();
                })
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.total_cached, 10);
        assert_eq!(metrics.hits, 10);
    }
}