// fraiseql_core/cache/result.rs
1//! Query result caching with LRU eviction and TTL expiry.
2//!
3//! This module provides a 64-shard striped LRU cache for GraphQL query results.
4//! Each shard holds `capacity / NUM_SHARDS` entries behind its own
5//! [`parking_lot::Mutex`], eliminating the single-lock bottleneck under high
6//! concurrency.
7//!
8//! ## Performance characteristics
9//!
10//! - **`get()` hot path** (cache hit): one shard lock, O(1) LRU promotion, `Arc` clone (single
11//!   atomic increment), one atomic counter bump.
12//! - **`put()` path**: early-exit guards (disabled / list / size / TTL=0) before touching any lock.
13//!   Entity-ID index is built outside the lock. Shard lock held only for the `push()` call.
14//! - **`metrics()`**: lazily computes `size` by scanning all shards. Called rarely (monitoring),
15//!   never on the query hot path.
16//! - **Invalidation**: iterates all shards (acceptable — mutations are rare).
17
18use std::{
19    collections::{HashMap, HashSet},
20    num::NonZeroUsize,
21    sync::{
22        Arc,
23        atomic::{AtomicU64, AtomicUsize, Ordering},
24    },
25};
26
27use lru::LruCache;
28use parking_lot::Mutex;
29use serde::{Deserialize, Serialize};
30
31use super::config::CacheConfig;
32use crate::{
33    db::types::JsonbValue,
34    error::Result,
35    utils::clock::{Clock, SystemClock},
36};
37
38/// Cached query result with metadata.
39///
40/// Stores the query result along with tracking information for
41/// TTL expiry, view-based invalidation, and monitoring.
/// Cached query result with metadata.
///
/// Stores the query result along with tracking information for
/// TTL expiry, view-based invalidation, and monitoring.
#[derive(Debug, Clone)]
pub struct CachedResult {
    /// The actual query result (JSONB array from database).
    ///
    /// Wrapped in `Arc` for cheap cloning on cache hits (zero-copy:
    /// a hit clones the `Arc`, never the underlying rows).
    pub result: Arc<Vec<JsonbValue>>,

    /// Which views/tables this query accesses.
    ///
    /// Format: `vec!["v_user", "v_post"]`
    ///
    /// Stored as a boxed slice (no excess capacity) since views are fixed
    /// at `put()` time and never modified.
    ///
    /// Used for view-based invalidation when mutations modify these views.
    pub accessed_views: Box<[String]>,

    /// When this entry was cached (Unix timestamp in seconds).
    ///
    /// Used for TTL expiry check on access.
    pub cached_at: u64,

    /// Per-entry TTL in seconds.
    ///
    /// Overrides `CacheConfig::ttl_seconds` when set via `put(..., Some(ttl))`.
    /// Enables per-query cache lifetimes (e.g., reference data lives 1 h,
    /// live prices are never cached with `ttl = 0`).
    pub ttl_seconds: u64,

    /// Number of cache hits for this entry.
    ///
    /// Used for monitoring and optimization. Incremented on each `get()`.
    pub hit_count: u64,

    /// Entity UUID index for selective invalidation.
    ///
    /// Key: GraphQL entity type name (e.g. `"User"`).
    /// Value: set of UUID strings present in this result's rows.
    ///
    /// Built at `put()` time by scanning each row for an `"id"` field. Used by
    /// `invalidate_by_entity()` to evict only the entries that actually contain
    /// a specific entity, leaving unrelated entries warm.
    pub entity_ids: HashMap<String, HashSet<String>>,
}
86
/// Number of shards for the striped LRU cache.
///
/// 64 shards reduce mutex contention to ~1/64 under uniform key distribution.
/// (Caches configured with fewer than 64 entries fall back to a single shard;
/// see `QueryResultCache::new_with_clock`.)
const NUM_SHARDS: usize = 64;
91
/// Thread-safe 64-shard striped LRU cache for query results.
///
/// # Thread Safety
///
/// Each shard is an independent `parking_lot::Mutex<LruCache>` holding
/// `capacity / 64` entries. Concurrent requests that hash to different shards
/// never contend on the same lock. `parking_lot::Mutex` is used over
/// `std::sync::Mutex` for:
/// - **No poisoning**: a panic in one thread does not permanently break the cache
/// - **Smaller footprint**: 1 byte vs 40 bytes per mutex on Linux
/// - **Faster lock/unlock**: optimized futex-based implementation
///
/// Metrics counters use `AtomicU64` / `AtomicUsize` so no second lock is
/// acquired in the hot path.
///
/// # Memory Safety
///
/// - **Hard LRU limit**: Each shard evicts least-recently-used entries independently
/// - **TTL expiry**: Entries older than `ttl_seconds` are considered expired and removed on next
///   access
/// - **Memory tracking**: `memory_bytes` tracked via atomic add/sub; `size` computed lazily in
///   `metrics()`
///
/// # Example
///
/// ```rust
/// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
/// use fraiseql_core::db::types::JsonbValue;
/// use serde_json::json;
///
/// let cache = QueryResultCache::new(CacheConfig::default());
///
/// // Cache a result
/// let result = vec![JsonbValue::new(json!({"id": 1, "name": "Alice"}))];
/// cache.put(
///     12345_u64,
///     result.clone(),
///     vec!["v_user".to_string()],
///     None, // use global TTL
///     None, // no entity type index
/// ).unwrap();
///
/// // Retrieve from cache
/// if let Some(cached) = cache.get(12345).unwrap() {
///     println!("Cache hit! {} results", cached.len());
/// }
/// ```
pub struct QueryResultCache {
    /// Striped LRU shards: key is routed to `shards[key % len]`.
    shards: Box<[Mutex<LruCache<u64, CachedResult>>]>,

    /// Configuration (immutable after creation).
    config: CacheConfig,

    /// Clock for TTL expiry checks. Injectable for deterministic testing.
    clock: Arc<dyn Clock>,

    // Metrics counters — atomic so the hot `get()` path acquires only ONE shard
    // lock, not two. `Relaxed` ordering is sufficient: these counters are
    // independent and used only for monitoring, not for correctness.
    hits:          AtomicU64,
    misses:        AtomicU64,
    total_cached:  AtomicU64,
    invalidations: AtomicU64,
    memory_bytes:  AtomicUsize,
}
158
/// Cache metrics for monitoring.
///
/// Exposed via API for observability and debugging. Fields are snapshots of
/// independently-updated atomic counters, so the struct is internally
/// consistent only approximately (fine for monitoring).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheMetrics {
    /// Number of cache hits (returned cached result).
    pub hits: u64,

    /// Number of cache misses (executed query).
    pub misses: u64,

    /// Total entries cached across all time.
    pub total_cached: u64,

    /// Number of invalidations triggered.
    pub invalidations: u64,

    /// Current size of cache (number of entries).
    pub size: usize,

    /// Estimated memory usage in bytes.
    ///
    /// This is a rough estimate based on cache key lengths and entry counts.
    /// Actual memory usage may vary based on result sizes.
    pub memory_bytes: usize,
}
185
186/// Estimate the accounting overhead of one cache entry.
187///
188/// The LRU crate stores the key twice (once in the `HashMap`, once in the
189/// linked-list node). We add the `CachedResult` struct size.
190const fn entry_overhead() -> usize {
191    std::mem::size_of::<CachedResult>() + std::mem::size_of::<u64>() * 2
192}
193
194impl QueryResultCache {
195    /// Create new cache with configuration.
196    ///
197    /// # Panics
198    ///
199    /// Panics if `config.max_entries` is 0 (invalid configuration).
200    ///
201    /// # Example
202    ///
203    /// ```rust
204    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
205    ///
206    /// let cache = QueryResultCache::new(CacheConfig::default());
207    /// ```
208    #[must_use]
209    pub fn new(config: CacheConfig) -> Self {
210        Self::new_with_clock(config, Arc::new(SystemClock))
211    }
212
213    /// Create a cache with a custom clock for deterministic time-based testing.
214    ///
215    /// # Panics
216    ///
217    /// Panics if `config.max_entries` is 0.
218    #[must_use]
219    pub fn new_with_clock(config: CacheConfig, clock: Arc<dyn Clock>) -> Self {
220        assert!(config.max_entries > 0, "max_entries must be > 0");
221
222        // Use full sharding only when capacity is large enough (≥ NUM_SHARDS).
223        // Below that threshold, a single shard preserves exact global LRU ordering.
224        let num_shards = if config.max_entries >= NUM_SHARDS {
225            NUM_SHARDS
226        } else {
227            1
228        };
229        let per_shard = config.max_entries.div_ceil(num_shards);
230        // Reason: per_shard = max_entries.div_ceil(num_shards); max_entries > 0 is asserted above
231        // and num_shards is always ≥ 1, so per_shard ≥ 1 and NonZeroUsize::new cannot return None.
232        let per_shard_nz = NonZeroUsize::new(per_shard).expect("per_shard > 0");
233
234        let shards: Box<[_]> =
235            (0..num_shards).map(|_| Mutex::new(LruCache::new(per_shard_nz))).collect();
236
237        Self {
238            shards,
239            config,
240            clock,
241            hits: AtomicU64::new(0),
242            misses: AtomicU64::new(0),
243            total_cached: AtomicU64::new(0),
244            invalidations: AtomicU64::new(0),
245            memory_bytes: AtomicUsize::new(0),
246        }
247    }
248
249    /// Returns whether caching is enabled.
250    ///
251    /// Used by `CachedDatabaseAdapter` to short-circuit key generation
252    /// and result clone overhead when caching is disabled.
253    #[must_use]
254    pub const fn is_enabled(&self) -> bool {
255        self.config.enabled
256    }
257
258    /// Select the shard for a given cache key.
259    ///
260    /// The key is already a hash (u64), so we just modulo into `shard_count`
261    /// directly — no need to rehash.
262    #[inline]
263    #[allow(clippy::cast_possible_truncation)] // Reason: truncation is intentional; we only need a uniform index into shard_count
264    fn shard_for(&self, key: u64) -> &Mutex<LruCache<u64, CachedResult>> {
265        let idx = (key as usize) % self.shards.len();
266        &self.shards[idx]
267    }
268
269    /// Look up a cached result by its cache key.
270    ///
271    /// Returns `None` when caching is disabled or the key is not present or expired.
272    ///
273    /// # Errors
274    ///
275    /// This method is infallible with `parking_lot::Mutex` (no poisoning).
276    /// The `Result` return type is kept for API compatibility.
277    pub fn get(&self, cache_key: u64) -> Result<Option<Arc<Vec<JsonbValue>>>> {
278        if !self.config.enabled {
279            return Ok(None);
280        }
281
282        let mut cache = self.shard_for(cache_key).lock();
283
284        if let Some(cached) = cache.get_mut(&cache_key) {
285            // Check TTL: use per-entry override, fall back to global config.
286            let now = self.clock.now_secs();
287            if now - cached.cached_at > cached.ttl_seconds {
288                // Expired: remove and count as miss.
289                cache.pop(&cache_key);
290                drop(cache); // Release shard lock before atomic updates
291
292                self.memory_bytes.fetch_sub(entry_overhead(), Ordering::Relaxed);
293                self.misses.fetch_add(1, Ordering::Relaxed);
294                return Ok(None);
295            }
296
297            // Cache hit: clone the Arc (zero-copy) while still holding the shard lock.
298            cached.hit_count += 1;
299            let result = cached.result.clone();
300            drop(cache); // Release shard lock before atomic update
301            self.hits.fetch_add(1, Ordering::Relaxed);
302            Ok(Some(result))
303        } else {
304            drop(cache); // Release shard lock before atomic update
305            self.misses.fetch_add(1, Ordering::Relaxed);
306            Ok(None)
307        }
308    }
309
310    /// Store query result in cache.
311    ///
312    /// If caching is disabled, this is a no-op.
313    ///
314    /// # Arguments
315    ///
316    /// * `cache_key` - Cache key (from `generate_cache_key()`)
317    /// * `result` - Query result to cache
318    /// * `accessed_views` - List of views accessed by this query
319    /// * `ttl_override` - Per-entry TTL in seconds; `None` uses `CacheConfig::ttl_seconds`
320    /// * `entity_type` - Optional GraphQL type name (e.g. `"User"`) for entity-ID indexing. When
321    ///   provided, each row's `"id"` field is extracted and stored in `entity_ids` so that
322    ///   `invalidate_by_entity()` can perform selective eviction.
323    ///
324    /// # Errors
325    ///
326    /// This method is infallible with `parking_lot::Mutex` (no poisoning).
327    /// The `Result` return type is kept for API compatibility.
328    ///
329    /// # Example
330    ///
331    /// ```rust
332    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
333    /// use fraiseql_core::db::types::JsonbValue;
334    /// use serde_json::json;
335    ///
336    /// let cache = QueryResultCache::new(CacheConfig::default());
337    ///
338    /// let result = vec![JsonbValue::new(json!({"id": "uuid-1"}))];
339    /// cache.put(0xabc123, result, vec!["v_user".to_string()], None, Some("User"))?;
340    /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
341    /// ```
342    pub fn put(
343        &self,
344        cache_key: u64,
345        result: Vec<JsonbValue>,
346        accessed_views: Vec<String>,
347        ttl_override: Option<u64>,
348        entity_type: Option<&str>,
349    ) -> Result<()> {
350        if !self.config.enabled {
351            return Ok(());
352        }
353
354        // Respect cache_list_queries: a result with more than one row is considered a list.
355        if !self.config.cache_list_queries && result.len() > 1 {
356            return Ok(());
357        }
358
359        // Enforce per-entry size limit: estimate entry size from serialized JSON.
360        if let Some(max_entry) = self.config.max_entry_bytes {
361            let estimated = serde_json::to_vec(&result).map_or(0, |v| v.len());
362            if estimated > max_entry {
363                return Ok(()); // silently skip oversized entries
364            }
365        }
366
367        // Enforce total cache size limit.
368        if let Some(max_total) = self.config.max_total_bytes {
369            let current = self.memory_bytes.load(Ordering::Relaxed);
370            if current >= max_total {
371                return Ok(()); // silently skip when budget is exhausted
372            }
373        }
374
375        let ttl_seconds = ttl_override.unwrap_or(self.config.ttl_seconds);
376
377        // TTL=0 means "never cache this entry" — skip storing it entirely.
378        if ttl_seconds == 0 {
379            return Ok(());
380        }
381
382        let now = self.clock.now_secs();
383        // Build entity-ID index outside the lock: scan rows for "id" fields.
384        let entity_ids = if let Some(etype) = entity_type {
385            let ids: HashSet<String> = result
386                .iter()
387                .filter_map(|row| {
388                    row.as_value().as_object()?.get("id")?.as_str().map(str::to_string)
389                })
390                .collect();
391            if ids.is_empty() {
392                HashMap::new()
393            } else {
394                HashMap::from([(etype.to_string(), ids)])
395            }
396        } else {
397            HashMap::new()
398        };
399
400        let cached = CachedResult {
401            result: Arc::new(result),
402            accessed_views: accessed_views.into_boxed_slice(),
403            cached_at: now,
404            ttl_seconds,
405            hit_count: 0,
406            entity_ids,
407        };
408
409        // --- Critical section: hold shard lock only for the insert ---
410        let mut guard = self.shard_for(cache_key).lock();
411        let evicted = guard.push(cache_key, cached);
412        drop(guard);
413        // --- End critical section ---
414
415        self.total_cached.fetch_add(1, Ordering::Relaxed);
416
417        // Adjust memory_bytes: add new entry, subtract evicted entry if any.
418        // push() returns Some((key, value)) when it evicts the LRU tail OR
419        // when the key already existed (replacement). Either way, we subtract.
420        // With u64 keys, entry overhead is constant — evicted and new are the same size.
421        if evicted.is_none() {
422            self.memory_bytes.fetch_add(entry_overhead(), Ordering::Relaxed);
423        }
424
425        Ok(())
426    }
427
428    /// Invalidate entries accessing specified views.
429    ///
430    /// Called after mutations to invalidate affected cache entries.
431    ///
432    /// # Arguments
433    ///
434    /// * `views` - List of view/table names modified by mutation
435    ///
436    /// # Returns
437    ///
438    /// Number of cache entries invalidated
439    ///
440    /// # Errors
441    ///
442    /// This method is infallible with `parking_lot::Mutex` (no poisoning).
443    /// The `Result` return type is kept for API compatibility.
444    ///
445    /// # Example
446    ///
447    /// ```rust
448    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
449    ///
450    /// let cache = QueryResultCache::new(CacheConfig::default());
451    ///
452    /// // After createUser mutation
453    /// let invalidated = cache.invalidate_views(&["v_user".to_string()])?;
454    /// println!("Invalidated {} cache entries", invalidated);
455    /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
456    /// ```
457    pub fn invalidate_views(&self, views: &[String]) -> Result<u64> {
458        let mut total_invalidated: u64 = 0;
459        let mut total_freed: usize = 0;
460
461        for shard in &*self.shards {
462            let mut cache = shard.lock();
463
464            let keys_to_remove: Vec<u64> = cache
465                .iter()
466                .filter(|(_, cached)| cached.accessed_views.iter().any(|v| views.contains(v)))
467                .map(|(k, _)| *k)
468                .collect();
469
470            let freed_bytes: usize = keys_to_remove.iter().map(|_| entry_overhead()).sum();
471
472            for key in &keys_to_remove {
473                cache.pop(key);
474            }
475
476            #[allow(clippy::cast_possible_truncation)]
477            // Reason: key count within a shard never exceeds u64
478            let count = keys_to_remove.len() as u64;
479            total_invalidated += count;
480            total_freed += freed_bytes;
481        }
482
483        self.invalidations.fetch_add(total_invalidated, Ordering::Relaxed);
484        self.memory_bytes.fetch_sub(
485            total_freed.min(self.memory_bytes.load(Ordering::Relaxed)),
486            Ordering::Relaxed,
487        );
488
489        Ok(total_invalidated)
490    }
491
492    /// Evict cache entries that contain a specific entity UUID.
493    ///
494    /// Scans all entries whose `entity_ids` index contains the given `entity_id`
495    /// under the given `entity_type` key, and removes them. Entries that do not
496    /// reference this entity are left untouched.
497    ///
498    /// # Arguments
499    ///
500    /// * `entity_type` - GraphQL type name (e.g. `"User"`)
501    /// * `entity_id`   - UUID string of the mutated entity
502    ///
503    /// # Returns
504    ///
505    /// Number of cache entries evicted.
506    ///
507    /// # Errors
508    ///
509    /// This method is infallible with `parking_lot::Mutex` (no poisoning).
510    /// The `Result` return type is kept for API compatibility.
511    pub fn invalidate_by_entity(&self, entity_type: &str, entity_id: &str) -> Result<u64> {
512        let mut total_invalidated: u64 = 0;
513        let mut total_freed: usize = 0;
514
515        for shard in &*self.shards {
516            let mut cache = shard.lock();
517
518            let keys_to_remove: Vec<u64> = cache
519                .iter()
520                .filter(|(_, cached)| {
521                    cached.entity_ids.get(entity_type).is_some_and(|ids| ids.contains(entity_id))
522                })
523                .map(|(k, _)| *k)
524                .collect();
525
526            let freed_bytes: usize = keys_to_remove.iter().map(|_| entry_overhead()).sum();
527
528            for key in &keys_to_remove {
529                cache.pop(key);
530            }
531
532            #[allow(clippy::cast_possible_truncation)]
533            // Reason: key count within a shard never exceeds u64
534            let count = keys_to_remove.len() as u64;
535            total_invalidated += count;
536            total_freed += freed_bytes;
537        }
538
539        self.invalidations.fetch_add(total_invalidated, Ordering::Relaxed);
540        self.memory_bytes.fetch_sub(
541            total_freed.min(self.memory_bytes.load(Ordering::Relaxed)),
542            Ordering::Relaxed,
543        );
544
545        Ok(total_invalidated)
546    }
547
548    /// Get cache metrics snapshot.
549    ///
550    /// Returns a consistent snapshot of current counters. Individual fields may
551    /// be updated independently (atomics), so the snapshot is not a single
552    /// atomic transaction, but is accurate enough for monitoring.
553    ///
554    /// `size` is computed lazily by scanning all shards — this keeps the
555    /// `get()`/`put()` hot paths free of cross-shard coordination.
556    ///
557    /// # Errors
558    ///
559    /// This method is infallible with `parking_lot::Mutex` (no poisoning).
560    /// The `Result` return type is kept for API compatibility.
561    ///
562    /// # Example
563    ///
564    /// ```rust
565    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
566    ///
567    /// let cache = QueryResultCache::new(CacheConfig::default());
568    /// let metrics = cache.metrics()?;
569    ///
570    /// println!("Hit rate: {:.1}%", metrics.hit_rate() * 100.0);
571    /// println!("Size: {} / {} entries", metrics.size, 10_000);
572    /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
573    /// ```
574    pub fn metrics(&self) -> Result<CacheMetrics> {
575        // Compute size by scanning all shards. This is O(NUM_SHARDS) lock
576        // acquisitions but metrics() is called rarely (monitoring endpoints),
577        // never on the query hot path.
578        let size: usize = self.shards.iter().map(|s| s.lock().len()).sum();
579
580        Ok(CacheMetrics {
581            hits: self.hits.load(Ordering::Relaxed),
582            misses: self.misses.load(Ordering::Relaxed),
583            total_cached: self.total_cached.load(Ordering::Relaxed),
584            invalidations: self.invalidations.load(Ordering::Relaxed),
585            size,
586            memory_bytes: self.memory_bytes.load(Ordering::Relaxed),
587        })
588    }
589
590    /// Clear all cache entries.
591    ///
592    /// Used for testing and manual cache flush.
593    ///
594    /// # Errors
595    ///
596    /// This method is infallible with `parking_lot::Mutex` (no poisoning).
597    /// The `Result` return type is kept for API compatibility.
598    ///
599    /// # Example
600    ///
601    /// ```rust
602    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
603    ///
604    /// let cache = QueryResultCache::new(CacheConfig::default());
605    /// cache.clear()?;
606    /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
607    /// ```
608    pub fn clear(&self) -> Result<()> {
609        for shard in &*self.shards {
610            shard.lock().clear();
611        }
612
613        self.memory_bytes.store(0, Ordering::Relaxed);
614
615        Ok(())
616    }
617}
618
619impl CacheMetrics {
620    /// Calculate cache hit rate.
621    ///
622    /// Returns ratio of hits to total requests (0.0 to 1.0).
623    ///
624    /// # Returns
625    ///
626    /// - `1.0` if all requests were hits
627    /// - `0.0` if all requests were misses
628    /// - `0.0` if no requests yet
629    ///
630    /// # Example
631    ///
632    /// ```rust
633    /// use fraiseql_core::cache::CacheMetrics;
634    ///
635    /// let metrics = CacheMetrics {
636    ///     hits: 80,
637    ///     misses: 20,
638    ///     total_cached: 100,
639    ///     invalidations: 5,
640    ///     size: 95,
641    ///     memory_bytes: 1_000_000,
642    /// };
643    ///
644    /// assert_eq!(metrics.hit_rate(), 0.8);  // 80% hit rate
645    /// ```
646    #[must_use]
647    pub fn hit_rate(&self) -> f64 {
648        let total = self.hits + self.misses;
649        if total == 0 {
650            return 0.0;
651        }
652        #[allow(clippy::cast_precision_loss)]
653        // Reason: hit-rate is a display metric; f64 precision loss on u64 counters is acceptable
654        {
655            self.hits as f64 / total as f64
656        }
657    }
658
659    /// Check if cache is performing well.
660    ///
661    /// Returns `true` if hit rate is above 60% (reasonable threshold).
662    ///
663    /// # Example
664    ///
665    /// ```rust
666    /// use fraiseql_core::cache::CacheMetrics;
667    ///
668    /// let good_metrics = CacheMetrics {
669    ///     hits: 80,
670    ///     misses: 20,
671    ///     total_cached: 100,
672    ///     invalidations: 5,
673    ///     size: 95,
674    ///     memory_bytes: 1_000_000,
675    /// };
676    ///
677    /// assert!(good_metrics.is_healthy());  // 80% > 60%
678    /// ```
679    #[must_use]
680    pub fn is_healthy(&self) -> bool {
681        self.hit_rate() > 0.6
682    }
683}
684
685#[cfg(test)]
686mod tests {
687    #![allow(clippy::unwrap_used)] // Reason: test code, panics are acceptable
688
689    use serde_json::json;
690
691    use super::*;
692
    // Helper to create a minimal single-row test result shared by all tests below.
    fn test_result() -> Vec<JsonbValue> {
        vec![JsonbValue::new(json!({"id": 1, "name": "test"}))]
    }
697
698    // ========================================================================
699    // Cache Hit/Miss Tests
700    // ========================================================================
701
702    #[test]
703    fn test_cache_miss() {
704        let cache = QueryResultCache::new(CacheConfig::enabled());
705
706        let result = cache.get(999_u64).unwrap();
707        assert!(result.is_none(), "Should be cache miss");
708
709        let metrics = cache.metrics().unwrap();
710        assert_eq!(metrics.misses, 1);
711        assert_eq!(metrics.hits, 0);
712    }
713
714    #[test]
715    fn test_cache_put_and_get() {
716        let cache = QueryResultCache::new(CacheConfig::enabled());
717        let result = test_result();
718
719        // Put
720        cache.put(1_u64, result, vec!["v_user".to_string()], None, None).unwrap();
721
722        // Get
723        let cached = cache.get(1_u64).unwrap();
724        assert!(cached.is_some(), "Should be cache hit");
725        assert_eq!(cached.unwrap().len(), 1);
726
727        let metrics = cache.metrics().unwrap();
728        assert_eq!(metrics.hits, 1);
729        assert_eq!(metrics.misses, 0);
730        assert_eq!(metrics.total_cached, 1);
731    }
732
733    #[test]
734    fn test_cache_hit_updates_hit_count() {
735        let cache = QueryResultCache::new(CacheConfig::enabled());
736
737        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
738
739        // First hit
740        cache.get(1_u64).unwrap();
741        // Second hit
742        cache.get(1_u64).unwrap();
743
744        let metrics = cache.metrics().unwrap();
745        assert_eq!(metrics.hits, 2);
746    }
747
748    // ========================================================================
749    // TTL Expiry Tests
750    // ========================================================================
751
752    #[test]
753    fn test_ttl_expiry() {
754        let config = CacheConfig {
755            ttl_seconds: 1, // 1 second TTL
756            enabled: true,
757            ..Default::default()
758        };
759
760        let cache = QueryResultCache::new(config);
761
762        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
763
764        // Wait for expiry
765        std::thread::sleep(std::time::Duration::from_secs(2));
766
767        // Should be expired
768        let result = cache.get(1_u64).unwrap();
769        assert!(result.is_none(), "Entry should be expired");
770
771        let metrics = cache.metrics().unwrap();
772        assert_eq!(metrics.misses, 1); // Expired counts as miss
773    }
774
775    #[test]
776    fn test_per_entry_ttl_override_expires_early() {
777        // Global config has 1-hour TTL but entry overrides to 1 second
778        let config = CacheConfig {
779            ttl_seconds: 3600,
780            enabled: true,
781            ..Default::default()
782        };
783        let cache = QueryResultCache::new(config);
784
785        cache
786            .put(
787                1_u64,
788                test_result(),
789                vec!["v_ref".to_string()],
790                Some(1), // 1-second per-entry override
791                None,
792            )
793            .unwrap();
794
795        std::thread::sleep(std::time::Duration::from_secs(2));
796
797        let result = cache.get(1_u64).unwrap();
798        assert!(result.is_none(), "Entry with per-entry TTL=1s should have expired");
799    }
800
801    #[test]
802    fn test_per_entry_ttl_zero_never_cached() {
803        // TTL=0 means an entry is immediately expired on the first get()
804        let cache = QueryResultCache::new(CacheConfig::enabled());
805
806        cache
807            .put(1_u64, test_result(), vec!["v_live".to_string()], Some(0), None)
808            .unwrap();
809
810        let result = cache.get(1_u64).unwrap();
811        assert!(result.is_none(), "Entry with TTL=0 should be immediately expired");
812    }
813
814    #[test]
815    fn test_ttl_not_expired() {
816        let config = CacheConfig {
817            ttl_seconds: 3600, // 1 hour TTL
818            enabled: true,
819            ..Default::default()
820        };
821
822        let cache = QueryResultCache::new(config);
823
824        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
825
826        // Should still be valid
827        let result = cache.get(1_u64).unwrap();
828        assert!(result.is_some(), "Entry should not be expired");
829    }
830
831    // ========================================================================
832    // LRU Eviction Tests
833    // ========================================================================
834
835    #[test]
836    fn test_lru_eviction() {
837        let config = CacheConfig {
838            max_entries: 2, // Only 2 entries
839            enabled: true,
840            ..Default::default()
841        };
842
843        let cache = QueryResultCache::new(config);
844
845        // Add 3 entries (max is 2)
846        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
847        cache.put(2_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
848        cache.put(3_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
849
850        // key1 should be evicted (LRU)
851        assert!(cache.get(1_u64).unwrap().is_none(), "Oldest entry should be evicted");
852        assert!(cache.get(2_u64).unwrap().is_some());
853        assert!(cache.get(3_u64).unwrap().is_some());
854
855        let metrics = cache.metrics().unwrap();
856        assert_eq!(metrics.size, 2, "Cache size should be at max");
857    }
858
859    #[test]
860    fn test_lru_updates_on_access() {
861        let config = CacheConfig {
862            max_entries: 2,
863            enabled: true,
864            ..Default::default()
865        };
866
867        let cache = QueryResultCache::new(config);
868
869        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
870        cache.put(2_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
871
872        // Access key1 (makes it recently used)
873        cache.get(1_u64).unwrap();
874
875        // Add key3 (should evict key2, not key1)
876        cache.put(3_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
877
878        assert!(cache.get(1_u64).unwrap().is_some(), "key1 should remain (recently used)");
879        assert!(cache.get(2_u64).unwrap().is_none(), "key2 should be evicted (LRU)");
880        assert!(cache.get(3_u64).unwrap().is_some());
881    }
882
883    // ========================================================================
884    // Cache Disabled Tests
885    // ========================================================================
886
887    #[test]
888    fn test_cache_disabled() {
889        let config = CacheConfig::disabled();
890        let cache = QueryResultCache::new(config);
891
892        // Put should be no-op
893        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
894
895        // Get should return None
896        assert!(cache.get(1_u64).unwrap().is_none(), "Cache disabled should always miss");
897
898        let metrics = cache.metrics().unwrap();
899        assert_eq!(metrics.total_cached, 0);
900    }
901
902    // ========================================================================
903    // Invalidation Tests
904    // ========================================================================
905
906    #[test]
907    fn test_invalidate_single_view() {
908        let cache = QueryResultCache::new(CacheConfig::enabled());
909
910        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
911        cache.put(2_u64, test_result(), vec!["v_post".to_string()], None, None).unwrap();
912
913        // Invalidate v_user
914        let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
915        assert_eq!(invalidated, 1);
916
917        // v_user entry gone, v_post remains
918        assert!(cache.get(1_u64).unwrap().is_none());
919        assert!(cache.get(2_u64).unwrap().is_some());
920    }
921
922    #[test]
923    fn test_invalidate_multiple_views() {
924        let cache = QueryResultCache::new(CacheConfig::enabled());
925
926        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
927        cache.put(2_u64, test_result(), vec!["v_post".to_string()], None, None).unwrap();
928        cache
929            .put(3_u64, test_result(), vec!["v_product".to_string()], None, None)
930            .unwrap();
931
932        // Invalidate v_user and v_post
933        let invalidated =
934            cache.invalidate_views(&["v_user".to_string(), "v_post".to_string()]).unwrap();
935        assert_eq!(invalidated, 2);
936
937        assert!(cache.get(1_u64).unwrap().is_none());
938        assert!(cache.get(2_u64).unwrap().is_none());
939        assert!(cache.get(3_u64).unwrap().is_some());
940    }
941
942    #[test]
943    fn test_invalidate_entry_with_multiple_views() {
944        let cache = QueryResultCache::new(CacheConfig::enabled());
945
946        // Entry accesses both v_user and v_post
947        cache
948            .put(
949                1_u64,
950                test_result(),
951                vec!["v_user".to_string(), "v_post".to_string()],
952                None,
953                None,
954            )
955            .unwrap();
956
957        // Invalidating either view should remove the entry
958        let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
959        assert_eq!(invalidated, 1);
960
961        assert!(cache.get(1_u64).unwrap().is_none());
962    }
963
964    #[test]
965    fn test_invalidate_nonexistent_view() {
966        let cache = QueryResultCache::new(CacheConfig::enabled());
967
968        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
969
970        // Invalidate view that doesn't exist
971        let invalidated = cache.invalidate_views(&["v_nonexistent".to_string()]).unwrap();
972        assert_eq!(invalidated, 0);
973
974        // Entry should remain
975        assert!(cache.get(1_u64).unwrap().is_some());
976    }
977
978    // ========================================================================
979    // Clear Tests
980    // ========================================================================
981
982    #[test]
983    fn test_clear() {
984        let cache = QueryResultCache::new(CacheConfig::enabled());
985
986        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
987        cache.put(2_u64, test_result(), vec!["v_post".to_string()], None, None).unwrap();
988
989        cache.clear().unwrap();
990
991        assert!(cache.get(1_u64).unwrap().is_none());
992        assert!(cache.get(2_u64).unwrap().is_none());
993
994        let metrics = cache.metrics().unwrap();
995        assert_eq!(metrics.size, 0);
996    }
997
998    // ========================================================================
999    // Metrics Tests
1000    // ========================================================================
1001
1002    #[test]
1003    fn test_metrics_tracking() {
1004        let cache = QueryResultCache::new(CacheConfig::enabled());
1005
1006        // Miss
1007        cache.get(999_u64).unwrap();
1008
1009        // Put
1010        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1011
1012        // Hit
1013        cache.get(1_u64).unwrap();
1014
1015        let metrics = cache.metrics().unwrap();
1016        assert_eq!(metrics.hits, 1);
1017        assert_eq!(metrics.misses, 1);
1018        assert_eq!(metrics.size, 1);
1019        assert_eq!(metrics.total_cached, 1);
1020    }
1021
1022    #[test]
1023    fn test_metrics_hit_rate() {
1024        let metrics = CacheMetrics {
1025            hits:          80,
1026            misses:        20,
1027            total_cached:  100,
1028            invalidations: 5,
1029            size:          95,
1030            memory_bytes:  1_000_000,
1031        };
1032
1033        assert!((metrics.hit_rate() - 0.8).abs() < f64::EPSILON);
1034        assert!(metrics.is_healthy());
1035    }
1036
1037    #[test]
1038    fn test_metrics_hit_rate_zero_requests() {
1039        let metrics = CacheMetrics {
1040            hits:          0,
1041            misses:        0,
1042            total_cached:  0,
1043            invalidations: 0,
1044            size:          0,
1045            memory_bytes:  0,
1046        };
1047
1048        assert!((metrics.hit_rate() - 0.0).abs() < f64::EPSILON);
1049        assert!(!metrics.is_healthy());
1050    }
1051
1052    #[test]
1053    fn test_metrics_is_healthy() {
1054        let good = CacheMetrics {
1055            hits:          70,
1056            misses:        30,
1057            total_cached:  100,
1058            invalidations: 5,
1059            size:          95,
1060            memory_bytes:  1_000_000,
1061        };
1062        assert!(good.is_healthy()); // 70% > 60%
1063
1064        let bad = CacheMetrics {
1065            hits:          50,
1066            misses:        50,
1067            total_cached:  100,
1068            invalidations: 5,
1069            size:          95,
1070            memory_bytes:  1_000_000,
1071        };
1072        assert!(!bad.is_healthy()); // 50% < 60%
1073    }
1074
1075    // ========================================================================
1076    // Entity-Aware Invalidation Tests
1077    // ========================================================================
1078
1079    fn entity_result(id: &str) -> Vec<JsonbValue> {
1080        vec![JsonbValue::new(
1081            serde_json::json!({"id": id, "name": "test"}),
1082        )]
1083    }
1084
1085    #[test]
1086    fn test_invalidate_by_entity_only_removes_matching_entries() {
1087        let cache = QueryResultCache::new(CacheConfig::enabled());
1088
1089        // Cache User A and User B as separate entries
1090        cache
1091            .put(1_u64, entity_result("uuid-a"), vec!["v_user".to_string()], None, Some("User"))
1092            .unwrap();
1093        cache
1094            .put(2_u64, entity_result("uuid-b"), vec!["v_user".to_string()], None, Some("User"))
1095            .unwrap();
1096
1097        // Invalidate User A — User B must remain
1098        let evicted = cache.invalidate_by_entity("User", "uuid-a").unwrap();
1099        assert_eq!(evicted, 1);
1100        assert!(cache.get(1_u64).unwrap().is_none(), "User A should be evicted");
1101        assert!(cache.get(2_u64).unwrap().is_some(), "User B should remain");
1102    }
1103
1104    #[test]
1105    fn test_invalidate_by_entity_removes_list_containing_entity() {
1106        let cache = QueryResultCache::new(CacheConfig::enabled());
1107
1108        // Cache a "users list" entry that contains both A and B
1109        let list = vec![
1110            JsonbValue::new(serde_json::json!({"id": "uuid-a", "name": "Alice"})),
1111            JsonbValue::new(serde_json::json!({"id": "uuid-b", "name": "Bob"})),
1112        ];
1113        cache.put(1_u64, list, vec!["v_user".to_string()], None, Some("User")).unwrap();
1114
1115        // Invalidate by User A — the list entry contains A, so it must be evicted
1116        let evicted = cache.invalidate_by_entity("User", "uuid-a").unwrap();
1117        assert_eq!(evicted, 1);
1118        assert!(cache.get(1_u64).unwrap().is_none(), "List containing A should be evicted");
1119    }
1120
1121    #[test]
1122    fn test_invalidate_by_entity_leaves_unrelated_types() {
1123        let cache = QueryResultCache::new(CacheConfig::enabled());
1124
1125        // Cache a User entry and a Post entry
1126        cache
1127            .put(
1128                1_u64,
1129                entity_result("uuid-user"),
1130                vec!["v_user".to_string()],
1131                None,
1132                Some("User"),
1133            )
1134            .unwrap();
1135        cache
1136            .put(
1137                2_u64,
1138                entity_result("uuid-post"),
1139                vec!["v_post".to_string()],
1140                None,
1141                Some("Post"),
1142            )
1143            .unwrap();
1144
1145        // Invalidate the User — Post entry must remain untouched
1146        let evicted = cache.invalidate_by_entity("User", "uuid-user").unwrap();
1147        assert_eq!(evicted, 1);
1148        assert!(cache.get(1_u64).unwrap().is_none(), "User entry should be evicted");
1149        assert!(cache.get(2_u64).unwrap().is_some(), "Post entry should remain");
1150    }
1151
1152    #[test]
1153    fn test_put_builds_entity_id_index() {
1154        let cache = QueryResultCache::new(CacheConfig::enabled());
1155
1156        let rows = vec![
1157            JsonbValue::new(serde_json::json!({"id": "uuid-1", "name": "Alice"})),
1158            JsonbValue::new(serde_json::json!({"id": "uuid-2", "name": "Bob"})),
1159        ];
1160        cache.put(1_u64, rows, vec!["v_user".to_string()], None, Some("User")).unwrap();
1161
1162        // Invalidating by uuid-1 should evict the entry
1163        let evicted = cache.invalidate_by_entity("User", "uuid-1").unwrap();
1164        assert_eq!(evicted, 1);
1165        assert!(cache.get(1_u64).unwrap().is_none());
1166    }
1167
1168    #[test]
1169    fn test_put_without_entity_type_not_indexed() {
1170        let cache = QueryResultCache::new(CacheConfig::enabled());
1171
1172        cache
1173            .put(
1174                1_u64,
1175                entity_result("uuid-1"),
1176                vec!["v_user".to_string()],
1177                None,
1178                None, // no entity type
1179            )
1180            .unwrap();
1181
1182        // invalidate_by_entity should not match (no index was built)
1183        let evicted = cache.invalidate_by_entity("User", "uuid-1").unwrap();
1184        assert_eq!(evicted, 0);
1185        assert!(cache.get(1_u64).unwrap().is_some(), "Non-indexed entry should remain");
1186    }
1187
1188    // ========================================================================
1189    // Thread Safety Tests
1190    // ========================================================================
1191
1192    #[test]
1193    fn test_concurrent_access() {
1194        use std::{sync::Arc, thread};
1195
1196        let cache = Arc::new(QueryResultCache::new(CacheConfig::enabled()));
1197
1198        // Spawn multiple threads accessing cache
1199        let handles: Vec<_> = (0_u64..10)
1200            .map(|key| {
1201                let cache_clone = cache.clone();
1202                thread::spawn(move || {
1203                    cache_clone
1204                        .put(key, test_result(), vec!["v_user".to_string()], None, None)
1205                        .unwrap();
1206                    cache_clone.get(key).unwrap();
1207                })
1208            })
1209            .collect();
1210
1211        for handle in handles {
1212            handle.join().unwrap();
1213        }
1214
1215        let metrics = cache.metrics().unwrap();
1216        assert_eq!(metrics.total_cached, 10);
1217        assert_eq!(metrics.hits, 10);
1218    }
1219
1220    // ========================================================================
1221    // Sentinel tests — boundary guards for mutation testing
1222    // ========================================================================
1223
1224    /// Sentinel: `cache_list_queries = false` must skip results with >1 row.
1225    ///
1226    /// Kills the `> → >=` mutation at the list-query guard: `result.len() > 1`.
1227    #[test]
1228    fn test_cache_list_queries_false_skips_multi_row() {
1229        let config = CacheConfig {
1230            enabled: true,
1231            cache_list_queries: false,
1232            ..CacheConfig::default()
1233        };
1234        let cache = QueryResultCache::new(config);
1235
1236        // Two-row result: must be skipped (killed by > → >= mutant)
1237        let two_rows = vec![
1238            JsonbValue::new(json!({"id": 1})),
1239            JsonbValue::new(json!({"id": 2})),
1240        ];
1241        cache.put(1_u64, two_rows, vec!["v_user".to_string()], None, None).unwrap();
1242        assert!(
1243            cache.get(1_u64).unwrap().is_none(),
1244            "multi-row result must not be cached when cache_list_queries=false"
1245        );
1246    }
1247
1248    /// Sentinel: `cache_list_queries = false` must still store single-row results.
1249    ///
1250    /// Complements the above: the single-row path must remain unaffected.
1251    #[test]
1252    fn test_cache_list_queries_false_allows_single_row() {
1253        let config = CacheConfig {
1254            enabled: true,
1255            cache_list_queries: false,
1256            ..CacheConfig::default()
1257        };
1258        let cache = QueryResultCache::new(config);
1259
1260        // One-row result: must be stored
1261        let one_row = vec![JsonbValue::new(json!({"id": 1}))];
1262        cache.put(1_u64, one_row, vec!["v_user".to_string()], None, None).unwrap();
1263        assert!(
1264            cache.get(1_u64).unwrap().is_some(),
1265            "single-row result must be cached even when cache_list_queries=false"
1266        );
1267    }
1268
1269    /// Sentinel: entries exceeding `max_entry_bytes` must be silently skipped.
1270    ///
1271    /// Kills mutations on the `estimated > max_entry` guard.
1272    #[test]
1273    fn test_max_entry_bytes_skips_oversized_entry() {
1274        let config = CacheConfig {
1275            enabled: true,
1276            max_entry_bytes: Some(10), // 10 bytes — smaller than any JSON row
1277            ..CacheConfig::default()
1278        };
1279        let cache = QueryResultCache::new(config);
1280
1281        // A typical row serialises to far more than 10 bytes
1282        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1283        assert!(cache.get(1_u64).unwrap().is_none(), "oversized entry must be silently skipped");
1284    }
1285
1286    /// Sentinel: entries within `max_entry_bytes` must be stored normally.
1287    ///
1288    /// Complements the above to pin both sides of the size boundary.
1289    #[test]
1290    fn test_max_entry_bytes_allows_small_entry() {
1291        let config = CacheConfig {
1292            enabled: true,
1293            max_entry_bytes: Some(100_000), // 100 KB — plenty for a test row
1294            ..CacheConfig::default()
1295        };
1296        let cache = QueryResultCache::new(config);
1297
1298        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1299        assert!(
1300            cache.get(1_u64).unwrap().is_some(),
1301            "small entry must be cached when within max_entry_bytes"
1302        );
1303    }
1304
1305    /// Sentinel: `put()` must skip new entries when `max_total_bytes` budget is exhausted.
1306    ///
1307    /// Kills mutations on the `current >= max_total` guard.
1308    #[test]
1309    fn test_max_total_bytes_skips_when_budget_exhausted() {
1310        let config = CacheConfig {
1311            enabled: true,
1312            max_total_bytes: Some(0), // 0 bytes — always exhausted
1313            ..CacheConfig::default()
1314        };
1315        let cache = QueryResultCache::new(config);
1316
1317        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1318        assert!(
1319            cache.get(1_u64).unwrap().is_none(),
1320            "entry must be skipped when max_total_bytes budget is already exhausted"
1321        );
1322    }
1323
1324    // ========================================================================
1325    // Sharding Tests
1326    // ========================================================================
1327
1328    /// Verify that a large cache uses 64 shards.
1329    #[test]
1330    fn test_sharded_cache_has_64_shards() {
1331        let config = CacheConfig {
1332            max_entries: 10_000,
1333            enabled: true,
1334            ..CacheConfig::default()
1335        };
1336        let cache = QueryResultCache::new(config);
1337        assert_eq!(cache.shards.len(), NUM_SHARDS);
1338    }
1339
1340    /// Small capacities (< 64) fall back to 1 shard for exact LRU ordering.
1341    #[test]
1342    fn test_small_capacity_uses_single_shard() {
1343        let config = CacheConfig {
1344            max_entries: 10,
1345            enabled: true,
1346            ..CacheConfig::default()
1347        };
1348        let cache = QueryResultCache::new(config);
1349        assert_eq!(cache.shards.len(), 1);
1350    }
1351
1352    /// Cross-shard invalidation: `invalidate_views` clears matching entries
1353    /// regardless of which shard they reside in.
1354    #[test]
1355    fn test_cross_shard_view_invalidation() {
1356        let config = CacheConfig {
1357            max_entries: 10_000,
1358            enabled: true,
1359            ..CacheConfig::default()
1360        };
1361        let cache = QueryResultCache::new(config);
1362
1363        // Insert many entries across different shards
1364        for i in 0_u64..200 {
1365            let view = if i % 2 == 0 { "v_user" } else { "v_post" };
1366            cache.put(i, test_result(), vec![view.to_string()], None, None).unwrap();
1367        }
1368
1369        // Invalidate v_user — should remove exactly 100 entries
1370        let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
1371        assert_eq!(invalidated, 100);
1372
1373        // All v_user entries gone, all v_post entries remain
1374        for i in 0_u64..200 {
1375            if i % 2 == 0 {
1376                assert!(cache.get(i).unwrap().is_none(), "v_user entry should be invalidated");
1377            } else {
1378                assert!(cache.get(i).unwrap().is_some(), "v_post entry should remain");
1379            }
1380        }
1381    }
1382
1383    /// Cross-shard entity invalidation works across all shards.
1384    #[test]
1385    fn test_cross_shard_entity_invalidation() {
1386        let config = CacheConfig {
1387            max_entries: 10_000,
1388            enabled: true,
1389            ..CacheConfig::default()
1390        };
1391        let cache = QueryResultCache::new(config);
1392
1393        // Insert entries for the same entity across different cache keys
1394        for i in 0_u64..50 {
1395            cache
1396                .put(
1397                    i,
1398                    entity_result("uuid-target"),
1399                    vec!["v_user".to_string()],
1400                    None,
1401                    Some("User"),
1402                )
1403                .unwrap();
1404        }
1405
1406        // Also insert an unrelated entry
1407        cache
1408            .put(
1409                999_u64,
1410                entity_result("uuid-other"),
1411                vec!["v_user".to_string()],
1412                None,
1413                Some("User"),
1414            )
1415            .unwrap();
1416
1417        let evicted = cache.invalidate_by_entity("User", "uuid-target").unwrap();
1418        assert_eq!(evicted, 50);
1419        assert!(cache.get(999_u64).unwrap().is_some(), "unrelated entity should remain");
1420    }
1421
1422    /// Clear works across all shards.
1423    #[test]
1424    fn test_clear_all_shards() {
1425        let config = CacheConfig {
1426            max_entries: 10_000,
1427            enabled: true,
1428            ..CacheConfig::default()
1429        };
1430        let cache = QueryResultCache::new(config);
1431
1432        for i in 0_u64..200 {
1433            cache.put(i, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1434        }
1435
1436        cache.clear().unwrap();
1437        let metrics = cache.metrics().unwrap();
1438        assert_eq!(metrics.size, 0);
1439
1440        for i in 0_u64..200 {
1441            assert!(cache.get(i).unwrap().is_none());
1442        }
1443    }
1444
1445    /// Verify `push()` returns evicted entries for correct memory accounting.
1446    #[test]
1447    fn test_memory_bytes_tracked_on_eviction() {
1448        let config = CacheConfig {
1449            max_entries: 2,
1450            enabled: true,
1451            ..CacheConfig::default()
1452        };
1453        let cache = QueryResultCache::new(config);
1454
1455        cache.put(1_u64, test_result(), vec!["v".to_string()], None, None).unwrap();
1456        cache.put(2_u64, test_result(), vec!["v".to_string()], None, None).unwrap();
1457
1458        let before = cache.memory_bytes.load(Ordering::Relaxed);
1459        assert!(before > 0, "memory_bytes should be tracked");
1460
1461        // Evict k1 by adding k3 (same key length → memory_bytes unchanged)
1462        cache.put(3_u64, test_result(), vec!["v".to_string()], None, None).unwrap();
1463
1464        let after = cache.memory_bytes.load(Ordering::Relaxed);
1465        assert_eq!(before, after, "memory_bytes should remain stable after same-size eviction");
1466    }
1467
1468    /// Verify `memory_bytes` decreases after invalidation.
1469    #[test]
1470    fn test_memory_bytes_decreases_on_invalidation() {
1471        let cache = QueryResultCache::new(CacheConfig::enabled());
1472
1473        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1474
1475        let before = cache.memory_bytes.load(Ordering::Relaxed);
1476        assert!(before > 0);
1477
1478        cache.invalidate_views(&["v_user".to_string()]).unwrap();
1479
1480        let after = cache.memory_bytes.load(Ordering::Relaxed);
1481        assert_eq!(after, 0, "memory_bytes should be zero after invalidating all entries");
1482    }
1483}