// fraiseql_core/cache/result.rs
//! Query result caching with W-TinyLFU eviction and per-entry TTL.
//!
//! This module provides a `moka::sync::Cache`-backed store for GraphQL query results.
//! Moka uses Concurrent W-TinyLFU policy with lock-free reads — cache hits do NOT
//! acquire any shared lock, eliminating the hot-key serialisation bottleneck present
//! in the old 64-shard `parking_lot::Mutex<LruCache>` design.
//!
//! ## Performance characteristics
//!
//! - **`get()` hot path** (cache hit): lock-free frequency-counter update (thread-local
//!   ring buffer, drained lazily on writes), `Arc` clone (single atomic increment),
//!   one atomic counter bump.
//! - **`put()` path**: early-exit guards (disabled / list / size) before touching
//!   the store. Reverse-index updates use `DashMap` (fine-grained sharding, no global lock).
//! - **`metrics()`**: reads `store.entry_count()` directly — no shard scan.
//! - **`invalidate_views()` / `invalidate_by_entity()`**: O(k) where k = matching entries
//!   (via reverse indexes), not O(total entries).
//!
//! ## Reverse indexes
//!
//! Because `moka` does not support arbitrary iteration, view-based and entity-based
//! invalidation rely on two `DashMap` reverse indexes maintained alongside the store:
//!
//! ```text
//! view_index:   DashMap<view_name,   DashSet<cache_key>>
//! entity_index: DashMap<entity_type, DashMap<entity_id, DashSet<cache_key>>>
//! ```
//!
//! Indexes are populated in `put()` and pruned via moka's eviction listener (fired
//! asynchronously). `clear()` resets all indexes synchronously.
use std::{
    collections::HashSet,
    sync::{
        Arc,
        atomic::{AtomicU64, AtomicUsize, Ordering},
    },
    time::Duration,
};

use dashmap::{DashMap, DashSet};
use moka::sync::Cache as MokaCache;
use serde::{Deserialize, Serialize};

use super::config::CacheConfig;
use crate::{db::types::JsonbValue, error::Result};
47
/// Cached query result with metadata.
///
/// Stores the query result along with tracking information for
/// TTL expiry, view-based invalidation, and monitoring.
#[derive(Debug, Clone)]
pub struct CachedResult {
    /// The actual query result (JSONB array from database).
    ///
    /// Wrapped in `Arc` for cheap cloning on cache hits (zero-copy).
    pub result: Arc<Vec<JsonbValue>>,

    /// Which views/tables this query accesses.
    ///
    /// Format: `vec!["v_user", "v_post"]`
    ///
    /// Stored as a boxed slice (no excess capacity) since views are fixed
    /// at `put()` time and never modified.
    pub accessed_views: Box<[String]>,

    /// When this entry was cached (Unix timestamp in seconds).
    ///
    /// Wall-clock timestamp for debugging only. TTL enforcement is handled by
    /// moka internally via `CacheEntryExpiry`.
    pub cached_at: u64,

    /// Per-entry TTL in seconds. `0` means "no time-based expiry".
    ///
    /// Overrides `CacheConfig::ttl_seconds` when set via `put(..., Some(ttl))`.
    /// Read by `CacheEntryExpiry::expire_after_create` to tell moka the expiry.
    pub ttl_seconds: u64,

    /// Entity references for selective entity-level invalidation.
    ///
    /// Contains one `(entity_type, entity_id)` pair per row in `result` that has
    /// a valid string in its `"id"` field.  Empty for queries with no `id` column
    /// or when `put()` is called without an `entity_type`.
    /// Used by the eviction listener to clean up `entity_index` on eviction.
    pub entity_refs: Box<[(String, String)]>,

    /// True when `result.len() > 1` at put time.
    ///
    /// Used by `invalidate_list_queries()` to avoid evicting single-entity
    /// point-lookup entries on CREATE mutations.
    pub is_list_query: bool,
}
93
94/// Moka `Expiry` implementation: reads TTL from `CachedResult.ttl_seconds`.
95struct CacheEntryExpiry;
96
97impl moka::Expiry<u64, Arc<CachedResult>> for CacheEntryExpiry {
98    fn expire_after_create(
99        &self,
100        _key: &u64,
101        value: &Arc<CachedResult>,
102        _created_at: std::time::Instant,
103    ) -> Option<Duration> {
104        if value.ttl_seconds == 0 {
105            // TTL=0 means "no time-based expiry" — entry lives until explicitly
106            // invalidated by a mutation.  Return None so moka never schedules
107            // a timer-wheel eviction for this entry.
108            None
109        } else {
110            Some(Duration::from_secs(value.ttl_seconds))
111        }
112    }
113
114    // `expire_after_read` is intentionally NOT overridden.
115    //
116    // Moka's default returns `None` (no change to the timer) which skips the
117    // internal timer-wheel reschedule on every get().  Overriding it to return
118    // `duration_until_expiry` — even though the value is semantically unchanged —
119    // forces moka to acquire its timer-wheel lock on every cache hit.  Under 40
120    // concurrent workers reading the same key, that lock becomes the new hot-key
121    // bottleneck, serialising reads and degrading list-query throughput ~3×.
122    //
123    // Entries expire at creation_time + ttl_seconds regardless of read frequency,
124    // which is the correct fixed-TTL semantics for query result caching.
125}
126
/// Thread-safe W-TinyLFU cache for query results.
///
/// Backed by [`moka::sync::Cache`] which provides lock-free reads via
/// Concurrent `TinyLFU`. Reverse `DashMap` indexes enable O(k) invalidation.
///
/// # Thread Safety
///
/// `moka::sync::Cache` is `Send + Sync`. All reverse indexes use `DashMap`
/// (fine-grained shard locking) and `DashSet` (also shard-locked). There is no
/// global mutex on the read path.
///
/// # Example
///
/// ```rust
/// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
/// use fraiseql_core::db::types::JsonbValue;
/// use serde_json::json;
///
/// let cache = QueryResultCache::new(CacheConfig::default());
///
/// // Cache a result
/// let result = vec![JsonbValue::new(json!({"id": 1, "name": "Alice"}))];
/// cache.put(
///     12345_u64,
///     result.clone(),
///     vec!["v_user".to_string()],
///     None, // use global TTL
///     None, // no entity type index
/// ).unwrap();
///
/// // Retrieve from cache
/// if let Some(cached) = cache.get(12345).unwrap() {
///     println!("Cache hit! {} results", cached.len());
/// }
/// ```
pub struct QueryResultCache {
    /// Moka W-TinyLFU store.
    ///
    /// `Arc<CachedResult>` rather than `CachedResult` so that `get()` returns in
    /// one atomic increment instead of deep-cloning the struct (which would copy
    /// `accessed_views: Box<[String]>` on every cache hit).
    store: MokaCache<u64, Arc<CachedResult>>,

    /// Configuration (immutable after creation).
    config: CacheConfig,

    // Metrics counters — `Relaxed` ordering is sufficient: these counters are
    // used only for monitoring, not for correctness or synchronisation.
    hits:          AtomicU64,
    misses:        AtomicU64,
    total_cached:  AtomicU64,
    invalidations: AtomicU64,

    /// Estimated total memory in use.
    ///
    /// Wrapped in `Arc` so the eviction listener closure (which requires `'static`)
    /// can hold a clone and decrement on eviction.
    memory_bytes: Arc<AtomicUsize>,

    /// Reverse index: view name → set of cache keys accessing that view.
    view_index: Arc<DashMap<String, DashSet<u64>>>,

    /// Reverse index: entity type → entity id → set of cache keys.
    entity_index: Arc<DashMap<String, DashMap<String, DashSet<u64>>>>,

    /// Reverse index: view name → set of cache keys for list (multi-row) entries only.
    ///
    /// Populated in `put_arc()` when `result.len() > 1`. Used by
    /// `invalidate_list_queries()` for CREATE-targeted eviction that leaves
    /// point-lookup entries intact.
    list_index: Arc<DashMap<String, DashSet<u64>>>,
}
199
/// Cache metrics for monitoring.
///
/// Exposed via API for observability and debugging. Individual counters are
/// sampled from independent atomics, so a snapshot is approximate, not
/// transactional.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheMetrics {
    /// Number of cache hits (returned cached result).
    pub hits: u64,

    /// Number of cache misses (executed query).
    pub misses: u64,

    /// Total entries cached across all time.
    pub total_cached: u64,

    /// Number of invalidations triggered.
    pub invalidations: u64,

    /// Current size of cache (number of entries).
    pub size: usize,

    /// Estimated memory usage in bytes.
    ///
    /// This is a rough estimate based on `CachedResult` struct size.
    /// Actual memory usage may vary based on result sizes.
    pub memory_bytes: usize,
}
226
227/// Estimate the per-entry accounting overhead.
228const fn entry_overhead() -> usize {
229    std::mem::size_of::<CachedResult>() + std::mem::size_of::<u64>() * 2
230}
231
/// Build the moka store, wiring the eviction listener to the reverse indexes
/// and memory counter.
///
/// The listener ignores the eviction cause: index pruning and the memory
/// decrement run identically for TTL expiry, capacity eviction, replacement,
/// and explicit invalidation.
fn build_store(
    config: &CacheConfig,
    memory_bytes: Arc<AtomicUsize>,
    view_index: Arc<DashMap<String, DashSet<u64>>>,
    entity_index: Arc<DashMap<String, DashMap<String, DashSet<u64>>>>,
    list_index: Arc<DashMap<String, DashSet<u64>>>,
) -> MokaCache<u64, Arc<CachedResult>> {
    let max_cap = config.max_entries as u64;
    // Rebind under short names; the closure below must own these Arcs ('static).
    let mb = memory_bytes;
    let vi = view_index;
    let ei = entity_index;
    let li = list_index;

    MokaCache::builder()
        .max_capacity(max_cap)
        .expire_after(CacheEntryExpiry)
        .eviction_listener(move |key: Arc<u64>, value: Arc<CachedResult>, _cause| {
            // Decrement memory budget so put()'s byte-gate stays accurate.
            mb.fetch_sub(entry_overhead(), Ordering::Relaxed);

            // Remove key from view index.
            for view in &value.accessed_views {
                if let Some(keys) = vi.get(view) {
                    keys.remove(&*key);
                }
            }

            // Remove key from list index (only populated for multi-row entries).
            if value.is_list_query {
                for view in &value.accessed_views {
                    if let Some(keys) = li.get(view) {
                        keys.remove(&*key);
                    }
                }
            }

            // Remove ALL entity_refs from entity index.
            for (et, id) in &*value.entity_refs {
                if let Some(by_type) = ei.get(et) {
                    if let Some(keys) = by_type.get(id) {
                        keys.remove(&*key);
                    }
                }
            }
        })
        .build()
}
281
282impl QueryResultCache {
283    /// Create new cache with configuration.
284    ///
285    /// # Panics
286    ///
287    /// Panics if `config.max_entries` is 0 (invalid configuration).
288    ///
289    /// # Example
290    ///
291    /// ```rust
292    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
293    ///
294    /// let cache = QueryResultCache::new(CacheConfig::default());
295    /// ```
296    #[must_use]
297    pub fn new(config: CacheConfig) -> Self {
298        assert!(config.max_entries > 0, "max_entries must be > 0");
299
300        let memory_bytes = Arc::new(AtomicUsize::new(0));
301        let view_index: Arc<DashMap<String, DashSet<u64>>> = Arc::new(DashMap::new());
302        let entity_index: Arc<DashMap<String, DashMap<String, DashSet<u64>>>> =
303            Arc::new(DashMap::new());
304        let list_index: Arc<DashMap<String, DashSet<u64>>> = Arc::new(DashMap::new());
305
306        let store = build_store(
307            &config,
308            Arc::clone(&memory_bytes),
309            Arc::clone(&view_index),
310            Arc::clone(&entity_index),
311            Arc::clone(&list_index),
312        );
313
314        Self {
315            store,
316            config,
317            hits: AtomicU64::new(0),
318            misses: AtomicU64::new(0),
319            total_cached: AtomicU64::new(0),
320            invalidations: AtomicU64::new(0),
321            memory_bytes,
322            view_index,
323            entity_index,
324            list_index,
325        }
326    }
327
    /// Returns whether caching is enabled.
    ///
    /// Used by `CachedDatabaseAdapter` to short-circuit key generation
    /// and result clone overhead when caching is disabled.
    #[must_use]
    pub const fn is_enabled(&self) -> bool {
        self.config.enabled
    }
336
337    /// Look up a cached result by its cache key.
338    ///
339    /// Returns `None` when caching is disabled or the key is not present or expired.
340    /// Moka handles TTL expiry internally — if `get()` returns `Some`, the entry is live.
341    ///
342    /// # Errors
343    ///
344    /// This method is infallible. The `Result` return type is kept for API compatibility.
345    pub fn get(&self, cache_key: u64) -> Result<Option<Arc<Vec<JsonbValue>>>> {
346        if !self.config.enabled {
347            return Ok(None);
348        }
349
350        // moka::sync::Cache::get() is lock-free on the read path.
351        if let Some(cached) = self.store.get(&cache_key) {
352            self.hits.fetch_add(1, Ordering::Relaxed);
353            Ok(Some(Arc::clone(&cached.result)))
354        } else {
355            self.misses.fetch_add(1, Ordering::Relaxed);
356            Ok(None)
357        }
358    }
359
    /// Store query result in cache, accepting an already-`Arc`-wrapped result.
    ///
    /// Preferred over [`put`](Self::put) on the hot miss path: callers that already
    /// hold an `Arc<Vec<JsonbValue>>` (e.g. `CachedDatabaseAdapter`) can store it
    /// without an extra `Vec` clone.
    ///
    /// # Arguments
    ///
    /// * `cache_key` - Cache key (from `generate_cache_key()`)
    /// * `result` - Arc-wrapped query result to cache
    /// * `accessed_views` - List of views accessed by this query
    /// * `ttl_override` - Per-entry TTL in seconds; `None` uses `CacheConfig::ttl_seconds`
    /// * `entity_type` - Optional GraphQL type name for entity-ID indexing
    ///
    /// # Errors
    ///
    /// This method is infallible. The `Result` return type is kept for API compatibility.
    pub fn put_arc(
        &self,
        cache_key: u64,
        result: Arc<Vec<JsonbValue>>,
        accessed_views: Vec<String>,
        ttl_override: Option<u64>,
        entity_type: Option<&str>,
    ) -> Result<()> {
        if !self.config.enabled {
            return Ok(());
        }

        let ttl_seconds = ttl_override.unwrap_or(self.config.ttl_seconds);

        // TTL=0 means "no time-based expiry" — store the entry and rely entirely
        // on mutation-based invalidation.  expire_after_create returns None for
        // these entries so moka never schedules a timer-wheel eviction.

        // Respect cache_list_queries: a result with more than one row is considered a list.
        if !self.config.cache_list_queries && result.len() > 1 {
            return Ok(());
        }

        // Enforce per-entry size limit: estimate entry size from serialized JSON.
        // NOTE: this performs one full JSON encode of the result on the put path;
        // serialization failure is treated as size 0 (entry is admitted).
        if let Some(max_entry) = self.config.max_entry_bytes {
            let estimated = serde_json::to_vec(&*result).map_or(0, |v| v.len());
            if estimated > max_entry {
                return Ok(()); // silently skip oversized entries
            }
        }

        // Enforce total cache size limit.
        if let Some(max_total) = self.config.max_total_bytes {
            if self.memory_bytes.load(Ordering::Relaxed) >= max_total {
                return Ok(()); // silently skip when budget is exhausted
            }
        }

        let is_list_query = result.len() > 1;

        // Extract entity refs from ALL rows (not just the first).
        let entity_refs: Box<[(String, String)]> = if let Some(et) = entity_type {
            result
                .iter()
                .filter_map(|row| {
                    row.as_value()
                        .as_object()?
                        .get("id")?
                        .as_str()
                        .map(|id| (et.to_string(), id.to_string()))
                })
                .collect::<Vec<_>>()
                .into_boxed_slice()
        } else {
            Box::default()
        };

        // Register in view index.
        for view in &accessed_views {
            self.view_index.entry(view.clone()).or_default().insert(cache_key);
        }

        // Register in list index (only for multi-row results).
        if is_list_query {
            for view in &accessed_views {
                self.list_index.entry(view.clone()).or_default().insert(cache_key);
            }
        }

        // Register ALL entity refs in entity index.
        for (et, id) in &*entity_refs {
            self.entity_index
                .entry(et.clone())
                .or_default()
                .entry(id.clone())
                .or_default()
                .insert(cache_key);
        }

        let cached = CachedResult {
            result,
            accessed_views: accessed_views.into_boxed_slice(),
            cached_at: std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .map_or(0, |d| d.as_secs()),
            ttl_seconds,
            entity_refs,
            is_list_query,
        };

        self.memory_bytes.fetch_add(entry_overhead(), Ordering::Relaxed);
        // Wrap in Arc so moka's get() costs one atomic increment, not a full clone.
        self.store.insert(cache_key, Arc::new(cached));
        self.total_cached.fetch_add(1, Ordering::Relaxed);
        Ok(())
    }
473
    /// Store query result in cache.
    ///
    /// If caching is disabled, this is a no-op.
    ///
    /// Wraps `result` in an `Arc` and delegates to [`put_arc`](Self::put_arc).
    /// Prefer [`put_arc`](Self::put_arc) when the caller already holds an `Arc`.
    ///
    /// # Arguments
    ///
    /// * `cache_key` - Cache key (from `generate_cache_key()`)
    /// * `result` - Query result to cache
    /// * `accessed_views` - List of views accessed by this query
    /// * `ttl_override` - Per-entry TTL in seconds; `None` uses `CacheConfig::ttl_seconds`
    /// * `entity_type` - Optional GraphQL type name (e.g. `"User"`) for entity-ID indexing.
    ///   When provided, each row's `"id"` field is extracted and stored in `entity_index`
    ///   so that `invalidate_by_entity()` can perform selective eviction.
    ///
    /// # Errors
    ///
    /// This method is infallible. The `Result` return type is kept for API compatibility.
    ///
    /// # Example
    ///
    /// ```rust
    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
    /// use fraiseql_core::db::types::JsonbValue;
    /// use serde_json::json;
    ///
    /// let cache = QueryResultCache::new(CacheConfig::default());
    ///
    /// let result = vec![JsonbValue::new(json!({"id": "uuid-1"}))];
    /// cache.put(0xabc123, result, vec!["v_user".to_string()], None, Some("User"))?;
    /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
    /// ```
    pub fn put(
        &self,
        cache_key: u64,
        result: Vec<JsonbValue>,
        accessed_views: Vec<String>,
        ttl_override: Option<u64>,
        entity_type: Option<&str>,
    ) -> Result<()> {
        self.put_arc(cache_key, Arc::new(result), accessed_views, ttl_override, entity_type)
    }
518
519    /// Invalidate entries accessing specified views.
520    ///
521    /// Uses the `view_index` for O(k) lookup instead of O(n) full-cache scan.
522    /// Keys accessing multiple views in `views` are deduplicated before invalidation.
523    ///
524    /// # Arguments
525    ///
526    /// * `views` - List of view/table names modified by mutation
527    ///
528    /// # Returns
529    ///
530    /// Number of cache entries invalidated.
531    ///
532    /// # Errors
533    ///
534    /// This method is infallible. The `Result` return type is kept for API compatibility.
535    ///
536    /// # Example
537    ///
538    /// ```rust
539    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
540    ///
541    /// let cache = QueryResultCache::new(CacheConfig::default());
542    ///
543    /// // After createUser mutation
544    /// let invalidated = cache.invalidate_views(&["v_user".to_string()])?;
545    /// println!("Invalidated {} cache entries", invalidated);
546    /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
547    /// ```
548    pub fn invalidate_views(&self, views: &[String]) -> Result<u64> {
549        if !self.config.enabled {
550            return Ok(0);
551        }
552
553        // Collect keys first (releases DashMap guards) then invalidate.
554        // Moka's eviction listener fires synchronously on the calling thread, so
555        // we must NOT hold any DashMap shard guard when calling store.invalidate() —
556        // the listener itself calls view_index.get() on the same shard, which
557        // would deadlock on a non-re-entrant parking_lot::RwLock.
558        let mut keys_to_invalidate: HashSet<u64> = HashSet::new();
559        for view in views {
560            if let Some(keys) = self.view_index.get(view) {
561                // Dedup: a query accessing multiple views in `views` would
562                // otherwise be counted and invalidated once per view.
563                for key in keys.iter() {
564                    keys_to_invalidate.insert(*key);
565                }
566            }
567            // Guard dropped here — safe to proceed
568        }
569
570        #[allow(clippy::cast_possible_truncation)]
571        // Reason: entry count never exceeds u64
572        let count = keys_to_invalidate.len() as u64;
573
574        for key in keys_to_invalidate {
575            self.store.invalidate(&key);
576            // Index cleanup handled by eviction listener.
577        }
578
579        self.invalidations.fetch_add(count, Ordering::Relaxed);
580        Ok(count)
581    }
582
583    /// Evict only list (multi-row) cache entries for the given views.
584    ///
585    /// Unlike `invalidate_views()`, this method leaves single-entity point-lookup
586    /// entries intact. Used for CREATE mutations: creating a new entity does not
587    /// affect queries that fetch a *different* existing entity by UUID, but it
588    /// does invalidate queries that return a variable-length list of entities.
589    ///
590    /// Uses the `list_index` for O(k) lookup.
591    ///
592    /// # Errors
593    ///
594    /// This method is infallible. The `Result` return type is kept for API compatibility.
595    pub fn invalidate_list_queries(&self, views: &[String]) -> Result<u64> {
596        if !self.config.enabled {
597            return Ok(0);
598        }
599
600        let mut keys_to_invalidate: HashSet<u64> = HashSet::new();
601        for view in views {
602            if let Some(keys) = self.list_index.get(view) {
603                for k in keys.iter() {
604                    keys_to_invalidate.insert(*k);
605                }
606            }
607        }
608
609        #[allow(clippy::cast_possible_truncation)]
610        // Reason: entry count never exceeds u64
611        let count = keys_to_invalidate.len() as u64;
612        for key in keys_to_invalidate {
613            self.store.invalidate(&key);
614        }
615        self.invalidations.fetch_add(count, Ordering::Relaxed);
616        Ok(count)
617    }
618
    /// Evict cache entries that contain a specific entity UUID.
    ///
    /// Uses the `entity_index` for O(k) lookup. Entries not referencing this
    /// entity are left untouched.
    ///
    /// # Arguments
    ///
    /// * `entity_type` - GraphQL type name (e.g. `"User"`)
    /// * `entity_id`   - UUID string of the mutated entity
    ///
    /// # Returns
    ///
    /// Number of cache entries evicted.
    ///
    /// # Errors
    ///
    /// This method is infallible. The `Result` return type is kept for API compatibility.
    pub fn invalidate_by_entity(&self, entity_type: &str, entity_id: &str) -> Result<u64> {
        if !self.config.enabled {
            return Ok(0);
        }

        // Short-circuit: if entity_type has no indexed entries, skip the DashMap
        // lookup entirely.  Covers cold-cache and write-heavy workloads where no
        // reads are cached yet.
        // (Benign race: if the entry vanishes between this check and the get()
        // below, the chain yields an empty vec and we correctly invalidate 0.)
        if !self.entity_index.contains_key(entity_type) {
            return Ok(0);
        }

        // Collect keys first (releases DashMap guards) then invalidate.
        // Moka's eviction listener fires synchronously on the calling thread, so
        // we must NOT hold any DashMap shard guard when calling store.invalidate() —
        // the listener itself calls entity_index.get() on the same shard, which
        // would deadlock on a non-re-entrant parking_lot::RwLock.
        let keys_to_invalidate: Vec<u64> = self
            .entity_index
            .get(entity_type)
            .and_then(|by_type| {
                by_type.get(entity_id).map(|keys| keys.iter().map(|k| *k).collect())
            })
            .unwrap_or_default();

        #[allow(clippy::cast_possible_truncation)]
        // Reason: entry count never exceeds u64
        let count = keys_to_invalidate.len() as u64;

        for key in keys_to_invalidate {
            self.store.invalidate(&key);
            // Index cleanup handled by eviction listener.
        }

        self.invalidations.fetch_add(count, Ordering::Relaxed);
        Ok(count)
    }
673
674    /// Get cache metrics snapshot.
675    ///
676    /// Returns a consistent snapshot of current counters. Individual fields may
677    /// be updated independently (atomics), so the snapshot is not a single atomic
678    /// transaction, but is accurate enough for monitoring.
679    ///
680    /// # Errors
681    ///
682    /// This method is infallible. The `Result` return type is kept for API compatibility.
683    ///
684    /// # Example
685    ///
686    /// ```rust
687    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
688    ///
689    /// let cache = QueryResultCache::new(CacheConfig::default());
690    /// let metrics = cache.metrics()?;
691    ///
692    /// println!("Hit rate: {:.1}%", metrics.hit_rate() * 100.0);
693    /// println!("Size: {} / {} entries", metrics.size, 10_000);
694    /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
695    /// ```
696    pub fn metrics(&self) -> Result<CacheMetrics> {
697        Ok(CacheMetrics {
698            hits: self.hits.load(Ordering::Relaxed),
699            misses: self.misses.load(Ordering::Relaxed),
700            total_cached: self.total_cached.load(Ordering::Relaxed),
701            invalidations: self.invalidations.load(Ordering::Relaxed),
702            #[allow(clippy::cast_possible_truncation)]
703            // Reason: entry count fits in usize on any 64-bit target
704            size: self.store.entry_count() as usize,
705            memory_bytes: self.memory_bytes.load(Ordering::Relaxed),
706        })
707    }
708
    /// Clear all cache entries.
    ///
    /// Resets the store, reverse indexes, and `memory_bytes` synchronously.
    /// The eviction listener will still fire asynchronously for each evicted entry,
    /// but its index-cleanup operations will be no-ops on the already-cleared maps.
    ///
    /// # Errors
    ///
    /// This method is infallible. The `Result` return type is kept for API compatibility.
    ///
    /// # Example
    ///
    /// ```rust
    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
    ///
    /// let cache = QueryResultCache::new(CacheConfig::default());
    /// cache.clear()?;
    /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
    /// ```
    pub fn clear(&self) -> Result<()> {
        self.store.invalidate_all();
        // Reset indexes and memory counter synchronously — don't rely on the
        // async eviction listener to do this.
        self.view_index.clear();
        self.entity_index.clear();
        self.list_index.clear();
        self.memory_bytes.store(0, Ordering::Relaxed);
        Ok(())
    }
738}
739
740impl CacheMetrics {
741    /// Calculate cache hit rate.
742    ///
743    /// Returns ratio of hits to total requests (0.0 to 1.0).
744    ///
745    /// # Returns
746    ///
747    /// - `1.0` if all requests were hits
748    /// - `0.0` if all requests were misses
749    /// - `0.0` if no requests yet
750    ///
751    /// # Example
752    ///
753    /// ```rust
754    /// use fraiseql_core::cache::CacheMetrics;
755    ///
756    /// let metrics = CacheMetrics {
757    ///     hits: 80,
758    ///     misses: 20,
759    ///     total_cached: 100,
760    ///     invalidations: 5,
761    ///     size: 95,
762    ///     memory_bytes: 1_000_000,
763    /// };
764    ///
765    /// assert_eq!(metrics.hit_rate(), 0.8);  // 80% hit rate
766    /// ```
767    #[must_use]
768    pub fn hit_rate(&self) -> f64 {
769        let total = self.hits + self.misses;
770        if total == 0 {
771            return 0.0;
772        }
773        #[allow(clippy::cast_precision_loss)]
774        // Reason: hit-rate is a display metric; f64 precision loss on u64 counters is acceptable
775        {
776            self.hits as f64 / total as f64
777        }
778    }
779
780    /// Check if cache is performing well.
781    ///
782    /// Returns `true` if hit rate is above 60% (reasonable threshold).
783    ///
784    /// # Example
785    ///
786    /// ```rust
787    /// use fraiseql_core::cache::CacheMetrics;
788    ///
789    /// let good_metrics = CacheMetrics {
790    ///     hits: 80,
791    ///     misses: 20,
792    ///     total_cached: 100,
793    ///     invalidations: 5,
794    ///     size: 95,
795    ///     memory_bytes: 1_000_000,
796    /// };
797    ///
798    /// assert!(good_metrics.is_healthy());  // 80% > 60%
799    /// ```
800    #[must_use]
801    pub fn is_healthy(&self) -> bool {
802        self.hit_rate() > 0.6
803    }
804}
805
806#[cfg(test)]
807mod tests {
808    #![allow(clippy::unwrap_used)] // Reason: test code, panics are acceptable
809
810    use serde_json::json;
811
812    use super::*;
813
    // Helper: builds a single-row result, so entries cached from it are
    // point lookups (`is_list_query == false`).
    fn test_result() -> Vec<JsonbValue> {
        vec![JsonbValue::new(json!({"id": 1, "name": "test"}))]
    }
818
819    // ========================================================================
820    // Cache Hit/Miss Tests
821    // ========================================================================
822
    #[test]
    fn test_cache_miss() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        // Nothing was ever put — any key must miss.
        let result = cache.get(999_u64).unwrap();
        assert!(result.is_none(), "Should be cache miss");

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.misses, 1);
        assert_eq!(metrics.hits, 0);
    }
834
835    #[test]
836    fn test_cache_put_and_get() {
837        let cache = QueryResultCache::new(CacheConfig::enabled());
838        let result = test_result();
839
840        // Put
841        cache.put(1_u64, result, vec!["v_user".to_string()], None, None).unwrap();
842
843        // Get
844        let cached = cache.get(1_u64).unwrap();
845        assert!(cached.is_some(), "Should be cache hit");
846        assert_eq!(cached.unwrap().len(), 1);
847
848        let metrics = cache.metrics().unwrap();
849        assert_eq!(metrics.hits, 1);
850        assert_eq!(metrics.misses, 0);
851        assert_eq!(metrics.total_cached, 1);
852    }
853
854    #[test]
855    fn test_cache_hit_updates_hit_count() {
856        let cache = QueryResultCache::new(CacheConfig::enabled());
857
858        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
859
860        // First hit
861        cache.get(1_u64).unwrap();
862        // Second hit
863        cache.get(1_u64).unwrap();
864
865        let metrics = cache.metrics().unwrap();
866        assert_eq!(metrics.hits, 2);
867    }
868
869    // ========================================================================
870    // TTL Expiry Tests
871    // ========================================================================
872
873    #[test]
874    fn test_ttl_expiry() {
875        let config = CacheConfig {
876            ttl_seconds: 1,
877            enabled: true,
878            ..Default::default()
879        };
880
881        let cache = QueryResultCache::new(config);
882
883        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
884
885        // Wait for expiry
886        std::thread::sleep(std::time::Duration::from_secs(2));
887        cache.store.run_pending_tasks();
888
889        // Should be expired
890        let result = cache.get(1_u64).unwrap();
891        assert!(result.is_none(), "Entry should be expired");
892
893        let metrics = cache.metrics().unwrap();
894        assert_eq!(metrics.misses, 1); // Expired counts as miss
895    }
896
897    #[test]
898    fn test_per_entry_ttl_override_expires_early() {
899        // Global config has 1-hour TTL but entry overrides to 1 second
900        let config = CacheConfig {
901            ttl_seconds: 3600,
902            enabled: true,
903            ..Default::default()
904        };
905        let cache = QueryResultCache::new(config);
906
907        cache
908            .put(
909                1_u64,
910                test_result(),
911                vec!["v_ref".to_string()],
912                Some(1), // 1-second per-entry override
913                None,
914            )
915            .unwrap();
916
917        std::thread::sleep(std::time::Duration::from_secs(2));
918        cache.store.run_pending_tasks();
919
920        let result = cache.get(1_u64).unwrap();
921        assert!(result.is_none(), "Entry with per-entry TTL=1s should have expired");
922    }
923
924    #[test]
925    fn test_per_entry_ttl_zero_cached_indefinitely() {
926        // TTL=0 = no time-based expiry; entry lives until mutation invalidation.
927        let cache = QueryResultCache::new(CacheConfig::enabled());
928
929        cache
930            .put(1_u64, test_result(), vec!["v_live".to_string()], Some(0), None)
931            .unwrap();
932
933        let result = cache.get(1_u64).unwrap();
934        assert!(result.is_some(), "Entry with TTL=0 should be cached indefinitely");
935    }
936
937    #[test]
938    fn test_ttl_not_expired() {
939        let config = CacheConfig {
940            ttl_seconds: 3600, // 1 hour TTL
941            enabled: true,
942            ..Default::default()
943        };
944
945        let cache = QueryResultCache::new(config);
946
947        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
948
949        // Should still be valid
950        let result = cache.get(1_u64).unwrap();
951        assert!(result.is_some(), "Entry should not be expired");
952    }
953
954    // ========================================================================
955    // Eviction Tests (capacity-based)
956    // ========================================================================
957
958    #[test]
959    fn test_capacity_eviction() {
960        let config = CacheConfig {
961            max_entries: 2,
962            enabled: true,
963            ..Default::default()
964        };
965
966        let cache = QueryResultCache::new(config);
967
968        // Add 3 entries (max is 2); moka will evict one
969        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
970        cache.put(2_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
971        cache.put(3_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
972
973        // Run pending tasks to flush evictions
974        cache.store.run_pending_tasks();
975
976        let metrics = cache.metrics().unwrap();
977        assert!(metrics.size <= 2, "Cache size should not exceed max capacity");
978    }
979
980    // ========================================================================
981    // Cache Disabled Tests
982    // ========================================================================
983
984    #[test]
985    fn test_cache_disabled() {
986        let config = CacheConfig::disabled();
987        let cache = QueryResultCache::new(config);
988
989        // Put should be no-op
990        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
991
992        // Get should return None
993        assert!(cache.get(1_u64).unwrap().is_none(), "Cache disabled should always miss");
994
995        let metrics = cache.metrics().unwrap();
996        assert_eq!(metrics.total_cached, 0);
997    }
998
999    // ========================================================================
1000    // Invalidation Tests
1001    // ========================================================================
1002
1003    #[test]
1004    fn test_invalidate_single_view() {
1005        let cache = QueryResultCache::new(CacheConfig::enabled());
1006
1007        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1008        cache.put(2_u64, test_result(), vec!["v_post".to_string()], None, None).unwrap();
1009
1010        // Invalidate v_user
1011        let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
1012        assert_eq!(invalidated, 1);
1013
1014        // v_user entry gone, v_post remains
1015        assert!(cache.get(1_u64).unwrap().is_none());
1016        assert!(cache.get(2_u64).unwrap().is_some());
1017    }
1018
1019    #[test]
1020    fn test_invalidate_multiple_views() {
1021        let cache = QueryResultCache::new(CacheConfig::enabled());
1022
1023        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1024        cache.put(2_u64, test_result(), vec!["v_post".to_string()], None, None).unwrap();
1025        cache
1026            .put(3_u64, test_result(), vec!["v_product".to_string()], None, None)
1027            .unwrap();
1028
1029        // Invalidate v_user and v_post
1030        let invalidated =
1031            cache.invalidate_views(&["v_user".to_string(), "v_post".to_string()]).unwrap();
1032        assert_eq!(invalidated, 2);
1033
1034        assert!(cache.get(1_u64).unwrap().is_none());
1035        assert!(cache.get(2_u64).unwrap().is_none());
1036        assert!(cache.get(3_u64).unwrap().is_some());
1037    }
1038
1039    #[test]
1040    fn test_invalidate_entry_with_multiple_views() {
1041        let cache = QueryResultCache::new(CacheConfig::enabled());
1042
1043        // Entry accesses both v_user and v_post
1044        cache
1045            .put(
1046                1_u64,
1047                test_result(),
1048                vec!["v_user".to_string(), "v_post".to_string()],
1049                None,
1050                None,
1051            )
1052            .unwrap();
1053
1054        // Invalidating either view should remove the entry
1055        let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
1056        assert_eq!(invalidated, 1);
1057
1058        assert!(cache.get(1_u64).unwrap().is_none());
1059    }
1060
1061    #[test]
1062    fn test_invalidate_nonexistent_view() {
1063        let cache = QueryResultCache::new(CacheConfig::enabled());
1064
1065        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1066
1067        // Invalidate view that doesn't exist
1068        let invalidated = cache.invalidate_views(&["v_nonexistent".to_string()]).unwrap();
1069        assert_eq!(invalidated, 0);
1070
1071        // Entry should remain
1072        assert!(cache.get(1_u64).unwrap().is_some());
1073    }
1074
1075    // ========================================================================
1076    // Clear Tests
1077    // ========================================================================
1078
1079    #[test]
1080    fn test_clear() {
1081        let cache = QueryResultCache::new(CacheConfig::enabled());
1082
1083        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1084        cache.put(2_u64, test_result(), vec!["v_post".to_string()], None, None).unwrap();
1085
1086        cache.clear().unwrap();
1087
1088        // Run pending tasks to flush moka's eviction pipeline
1089        cache.store.run_pending_tasks();
1090
1091        assert!(cache.get(1_u64).unwrap().is_none());
1092        assert!(cache.get(2_u64).unwrap().is_none());
1093
1094        let metrics = cache.metrics().unwrap();
1095        assert_eq!(metrics.size, 0);
1096    }
1097
1098    // ========================================================================
1099    // Metrics Tests
1100    // ========================================================================
1101
1102    #[test]
1103    fn test_metrics_tracking() {
1104        let cache = QueryResultCache::new(CacheConfig::enabled());
1105
1106        // Miss
1107        cache.get(999_u64).unwrap();
1108
1109        // Put
1110        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1111
1112        // Hit
1113        cache.get(1_u64).unwrap();
1114
1115        // moka::sync::Cache entry_count() is eventually consistent — flush pending
1116        // write operations before asserting on size.
1117        cache.store.run_pending_tasks();
1118
1119        let metrics = cache.metrics().unwrap();
1120        assert_eq!(metrics.hits, 1);
1121        assert_eq!(metrics.misses, 1);
1122        assert_eq!(metrics.size, 1);
1123        assert_eq!(metrics.total_cached, 1);
1124    }
1125
1126    #[test]
1127    fn test_metrics_hit_rate() {
1128        let metrics = CacheMetrics {
1129            hits:          80,
1130            misses:        20,
1131            total_cached:  100,
1132            invalidations: 5,
1133            size:          95,
1134            memory_bytes:  1_000_000,
1135        };
1136
1137        assert!((metrics.hit_rate() - 0.8).abs() < f64::EPSILON);
1138        assert!(metrics.is_healthy());
1139    }
1140
1141    #[test]
1142    fn test_metrics_hit_rate_zero_requests() {
1143        let metrics = CacheMetrics {
1144            hits:          0,
1145            misses:        0,
1146            total_cached:  0,
1147            invalidations: 0,
1148            size:          0,
1149            memory_bytes:  0,
1150        };
1151
1152        assert!((metrics.hit_rate() - 0.0).abs() < f64::EPSILON);
1153        assert!(!metrics.is_healthy());
1154    }
1155
1156    #[test]
1157    fn test_metrics_is_healthy() {
1158        let good = CacheMetrics {
1159            hits:          70,
1160            misses:        30,
1161            total_cached:  100,
1162            invalidations: 5,
1163            size:          95,
1164            memory_bytes:  1_000_000,
1165        };
1166        assert!(good.is_healthy()); // 70% > 60%
1167
1168        let bad = CacheMetrics {
1169            hits:          50,
1170            misses:        50,
1171            total_cached:  100,
1172            invalidations: 5,
1173            size:          95,
1174            memory_bytes:  1_000_000,
1175        };
1176        assert!(!bad.is_healthy()); // 50% < 60%
1177    }
1178
1179    // ========================================================================
1180    // Entity-Aware Invalidation Tests
1181    // ========================================================================
1182
1183    fn entity_result(id: &str) -> Vec<JsonbValue> {
1184        vec![JsonbValue::new(serde_json::json!({"id": id, "name": "test"}))]
1185    }
1186
1187    #[test]
1188    fn test_invalidate_by_entity_only_removes_matching_entries() {
1189        let cache = QueryResultCache::new(CacheConfig::enabled());
1190
1191        // Cache User A and User B as separate entries
1192        cache
1193            .put(1_u64, entity_result("uuid-a"), vec!["v_user".to_string()], None, Some("User"))
1194            .unwrap();
1195        cache
1196            .put(2_u64, entity_result("uuid-b"), vec!["v_user".to_string()], None, Some("User"))
1197            .unwrap();
1198
1199        // Invalidate User A — User B must remain
1200        let evicted = cache.invalidate_by_entity("User", "uuid-a").unwrap();
1201        assert_eq!(evicted, 1);
1202        assert!(cache.get(1_u64).unwrap().is_none(), "User A should be evicted");
1203        assert!(cache.get(2_u64).unwrap().is_some(), "User B should remain");
1204    }
1205
1206    #[test]
1207    fn test_invalidate_by_entity_removes_list_containing_entity() {
1208        let cache = QueryResultCache::new(CacheConfig::enabled());
1209
1210        // Cache a single-entity entry (entity_ref uses first row's id)
1211        cache
1212            .put(
1213                1_u64,
1214                entity_result("uuid-a"),
1215                vec!["v_user".to_string()],
1216                None,
1217                Some("User"),
1218            )
1219            .unwrap();
1220
1221        // Invalidate by User A
1222        let evicted = cache.invalidate_by_entity("User", "uuid-a").unwrap();
1223        assert_eq!(evicted, 1);
1224        assert!(cache.get(1_u64).unwrap().is_none(), "Entry for A should be evicted");
1225    }
1226
1227    #[test]
1228    fn test_invalidate_by_entity_leaves_unrelated_types() {
1229        let cache = QueryResultCache::new(CacheConfig::enabled());
1230
1231        // Cache a User entry and a Post entry
1232        cache
1233            .put(
1234                1_u64,
1235                entity_result("uuid-user"),
1236                vec!["v_user".to_string()],
1237                None,
1238                Some("User"),
1239            )
1240            .unwrap();
1241        cache
1242            .put(
1243                2_u64,
1244                entity_result("uuid-post"),
1245                vec!["v_post".to_string()],
1246                None,
1247                Some("Post"),
1248            )
1249            .unwrap();
1250
1251        // Invalidate the User — Post entry must remain untouched
1252        let evicted = cache.invalidate_by_entity("User", "uuid-user").unwrap();
1253        assert_eq!(evicted, 1);
1254        assert!(cache.get(1_u64).unwrap().is_none(), "User entry should be evicted");
1255        assert!(cache.get(2_u64).unwrap().is_some(), "Post entry should remain");
1256    }
1257
1258    #[test]
1259    fn test_put_builds_entity_id_index() {
1260        let cache = QueryResultCache::new(CacheConfig::enabled());
1261
1262        cache
1263            .put(
1264                1_u64,
1265                entity_result("uuid-1"),
1266                vec!["v_user".to_string()],
1267                None,
1268                Some("User"),
1269            )
1270            .unwrap();
1271
1272        // Invalidating by uuid-1 should evict the entry
1273        let evicted = cache.invalidate_by_entity("User", "uuid-1").unwrap();
1274        assert_eq!(evicted, 1);
1275        assert!(cache.get(1_u64).unwrap().is_none());
1276    }
1277
1278    #[test]
1279    fn test_put_without_entity_type_not_indexed() {
1280        let cache = QueryResultCache::new(CacheConfig::enabled());
1281
1282        cache
1283            .put(
1284                1_u64,
1285                entity_result("uuid-1"),
1286                vec!["v_user".to_string()],
1287                None,
1288                None, // no entity type
1289            )
1290            .unwrap();
1291
1292        // invalidate_by_entity should not match (no index was built)
1293        let evicted = cache.invalidate_by_entity("User", "uuid-1").unwrap();
1294        assert_eq!(evicted, 0);
1295        assert!(cache.get(1_u64).unwrap().is_some(), "Non-indexed entry should remain");
1296    }
1297
1298    // ========================================================================
1299    // Multi-entity indexing + list_index / invalidate_list_queries tests
1300    // ========================================================================
1301
1302    fn list_result(ids: &[&str]) -> Vec<JsonbValue> {
1303        ids.iter()
1304            .map(|id| JsonbValue::new(serde_json::json!({"id": id, "name": "test"})))
1305            .collect()
1306    }
1307
1308    #[test]
1309    fn test_put_indexes_all_entities_in_list() {
1310        let cache = QueryResultCache::new(CacheConfig::enabled());
1311        let rows = list_result(&["uuid-A", "uuid-B", "uuid-C"]);
1312        cache.put(0xABC, rows, vec!["v_user".to_string()], None, Some("User")).unwrap();
1313
1314        let evicted_a = cache.invalidate_by_entity("User", "uuid-A").unwrap();
1315        assert_eq!(evicted_a, 1, "uuid-A must be indexed and evictable");
1316
1317        // Re-insert to test uuid-C
1318        let rows2 = list_result(&["uuid-A", "uuid-B", "uuid-C"]);
1319        cache.put(0xDEF, rows2, vec!["v_user".to_string()], None, Some("User")).unwrap();
1320        let evicted_c = cache.invalidate_by_entity("User", "uuid-C").unwrap();
1321        assert_eq!(evicted_c, 1, "uuid-C at position 2 must also be indexed");
1322    }
1323
1324    #[test]
1325    fn test_update_evicts_list_query_via_non_first_entity() {
1326        let cache = QueryResultCache::new(CacheConfig::enabled());
1327        let rows = list_result(&["uuid-A", "uuid-B"]);
1328        cache.put(0x111, rows, vec!["v_user".to_string()], None, Some("User")).unwrap();
1329
1330        // uuid-B is at position 1 — must still be evicted
1331        let evicted = cache.invalidate_by_entity("User", "uuid-B").unwrap();
1332        assert_eq!(evicted, 1);
1333        assert!(cache.get(0x111).unwrap().is_none(), "list entry containing uuid-B must be gone");
1334    }
1335
1336    #[test]
1337    fn test_invalidate_list_queries_spares_point_lookups() {
1338        let cache = QueryResultCache::new(CacheConfig::enabled());
1339
1340        // Point lookup: single row
1341        let single = vec![JsonbValue::new(serde_json::json!({"id": "uuid-X"}))];
1342        cache.put(0x001, single, vec!["v_user".to_string()], None, Some("User")).unwrap();
1343
1344        // List query: multiple rows
1345        let list = list_result(&["uuid-A", "uuid-B"]);
1346        cache.put(0x002, list, vec!["v_user".to_string()], None, Some("User")).unwrap();
1347
1348        // CREATE fires invalidate_list_queries
1349        let evicted = cache.invalidate_list_queries(&["v_user".to_string()]).unwrap();
1350        assert_eq!(evicted, 1, "only the list entry should be evicted");
1351        assert!(cache.get(0x001).unwrap().is_some(), "point lookup must survive");
1352        assert!(cache.get(0x002).unwrap().is_none(), "list entry must be evicted");
1353    }
1354
1355    #[test]
1356    fn test_invalidate_by_entity_short_circuits_on_empty_index() {
1357        let cache = QueryResultCache::new(CacheConfig::enabled());
1358        // Nothing cached — must return 0 without panicking
1359        let count = cache.invalidate_by_entity("User", "uuid-X").unwrap();
1360        assert_eq!(count, 0);
1361    }
1362
1363    #[test]
1364    fn test_eviction_listener_cleans_all_entity_refs() {
1365        let cache = QueryResultCache::new(CacheConfig::enabled());
1366        let rows = list_result(&["uuid-A", "uuid-B"]);
1367        cache.put(0x001, rows, vec!["v_user".to_string()], None, Some("User")).unwrap();
1368
1369        // Force eviction via invalidate_views
1370        cache.invalidate_views(&["v_user".to_string()]).unwrap();
1371        // Flush moka's async eviction pipeline
1372        cache.store.run_pending_tasks();
1373
1374        // After eviction the entity_index must be cleaned up (no dangling refs)
1375        let count_a = cache.invalidate_by_entity("User", "uuid-A").unwrap();
1376        let count_b = cache.invalidate_by_entity("User", "uuid-B").unwrap();
1377        assert_eq!(count_a, 0, "entity_index must be clean after eviction");
1378        assert_eq!(count_b, 0, "entity_index must be clean after eviction");
1379    }
1380
1381    // ========================================================================
1382    // Thread Safety Tests
1383    // ========================================================================
1384
1385    #[test]
1386    fn test_concurrent_access() {
1387        use std::{sync::Arc, thread};
1388
1389        let cache = Arc::new(QueryResultCache::new(CacheConfig::enabled()));
1390
1391        // Spawn multiple threads accessing cache
1392        let handles: Vec<_> = (0_u64..10)
1393            .map(|key| {
1394                let cache_clone = cache.clone();
1395                thread::spawn(move || {
1396                    cache_clone
1397                        .put(key, test_result(), vec!["v_user".to_string()], None, None)
1398                        .unwrap();
1399                    cache_clone.get(key).unwrap();
1400                })
1401            })
1402            .collect();
1403
1404        for handle in handles {
1405            handle.join().unwrap();
1406        }
1407
1408        let metrics = cache.metrics().unwrap();
1409        assert_eq!(metrics.total_cached, 10);
1410        assert_eq!(metrics.hits, 10);
1411    }
1412
1413    // ========================================================================
1414    // Sentinel tests — boundary guards for mutation testing
1415    // ========================================================================
1416
1417    /// Sentinel: `cache_list_queries = false` must skip results with >1 row.
1418    ///
1419    /// Kills the `> → >=` mutation at the list-query guard: `result.len() > 1`.
1420    #[test]
1421    fn test_cache_list_queries_false_skips_multi_row() {
1422        let config = CacheConfig {
1423            enabled: true,
1424            cache_list_queries: false,
1425            ..CacheConfig::default()
1426        };
1427        let cache = QueryResultCache::new(config);
1428
1429        // Two-row result: must be skipped (killed by > → >= mutant)
1430        let two_rows = vec![
1431            JsonbValue::new(json!({"id": 1})),
1432            JsonbValue::new(json!({"id": 2})),
1433        ];
1434        cache.put(1_u64, two_rows, vec!["v_user".to_string()], None, None).unwrap();
1435        assert!(
1436            cache.get(1_u64).unwrap().is_none(),
1437            "multi-row result must not be cached when cache_list_queries=false"
1438        );
1439    }
1440
1441    /// Sentinel: `cache_list_queries = false` must still store single-row results.
1442    ///
1443    /// Complements the above: the single-row path must remain unaffected.
1444    #[test]
1445    fn test_cache_list_queries_false_allows_single_row() {
1446        let config = CacheConfig {
1447            enabled: true,
1448            cache_list_queries: false,
1449            ..CacheConfig::default()
1450        };
1451        let cache = QueryResultCache::new(config);
1452
1453        // One-row result: must be stored
1454        let one_row = vec![JsonbValue::new(json!({"id": 1}))];
1455        cache.put(1_u64, one_row, vec!["v_user".to_string()], None, None).unwrap();
1456        assert!(
1457            cache.get(1_u64).unwrap().is_some(),
1458            "single-row result must be cached even when cache_list_queries=false"
1459        );
1460    }
1461
1462    /// Sentinel: entries exceeding `max_entry_bytes` must be silently skipped.
1463    ///
1464    /// Kills mutations on the `estimated > max_entry` guard.
1465    #[test]
1466    fn test_max_entry_bytes_skips_oversized_entry() {
1467        let config = CacheConfig {
1468            enabled: true,
1469            max_entry_bytes: Some(10), // 10 bytes — smaller than any JSON row
1470            ..CacheConfig::default()
1471        };
1472        let cache = QueryResultCache::new(config);
1473
1474        // A typical row serialises to far more than 10 bytes
1475        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1476        assert!(cache.get(1_u64).unwrap().is_none(), "oversized entry must be silently skipped");
1477    }
1478
1479    /// Sentinel: entries within `max_entry_bytes` must be stored normally.
1480    ///
1481    /// Complements the above to pin both sides of the size boundary.
1482    #[test]
1483    fn test_max_entry_bytes_allows_small_entry() {
1484        let config = CacheConfig {
1485            enabled: true,
1486            max_entry_bytes: Some(100_000), // 100 KB — plenty for a test row
1487            ..CacheConfig::default()
1488        };
1489        let cache = QueryResultCache::new(config);
1490
1491        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1492        assert!(
1493            cache.get(1_u64).unwrap().is_some(),
1494            "small entry must be cached when within max_entry_bytes"
1495        );
1496    }
1497
1498    /// Sentinel: `put()` must skip new entries when `max_total_bytes` budget is exhausted.
1499    ///
1500    /// Kills mutations on the `current >= max_total` guard.
1501    #[test]
1502    fn test_max_total_bytes_skips_when_budget_exhausted() {
1503        let config = CacheConfig {
1504            enabled: true,
1505            max_total_bytes: Some(0), // 0 bytes — always exhausted
1506            ..CacheConfig::default()
1507        };
1508        let cache = QueryResultCache::new(config);
1509
1510        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1511        assert!(
1512            cache.get(1_u64).unwrap().is_none(),
1513            "entry must be skipped when max_total_bytes budget is already exhausted"
1514        );
1515    }
1516
1517    // ========================================================================
1518    // Cross-key invalidation Tests (replaces cross-shard tests)
1519    // ========================================================================
1520
1521    /// `invalidate_views` clears matching entries regardless of cache key.
1522    #[test]
1523    fn test_cross_key_view_invalidation() {
1524        let config = CacheConfig {
1525            max_entries: 10_000,
1526            enabled: true,
1527            ..CacheConfig::default()
1528        };
1529        let cache = QueryResultCache::new(config);
1530
1531        // Insert many entries
1532        for i in 0_u64..200 {
1533            let view = if i % 2 == 0 { "v_user" } else { "v_post" };
1534            cache.put(i, test_result(), vec![view.to_string()], None, None).unwrap();
1535        }
1536
1537        // Invalidate v_user — should remove exactly 100 entries
1538        let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
1539        assert_eq!(invalidated, 100);
1540
1541        // All v_user entries gone, all v_post entries remain
1542        for i in 0_u64..200 {
1543            if i % 2 == 0 {
1544                assert!(cache.get(i).unwrap().is_none(), "v_user entry should be invalidated");
1545            } else {
1546                assert!(cache.get(i).unwrap().is_some(), "v_post entry should remain");
1547            }
1548        }
1549    }
1550
1551    /// Cross-key entity invalidation works across all cache keys.
1552    #[test]
1553    fn test_cross_key_entity_invalidation() {
1554        let config = CacheConfig {
1555            max_entries: 10_000,
1556            enabled: true,
1557            ..CacheConfig::default()
1558        };
1559        let cache = QueryResultCache::new(config);
1560
1561        // Insert entries for the same entity across different cache keys
1562        for i in 0_u64..50 {
1563            cache
1564                .put(
1565                    i,
1566                    entity_result("uuid-target"),
1567                    vec!["v_user".to_string()],
1568                    None,
1569                    Some("User"),
1570                )
1571                .unwrap();
1572        }
1573
1574        // Also insert an unrelated entry
1575        cache
1576            .put(
1577                999_u64,
1578                entity_result("uuid-other"),
1579                vec!["v_user".to_string()],
1580                None,
1581                Some("User"),
1582            )
1583            .unwrap();
1584
1585        let evicted = cache.invalidate_by_entity("User", "uuid-target").unwrap();
1586        assert_eq!(evicted, 50);
1587        assert!(cache.get(999_u64).unwrap().is_some(), "unrelated entity should remain");
1588    }
1589
1590    /// Clear works for all entries.
1591    #[test]
1592    fn test_clear_all() {
1593        let config = CacheConfig {
1594            max_entries: 10_000,
1595            enabled: true,
1596            ..CacheConfig::default()
1597        };
1598        let cache = QueryResultCache::new(config);
1599
1600        for i in 0_u64..200 {
1601            cache.put(i, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1602        }
1603
1604        cache.clear().unwrap();
1605        cache.store.run_pending_tasks();
1606
1607        let metrics = cache.metrics().unwrap();
1608        assert_eq!(metrics.size, 0);
1609
1610        for i in 0_u64..200 {
1611            assert!(cache.get(i).unwrap().is_none());
1612        }
1613    }
1614
1615    /// `memory_bytes` is tracked and reported via `metrics()`.
1616    #[test]
1617    fn test_memory_bytes_tracked() {
1618        let cache = QueryResultCache::new(CacheConfig::enabled());
1619
1620        cache.put(1_u64, test_result(), vec!["v".to_string()], None, None).unwrap();
1621        cache.put(2_u64, test_result(), vec!["v".to_string()], None, None).unwrap();
1622
1623        let before = cache.metrics().unwrap().memory_bytes;
1624        assert!(before > 0, "memory_bytes should be tracked");
1625    }
1626
1627    /// `memory_bytes` decreases after invalidation (synchronously via clear).
1628    #[test]
1629    fn test_memory_bytes_decreases_on_clear() {
1630        let cache = QueryResultCache::new(CacheConfig::enabled());
1631
1632        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1633
1634        let before = cache.metrics().unwrap().memory_bytes;
1635        assert!(before > 0);
1636
1637        cache.clear().unwrap();
1638
1639        let after = cache.metrics().unwrap().memory_bytes;
1640        assert_eq!(after, 0, "memory_bytes should be zero after clear()");
1641    }
1642
1643    // ========================================================================
1644    // Concurrency regression test (#185)
1645    // ========================================================================
1646
1647    /// Regression guard for #185: LRU+Mutex serialized all hot-key reads through
1648    /// one shard's mutex. With moka, reads are lock-free and should scale near-
1649    /// linearly with thread count.
1650    #[test]
1651    #[ignore = "wall-clock dependent — run manually to confirm lock-free read scaling"]
1652    fn test_concurrent_reads_do_not_serialize() {
1653        const ITERS: usize = 10_000;
1654        let config = CacheConfig::enabled();
1655        let cache = Arc::new(QueryResultCache::new(config));
1656        let key = 42_u64;
1657        cache.put(key, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1658
1659        // Single-threaded baseline
1660        let start = std::time::Instant::now();
1661        for _ in 0..ITERS {
1662            let _ = cache.get(key).unwrap();
1663        }
1664        let single_elapsed = start.elapsed();
1665
1666        // 40-thread concurrent
1667        let start = std::time::Instant::now();
1668        let handles: Vec<_> = (0..40)
1669            .map(|_| {
1670                let c = Arc::clone(&cache);
1671                std::thread::spawn(move || {
1672                    for _ in 0..ITERS {
1673                        let _ = c.get(key).unwrap();
1674                    }
1675                })
1676            })
1677            .collect();
1678        for h in handles {
1679            h.join().unwrap();
1680        }
1681        let multi_elapsed = start.elapsed();
1682
1683        // 40× the work in ≤2× the time → near-linear scaling.
1684        // Under old LRU+Mutex, 40-thread took ~20-40× single-thread time.
1685        assert!(
1686            multi_elapsed <= single_elapsed * 2,
1687            "40-thread ({:?}) was more than 2× single-thread ({:?}) — suggests serialization",
1688            multi_elapsed,
1689            single_elapsed,
1690        );
1691    }
1692}