fraiseql_core/cache/result.rs
1//! Query result caching with W-TinyLFU eviction and per-entry TTL.
2//!
3//! This module provides a `moka::sync::Cache`-backed store for GraphQL query results.
4//! Moka uses Concurrent W-TinyLFU policy with lock-free reads — cache hits do NOT
5//! acquire any shared lock, eliminating the hot-key serialisation bottleneck present
6//! in the old 64-shard `parking_lot::Mutex<LruCache>` design.
7//!
8//! ## Performance characteristics
9//!
10//! - **`get()` hot path** (cache hit): lock-free frequency-counter update (thread-local ring
11//! buffer, drained lazily on writes), `Arc` clone (single atomic increment), one atomic counter
12//! bump.
13//! - **`put()` path**: early-exit guards (disabled / list / size) before touching the store.
14//! Reverse-index updates use `DashMap` (fine-grained sharding, no global lock).
15//! - **`metrics()`**: reads `store.entry_count()` directly — no shard scan.
16//! - **`invalidate_views()` / `invalidate_by_entity()`**: O(k) where k = matching entries (via
17//! reverse indexes), not O(total entries).
18//!
19//! ## Reverse indexes
20//!
21//! Because `moka` does not support arbitrary iteration, view-based and entity-based
22//! invalidation rely on two `DashMap` reverse indexes maintained alongside the store:
23//!
24//! ```text
25//! view_index: DashMap<view_name, DashSet<cache_key>>
26//! entity_index: DashMap<entity_type, DashMap<entity_id, DashSet<cache_key>>>
27//! ```
28//!
29//! Indexes are populated in `put()` and pruned via moka's eviction listener (fired
30//! asynchronously). `clear()` resets all indexes synchronously.
31
32use std::{
33 collections::HashSet,
34 sync::{
35 Arc,
36 atomic::{AtomicU64, AtomicUsize, Ordering},
37 },
38 time::Duration,
39};
40
41use dashmap::{DashMap, DashSet};
42use moka::sync::Cache as MokaCache;
43use serde::{Deserialize, Serialize};
44
45use super::config::CacheConfig;
46use crate::{db::types::JsonbValue, error::Result};
47
/// Cached query result with metadata.
///
/// Stores the query result along with tracking information for
/// TTL expiry, view-based invalidation, and monitoring.
#[derive(Debug, Clone)]
pub struct CachedResult {
    /// The actual query result (JSONB array from database).
    ///
    /// Wrapped in `Arc` for cheap cloning on cache hits (zero-copy).
    pub result: Arc<Vec<JsonbValue>>,

    /// Which views/tables this query accesses.
    ///
    /// Format: `vec!["v_user", "v_post"]`
    ///
    /// Stored as a boxed slice (no excess capacity) since views are fixed
    /// at `put()` time and never modified. Read by the eviction listener to
    /// prune the view/list reverse indexes when this entry is removed.
    pub accessed_views: Box<[String]>,

    /// When this entry was cached (Unix timestamp in seconds).
    ///
    /// Wall-clock timestamp for debugging only. TTL enforcement is handled by
    /// moka internally via `CacheEntryExpiry`, not by comparing this field.
    pub cached_at: u64,

    /// Per-entry TTL in seconds. `0` means "no time-based expiry".
    ///
    /// Overrides `CacheConfig::ttl_seconds` when set via `put(..., Some(ttl))`.
    /// Read by `CacheEntryExpiry::expire_after_create` to tell moka the expiry.
    pub ttl_seconds: u64,

    /// Entity references for selective entity-level invalidation.
    ///
    /// Contains one `(entity_type, entity_id)` pair per row in `result` that has
    /// a valid string in its `"id"` field. Empty for queries with no `id` column
    /// or when `put()` is called without an `entity_type`.
    /// Used by the eviction listener to clean up `entity_index` on eviction.
    pub entity_refs: Box<[(String, String)]>,

    /// True when `result.len() > 1` at put time.
    ///
    /// Used by `invalidate_list_queries()` to avoid evicting single-entity
    /// point-lookup entries on CREATE mutations.
    pub is_list_query: bool,
}
93
94/// Moka `Expiry` implementation: reads TTL from `CachedResult.ttl_seconds`.
95struct CacheEntryExpiry;
96
97impl moka::Expiry<u64, Arc<CachedResult>> for CacheEntryExpiry {
98 fn expire_after_create(
99 &self,
100 _key: &u64,
101 value: &Arc<CachedResult>,
102 _created_at: std::time::Instant,
103 ) -> Option<Duration> {
104 if value.ttl_seconds == 0 {
105 // TTL=0 means "no time-based expiry" — entry lives until explicitly
106 // invalidated by a mutation. Return None so moka never schedules
107 // a timer-wheel eviction for this entry.
108 None
109 } else {
110 Some(Duration::from_secs(value.ttl_seconds))
111 }
112 }
113
114 // `expire_after_read` is intentionally NOT overridden.
115 //
116 // Moka's default returns `None` (no change to the timer) which skips the
117 // internal timer-wheel reschedule on every get(). Overriding it to return
118 // `duration_until_expiry` — even though the value is semantically unchanged —
119 // forces moka to acquire its timer-wheel lock on every cache hit. Under 40
120 // concurrent workers reading the same key, that lock becomes the new hot-key
121 // bottleneck, serialising reads and degrading list-query throughput ~3×.
122 //
123 // Entries expire at creation_time + ttl_seconds regardless of read frequency,
124 // which is the correct fixed-TTL semantics for query result caching.
125}
126
/// Thread-safe W-TinyLFU cache for query results.
///
/// Backed by [`moka::sync::Cache`] which provides lock-free reads via
/// Concurrent `TinyLFU`. Reverse `DashMap` indexes enable O(k) invalidation.
///
/// # Thread Safety
///
/// `moka::sync::Cache` is `Send + Sync`. All reverse indexes use `DashMap`
/// (fine-grained shard locking) and `DashSet` (also shard-locked). There is no
/// global mutex on the read path.
///
/// # Example
///
/// ```rust
/// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
/// use fraiseql_core::db::types::JsonbValue;
/// use serde_json::json;
///
/// let cache = QueryResultCache::new(CacheConfig::default());
///
/// // Cache a result
/// let result = vec![JsonbValue::new(json!({"id": 1, "name": "Alice"}))];
/// cache.put(
///     12345_u64,
///     result.clone(),
///     vec!["v_user".to_string()],
///     None, // use global TTL
///     None, // no entity type index
/// ).unwrap();
///
/// // Retrieve from cache
/// if let Some(cached) = cache.get(12345).unwrap() {
///     println!("Cache hit! {} results", cached.len());
/// }
/// ```
pub struct QueryResultCache {
    /// Moka W-TinyLFU store.
    ///
    /// `Arc<CachedResult>` rather than `CachedResult` so that `get()` returns in
    /// one atomic increment instead of deep-cloning the struct (which would copy
    /// `accessed_views: Box<[String]>` on every cache hit).
    store: MokaCache<u64, Arc<CachedResult>>,

    /// Configuration (immutable after creation).
    config: CacheConfig,

    // Metrics counters — `Relaxed` ordering is sufficient: these counters are
    // used only for monitoring, not for correctness or synchronisation.
    hits: AtomicU64,
    misses: AtomicU64,
    total_cached: AtomicU64,
    invalidations: AtomicU64,

    /// Estimated total memory in use (entry-overhead accounting, not payload bytes).
    ///
    /// Wrapped in `Arc` so the eviction listener closure (which requires `'static`)
    /// can hold a clone and decrement on eviction.
    memory_bytes: Arc<AtomicUsize>,

    /// Reverse index: view name → set of cache keys accessing that view.
    view_index: Arc<DashMap<String, DashSet<u64>>>,

    /// Reverse index: entity type → entity id → set of cache keys.
    entity_index: Arc<DashMap<String, DashMap<String, DashSet<u64>>>>,

    /// Reverse index: view name → set of cache keys for list (multi-row) entries only.
    ///
    /// Populated in `put_arc()` when `result.len() > 1`. Used by
    /// `invalidate_list_queries()` for CREATE-targeted eviction that leaves
    /// point-lookup entries intact.
    list_index: Arc<DashMap<String, DashSet<u64>>>,
}
199
/// Cache metrics for monitoring.
///
/// Exposed via API for observability and debugging.
/// A plain-data snapshot: all fields are copied out of atomics by
/// `QueryResultCache::metrics()` and are not live views.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheMetrics {
    /// Number of cache hits (returned cached result).
    pub hits: u64,

    /// Number of cache misses (executed query).
    pub misses: u64,

    /// Total entries cached across all time.
    pub total_cached: u64,

    /// Number of invalidations triggered.
    pub invalidations: u64,

    /// Current size of cache (number of entries).
    pub size: usize,

    /// Estimated memory usage in bytes.
    ///
    /// This is a rough estimate based on `CachedResult` struct size.
    /// Actual memory usage may vary based on result sizes.
    pub memory_bytes: usize,
}
226
227/// Estimate the per-entry accounting overhead.
228const fn entry_overhead() -> usize {
229 std::mem::size_of::<CachedResult>() + std::mem::size_of::<u64>() * 2
230}
231
232/// Build the moka store, wiring the eviction listener to the reverse indexes
233/// and memory counter.
234fn build_store(
235 config: &CacheConfig,
236 memory_bytes: Arc<AtomicUsize>,
237 view_index: Arc<DashMap<String, DashSet<u64>>>,
238 entity_index: Arc<DashMap<String, DashMap<String, DashSet<u64>>>>,
239 list_index: Arc<DashMap<String, DashSet<u64>>>,
240) -> MokaCache<u64, Arc<CachedResult>> {
241 let max_cap = config.max_entries as u64;
242 let mb = memory_bytes;
243 let vi = view_index;
244 let ei = entity_index;
245 let li = list_index;
246
247 MokaCache::builder()
248 .max_capacity(max_cap)
249 .expire_after(CacheEntryExpiry)
250 .eviction_listener(move |key: Arc<u64>, value: Arc<CachedResult>, _cause| {
251 // Decrement memory budget so put()'s byte-gate stays accurate.
252 mb.fetch_sub(entry_overhead(), Ordering::Relaxed);
253
254 // Remove key from view index.
255 for view in &value.accessed_views {
256 if let Some(keys) = vi.get(view) {
257 keys.remove(&*key);
258 }
259 }
260
261 // Remove key from list index (only populated for multi-row entries).
262 if value.is_list_query {
263 for view in &value.accessed_views {
264 if let Some(keys) = li.get(view) {
265 keys.remove(&*key);
266 }
267 }
268 }
269
270 // Remove ALL entity_refs from entity index.
271 for (et, id) in &*value.entity_refs {
272 if let Some(by_type) = ei.get(et) {
273 if let Some(keys) = by_type.get(id) {
274 keys.remove(&*key);
275 }
276 }
277 }
278 })
279 .build()
280}
281
282impl QueryResultCache {
283 /// Create new cache with configuration.
284 ///
285 /// # Panics
286 ///
287 /// Panics if `config.max_entries` is 0 (invalid configuration).
288 ///
289 /// # Example
290 ///
291 /// ```rust
292 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
293 ///
294 /// let cache = QueryResultCache::new(CacheConfig::default());
295 /// ```
296 #[must_use]
297 pub fn new(config: CacheConfig) -> Self {
298 assert!(config.max_entries > 0, "max_entries must be > 0");
299
300 let memory_bytes = Arc::new(AtomicUsize::new(0));
301 let view_index: Arc<DashMap<String, DashSet<u64>>> = Arc::new(DashMap::new());
302 let entity_index: Arc<DashMap<String, DashMap<String, DashSet<u64>>>> =
303 Arc::new(DashMap::new());
304 let list_index: Arc<DashMap<String, DashSet<u64>>> = Arc::new(DashMap::new());
305
306 let store = build_store(
307 &config,
308 Arc::clone(&memory_bytes),
309 Arc::clone(&view_index),
310 Arc::clone(&entity_index),
311 Arc::clone(&list_index),
312 );
313
314 Self {
315 store,
316 config,
317 hits: AtomicU64::new(0),
318 misses: AtomicU64::new(0),
319 total_cached: AtomicU64::new(0),
320 invalidations: AtomicU64::new(0),
321 memory_bytes,
322 view_index,
323 entity_index,
324 list_index,
325 }
326 }
327
    /// Returns whether caching is enabled.
    ///
    /// Used by `CachedDatabaseAdapter` to short-circuit key generation
    /// and result clone overhead when caching is disabled.
    ///
    /// Reads only the immutable config flag, so the value never changes
    /// after construction.
    #[must_use]
    pub const fn is_enabled(&self) -> bool {
        self.config.enabled
    }
336
337 /// Look up a cached result by its cache key.
338 ///
339 /// Returns `None` when caching is disabled or the key is not present or expired.
340 /// Moka handles TTL expiry internally — if `get()` returns `Some`, the entry is live.
341 ///
342 /// # Errors
343 ///
344 /// This method is infallible. The `Result` return type is kept for API compatibility.
345 pub fn get(&self, cache_key: u64) -> Result<Option<Arc<Vec<JsonbValue>>>> {
346 if !self.config.enabled {
347 return Ok(None);
348 }
349
350 // moka::sync::Cache::get() is lock-free on the read path.
351 if let Some(cached) = self.store.get(&cache_key) {
352 self.hits.fetch_add(1, Ordering::Relaxed);
353 Ok(Some(Arc::clone(&cached.result)))
354 } else {
355 self.misses.fetch_add(1, Ordering::Relaxed);
356 Ok(None)
357 }
358 }
359
    /// Store query result in cache, accepting an already-`Arc`-wrapped result.
    ///
    /// Preferred over [`put`](Self::put) on the hot miss path: callers that already
    /// hold an `Arc<Vec<JsonbValue>>` (e.g. `CachedDatabaseAdapter`) can store it
    /// without an extra `Vec` clone.
    ///
    /// Execution order matters: cheap early-exit gates first (disabled / list /
    /// size budget), then reverse-index registration, then the store insert.
    ///
    /// # Arguments
    ///
    /// * `cache_key` - Cache key (from `generate_cache_key()`)
    /// * `result` - Arc-wrapped query result to cache
    /// * `accessed_views` - List of views accessed by this query
    /// * `ttl_override` - Per-entry TTL in seconds; `None` uses `CacheConfig::ttl_seconds`
    /// * `entity_type` - Optional GraphQL type name for entity-ID indexing
    ///
    /// # Errors
    ///
    /// This method is infallible. The `Result` return type is kept for API compatibility.
    pub fn put_arc(
        &self,
        cache_key: u64,
        result: Arc<Vec<JsonbValue>>,
        accessed_views: Vec<String>,
        ttl_override: Option<u64>,
        entity_type: Option<&str>,
    ) -> Result<()> {
        if !self.config.enabled {
            return Ok(());
        }

        let ttl_seconds = ttl_override.unwrap_or(self.config.ttl_seconds);

        // TTL=0 means "no time-based expiry" — store the entry and rely entirely
        // on mutation-based invalidation. expire_after_create returns None for
        // these entries so moka never schedules a timer-wheel eviction.

        // Respect cache_list_queries: a result with more than one row is considered a list.
        if !self.config.cache_list_queries && result.len() > 1 {
            return Ok(());
        }

        // Enforce per-entry size limit: estimate entry size from serialized JSON.
        // NOTE: serialization is O(payload); this only runs when max_entry_bytes
        // is configured, and serialization failure counts as size 0 (entry kept).
        if let Some(max_entry) = self.config.max_entry_bytes {
            let estimated = serde_json::to_vec(&*result).map_or(0, |v| v.len());
            if estimated > max_entry {
                return Ok(()); // silently skip oversized entries
            }
        }

        // Enforce total cache size limit.
        if let Some(max_total) = self.config.max_total_bytes {
            if self.memory_bytes.load(Ordering::Relaxed) >= max_total {
                return Ok(()); // silently skip when budget is exhausted
            }
        }

        let is_list_query = result.len() > 1;

        // Extract entity refs from ALL rows (not just the first).
        // Rows without an object body or without a string "id" field are skipped.
        let entity_refs: Box<[(String, String)]> = if let Some(et) = entity_type {
            result
                .iter()
                .filter_map(|row| {
                    row.as_value()
                        .as_object()?
                        .get("id")?
                        .as_str()
                        .map(|id| (et.to_string(), id.to_string()))
                })
                .collect::<Vec<_>>()
                .into_boxed_slice()
        } else {
            Box::default()
        };

        // Register in view index.
        //
        // NOTE(review): if `cache_key` replaces an already-cached entry, the
        // eviction listener for the OLD value will later remove `cache_key`
        // from any index set the old and new values share — which would orphan
        // the fresh entry from invalidation. Presumably same-key re-puts are
        // rare on this code path; confirm against moka's listener delivery
        // semantics for replacement.
        for view in &accessed_views {
            self.view_index.entry(view.clone()).or_default().insert(cache_key);
        }

        // Register in list index (only for multi-row results).
        if is_list_query {
            for view in &accessed_views {
                self.list_index.entry(view.clone()).or_default().insert(cache_key);
            }
        }

        // Register ALL entity refs in entity index.
        for (et, id) in &*entity_refs {
            self.entity_index
                .entry(et.clone())
                .or_default()
                .entry(id.clone())
                .or_default()
                .insert(cache_key);
        }

        let cached = CachedResult {
            result,
            accessed_views: accessed_views.into_boxed_slice(),
            // Wall-clock stamp for debugging; clock-before-epoch degrades to 0.
            cached_at: std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .map_or(0, |d| d.as_secs()),
            ttl_seconds,
            entity_refs,
            is_list_query,
        };

        self.memory_bytes.fetch_add(entry_overhead(), Ordering::Relaxed);
        // Wrap in Arc so moka's get() costs one atomic increment, not a full clone.
        self.store.insert(cache_key, Arc::new(cached));
        self.total_cached.fetch_add(1, Ordering::Relaxed);
        Ok(())
    }
473
474 /// Store query result in cache.
475 ///
476 /// If caching is disabled, this is a no-op.
477 ///
478 /// Wraps `result` in an `Arc` and delegates to [`put_arc`](Self::put_arc).
479 /// Prefer [`put_arc`](Self::put_arc) when the caller already holds an `Arc`.
480 ///
481 /// # Arguments
482 ///
483 /// * `cache_key` - Cache key (from `generate_cache_key()`)
484 /// * `result` - Query result to cache
485 /// * `accessed_views` - List of views accessed by this query
486 /// * `ttl_override` - Per-entry TTL in seconds; `None` uses `CacheConfig::ttl_seconds`
487 /// * `entity_type` - Optional GraphQL type name (e.g. `"User"`) for entity-ID indexing. When
488 /// provided, each row's `"id"` field is extracted and stored in `entity_index` so that
489 /// `invalidate_by_entity()` can perform selective eviction.
490 ///
491 /// # Errors
492 ///
493 /// This method is infallible. The `Result` return type is kept for API compatibility.
494 ///
495 /// # Example
496 ///
497 /// ```rust
498 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
499 /// use fraiseql_core::db::types::JsonbValue;
500 /// use serde_json::json;
501 ///
502 /// let cache = QueryResultCache::new(CacheConfig::default());
503 ///
504 /// let result = vec![JsonbValue::new(json!({"id": "uuid-1"}))];
505 /// cache.put(0xabc123, result, vec!["v_user".to_string()], None, Some("User"))?;
506 /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
507 /// ```
508 pub fn put(
509 &self,
510 cache_key: u64,
511 result: Vec<JsonbValue>,
512 accessed_views: Vec<String>,
513 ttl_override: Option<u64>,
514 entity_type: Option<&str>,
515 ) -> Result<()> {
516 self.put_arc(cache_key, Arc::new(result), accessed_views, ttl_override, entity_type)
517 }
518
519 /// Invalidate entries accessing specified views.
520 ///
521 /// Uses the `view_index` for O(k) lookup instead of O(n) full-cache scan.
522 /// Keys accessing multiple views in `views` are deduplicated before invalidation.
523 ///
524 /// # Arguments
525 ///
526 /// * `views` - List of view/table names modified by mutation
527 ///
528 /// # Returns
529 ///
530 /// Number of cache entries invalidated.
531 ///
532 /// # Errors
533 ///
534 /// This method is infallible. The `Result` return type is kept for API compatibility.
535 ///
536 /// # Example
537 ///
538 /// ```rust
539 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
540 ///
541 /// let cache = QueryResultCache::new(CacheConfig::default());
542 ///
543 /// // After createUser mutation
544 /// let invalidated = cache.invalidate_views(&["v_user".to_string()])?;
545 /// println!("Invalidated {} cache entries", invalidated);
546 /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
547 /// ```
548 pub fn invalidate_views(&self, views: &[String]) -> Result<u64> {
549 if !self.config.enabled {
550 return Ok(0);
551 }
552
553 // Collect keys first (releases DashMap guards) then invalidate.
554 // Moka's eviction listener fires synchronously on the calling thread, so
555 // we must NOT hold any DashMap shard guard when calling store.invalidate() —
556 // the listener itself calls view_index.get() on the same shard, which
557 // would deadlock on a non-re-entrant parking_lot::RwLock.
558 let mut keys_to_invalidate: HashSet<u64> = HashSet::new();
559 for view in views {
560 if let Some(keys) = self.view_index.get(view) {
561 // Dedup: a query accessing multiple views in `views` would
562 // otherwise be counted and invalidated once per view.
563 for key in keys.iter() {
564 keys_to_invalidate.insert(*key);
565 }
566 }
567 // Guard dropped here — safe to proceed
568 }
569
570 #[allow(clippy::cast_possible_truncation)]
571 // Reason: entry count never exceeds u64
572 let count = keys_to_invalidate.len() as u64;
573
574 for key in keys_to_invalidate {
575 self.store.invalidate(&key);
576 // Index cleanup handled by eviction listener.
577 }
578
579 self.invalidations.fetch_add(count, Ordering::Relaxed);
580 Ok(count)
581 }
582
583 /// Evict only list (multi-row) cache entries for the given views.
584 ///
585 /// Unlike `invalidate_views()`, this method leaves single-entity point-lookup
586 /// entries intact. Used for CREATE mutations: creating a new entity does not
587 /// affect queries that fetch a *different* existing entity by UUID, but it
588 /// does invalidate queries that return a variable-length list of entities.
589 ///
590 /// Uses the `list_index` for O(k) lookup.
591 ///
592 /// # Errors
593 ///
594 /// This method is infallible. The `Result` return type is kept for API compatibility.
595 pub fn invalidate_list_queries(&self, views: &[String]) -> Result<u64> {
596 if !self.config.enabled {
597 return Ok(0);
598 }
599
600 let mut keys_to_invalidate: HashSet<u64> = HashSet::new();
601 for view in views {
602 if let Some(keys) = self.list_index.get(view) {
603 for k in keys.iter() {
604 keys_to_invalidate.insert(*k);
605 }
606 }
607 }
608
609 #[allow(clippy::cast_possible_truncation)]
610 // Reason: entry count never exceeds u64
611 let count = keys_to_invalidate.len() as u64;
612 for key in keys_to_invalidate {
613 self.store.invalidate(&key);
614 }
615 self.invalidations.fetch_add(count, Ordering::Relaxed);
616 Ok(count)
617 }
618
619 /// Evict cache entries that contain a specific entity UUID.
620 ///
621 /// Uses the `entity_index` for O(k) lookup. Entries not referencing this
622 /// entity are left untouched.
623 ///
624 /// # Arguments
625 ///
626 /// * `entity_type` - GraphQL type name (e.g. `"User"`)
627 /// * `entity_id` - UUID string of the mutated entity
628 ///
629 /// # Returns
630 ///
631 /// Number of cache entries evicted.
632 ///
633 /// # Errors
634 ///
635 /// This method is infallible. The `Result` return type is kept for API compatibility.
636 pub fn invalidate_by_entity(&self, entity_type: &str, entity_id: &str) -> Result<u64> {
637 if !self.config.enabled {
638 return Ok(0);
639 }
640
641 // Short-circuit: if entity_type has no indexed entries, skip the DashMap
642 // lookup entirely. Covers cold-cache and write-heavy workloads where no
643 // reads are cached yet.
644 if !self.entity_index.contains_key(entity_type) {
645 return Ok(0);
646 }
647
648 // Collect keys first (releases DashMap guards) then invalidate.
649 // Moka's eviction listener fires synchronously on the calling thread, so
650 // we must NOT hold any DashMap shard guard when calling store.invalidate() —
651 // the listener itself calls entity_index.get() on the same shard, which
652 // would deadlock on a non-re-entrant parking_lot::RwLock.
653 let keys_to_invalidate: Vec<u64> = self
654 .entity_index
655 .get(entity_type)
656 .and_then(|by_type| {
657 by_type.get(entity_id).map(|keys| keys.iter().map(|k| *k).collect())
658 })
659 .unwrap_or_default();
660
661 #[allow(clippy::cast_possible_truncation)]
662 // Reason: entry count never exceeds u64
663 let count = keys_to_invalidate.len() as u64;
664
665 for key in keys_to_invalidate {
666 self.store.invalidate(&key);
667 // Index cleanup handled by eviction listener.
668 }
669
670 self.invalidations.fetch_add(count, Ordering::Relaxed);
671 Ok(count)
672 }
673
674 /// Get cache metrics snapshot.
675 ///
676 /// Returns a consistent snapshot of current counters. Individual fields may
677 /// be updated independently (atomics), so the snapshot is not a single atomic
678 /// transaction, but is accurate enough for monitoring.
679 ///
680 /// # Errors
681 ///
682 /// This method is infallible. The `Result` return type is kept for API compatibility.
683 ///
684 /// # Example
685 ///
686 /// ```rust
687 /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
688 ///
689 /// let cache = QueryResultCache::new(CacheConfig::default());
690 /// let metrics = cache.metrics()?;
691 ///
692 /// println!("Hit rate: {:.1}%", metrics.hit_rate() * 100.0);
693 /// println!("Size: {} / {} entries", metrics.size, 10_000);
694 /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
695 /// ```
696 pub fn metrics(&self) -> Result<CacheMetrics> {
697 Ok(CacheMetrics {
698 hits: self.hits.load(Ordering::Relaxed),
699 misses: self.misses.load(Ordering::Relaxed),
700 total_cached: self.total_cached.load(Ordering::Relaxed),
701 invalidations: self.invalidations.load(Ordering::Relaxed),
702 #[allow(clippy::cast_possible_truncation)]
703 // Reason: entry count fits in usize on any 64-bit target
704 size: self.store.entry_count() as usize,
705 memory_bytes: self.memory_bytes.load(Ordering::Relaxed),
706 })
707 }
708
    /// Clear all cache entries.
    ///
    /// Resets the store, reverse indexes, and `memory_bytes` synchronously.
    /// The eviction listener will still fire asynchronously for each evicted entry,
    /// but its index-cleanup operations will be no-ops on the already-cleared maps.
    ///
    /// NOTE(review): the listener also decrements `memory_bytes` per evicted
    /// entry; if those decrements land after the `store(0)` below, a plain
    /// `fetch_sub` in the listener would wrap the counter — confirm the
    /// listener decrements saturatingly.
    ///
    /// # Errors
    ///
    /// This method is infallible. The `Result` return type is kept for API compatibility.
    ///
    /// # Example
    ///
    /// ```rust
    /// use fraiseql_core::cache::{QueryResultCache, CacheConfig};
    ///
    /// let cache = QueryResultCache::new(CacheConfig::default());
    /// cache.clear()?;
    /// # Ok::<(), fraiseql_core::error::FraiseQLError>(())
    /// ```
    pub fn clear(&self) -> Result<()> {
        self.store.invalidate_all();
        // Reset indexes and memory counter synchronously — don't rely on the
        // async eviction listener to do this.
        self.view_index.clear();
        self.entity_index.clear();
        self.list_index.clear();
        self.memory_bytes.store(0, Ordering::Relaxed);
        Ok(())
    }
738}
739
740impl CacheMetrics {
741 /// Calculate cache hit rate.
742 ///
743 /// Returns ratio of hits to total requests (0.0 to 1.0).
744 ///
745 /// # Returns
746 ///
747 /// - `1.0` if all requests were hits
748 /// - `0.0` if all requests were misses
749 /// - `0.0` if no requests yet
750 ///
751 /// # Example
752 ///
753 /// ```rust
754 /// use fraiseql_core::cache::CacheMetrics;
755 ///
756 /// let metrics = CacheMetrics {
757 /// hits: 80,
758 /// misses: 20,
759 /// total_cached: 100,
760 /// invalidations: 5,
761 /// size: 95,
762 /// memory_bytes: 1_000_000,
763 /// };
764 ///
765 /// assert_eq!(metrics.hit_rate(), 0.8); // 80% hit rate
766 /// ```
767 #[must_use]
768 pub fn hit_rate(&self) -> f64 {
769 let total = self.hits + self.misses;
770 if total == 0 {
771 return 0.0;
772 }
773 #[allow(clippy::cast_precision_loss)]
774 // Reason: hit-rate is a display metric; f64 precision loss on u64 counters is acceptable
775 {
776 self.hits as f64 / total as f64
777 }
778 }
779
780 /// Check if cache is performing well.
781 ///
782 /// Returns `true` if hit rate is above 60% (reasonable threshold).
783 ///
784 /// # Example
785 ///
786 /// ```rust
787 /// use fraiseql_core::cache::CacheMetrics;
788 ///
789 /// let good_metrics = CacheMetrics {
790 /// hits: 80,
791 /// misses: 20,
792 /// total_cached: 100,
793 /// invalidations: 5,
794 /// size: 95,
795 /// memory_bytes: 1_000_000,
796 /// };
797 ///
798 /// assert!(good_metrics.is_healthy()); // 80% > 60%
799 /// ```
800 #[must_use]
801 pub fn is_healthy(&self) -> bool {
802 self.hit_rate() > 0.6
803 }
804}
805
806#[cfg(test)]
807mod tests {
808 #![allow(clippy::unwrap_used)] // Reason: test code, panics are acceptable
809
810 use serde_json::json;
811
812 use super::*;
813
    // Helper: a single-row result. `put()` treats len() == 1 as a point
    // lookup, not a list query, so this never touches the list index.
    fn test_result() -> Vec<JsonbValue> {
        vec![JsonbValue::new(json!({"id": 1, "name": "test"}))]
    }
818
819 // ========================================================================
820 // Cache Hit/Miss Tests
821 // ========================================================================
822
    #[test]
    fn test_cache_miss() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        // Key 999 was never put — must miss and bump only the miss counter.
        let result = cache.get(999_u64).unwrap();
        assert!(result.is_none(), "Should be cache miss");

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.misses, 1);
        assert_eq!(metrics.hits, 0);
    }

    #[test]
    fn test_cache_put_and_get() {
        let cache = QueryResultCache::new(CacheConfig::enabled());
        let result = test_result();

        // Put
        cache.put(1_u64, result, vec!["v_user".to_string()], None, None).unwrap();

        // Get — same key must hit and return the single cached row.
        let cached = cache.get(1_u64).unwrap();
        assert!(cached.is_some(), "Should be cache hit");
        assert_eq!(cached.unwrap().len(), 1);

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.hits, 1);
        assert_eq!(metrics.misses, 0);
        assert_eq!(metrics.total_cached, 1);
    }

    #[test]
    fn test_cache_hit_updates_hit_count() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();

        // First hit
        cache.get(1_u64).unwrap();
        // Second hit
        cache.get(1_u64).unwrap();

        // Every hit increments the counter — two gets, two hits.
        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.hits, 2);
    }
868
869 // ========================================================================
870 // TTL Expiry Tests
871 // ========================================================================
872
    #[test]
    fn test_ttl_expiry() {
        let config = CacheConfig {
            ttl_seconds: 1,
            enabled: true,
            ..Default::default()
        };

        let cache = QueryResultCache::new(config);

        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();

        // Wait for expiry (global TTL is 1s; sleep past it).
        std::thread::sleep(std::time::Duration::from_secs(2));
        // Flush moka's pending maintenance so the expiry takes effect.
        cache.store.run_pending_tasks();

        // Should be expired
        let result = cache.get(1_u64).unwrap();
        assert!(result.is_none(), "Entry should be expired");

        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.misses, 1); // Expired counts as miss
    }

    #[test]
    fn test_per_entry_ttl_override_expires_early() {
        // Global config has 1-hour TTL but entry overrides to 1 second
        let config = CacheConfig {
            ttl_seconds: 3600,
            enabled: true,
            ..Default::default()
        };
        let cache = QueryResultCache::new(config);

        cache
            .put(
                1_u64,
                test_result(),
                vec!["v_ref".to_string()],
                Some(1), // 1-second per-entry override
                None,
            )
            .unwrap();

        std::thread::sleep(std::time::Duration::from_secs(2));
        cache.store.run_pending_tasks();

        // The per-entry override must win over the much longer global TTL.
        let result = cache.get(1_u64).unwrap();
        assert!(result.is_none(), "Entry with per-entry TTL=1s should have expired");
    }

    #[test]
    fn test_per_entry_ttl_zero_cached_indefinitely() {
        // TTL=0 = no time-based expiry; entry lives until mutation invalidation.
        let cache = QueryResultCache::new(CacheConfig::enabled());

        cache
            .put(1_u64, test_result(), vec!["v_live".to_string()], Some(0), None)
            .unwrap();

        let result = cache.get(1_u64).unwrap();
        assert!(result.is_some(), "Entry with TTL=0 should be cached indefinitely");
    }

    #[test]
    fn test_ttl_not_expired() {
        let config = CacheConfig {
            ttl_seconds: 3600, // 1 hour TTL
            enabled: true,
            ..Default::default()
        };

        let cache = QueryResultCache::new(config);

        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();

        // Should still be valid — far inside the 1-hour window.
        let result = cache.get(1_u64).unwrap();
        assert!(result.is_some(), "Entry should not be expired");
    }
953
954 // ========================================================================
955 // Eviction Tests (capacity-based)
956 // ========================================================================
957
    #[test]
    fn test_capacity_eviction() {
        let config = CacheConfig {
            max_entries: 2,
            enabled: true,
            ..Default::default()
        };

        let cache = QueryResultCache::new(config);

        // Add 3 entries (max is 2); moka will evict one
        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
        cache.put(2_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
        cache.put(3_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();

        // Run pending tasks to flush evictions
        cache.store.run_pending_tasks();

        // Only asserts the bound, not WHICH key was evicted — W-TinyLFU's
        // admission decision is not deterministic from the test's viewpoint.
        let metrics = cache.metrics().unwrap();
        assert!(metrics.size <= 2, "Cache size should not exceed max capacity");
    }
979
980 // ========================================================================
981 // Cache Disabled Tests
982 // ========================================================================
983
984 #[test]
985 fn test_cache_disabled() {
986 let config = CacheConfig::disabled();
987 let cache = QueryResultCache::new(config);
988
989 // Put should be no-op
990 cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
991
992 // Get should return None
993 assert!(cache.get(1_u64).unwrap().is_none(), "Cache disabled should always miss");
994
995 let metrics = cache.metrics().unwrap();
996 assert_eq!(metrics.total_cached, 0);
997 }
998
999 // ========================================================================
1000 // Invalidation Tests
1001 // ========================================================================
1002
1003 #[test]
1004 fn test_invalidate_single_view() {
1005 let cache = QueryResultCache::new(CacheConfig::enabled());
1006
1007 cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1008 cache.put(2_u64, test_result(), vec!["v_post".to_string()], None, None).unwrap();
1009
1010 // Invalidate v_user
1011 let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
1012 assert_eq!(invalidated, 1);
1013
1014 // v_user entry gone, v_post remains
1015 assert!(cache.get(1_u64).unwrap().is_none());
1016 assert!(cache.get(2_u64).unwrap().is_some());
1017 }
1018
1019 #[test]
1020 fn test_invalidate_multiple_views() {
1021 let cache = QueryResultCache::new(CacheConfig::enabled());
1022
1023 cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1024 cache.put(2_u64, test_result(), vec!["v_post".to_string()], None, None).unwrap();
1025 cache
1026 .put(3_u64, test_result(), vec!["v_product".to_string()], None, None)
1027 .unwrap();
1028
1029 // Invalidate v_user and v_post
1030 let invalidated =
1031 cache.invalidate_views(&["v_user".to_string(), "v_post".to_string()]).unwrap();
1032 assert_eq!(invalidated, 2);
1033
1034 assert!(cache.get(1_u64).unwrap().is_none());
1035 assert!(cache.get(2_u64).unwrap().is_none());
1036 assert!(cache.get(3_u64).unwrap().is_some());
1037 }
1038
1039 #[test]
1040 fn test_invalidate_entry_with_multiple_views() {
1041 let cache = QueryResultCache::new(CacheConfig::enabled());
1042
1043 // Entry accesses both v_user and v_post
1044 cache
1045 .put(
1046 1_u64,
1047 test_result(),
1048 vec!["v_user".to_string(), "v_post".to_string()],
1049 None,
1050 None,
1051 )
1052 .unwrap();
1053
1054 // Invalidating either view should remove the entry
1055 let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
1056 assert_eq!(invalidated, 1);
1057
1058 assert!(cache.get(1_u64).unwrap().is_none());
1059 }
1060
1061 #[test]
1062 fn test_invalidate_nonexistent_view() {
1063 let cache = QueryResultCache::new(CacheConfig::enabled());
1064
1065 cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1066
1067 // Invalidate view that doesn't exist
1068 let invalidated = cache.invalidate_views(&["v_nonexistent".to_string()]).unwrap();
1069 assert_eq!(invalidated, 0);
1070
1071 // Entry should remain
1072 assert!(cache.get(1_u64).unwrap().is_some());
1073 }
1074
1075 // ========================================================================
1076 // Clear Tests
1077 // ========================================================================
1078
1079 #[test]
1080 fn test_clear() {
1081 let cache = QueryResultCache::new(CacheConfig::enabled());
1082
1083 cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1084 cache.put(2_u64, test_result(), vec!["v_post".to_string()], None, None).unwrap();
1085
1086 cache.clear().unwrap();
1087
1088 // Run pending tasks to flush moka's eviction pipeline
1089 cache.store.run_pending_tasks();
1090
1091 assert!(cache.get(1_u64).unwrap().is_none());
1092 assert!(cache.get(2_u64).unwrap().is_none());
1093
1094 let metrics = cache.metrics().unwrap();
1095 assert_eq!(metrics.size, 0);
1096 }
1097
1098 // ========================================================================
1099 // Metrics Tests
1100 // ========================================================================
1101
    #[test]
    fn test_metrics_tracking() {
        let cache = QueryResultCache::new(CacheConfig::enabled());

        // Miss — key 999 was never stored.
        cache.get(999_u64).unwrap();

        // Put — first (and only) insert.
        cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();

        // Hit — reads back the entry just inserted.
        cache.get(1_u64).unwrap();

        // moka::sync::Cache entry_count() is eventually consistent — flush pending
        // write operations before asserting on size.
        cache.store.run_pending_tasks();

        // Exactly one hit, one miss, one resident entry, one lifetime insert.
        let metrics = cache.metrics().unwrap();
        assert_eq!(metrics.hits, 1);
        assert_eq!(metrics.misses, 1);
        assert_eq!(metrics.size, 1);
        assert_eq!(metrics.total_cached, 1);
    }
1125
1126 #[test]
1127 fn test_metrics_hit_rate() {
1128 let metrics = CacheMetrics {
1129 hits: 80,
1130 misses: 20,
1131 total_cached: 100,
1132 invalidations: 5,
1133 size: 95,
1134 memory_bytes: 1_000_000,
1135 };
1136
1137 assert!((metrics.hit_rate() - 0.8).abs() < f64::EPSILON);
1138 assert!(metrics.is_healthy());
1139 }
1140
1141 #[test]
1142 fn test_metrics_hit_rate_zero_requests() {
1143 let metrics = CacheMetrics {
1144 hits: 0,
1145 misses: 0,
1146 total_cached: 0,
1147 invalidations: 0,
1148 size: 0,
1149 memory_bytes: 0,
1150 };
1151
1152 assert!((metrics.hit_rate() - 0.0).abs() < f64::EPSILON);
1153 assert!(!metrics.is_healthy());
1154 }
1155
1156 #[test]
1157 fn test_metrics_is_healthy() {
1158 let good = CacheMetrics {
1159 hits: 70,
1160 misses: 30,
1161 total_cached: 100,
1162 invalidations: 5,
1163 size: 95,
1164 memory_bytes: 1_000_000,
1165 };
1166 assert!(good.is_healthy()); // 70% > 60%
1167
1168 let bad = CacheMetrics {
1169 hits: 50,
1170 misses: 50,
1171 total_cached: 100,
1172 invalidations: 5,
1173 size: 95,
1174 memory_bytes: 1_000_000,
1175 };
1176 assert!(!bad.is_healthy()); // 50% < 60%
1177 }
1178
1179 // ========================================================================
1180 // Entity-Aware Invalidation Tests
1181 // ========================================================================
1182
1183 fn entity_result(id: &str) -> Vec<JsonbValue> {
1184 vec![JsonbValue::new(
1185 serde_json::json!({"id": id, "name": "test"}),
1186 )]
1187 }
1188
1189 #[test]
1190 fn test_invalidate_by_entity_only_removes_matching_entries() {
1191 let cache = QueryResultCache::new(CacheConfig::enabled());
1192
1193 // Cache User A and User B as separate entries
1194 cache
1195 .put(1_u64, entity_result("uuid-a"), vec!["v_user".to_string()], None, Some("User"))
1196 .unwrap();
1197 cache
1198 .put(2_u64, entity_result("uuid-b"), vec!["v_user".to_string()], None, Some("User"))
1199 .unwrap();
1200
1201 // Invalidate User A — User B must remain
1202 let evicted = cache.invalidate_by_entity("User", "uuid-a").unwrap();
1203 assert_eq!(evicted, 1);
1204 assert!(cache.get(1_u64).unwrap().is_none(), "User A should be evicted");
1205 assert!(cache.get(2_u64).unwrap().is_some(), "User B should remain");
1206 }
1207
1208 #[test]
1209 fn test_invalidate_by_entity_removes_list_containing_entity() {
1210 let cache = QueryResultCache::new(CacheConfig::enabled());
1211
1212 // Cache a single-entity entry (entity_ref uses first row's id)
1213 cache
1214 .put(1_u64, entity_result("uuid-a"), vec!["v_user".to_string()], None, Some("User"))
1215 .unwrap();
1216
1217 // Invalidate by User A
1218 let evicted = cache.invalidate_by_entity("User", "uuid-a").unwrap();
1219 assert_eq!(evicted, 1);
1220 assert!(cache.get(1_u64).unwrap().is_none(), "Entry for A should be evicted");
1221 }
1222
1223 #[test]
1224 fn test_invalidate_by_entity_leaves_unrelated_types() {
1225 let cache = QueryResultCache::new(CacheConfig::enabled());
1226
1227 // Cache a User entry and a Post entry
1228 cache
1229 .put(
1230 1_u64,
1231 entity_result("uuid-user"),
1232 vec!["v_user".to_string()],
1233 None,
1234 Some("User"),
1235 )
1236 .unwrap();
1237 cache
1238 .put(
1239 2_u64,
1240 entity_result("uuid-post"),
1241 vec!["v_post".to_string()],
1242 None,
1243 Some("Post"),
1244 )
1245 .unwrap();
1246
1247 // Invalidate the User — Post entry must remain untouched
1248 let evicted = cache.invalidate_by_entity("User", "uuid-user").unwrap();
1249 assert_eq!(evicted, 1);
1250 assert!(cache.get(1_u64).unwrap().is_none(), "User entry should be evicted");
1251 assert!(cache.get(2_u64).unwrap().is_some(), "Post entry should remain");
1252 }
1253
1254 #[test]
1255 fn test_put_builds_entity_id_index() {
1256 let cache = QueryResultCache::new(CacheConfig::enabled());
1257
1258 cache
1259 .put(1_u64, entity_result("uuid-1"), vec!["v_user".to_string()], None, Some("User"))
1260 .unwrap();
1261
1262 // Invalidating by uuid-1 should evict the entry
1263 let evicted = cache.invalidate_by_entity("User", "uuid-1").unwrap();
1264 assert_eq!(evicted, 1);
1265 assert!(cache.get(1_u64).unwrap().is_none());
1266 }
1267
1268 #[test]
1269 fn test_put_without_entity_type_not_indexed() {
1270 let cache = QueryResultCache::new(CacheConfig::enabled());
1271
1272 cache
1273 .put(
1274 1_u64,
1275 entity_result("uuid-1"),
1276 vec!["v_user".to_string()],
1277 None,
1278 None, // no entity type
1279 )
1280 .unwrap();
1281
1282 // invalidate_by_entity should not match (no index was built)
1283 let evicted = cache.invalidate_by_entity("User", "uuid-1").unwrap();
1284 assert_eq!(evicted, 0);
1285 assert!(cache.get(1_u64).unwrap().is_some(), "Non-indexed entry should remain");
1286 }
1287
1288 // ========================================================================
1289 // Multi-entity indexing + list_index / invalidate_list_queries tests
1290 // ========================================================================
1291
1292 fn list_result(ids: &[&str]) -> Vec<JsonbValue> {
1293 ids.iter()
1294 .map(|id| JsonbValue::new(serde_json::json!({"id": id, "name": "test"})))
1295 .collect()
1296 }
1297
1298 #[test]
1299 fn test_put_indexes_all_entities_in_list() {
1300 let cache = QueryResultCache::new(CacheConfig::enabled());
1301 let rows = list_result(&["uuid-A", "uuid-B", "uuid-C"]);
1302 cache.put(0xABC, rows, vec!["v_user".to_string()], None, Some("User")).unwrap();
1303
1304 let evicted_a = cache.invalidate_by_entity("User", "uuid-A").unwrap();
1305 assert_eq!(evicted_a, 1, "uuid-A must be indexed and evictable");
1306
1307 // Re-insert to test uuid-C
1308 let rows2 = list_result(&["uuid-A", "uuid-B", "uuid-C"]);
1309 cache.put(0xDEF, rows2, vec!["v_user".to_string()], None, Some("User")).unwrap();
1310 let evicted_c = cache.invalidate_by_entity("User", "uuid-C").unwrap();
1311 assert_eq!(evicted_c, 1, "uuid-C at position 2 must also be indexed");
1312 }
1313
1314 #[test]
1315 fn test_update_evicts_list_query_via_non_first_entity() {
1316 let cache = QueryResultCache::new(CacheConfig::enabled());
1317 let rows = list_result(&["uuid-A", "uuid-B"]);
1318 cache.put(0x111, rows, vec!["v_user".to_string()], None, Some("User")).unwrap();
1319
1320 // uuid-B is at position 1 — must still be evicted
1321 let evicted = cache.invalidate_by_entity("User", "uuid-B").unwrap();
1322 assert_eq!(evicted, 1);
1323 assert!(cache.get(0x111).unwrap().is_none(), "list entry containing uuid-B must be gone");
1324 }
1325
1326 #[test]
1327 fn test_invalidate_list_queries_spares_point_lookups() {
1328 let cache = QueryResultCache::new(CacheConfig::enabled());
1329
1330 // Point lookup: single row
1331 let single = vec![JsonbValue::new(serde_json::json!({"id": "uuid-X"}))];
1332 cache
1333 .put(0x001, single, vec!["v_user".to_string()], None, Some("User"))
1334 .unwrap();
1335
1336 // List query: multiple rows
1337 let list = list_result(&["uuid-A", "uuid-B"]);
1338 cache.put(0x002, list, vec!["v_user".to_string()], None, Some("User")).unwrap();
1339
1340 // CREATE fires invalidate_list_queries
1341 let evicted = cache.invalidate_list_queries(&["v_user".to_string()]).unwrap();
1342 assert_eq!(evicted, 1, "only the list entry should be evicted");
1343 assert!(cache.get(0x001).unwrap().is_some(), "point lookup must survive");
1344 assert!(cache.get(0x002).unwrap().is_none(), "list entry must be evicted");
1345 }
1346
1347 #[test]
1348 fn test_invalidate_by_entity_short_circuits_on_empty_index() {
1349 let cache = QueryResultCache::new(CacheConfig::enabled());
1350 // Nothing cached — must return 0 without panicking
1351 let count = cache.invalidate_by_entity("User", "uuid-X").unwrap();
1352 assert_eq!(count, 0);
1353 }
1354
    #[test]
    fn test_eviction_listener_cleans_all_entity_refs() {
        let cache = QueryResultCache::new(CacheConfig::enabled());
        let rows = list_result(&["uuid-A", "uuid-B"]);
        cache.put(0x001, rows, vec!["v_user".to_string()], None, Some("User")).unwrap();

        // Force eviction via invalidate_views
        cache.invalidate_views(&["v_user".to_string()]).unwrap();
        // Flush moka's async eviction pipeline — the eviction listener that
        // prunes the reverse indexes fires from these pending tasks, so the
        // flush is required before checking index cleanliness.
        cache.store.run_pending_tasks();

        // After eviction the entity_index must be cleaned up (no dangling refs):
        // a second invalidation by either id must find nothing to evict.
        let count_a = cache.invalidate_by_entity("User", "uuid-A").unwrap();
        let count_b = cache.invalidate_by_entity("User", "uuid-B").unwrap();
        assert_eq!(count_a, 0, "entity_index must be clean after eviction");
        assert_eq!(count_b, 0, "entity_index must be clean after eviction");
    }
1372
1373 // ========================================================================
1374 // Thread Safety Tests
1375 // ========================================================================
1376
1377 #[test]
1378 fn test_concurrent_access() {
1379 use std::{sync::Arc, thread};
1380
1381 let cache = Arc::new(QueryResultCache::new(CacheConfig::enabled()));
1382
1383 // Spawn multiple threads accessing cache
1384 let handles: Vec<_> = (0_u64..10)
1385 .map(|key| {
1386 let cache_clone = cache.clone();
1387 thread::spawn(move || {
1388 cache_clone
1389 .put(key, test_result(), vec!["v_user".to_string()], None, None)
1390 .unwrap();
1391 cache_clone.get(key).unwrap();
1392 })
1393 })
1394 .collect();
1395
1396 for handle in handles {
1397 handle.join().unwrap();
1398 }
1399
1400 let metrics = cache.metrics().unwrap();
1401 assert_eq!(metrics.total_cached, 10);
1402 assert_eq!(metrics.hits, 10);
1403 }
1404
1405 // ========================================================================
1406 // Sentinel tests — boundary guards for mutation testing
1407 // ========================================================================
1408
1409 /// Sentinel: `cache_list_queries = false` must skip results with >1 row.
1410 ///
1411 /// Kills the `> → >=` mutation at the list-query guard: `result.len() > 1`.
1412 #[test]
1413 fn test_cache_list_queries_false_skips_multi_row() {
1414 let config = CacheConfig {
1415 enabled: true,
1416 cache_list_queries: false,
1417 ..CacheConfig::default()
1418 };
1419 let cache = QueryResultCache::new(config);
1420
1421 // Two-row result: must be skipped (killed by > → >= mutant)
1422 let two_rows = vec![
1423 JsonbValue::new(json!({"id": 1})),
1424 JsonbValue::new(json!({"id": 2})),
1425 ];
1426 cache.put(1_u64, two_rows, vec!["v_user".to_string()], None, None).unwrap();
1427 assert!(
1428 cache.get(1_u64).unwrap().is_none(),
1429 "multi-row result must not be cached when cache_list_queries=false"
1430 );
1431 }
1432
1433 /// Sentinel: `cache_list_queries = false` must still store single-row results.
1434 ///
1435 /// Complements the above: the single-row path must remain unaffected.
1436 #[test]
1437 fn test_cache_list_queries_false_allows_single_row() {
1438 let config = CacheConfig {
1439 enabled: true,
1440 cache_list_queries: false,
1441 ..CacheConfig::default()
1442 };
1443 let cache = QueryResultCache::new(config);
1444
1445 // One-row result: must be stored
1446 let one_row = vec![JsonbValue::new(json!({"id": 1}))];
1447 cache.put(1_u64, one_row, vec!["v_user".to_string()], None, None).unwrap();
1448 assert!(
1449 cache.get(1_u64).unwrap().is_some(),
1450 "single-row result must be cached even when cache_list_queries=false"
1451 );
1452 }
1453
1454 /// Sentinel: entries exceeding `max_entry_bytes` must be silently skipped.
1455 ///
1456 /// Kills mutations on the `estimated > max_entry` guard.
1457 #[test]
1458 fn test_max_entry_bytes_skips_oversized_entry() {
1459 let config = CacheConfig {
1460 enabled: true,
1461 max_entry_bytes: Some(10), // 10 bytes — smaller than any JSON row
1462 ..CacheConfig::default()
1463 };
1464 let cache = QueryResultCache::new(config);
1465
1466 // A typical row serialises to far more than 10 bytes
1467 cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1468 assert!(cache.get(1_u64).unwrap().is_none(), "oversized entry must be silently skipped");
1469 }
1470
1471 /// Sentinel: entries within `max_entry_bytes` must be stored normally.
1472 ///
1473 /// Complements the above to pin both sides of the size boundary.
1474 #[test]
1475 fn test_max_entry_bytes_allows_small_entry() {
1476 let config = CacheConfig {
1477 enabled: true,
1478 max_entry_bytes: Some(100_000), // 100 KB — plenty for a test row
1479 ..CacheConfig::default()
1480 };
1481 let cache = QueryResultCache::new(config);
1482
1483 cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1484 assert!(
1485 cache.get(1_u64).unwrap().is_some(),
1486 "small entry must be cached when within max_entry_bytes"
1487 );
1488 }
1489
1490 /// Sentinel: `put()` must skip new entries when `max_total_bytes` budget is exhausted.
1491 ///
1492 /// Kills mutations on the `current >= max_total` guard.
1493 #[test]
1494 fn test_max_total_bytes_skips_when_budget_exhausted() {
1495 let config = CacheConfig {
1496 enabled: true,
1497 max_total_bytes: Some(0), // 0 bytes — always exhausted
1498 ..CacheConfig::default()
1499 };
1500 let cache = QueryResultCache::new(config);
1501
1502 cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1503 assert!(
1504 cache.get(1_u64).unwrap().is_none(),
1505 "entry must be skipped when max_total_bytes budget is already exhausted"
1506 );
1507 }
1508
1509 // ========================================================================
1510 // Cross-key invalidation Tests (replaces cross-shard tests)
1511 // ========================================================================
1512
1513 /// `invalidate_views` clears matching entries regardless of cache key.
1514 #[test]
1515 fn test_cross_key_view_invalidation() {
1516 let config = CacheConfig {
1517 max_entries: 10_000,
1518 enabled: true,
1519 ..CacheConfig::default()
1520 };
1521 let cache = QueryResultCache::new(config);
1522
1523 // Insert many entries
1524 for i in 0_u64..200 {
1525 let view = if i % 2 == 0 { "v_user" } else { "v_post" };
1526 cache.put(i, test_result(), vec![view.to_string()], None, None).unwrap();
1527 }
1528
1529 // Invalidate v_user — should remove exactly 100 entries
1530 let invalidated = cache.invalidate_views(&["v_user".to_string()]).unwrap();
1531 assert_eq!(invalidated, 100);
1532
1533 // All v_user entries gone, all v_post entries remain
1534 for i in 0_u64..200 {
1535 if i % 2 == 0 {
1536 assert!(cache.get(i).unwrap().is_none(), "v_user entry should be invalidated");
1537 } else {
1538 assert!(cache.get(i).unwrap().is_some(), "v_post entry should remain");
1539 }
1540 }
1541 }
1542
1543 /// Cross-key entity invalidation works across all cache keys.
1544 #[test]
1545 fn test_cross_key_entity_invalidation() {
1546 let config = CacheConfig {
1547 max_entries: 10_000,
1548 enabled: true,
1549 ..CacheConfig::default()
1550 };
1551 let cache = QueryResultCache::new(config);
1552
1553 // Insert entries for the same entity across different cache keys
1554 for i in 0_u64..50 {
1555 cache
1556 .put(
1557 i,
1558 entity_result("uuid-target"),
1559 vec!["v_user".to_string()],
1560 None,
1561 Some("User"),
1562 )
1563 .unwrap();
1564 }
1565
1566 // Also insert an unrelated entry
1567 cache
1568 .put(
1569 999_u64,
1570 entity_result("uuid-other"),
1571 vec!["v_user".to_string()],
1572 None,
1573 Some("User"),
1574 )
1575 .unwrap();
1576
1577 let evicted = cache.invalidate_by_entity("User", "uuid-target").unwrap();
1578 assert_eq!(evicted, 50);
1579 assert!(cache.get(999_u64).unwrap().is_some(), "unrelated entity should remain");
1580 }
1581
1582 /// Clear works for all entries.
1583 #[test]
1584 fn test_clear_all() {
1585 let config = CacheConfig {
1586 max_entries: 10_000,
1587 enabled: true,
1588 ..CacheConfig::default()
1589 };
1590 let cache = QueryResultCache::new(config);
1591
1592 for i in 0_u64..200 {
1593 cache.put(i, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1594 }
1595
1596 cache.clear().unwrap();
1597 cache.store.run_pending_tasks();
1598
1599 let metrics = cache.metrics().unwrap();
1600 assert_eq!(metrics.size, 0);
1601
1602 for i in 0_u64..200 {
1603 assert!(cache.get(i).unwrap().is_none());
1604 }
1605 }
1606
1607 /// `memory_bytes` is tracked and reported via `metrics()`.
1608 #[test]
1609 fn test_memory_bytes_tracked() {
1610 let cache = QueryResultCache::new(CacheConfig::enabled());
1611
1612 cache.put(1_u64, test_result(), vec!["v".to_string()], None, None).unwrap();
1613 cache.put(2_u64, test_result(), vec!["v".to_string()], None, None).unwrap();
1614
1615 let before = cache.metrics().unwrap().memory_bytes;
1616 assert!(before > 0, "memory_bytes should be tracked");
1617 }
1618
1619 /// `memory_bytes` decreases after invalidation (synchronously via clear).
1620 #[test]
1621 fn test_memory_bytes_decreases_on_clear() {
1622 let cache = QueryResultCache::new(CacheConfig::enabled());
1623
1624 cache.put(1_u64, test_result(), vec!["v_user".to_string()], None, None).unwrap();
1625
1626 let before = cache.metrics().unwrap().memory_bytes;
1627 assert!(before > 0);
1628
1629 cache.clear().unwrap();
1630
1631 let after = cache.metrics().unwrap().memory_bytes;
1632 assert_eq!(after, 0, "memory_bytes should be zero after clear()");
1633 }
1634
1635 // ========================================================================
1636 // Concurrency regression test (#185)
1637 // ========================================================================
1638
    /// Regression guard for #185: LRU+Mutex serialized all hot-key reads through
    /// one shard's mutex. With moka, reads are lock-free and should scale near-
    /// linearly with thread count.
    #[test]
    #[ignore = "wall-clock dependent — run manually to confirm lock-free read scaling"]
    fn test_concurrent_reads_do_not_serialize() {
        const ITERS: usize = 10_000;
        let config = CacheConfig::enabled();
        let cache = Arc::new(QueryResultCache::new(config));
        let key = 42_u64;
        cache.put(key, test_result(), vec!["v_user".to_string()], None, None).unwrap();

        // Single-threaded baseline: ITERS sequential hits on one hot key.
        let start = std::time::Instant::now();
        for _ in 0..ITERS {
            let _ = cache.get(key).unwrap();
        }
        let single_elapsed = start.elapsed();

        // 40 threads all hammer the SAME key — the worst case for any design
        // that guards reads with a shared lock.
        let start = std::time::Instant::now();
        let handles: Vec<_> = (0..40)
            .map(|_| {
                let c = Arc::clone(&cache);
                std::thread::spawn(move || {
                    for _ in 0..ITERS {
                        let _ = c.get(key).unwrap();
                    }
                })
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
        let multi_elapsed = start.elapsed();

        // 40× the work in ≤2× the time → near-linear scaling.
        // Under old LRU+Mutex, 40-thread took ~20-40× single-thread time.
        assert!(
            multi_elapsed <= single_elapsed * 2,
            "40-thread ({:?}) was more than 2× single-thread ({:?}) — suggests serialization",
            multi_elapsed,
            single_elapsed,
        );
    }
1684}