// amaters_sdk_rust/cache.rs
1//! Client-side query result cache for the AmateRS Rust SDK
2//!
3//! Provides an LRU (Least Recently Used) cache for query results, reducing
4//! round-trips to the server for frequently accessed data. The cache is
5//! thread-safe and supports TTL-based expiration, collection-level invalidation,
6//! and configurable size limits.
7//!
8//! # Example
9//!
10//! ```no_run
11//! use amaters_sdk_rust::cache::{QueryCache, QueryCacheConfig, InvalidationPolicy};
12//! use std::time::Duration;
13//!
14//! let config = QueryCacheConfig::default()
15//!     .with_max_entries(500)
16//!     .with_ttl(Duration::from_secs(120))
17//!     .with_max_value_size(512 * 1024);
18//!
19//! let cache = QueryCache::new(config);
20//!
21//! // Put and get
22//! cache.put(b"key1", vec![1, 2, 3]);
23//! if let Some(data) = cache.get(b"key1") {
24//!     assert_eq!(data, vec![1, 2, 3]);
25//! }
26//! ```
27
28use parking_lot::RwLock;
29use std::collections::HashMap;
30use std::time::{Duration, Instant};
31
32// ---------------------------------------------------------------------------
33// Configuration
34// ---------------------------------------------------------------------------
35
/// Configuration for the query cache.
#[derive(Debug, Clone)]
pub struct QueryCacheConfig {
    /// Upper bound on the number of entries the cache may hold at once.
    pub max_entries: usize,
    /// How long a cached entry remains valid after insertion.
    pub ttl: Duration,
    /// Largest single value (in bytes) the cache will accept.
    pub max_value_size: usize,
    /// How cache entries are invalidated when write operations occur.
    pub invalidation_policy: InvalidationPolicy,
}

impl Default for QueryCacheConfig {
    /// Sensible defaults: 1000 entries, 60 s TTL, 1 MB value cap,
    /// automatic invalidation on writes.
    fn default() -> Self {
        QueryCacheConfig {
            max_entries: 1000,
            ttl: Duration::from_secs(60),
            max_value_size: 1024 * 1024, // 1 MB
            invalidation_policy: InvalidationPolicy::OnWrite,
        }
    }
}

impl QueryCacheConfig {
    /// Builder-style setter for the maximum number of cache entries.
    #[must_use]
    pub fn with_max_entries(self, max_entries: usize) -> Self {
        Self { max_entries, ..self }
    }

    /// Builder-style setter for the entry time-to-live.
    #[must_use]
    pub fn with_ttl(self, ttl: Duration) -> Self {
        Self { ttl, ..self }
    }

    /// Builder-style setter for the maximum size of a single value in bytes.
    #[must_use]
    pub fn with_max_value_size(self, max_value_size: usize) -> Self {
        Self { max_value_size, ..self }
    }

    /// Builder-style setter for the invalidation policy.
    #[must_use]
    pub fn with_invalidation_policy(self, invalidation_policy: InvalidationPolicy) -> Self {
        Self { invalidation_policy, ..self }
    }
}

/// Policy for cache invalidation when a write operation occurs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InvalidationPolicy {
    /// Automatically invalidate affected cache entries on write (default).
    OnWrite,
    /// Cache entries are only invalidated manually by the caller.
    Manual,
    /// No invalidation — entries live until TTL expiry or LRU eviction.
    None,
}
100
101// ---------------------------------------------------------------------------
102// CacheStats
103// ---------------------------------------------------------------------------
104
/// Statistics about cache usage.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// How many lookups found a live entry.
    pub hits: u64,
    /// How many lookups found nothing (or an expired entry).
    pub misses: u64,
    /// How many entries were evicted to stay within capacity.
    pub evictions: u64,
    /// Number of entries currently resident in the cache.
    pub size: usize,
    /// Running count of every entry ever inserted.
    pub total_inserts: u64,
    /// Running count of explicit invalidations.
    pub invalidations: u64,
}

impl CacheStats {
    /// Cache hit rate as a value in `[0.0, 1.0]`.
    /// Returns `0.0` if there have been no lookups at all.
    pub fn hit_rate(&self) -> f64 {
        match self.hits + self.misses {
            0 => 0.0,
            total => self.hits as f64 / total as f64,
        }
    }
}
134
135// ---------------------------------------------------------------------------
136// Internal structures
137// ---------------------------------------------------------------------------
138
/// A cached result entry.
#[derive(Debug, Clone)]
struct CachedResult {
    /// The raw cached data (opaque bytes; the cache never interprets them).
    data: Vec<u8>,
    /// When the entry was inserted / last refreshed; compared against the
    /// configured TTL via `Instant::elapsed` on every lookup.
    inserted_at: Instant,
    /// How many times this entry has been hit (per-entry counter, distinct
    /// from the global `CacheStats::hits`).
    hit_count: u64,
    /// The collection this entry belongs to (for collection-level invalidation).
    /// `None` when inserted via plain `put`.
    collection: Option<String>,
}

/// Node in the doubly-linked LRU list.
///
/// We store prev/next as `Option<CacheKey>` indices into the `HashMap` so that
/// the entire data structure lives inside a single allocation-friendly map.
/// A node with both pointers `None` is either detached or the sole element.
#[derive(Debug, Clone)]
struct LruNode {
    prev: Option<CacheKey>,
    next: Option<CacheKey>,
}

/// Opaque cache key (blake3 hash). Fixed 32-byte output means keys are
/// `Copy` and cheap to store in the linked-list pointers.
type CacheKey = [u8; 32];

/// Internal mutable state protected by a `RwLock`.
///
/// Invariant: every key present in `entries` appears exactly once in the
/// head→tail chain, and `collection_index` only references live keys.
struct CacheInner {
    /// The main storage: key -> (cached result, LRU node).
    entries: HashMap<CacheKey, (CachedResult, LruNode)>,
    /// Head of the LRU list (most recently used).
    head: Option<CacheKey>,
    /// Tail of the LRU list (least recently used — eviction candidate).
    tail: Option<CacheKey>,
    /// Reverse index: collection name -> set of cache keys belonging to it.
    collection_index: HashMap<String, Vec<CacheKey>>,
    /// Accumulated statistics.
    stats: CacheStats,
}
178
impl CacheInner {
    /// Construct an empty cache state with zeroed statistics.
    fn new() -> Self {
        Self {
            entries: HashMap::new(),
            head: None,
            tail: None,
            collection_index: HashMap::new(),
            stats: CacheStats {
                hits: 0,
                misses: 0,
                evictions: 0,
                size: 0,
                total_inserts: 0,
                invalidations: 0,
            },
        }
    }

    // ---- LRU helpers -------------------------------------------------------

    /// Detach `key` from the doubly-linked list without removing it from the map.
    ///
    /// No-op if the key is not present. After this call the node's own
    /// prev/next pointers are cleared, so `push_front` may safely re-link it.
    fn detach(&mut self, key: &CacheKey) {
        // Clone the node first: we cannot hold a borrow of `entries` while
        // also mutating neighbouring nodes below. An LruNode is two Options,
        // so the clone is trivially cheap.
        let node = if let Some((_, node)) = self.entries.get(key) {
            node.clone()
        } else {
            return;
        };

        // Fix previous node's next pointer
        if let Some(prev_key) = &node.prev {
            if let Some((_, prev_node)) = self.entries.get_mut(prev_key) {
                prev_node.next = node.next;
            }
        } else {
            // This node was the head
            self.head = node.next;
        }

        // Fix next node's prev pointer
        if let Some(next_key) = &node.next {
            if let Some((_, next_node)) = self.entries.get_mut(next_key) {
                next_node.prev = node.prev;
            }
        } else {
            // This node was the tail
            self.tail = node.prev;
        }

        // Clear this node's pointers
        if let Some((_, n)) = self.entries.get_mut(key) {
            n.prev = None;
            n.next = None;
        }
    }

    /// Push `key` to the front (head) of the LRU list. Assumes the key is
    /// already detached (or freshly inserted); calling it on a still-linked
    /// key would corrupt the list.
    fn push_front(&mut self, key: CacheKey) {
        if let Some(old_head) = self.head {
            if old_head == key {
                return; // already at head
            }
            // Point old head's prev to this key
            if let Some((_, node)) = self.entries.get_mut(&old_head) {
                node.prev = Some(key);
            }
        }

        // Set this node's pointers
        if let Some((_, node)) = self.entries.get_mut(&key) {
            node.prev = None;
            node.next = self.head;
        }

        self.head = Some(key);

        // First element of a previously-empty list is both head and tail.
        if self.tail.is_none() {
            self.tail = Some(key);
        }
    }

    /// Move an existing key to the front (most recently used).
    fn touch(&mut self, key: &CacheKey) {
        // Copy the key out so `detach`/`push_front` can take `&mut self`.
        let k = *key;
        self.detach(&k);
        self.push_front(k);
    }

    /// Evict the least recently used entry (tail). Returns the evicted key,
    /// or `None` when the list (and hence the cache) is empty.
    fn evict_lru(&mut self) -> Option<CacheKey> {
        let tail_key = self.tail?;
        self.remove_entry(&tail_key);
        self.stats.evictions += 1;
        Some(tail_key)
    }

    /// Remove an entry entirely (map + LRU list + collection index).
    /// Decrements `stats.size`; callers account for evictions/invalidations.
    fn remove_entry(&mut self, key: &CacheKey) {
        self.detach(key);
        if let Some((result, _)) = self.entries.remove(key) {
            self.stats.size = self.stats.size.saturating_sub(1);
            // Remove from collection index; drop the index bucket once empty
            // so stale collection names don't accumulate.
            if let Some(ref coll) = result.collection {
                if let Some(keys) = self.collection_index.get_mut(coll) {
                    keys.retain(|k| k != key);
                    if keys.is_empty() {
                        self.collection_index.remove(coll);
                    }
                }
            }
        }
    }
}
292
293// ---------------------------------------------------------------------------
294// QueryCache (public API)
295// ---------------------------------------------------------------------------
296
/// Thread-safe LRU cache for query results.
///
/// All interior state (entry map, LRU list, statistics) lives behind a single
/// `parking_lot::RwLock`; every public method acquires it for the duration of
/// the operation, so the type is `Send`/`Sync` and safe to share via `Arc`.
pub struct QueryCache {
    /// Entry map, LRU chain, collection index and stats under one lock.
    inner: RwLock<CacheInner>,
    /// Immutable configuration captured at construction time.
    config: QueryCacheConfig,
}
306
307impl QueryCache {
308    /// Create a new cache with the given configuration.
309    pub fn new(config: QueryCacheConfig) -> Self {
310        Self {
311            inner: RwLock::new(CacheInner::new()),
312            config,
313        }
314    }
315
316    /// Look up a cached value by its raw key bytes.
317    ///
318    /// Returns `None` if the entry does not exist or has expired.
319    /// A successful lookup increments the hit counter and moves the entry
320    /// to the most-recently-used position.
321    pub fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
322        let cache_key = Self::hash_key(key);
323
324        // First try with a read lock to check existence and expiry
325        {
326            let inner = self.inner.read();
327            match inner.entries.get(&cache_key) {
328                Some((result, _)) => {
329                    if result.inserted_at.elapsed() > self.config.ttl {
330                        // Expired — we'll remove it below with a write lock
331                        drop(inner);
332                        let mut inner = self.inner.write();
333                        inner.remove_entry(&cache_key);
334                        inner.stats.misses += 1;
335                        return None;
336                    }
337                }
338                None => {
339                    drop(inner);
340                    let mut inner = self.inner.write();
341                    inner.stats.misses += 1;
342                    return None;
343                }
344            }
345        }
346
347        // Entry exists and is not expired — promote & bump hit count.
348        let mut inner = self.inner.write();
349        // Re-check under write lock (another thread may have evicted it).
350        if let Some((result, _)) = inner.entries.get_mut(&cache_key) {
351            if result.inserted_at.elapsed() > self.config.ttl {
352                inner.remove_entry(&cache_key);
353                inner.stats.misses += 1;
354                return None;
355            }
356            result.hit_count += 1;
357            let data = result.data.clone();
358            inner.stats.hits += 1;
359            inner.touch(&cache_key);
360            Some(data)
361        } else {
362            inner.stats.misses += 1;
363            None
364        }
365    }
366
367    /// Insert a value into the cache.
368    ///
369    /// If the value exceeds `max_value_size` it is silently dropped. If the
370    /// cache is at capacity, the least recently used entry is evicted first.
371    pub fn put(&self, key: &[u8], value: Vec<u8>) {
372        self.put_with_collection(key, value, None);
373    }
374
375    /// Insert a value into the cache with an associated collection name.
376    ///
377    /// The collection name is used for `invalidate_collection`.
378    pub fn put_with_collection(&self, key: &[u8], value: Vec<u8>, collection: Option<&str>) {
379        if value.len() > self.config.max_value_size {
380            return; // silently reject oversized values
381        }
382
383        let cache_key = Self::hash_key(key);
384
385        let mut inner = self.inner.write();
386
387        // If key already exists, remove it first so we can re-insert cleanly.
388        if inner.entries.contains_key(&cache_key) {
389            inner.remove_entry(&cache_key);
390        }
391
392        // Evict if at capacity
393        while inner.entries.len() >= self.config.max_entries {
394            inner.evict_lru();
395        }
396
397        let coll_string = collection.map(String::from);
398
399        // Insert into collection index
400        if let Some(ref coll) = coll_string {
401            inner
402                .collection_index
403                .entry(coll.clone())
404                .or_default()
405                .push(cache_key);
406        }
407
408        let result = CachedResult {
409            data: value,
410            inserted_at: Instant::now(),
411            hit_count: 0,
412            collection: coll_string,
413        };
414
415        let node = LruNode {
416            prev: None,
417            next: None,
418        };
419
420        inner.entries.insert(cache_key, (result, node));
421        inner.stats.size += 1;
422        inner.stats.total_inserts += 1;
423        inner.push_front(cache_key);
424    }
425
426    /// Remove a specific entry from the cache.
427    pub fn invalidate(&self, key: &[u8]) {
428        let cache_key = Self::hash_key(key);
429        let mut inner = self.inner.write();
430        if inner.entries.contains_key(&cache_key) {
431            inner.remove_entry(&cache_key);
432            inner.stats.invalidations += 1;
433        }
434    }
435
436    /// Remove all cache entries belonging to a given collection.
437    pub fn invalidate_collection(&self, collection: &str) {
438        let mut inner = self.inner.write();
439        if let Some(keys) = inner.collection_index.remove(collection) {
440            for key in &keys {
441                inner.detach(key);
442                inner.entries.remove(key);
443                inner.stats.size = inner.stats.size.saturating_sub(1);
444                inner.stats.invalidations += 1;
445            }
446        }
447    }
448
449    /// Remove all entries from the cache.
450    pub fn clear(&self) {
451        let mut inner = self.inner.write();
452        let prev_size = inner.entries.len();
453        inner.entries.clear();
454        inner.head = None;
455        inner.tail = None;
456        inner.collection_index.clear();
457        inner.stats.size = 0;
458        inner.stats.invalidations += prev_size as u64;
459    }
460
461    /// Return a snapshot of the current cache statistics.
462    pub fn stats(&self) -> CacheStats {
463        let inner = self.inner.read();
464        inner.stats.clone()
465    }
466
467    /// Return the current number of entries in the cache.
468    pub fn len(&self) -> usize {
469        let inner = self.inner.read();
470        inner.entries.len()
471    }
472
473    /// Check whether the cache is empty.
474    pub fn is_empty(&self) -> bool {
475        self.len() == 0
476    }
477
478    /// Get a reference to the cache configuration.
479    pub fn config(&self) -> &QueryCacheConfig {
480        &self.config
481    }
482
483    /// Return the invalidation policy.
484    pub fn invalidation_policy(&self) -> InvalidationPolicy {
485        self.config.invalidation_policy
486    }
487
488    // ---- key helpers -------------------------------------------------------
489
490    /// Build a composite cache key from collection + query key and hash it.
491    pub fn make_key(collection: &str, query_key: &[u8]) -> Vec<u8> {
492        let mut buf = Vec::with_capacity(collection.len() + 1 + query_key.len());
493        buf.extend_from_slice(collection.as_bytes());
494        buf.push(b':');
495        buf.extend_from_slice(query_key);
496        buf
497    }
498
499    /// Compute a blake3 hash of the raw key bytes.
500    fn hash_key(key: &[u8]) -> CacheKey {
501        let hash = blake3::hash(key);
502        *hash.as_bytes()
503    }
504}
505
506// ---------------------------------------------------------------------------
507// Tests
508// ---------------------------------------------------------------------------
509
#[cfg(test)]
mod tests {
    // Unit tests covering hit/miss accounting, TTL expiry, LRU ordering,
    // invalidation (per-key and per-collection), stats, concurrency and
    // size-limit enforcement.
    use super::*;
    use std::thread;
    use std::time::Duration;

    /// Fresh cache with default configuration (1000 entries, 60 s TTL).
    fn default_cache() -> QueryCache {
        QueryCache::new(QueryCacheConfig::default())
    }

    // --- basic hit / miss ---------------------------------------------------

    #[test]
    fn test_cache_hit() {
        let cache = default_cache();
        cache.put(b"key1", vec![10, 20, 30]);

        let result = cache.get(b"key1");
        assert!(result.is_some());
        assert_eq!(result.expect("should have value"), vec![10, 20, 30]);

        let stats = cache.stats();
        assert_eq!(stats.hits, 1);
        assert_eq!(stats.misses, 0);
    }

    #[test]
    fn test_cache_miss() {
        let cache = default_cache();

        let result = cache.get(b"nonexistent");
        assert!(result.is_none());

        let stats = cache.stats();
        assert_eq!(stats.hits, 0);
        assert_eq!(stats.misses, 1);
    }

    // --- TTL expiry ---------------------------------------------------------

    #[test]
    fn test_ttl_expiry() {
        let config = QueryCacheConfig::default().with_ttl(Duration::from_millis(50));
        let cache = QueryCache::new(config);

        cache.put(b"key1", vec![1, 2, 3]);
        assert!(cache.get(b"key1").is_some());

        // Wait for TTL to expire
        thread::sleep(Duration::from_millis(80));

        assert!(cache.get(b"key1").is_none());

        let stats = cache.stats();
        assert_eq!(stats.hits, 1);
        assert_eq!(stats.misses, 1); // the expired lookup
    }

    // --- LRU eviction -------------------------------------------------------

    #[test]
    fn test_lru_eviction() {
        let config = QueryCacheConfig::default().with_max_entries(3);
        let cache = QueryCache::new(config);

        cache.put(b"a", vec![1]);
        cache.put(b"b", vec![2]);
        cache.put(b"c", vec![3]);

        // Cache is full. Insert one more — should evict "a" (LRU).
        cache.put(b"d", vec![4]);

        assert!(cache.get(b"a").is_none(), "a should have been evicted");
        assert!(cache.get(b"b").is_some());
        assert!(cache.get(b"c").is_some());
        assert!(cache.get(b"d").is_some());

        let stats = cache.stats();
        assert_eq!(stats.evictions, 1);
    }

    #[test]
    fn test_lru_access_order() {
        let config = QueryCacheConfig::default().with_max_entries(3);
        let cache = QueryCache::new(config);

        cache.put(b"a", vec![1]);
        cache.put(b"b", vec![2]);
        cache.put(b"c", vec![3]);

        // Access "a" to make it most recently used
        let _ = cache.get(b"a");

        // Insert "d" — should evict "b" (now LRU), not "a"
        cache.put(b"d", vec![4]);

        assert!(
            cache.get(b"a").is_some(),
            "a was accessed and should not be evicted"
        );
        assert!(cache.get(b"b").is_none(), "b should have been evicted");
        assert!(cache.get(b"c").is_some());
        assert!(cache.get(b"d").is_some());
    }

    // --- write invalidation -------------------------------------------------

    #[test]
    fn test_invalidate_key() {
        let cache = default_cache();

        cache.put(b"key1", vec![1]);
        cache.put(b"key2", vec![2]);

        cache.invalidate(b"key1");

        assert!(cache.get(b"key1").is_none());
        assert!(cache.get(b"key2").is_some());

        let stats = cache.stats();
        assert_eq!(stats.invalidations, 1);
    }

    // --- collection invalidation --------------------------------------------

    #[test]
    fn test_invalidate_collection() {
        let cache = default_cache();

        let key1 = QueryCache::make_key("users", b"u1");
        let key2 = QueryCache::make_key("users", b"u2");
        let key3 = QueryCache::make_key("orders", b"o1");

        cache.put_with_collection(&key1, vec![1], Some("users"));
        cache.put_with_collection(&key2, vec![2], Some("users"));
        cache.put_with_collection(&key3, vec![3], Some("orders"));

        cache.invalidate_collection("users");

        assert!(cache.get(&key1).is_none());
        assert!(cache.get(&key2).is_none());
        assert!(cache.get(&key3).is_some(), "orders entry should remain");

        // Only the two "users" entries count as invalidations.
        let stats = cache.stats();
        assert_eq!(stats.invalidations, 2);
    }

    // --- stats accuracy -----------------------------------------------------

    #[test]
    fn test_stats_accuracy() {
        let cache = default_cache();

        // 3 inserts
        cache.put(b"a", vec![1]);
        cache.put(b"b", vec![2]);
        cache.put(b"c", vec![3]);

        // 2 hits
        let _ = cache.get(b"a");
        let _ = cache.get(b"b");

        // 1 miss
        let _ = cache.get(b"z");

        // 1 invalidation
        cache.invalidate(b"c");

        let stats = cache.stats();
        assert_eq!(stats.total_inserts, 3);
        assert_eq!(stats.hits, 2);
        assert_eq!(stats.misses, 1);
        assert_eq!(stats.invalidations, 1);
        assert_eq!(stats.size, 2);

        let rate = stats.hit_rate();
        // 2 hits / (2 hits + 1 miss) = 0.666...
        assert!((rate - 2.0 / 3.0).abs() < 1e-9);
    }

    #[test]
    fn test_hit_rate_no_lookups() {
        let cache = default_cache();
        let stats = cache.stats();
        assert!((stats.hit_rate() - 0.0).abs() < f64::EPSILON);
    }

    // --- concurrent access --------------------------------------------------

    #[test]
    fn test_concurrent_access() {
        use std::sync::Arc;

        let cache = Arc::new(QueryCache::new(
            QueryCacheConfig::default().with_max_entries(500),
        ));

        let mut handles = Vec::new();

        // Spawn writer threads
        for t in 0..4 {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..200 {
                    let key = format!("thread-{}-key-{}", t, i);
                    cache.put(key.as_bytes(), vec![t as u8; 64]);
                }
            }));
        }

        // Spawn reader threads (race against the writers intentionally)
        for t in 0..4 {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..200 {
                    let key = format!("thread-{}-key-{}", t, i);
                    let _ = cache.get(key.as_bytes());
                }
            }));
        }

        for h in handles {
            h.join().expect("thread should not panic");
        }

        let stats = cache.stats();
        // Just verify no panics occurred and stats are sane
        assert!(stats.total_inserts > 0);
        assert!(stats.size <= 500);
    }

    // --- max value size enforcement -----------------------------------------

    #[test]
    fn test_max_value_size_enforcement() {
        let config = QueryCacheConfig::default().with_max_value_size(100);
        let cache = QueryCache::new(config);

        // This should be accepted (100 bytes exactly)
        cache.put(b"small", vec![0u8; 100]);
        assert!(cache.get(b"small").is_some());

        // This should be silently rejected (101 bytes)
        cache.put(b"big", vec![0u8; 101]);
        assert!(cache.get(b"big").is_none());

        let stats = cache.stats();
        assert_eq!(stats.total_inserts, 1); // only the small one
    }

    // --- clear --------------------------------------------------------------

    #[test]
    fn test_clear() {
        let cache = default_cache();

        cache.put(b"a", vec![1]);
        cache.put(b"b", vec![2]);
        cache.put(b"c", vec![3]);

        assert_eq!(cache.len(), 3);

        cache.clear();

        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
        assert!(cache.get(b"a").is_none());

        // clear() counts every removed entry as an invalidation.
        let stats = cache.stats();
        assert_eq!(stats.size, 0);
        assert_eq!(stats.invalidations, 3);
    }

    // --- make_key helper ----------------------------------------------------

    #[test]
    fn test_make_key() {
        let key = QueryCache::make_key("users", b"abc");
        assert_eq!(key, b"users:abc");
    }

    // --- overwrite existing key ---------------------------------------------

    #[test]
    fn test_overwrite_existing_key() {
        let cache = default_cache();

        cache.put(b"key", vec![1, 2, 3]);
        assert_eq!(cache.get(b"key").expect("should exist"), vec![1, 2, 3]);

        cache.put(b"key", vec![4, 5, 6]);
        assert_eq!(cache.get(b"key").expect("should exist"), vec![4, 5, 6]);

        assert_eq!(cache.len(), 1);

        // Overwrites count as fresh inserts in the statistics.
        let stats = cache.stats();
        assert_eq!(stats.total_inserts, 2);
    }

    // --- invalidation policy ------------------------------------------------

    #[test]
    fn test_invalidation_policy_config() {
        let config =
            QueryCacheConfig::default().with_invalidation_policy(InvalidationPolicy::Manual);
        let cache = QueryCache::new(config);
        assert_eq!(cache.invalidation_policy(), InvalidationPolicy::Manual);
    }

    // --- single entry cache edge case ---------------------------------------

    #[test]
    fn test_single_entry_cache() {
        let config = QueryCacheConfig::default().with_max_entries(1);
        let cache = QueryCache::new(config);

        cache.put(b"a", vec![1]);
        assert!(cache.get(b"a").is_some());

        // Inserting a second key must evict the only resident entry.
        cache.put(b"b", vec![2]);
        assert!(cache.get(b"a").is_none());
        assert!(cache.get(b"b").is_some());

        let stats = cache.stats();
        assert_eq!(stats.evictions, 1);
    }
}