// sqry_core/cache/storage.rs
1// RKG: CODE:SQRY-CORE implements REQ:SQRY-P2-6-CACHE-EVICTION-POLICY
2//! In-memory cache storage with LRU eviction.
3//!
4//! This module provides a thread-safe, concurrent cache storage layer using:
5//! - **`DashMap`**: Sharded hash map for lock-free concurrent reads
6//! - **LRU / `TinyLFU`**: Eviction framework (default LRU; adaptive policies wired later)
7//! - **Size tracking**: Maintains total byte count for eviction
8//!
9//! # Architecture
10//!
11//! ```text
12//! ┌─────────────────────────────────────┐
13//! │         CacheStorage                │
14//! ├─────────────────────────────────────┤
15//! │  DashMap<CacheKey, CacheEntry>      │  ← Lock-free reads
16//! │  Mutex<LruCache<CacheKey, ()>>      │  ← Protected LRU ordering
17//! │  Total size counter (atomic)        │  ← Enforce cap
18//! └─────────────────────────────────────┘
19//! ```
20//!
21//! # Concurrency
22//!
23//! - **Reads**: Lock-free via `DashMap` sharding
24//! - **Writes**: Per-key locks (`DashMap` handles this)
25//! - **LRU updates**: Brief lock on LRU cache to record access
26//! - **Eviction**: Lock-protected to prevent race conditions
27//!
28//! # Example
29//!
30//! ```rust,ignore
31//! use sqry_core::cache::{CacheStorage, CacheKey, GraphNodeSummary};
32//!
33//! let storage = CacheStorage::new(50 * 1024 * 1024); // 50 MB cap
34//!
35//! storage.insert(key, vec![summary1, summary2]);
36//!
37//! if let Some(summaries) = storage.get(&key) {
38//!     // Cache hit
39//! }
40//! ```
41
42use super::config::CacheConfig;
43use super::policy::{
44    CacheAdmission, CachePolicy, CachePolicyConfig, CachePolicyKind, CachePolicyMetrics,
45    build_cache_policy,
46};
47use crate::cache::{CacheKey, GraphNodeSummary};
48use dashmap::DashMap;
49use lru::LruCache;
50use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
51use std::sync::{Arc, Mutex};
52
/// Cache entry with size metadata.
///
/// Uses `Arc<[GraphNodeSummary]>` for zero-copy sharing across cache hits:
/// cloning the entry (or handing it to a caller) only bumps a refcount.
#[derive(Debug, Clone)]
struct CacheEntry {
    /// Node summaries for this file (shared via Arc)
    summaries: Arc<[GraphNodeSummary]>,

    /// Estimated size in bytes (cached to avoid repeated serialization).
    /// Computed once in `CacheEntry::new` by sampling the first summary.
    size_bytes: usize,
}
64
65impl CacheEntry {
66    /// Create a new cache entry from a vector of summaries.
67    fn new(summaries: Vec<GraphNodeSummary>) -> Self {
68        // Estimate size: serialize one summary and multiply by count
69        let size_bytes = if summaries.is_empty() {
70            0
71        } else {
72            let sample_size = postcard::to_allocvec(&summaries[0])
73                .map(|bytes| bytes.len())
74                .unwrap_or(256); // Fallback to budget estimate
75            sample_size * summaries.len()
76        };
77
78        Self {
79            summaries: Arc::from(summaries.into_boxed_slice()),
80            size_bytes,
81        }
82    }
83}
84
/// Thread-safe in-memory cache storage with LRU eviction.
///
/// # Thread Safety
///
/// All operations are thread-safe and can be called concurrently from
/// multiple threads (e.g., Rayon parallel queries or MCP server requests).
///
/// # Eviction Policy
///
/// Uses an incremental LRU cache with Mutex protection to prevent race conditions.
/// When the cache exceeds `max_bytes`, the least recently used entries
/// are evicted until the size drops below the cap.
///
/// # Examples
///
/// ```rust,ignore
/// use sqry_core::cache::{CacheStorage, CacheKey};
///
/// let storage = CacheStorage::new(50 * 1024 * 1024); // 50 MB
///
/// // Thread-safe insert
/// storage.insert(key, summaries);
///
/// // Thread-safe get (updates LRU)
/// if let Some(summaries) = storage.get(&key) {
///     // Use summaries
/// }
///
/// // Check stats
/// let stats = storage.stats();
/// println!("Size: {} bytes, Entries: {}", stats.total_bytes, stats.entry_count);
/// ```
pub struct CacheStorage {
    /// Concurrent hash map (sharded for lock-free reads)
    entries: DashMap<CacheKey, CacheEntry>,

    /// LRU ordering (Mutex-protected to prevent eviction races)
    ///
    /// The LRU cache only tracks access order, not the actual data.
    /// It is created unbounded (`LruCache::unbounded`) because eviction
    /// is driven by total byte size, not entry count.
    lru: Mutex<LruCache<CacheKey, ()>>,

    /// Maximum total cache size in bytes
    max_bytes: u64,

    /// Current total size (approximate, updated atomically with relaxed
    /// ordering; may briefly lag the true map contents under contention)
    total_bytes: AtomicU64,

    /// Cache statistics (hit/miss/eviction counters, relaxed ordering)
    hits: AtomicUsize,
    misses: AtomicUsize,
    evictions: AtomicUsize,

    /// Eviction policy controller (LRU, TinyLFU, ... — see `policy` module)
    policy: Arc<dyn CachePolicy<CacheKey>>,
}
142
143impl CacheStorage {
144    /// Create a new cache storage with the given size limit.
145    ///
146    /// # Arguments
147    ///
148    /// - `max_bytes`: Maximum total cache size in bytes
149    ///
150    /// # Examples
151    ///
152    /// ```rust
153    /// use sqry_core::cache::CacheStorage;
154    ///
155    /// let storage = CacheStorage::new(50 * 1024 * 1024); // 50 MB
156    /// ```
157    #[must_use]
158    pub fn new(max_bytes: u64) -> Self {
159        Self::with_policy(&CachePolicyConfig::new(
160            CachePolicyKind::Lru,
161            max_bytes,
162            CacheConfig::DEFAULT_POLICY_WINDOW_RATIO,
163        ))
164    }
165
166    /// Create cache storage with a specific eviction policy configuration.
167    #[must_use]
168    pub fn with_policy(config: &CachePolicyConfig) -> Self {
169        let policy = build_cache_policy::<CacheKey>(config);
170        Self {
171            entries: DashMap::new(),
172            lru: Mutex::new(LruCache::unbounded()),
173            max_bytes: config.max_bytes,
174            total_bytes: AtomicU64::new(0),
175            hits: AtomicUsize::new(0),
176            misses: AtomicUsize::new(0),
177            evictions: AtomicUsize::new(0),
178            policy,
179        }
180    }
181
182    fn handle_policy_evictions(&self) {
183        for eviction in self.policy.drain_evictions() {
184            let key = eviction.key;
185            if let Some((_, removed)) = self.entries.remove(&key) {
186                self.total_bytes
187                    .fetch_sub(removed.size_bytes as u64, Ordering::Relaxed);
188                self.evictions.fetch_add(1, Ordering::Relaxed);
189            }
190
191            if let Ok(mut lru) = self.lru.lock() {
192                let _ = lru.pop(&key);
193            }
194        }
195    }
196
197    /// Get cached summaries for a key.
198    ///
199    /// Returns an `Arc<[GraphNodeSummary]>` to avoid cloning the entire vector
200    /// on every cache hit. This provides zero-copy sharing of cached data.
201    ///
202    /// Updates the LRU ordering on hit.
203    ///
204    /// # Returns
205    ///
206    /// - `Some(Arc<[GraphNodeSummary]>)` if the key is in cache (hit)
207    /// - `None` if the key is not in cache (miss)
208    ///
209    /// # Examples
210    ///
211    /// ```rust,ignore
212    /// if let Some(summaries) = storage.get(&key) {
213    ///     // Cache hit - Arc clone is cheap (just bumps ref count)
214    ///     for summary in summaries.iter() {
215    ///         // Process symbols...
216    ///     }
217    /// }
218    /// ```
219    pub fn get(&self, key: &CacheKey) -> Option<Arc<[GraphNodeSummary]>> {
220        if let Some(entry) = self.entries.get(key) {
221            let _ = self.policy.record_hit(key);
222            // Update LRU ordering (brief lock)
223            if let Ok(mut lru) = self.lru.lock() {
224                lru.get_or_insert(key.clone(), || ());
225            }
226
227            self.hits.fetch_add(1, Ordering::Relaxed);
228            Some(Arc::clone(&entry.summaries))
229        } else {
230            self.misses.fetch_add(1, Ordering::Relaxed);
231            None
232        }
233    }
234
235    /// Insert summaries into the cache.
236    ///
237    /// Triggers eviction if the cache exceeds the size limit.
238    ///
239    /// # Arguments
240    ///
241    /// - `key`: Cache key (file path + language + content hash)
242    /// - `summaries`: Node summaries to cache
243    ///
244    /// # Examples
245    ///
246    /// ```rust,ignore
247    /// storage.insert(key, vec![summary1, summary2]);
248    /// ```
249    pub fn insert(&self, key: CacheKey, summaries: Vec<GraphNodeSummary>) {
250        let entry = CacheEntry::new(summaries);
251        let entry_size = entry.size_bytes as u64;
252        let key_for_lru = key.clone();
253
254        // Remove old entry if it exists (to update size correctly)
255        if let Some((_, old_entry)) = self.entries.remove(&key) {
256            self.total_bytes
257                .fetch_sub(old_entry.size_bytes as u64, Ordering::Relaxed);
258            self.policy.invalidate(&key);
259        }
260
261        if matches!(
262            self.policy.admit(&key, entry_size),
263            CacheAdmission::Rejected
264        ) {
265            log::debug!(
266                "cache policy {:?} rejected entry {:?} ({} bytes)",
267                self.policy.kind(),
268                &key,
269                entry_size
270            );
271            return;
272        }
273
274        // Insert new entry
275        self.entries.insert(key, entry);
276        self.total_bytes.fetch_add(entry_size, Ordering::Relaxed);
277
278        // Update LRU ordering (brief lock)
279        if let Ok(mut lru) = self.lru.lock() {
280            lru.put(key_for_lru, ());
281        }
282
283        self.handle_policy_evictions();
284
285        // Evict if needed (lock-protected)
286        if self.total_bytes.load(Ordering::Relaxed) > self.max_bytes {
287            self.evict_lru();
288        }
289    }
290
291    /// Evict least recently used entries until under size cap.
292    ///
293    /// This is called automatically by `insert()` when needed.
294    /// Uses incremental LRU eviction with lock protection to prevent races.
295    fn evict_lru(&self) {
296        // Lock LRU to prevent concurrent evictions
297        let Ok(mut lru) = self.lru.lock() else {
298            log::warn!("Failed to acquire LRU lock for eviction");
299            return;
300        };
301
302        let mut current_size = self.total_bytes.load(Ordering::Relaxed);
303
304        // Evict oldest entries until under cap
305        while current_size > self.max_bytes {
306            // Pop least recently used key
307            let Some((key, ())) = lru.pop_lru() else {
308                break; // No more entries to evict
309            };
310
311            // Remove from entries map
312            if let Some((_, removed)) = self.entries.remove(&key) {
313                current_size = current_size.saturating_sub(removed.size_bytes as u64);
314                self.total_bytes
315                    .fetch_sub(removed.size_bytes as u64, Ordering::Relaxed);
316                self.evictions.fetch_add(1, Ordering::Relaxed);
317                self.policy.invalidate(&key);
318
319                log::debug!(
320                    "Evicted cache entry: {} ({} bytes)",
321                    key,
322                    removed.size_bytes
323                );
324            }
325        }
326    }
327
328    /// Clear all entries from the cache.
329    ///
330    /// # Examples
331    ///
332    /// ```rust,ignore
333    /// storage.clear();
334    /// ```
335    pub fn clear(&self) {
336        self.entries.clear();
337        self.total_bytes.store(0, Ordering::Relaxed);
338        self.hits.store(0, Ordering::Relaxed);
339        self.misses.store(0, Ordering::Relaxed);
340        self.evictions.store(0, Ordering::Relaxed);
341        self.policy.reset();
342
343        if let Ok(mut lru) = self.lru.lock() {
344            lru.clear();
345        }
346
347        log::debug!("Cache cleared");
348    }
349
350    /// Get cache statistics.
351    ///
352    /// # Examples
353    ///
354    /// ```rust,ignore
355    /// let stats = storage.stats();
356    /// println!("Hit rate: {:.1}%", stats.hit_rate() * 100.0);
357    /// ```
358    pub fn stats(&self) -> CacheStats {
359        CacheStats {
360            entry_count: self.entries.len(),
361            total_bytes: self.total_bytes.load(Ordering::Relaxed),
362            max_bytes: self.max_bytes,
363            hits: self.hits.load(Ordering::Relaxed),
364            misses: self.misses.load(Ordering::Relaxed),
365            evictions: self.evictions.load(Ordering::Relaxed),
366            policy: self.policy.stats(),
367        }
368    }
369}
370
/// Cache statistics for telemetry and diagnostics.
///
/// A point-in-time snapshot produced by `CacheStorage::stats`; the counters
/// are read with relaxed atomic ordering, so values may be slightly stale
/// under concurrent activity.
#[derive(Debug, Clone, Copy, Default)]
pub struct CacheStats {
    /// Number of entries in cache
    pub entry_count: usize,

    /// Total size in bytes
    pub total_bytes: u64,

    /// Maximum allowed size
    pub max_bytes: u64,

    /// Number of cache hits
    pub hits: usize,

    /// Number of cache misses
    pub misses: usize,

    /// Number of evictions
    pub evictions: usize,

    /// Policy-specific telemetry (LFU rejects, hot/cold evictions, etc.)
    pub policy: CachePolicyMetrics,
}
395
396impl CacheStats {
397    fn usize_to_f64(value: usize) -> f64 {
398        #[allow(clippy::cast_precision_loss)]
399        {
400            value as f64
401        }
402    }
403
404    fn u64_to_f64(value: u64) -> f64 {
405        #[allow(clippy::cast_precision_loss)]
406        {
407            value as f64
408        }
409    }
410
411    /// Calculate cache hit rate (0.0 to 1.0).
412    ///
413    /// Returns 0.0 if no requests have been made yet.
414    #[must_use]
415    pub fn hit_rate(&self) -> f64 {
416        let total = self.hits + self.misses;
417        if total == 0 {
418            0.0
419        } else {
420            Self::usize_to_f64(self.hits) / Self::usize_to_f64(total)
421        }
422    }
423
424    /// Calculate cache utilization (0.0 to 1.0).
425    ///
426    /// Returns the fraction of `max_bytes` currently in use.
427    #[must_use]
428    pub fn utilization(&self) -> f64 {
429        if self.max_bytes == 0 {
430            0.0
431        } else {
432            Self::u64_to_f64(self.total_bytes) / Self::u64_to_f64(self.max_bytes)
433        }
434    }
435}
436
#[cfg(test)]
mod tests {
    //! Unit tests covering basic operations, byte-cap eviction, LRU
    //! ordering, concurrent access, statistics math, and TinyLFU admission.

    use super::*;
    use crate::cache::policy::{CachePolicyConfig, CachePolicyKind};
    use crate::graph::unified::node::NodeKind;
    use crate::hash::Blake3Hash;
    use approx::assert_abs_diff_eq;
    use std::path::{Path, PathBuf};
    use std::sync::Arc;
    use std::thread;

    // Build a deterministic key from a name; the content hash is derived
    // from the name's first byte, so `name` must be non-empty.
    fn make_test_key(name: &str, lang: &str) -> CacheKey {
        let hash = Blake3Hash::from_bytes([name.as_bytes()[0]; 32]);
        CacheKey::from_raw_path(PathBuf::from(name), lang, hash)
    }

    // Minimal one-function summary fixture used across the tests.
    fn make_test_summary(name: &str) -> GraphNodeSummary {
        GraphNodeSummary::new(
            Arc::from(name),
            NodeKind::Function,
            Arc::from(Path::new("test.rs")),
            1,
            0,
            1,
            10,
        )
    }

    // A fresh store reports zero counters and the configured cap.
    #[test]
    fn test_storage_new() {
        let storage = CacheStorage::new(1024);
        let stats = storage.stats();

        assert_eq!(stats.entry_count, 0);
        assert_eq!(stats.total_bytes, 0);
        assert_eq!(stats.max_bytes, 1024);
        assert_eq!(stats.hits, 0);
        assert_eq!(stats.misses, 0);
    }

    // Round-trip: insert then get, and verify the hit counter.
    #[test]
    fn test_storage_insert_and_get() {
        let storage = CacheStorage::new(10 * 1024);

        let key = make_test_key("file.rs", "rust");
        let summaries = vec![make_test_summary("test_fn")];

        storage.insert(key.clone(), summaries.clone());

        // Get should return the inserted value
        let retrieved = storage.get(&key).unwrap();
        assert_eq!(retrieved.len(), 1);
        assert_eq!(retrieved[0].name.as_ref(), "test_fn");

        // Stats should show one hit
        let stats = storage.stats();
        assert_eq!(stats.hits, 1);
        assert_eq!(stats.misses, 0);
        assert_eq!(stats.entry_count, 1);
    }

    // A lookup on an empty cache counts as a miss.
    #[test]
    fn test_storage_miss() {
        let storage = CacheStorage::new(10 * 1024);

        let key = make_test_key("file.rs", "rust");

        // Get on empty cache should miss
        assert!(storage.get(&key).is_none());

        let stats = storage.stats();
        assert_eq!(stats.hits, 0);
        assert_eq!(stats.misses, 1);
    }

    // Re-inserting the same key replaces the previous value.
    #[test]
    fn test_storage_update() {
        let storage = CacheStorage::new(10 * 1024);

        let key = make_test_key("file.rs", "rust");
        let summaries1 = vec![make_test_summary("fn1")];
        let summaries2 = vec![make_test_summary("fn2"), make_test_summary("fn3")];

        // Insert first value
        storage.insert(key.clone(), summaries1);

        // Update with new value
        storage.insert(key.clone(), summaries2.clone());

        // Should get the updated value
        let retrieved = storage.get(&key).unwrap();
        assert_eq!(retrieved.len(), 2);
        assert_eq!(retrieved[0].name.as_ref(), "fn2");
    }

    // clear() drops entries and resets the size/entry counters.
    #[test]
    fn test_storage_clear() {
        let storage = CacheStorage::new(10 * 1024);

        let key = make_test_key("file.rs", "rust");
        storage.insert(key.clone(), vec![make_test_summary("test")]);

        assert!(storage.get(&key).is_some());

        storage.clear();

        assert!(storage.get(&key).is_none());
        let stats = storage.stats();
        assert_eq!(stats.entry_count, 0);
        assert_eq!(stats.total_bytes, 0);
    }

    // Overfilling a tiny cache must trigger evictions and stay under cap.
    #[test]
    fn test_storage_eviction() {
        // Small cache: 100 bytes (postcard varint encoding is compact)
        let storage = CacheStorage::new(100);

        // Insert entries until eviction triggers
        for i in 0..10 {
            let key = make_test_key(&format!("file{i}.rs"), "rust");
            let summaries = vec![make_test_summary(&format!("fn{i}"))];
            storage.insert(key, summaries);
        }

        let stats = storage.stats();

        // Should have evicted entries
        assert!(stats.evictions > 0, "Expected evictions, got 0");
        assert!(
            stats.entry_count < 10,
            "Expected < 10 entries due to eviction"
        );
        assert!(
            stats.total_bytes <= 100,
            "Cache size should be under cap, got {}",
            stats.total_bytes
        );
    }

    // A recently-accessed key should outlive older untouched keys.
    #[test]
    fn test_storage_lru_order() {
        // Use very small cache to ensure eviction happens (postcard is compact)
        let storage = CacheStorage::new(80);

        // Insert three entries
        let key1 = make_test_key("file1.rs", "rust");
        let key2 = make_test_key("file2.rs", "rust");
        let key3 = make_test_key("file3.rs", "rust");

        storage.insert(key1.clone(), vec![make_test_summary("fn1")]);
        storage.insert(key2.clone(), vec![make_test_summary("fn2")]);
        storage.insert(key3.clone(), vec![make_test_summary("fn3")]);

        // Access key1 to make it most recently used
        storage.get(&key1);

        // Insert many more entries to force eviction
        for i in 4..20 {
            let key = make_test_key(&format!("file{i}.rs"), "rust");
            storage.insert(key, vec![make_test_summary(&format!("fn{i}"))]);
        }

        let stats = storage.stats();

        // At least one eviction should have happened
        assert!(
            stats.evictions > 0,
            "Expected evictions with small cache, got 0"
        );

        // key1 was accessed most recently, so has best chance to survive
        // This is probabilistic with small caches
        let key1_present = storage.get(&key1).is_some();
        let key2_present = storage.get(&key2).is_some();

        // If key2 survived but key1 didn't, that's an LRU violation
        assert!(
            key1_present || !key2_present,
            "LRU violation: older key2 survived but recently accessed key1 didn't"
        );
    }

    // Ten writer/reader threads on distinct keys: all must round-trip.
    #[test]
    fn test_concurrent_insert_and_get() {
        let storage = Arc::new(CacheStorage::new(10 * 1024));
        let mut handles = vec![];

        // Spawn 10 threads that insert and get concurrently
        for i in 0..10 {
            let storage = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                let key = make_test_key(&format!("file{i}.rs"), "rust");
                let summaries = vec![make_test_summary(&format!("fn{i}"))];

                storage.insert(key.clone(), summaries.clone());

                // Verify we can get it back
                let retrieved = storage.get(&key).expect("Should retrieve inserted value");
                assert_eq!(retrieved.len(), 1);
                assert_eq!(retrieved[0].name.as_ref(), &format!("fn{i}"));
            });
            handles.push(handle);
        }

        // Wait for all threads
        for handle in handles {
            handle.join().unwrap();
        }

        // All 10 entries should be present
        let stats = storage.stats();
        assert_eq!(stats.entry_count, 10);
        assert_eq!(stats.hits, 10); // One get per thread
    }

    // Concurrent inserts into a tiny cache: evictions happen, cap holds.
    #[test]
    fn test_concurrent_eviction() {
        // Very small cache to force evictions (postcard is compact)
        let storage = Arc::new(CacheStorage::new(100));
        let mut handles = vec![];

        // Spawn 20 threads that insert concurrently
        for i in 0..20 {
            let storage = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                let key = make_test_key(&format!("file{i}.rs"), "rust");
                let summaries = vec![make_test_summary(&format!("fn{i}"))];
                storage.insert(key, summaries);
            });
            handles.push(handle);
        }

        // Wait for all threads
        for handle in handles {
            handle.join().unwrap();
        }

        let stats = storage.stats();

        // Should have evicted some entries
        assert!(
            stats.evictions > 0,
            "Expected evictions with small cache and concurrent inserts"
        );

        // Should respect size cap
        assert!(
            stats.total_bytes <= 100,
            "Cache size should be under cap, got {}",
            stats.total_bytes
        );

        // Should have some entries remaining
        assert!(stats.entry_count > 0, "Should have entries after eviction");
        assert!(
            stats.entry_count < 20,
            "Should have evicted some of 20 entries"
        );
    }

    // hit_rate = hits / (hits + misses).
    #[test]
    fn test_cache_stats_hit_rate() {
        let stats = CacheStats {
            entry_count: 10,
            total_bytes: 1000,
            max_bytes: 2000,
            hits: 75,
            misses: 25,
            evictions: 0,
            policy: CachePolicyMetrics::default(),
        };

        assert_abs_diff_eq!(stats.hit_rate(), 0.75, epsilon = 1e-10);
    }

    // utilization = total_bytes / max_bytes.
    #[test]
    fn test_cache_stats_utilization() {
        let stats = CacheStats {
            entry_count: 10,
            total_bytes: 1000,
            max_bytes: 2000,
            hits: 0,
            misses: 0,
            evictions: 0,
            policy: CachePolicyMetrics::default(),
        };

        assert_abs_diff_eq!(stats.utilization(), 0.5, epsilon = 1e-10);
    }

    // Both ratios degrade gracefully to 0.0 with no activity.
    #[test]
    fn test_cache_stats_empty() {
        let stats = CacheStats {
            entry_count: 0,
            total_bytes: 0,
            max_bytes: 1000,
            hits: 0,
            misses: 0,
            evictions: 0,
            policy: CachePolicyMetrics::default(),
        };

        assert_abs_diff_eq!(stats.hit_rate(), 0.0, epsilon = 1e-10);
        assert_abs_diff_eq!(stats.utilization(), 0.0, epsilon = 1e-10);
    }

    // TinyLFU should reject at least some one-shot ("cold") inserts after
    // a key has accumulated enough hits in the frequency sketch.
    #[test]
    fn test_tiny_lfu_rejects_cold_workload() {
        let storage =
            CacheStorage::with_policy(&CachePolicyConfig::new(CachePolicyKind::TinyLfu, 1024, 0.2));

        let hot_key = make_test_key("hot.rs", "rust");
        storage.insert(hot_key.clone(), vec![make_test_summary("hot_fn")]);
        for _ in 0..8 {
            assert!(storage.get(&hot_key).is_some());
        }

        for i in 0..50 {
            let key = make_test_key(&format!("cold{i}.rs"), "rust");
            storage.insert(key.clone(), vec![make_test_summary(&format!("cold{i}"))]);
            let _ = storage.get(&key);
        }

        let stats = storage.stats();
        assert!(
            stats.policy.lfu_rejects > 0,
            "expected TinyLFU policy to reject some cold inserts"
        );
    }
}