1use super::config::CacheConfig;
43use super::policy::{
44 CacheAdmission, CachePolicy, CachePolicyConfig, CachePolicyKind, CachePolicyMetrics,
45 build_cache_policy,
46};
47use crate::cache::{CacheKey, GraphNodeSummary};
48use dashmap::DashMap;
49use lru::LruCache;
50use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
51use std::sync::{Arc, Mutex};
52
53#[derive(Debug, Clone)]
57struct CacheEntry {
58 summaries: Arc<[GraphNodeSummary]>,
60
61 size_bytes: usize,
63}
64
65impl CacheEntry {
66 fn new(summaries: Vec<GraphNodeSummary>) -> Self {
68 let size_bytes = if summaries.is_empty() {
70 0
71 } else {
72 let sample_size = postcard::to_allocvec(&summaries[0])
73 .map(|bytes| bytes.len())
74 .unwrap_or(256); sample_size * summaries.len()
76 };
77
78 Self {
79 summaries: Arc::from(summaries.into_boxed_slice()),
80 size_bytes,
81 }
82 }
83}
84
/// Concurrent, byte-capped cache of graph-node summaries.
///
/// Entries live in a lock-free `DashMap`; a separate mutex-guarded `LruCache`
/// tracks recency order for fallback eviction, and a pluggable policy decides
/// admission/eviction. Counters are atomics updated with relaxed ordering, so
/// stats are best-effort snapshots under concurrency.
pub struct CacheStorage {
    // Primary key -> entry store.
    entries: DashMap<CacheKey, CacheEntry>,

    // Recency index only (values are unit); kept in sync with `entries`
    // best-effort under its own lock.
    lru: Mutex<LruCache<CacheKey, ()>>,

    // Soft byte cap enforced after each insert.
    max_bytes: u64,

    // Running estimate of bytes held across all entries.
    total_bytes: AtomicU64,

    // Hit/miss/eviction counters for `stats()`.
    hits: AtomicUsize,
    misses: AtomicUsize,
    evictions: AtomicUsize,

    // Admission/eviction policy (e.g. LRU or TinyLFU), shared and object-safe.
    policy: Arc<dyn CachePolicy<CacheKey>>,
}
142
143impl CacheStorage {
144 #[must_use]
158 pub fn new(max_bytes: u64) -> Self {
159 Self::with_policy(&CachePolicyConfig::new(
160 CachePolicyKind::Lru,
161 max_bytes,
162 CacheConfig::DEFAULT_POLICY_WINDOW_RATIO,
163 ))
164 }
165
166 #[must_use]
168 pub fn with_policy(config: &CachePolicyConfig) -> Self {
169 let policy = build_cache_policy::<CacheKey>(config);
170 Self {
171 entries: DashMap::new(),
172 lru: Mutex::new(LruCache::unbounded()),
173 max_bytes: config.max_bytes,
174 total_bytes: AtomicU64::new(0),
175 hits: AtomicUsize::new(0),
176 misses: AtomicUsize::new(0),
177 evictions: AtomicUsize::new(0),
178 policy,
179 }
180 }
181
182 fn handle_policy_evictions(&self) {
183 for eviction in self.policy.drain_evictions() {
184 let key = eviction.key;
185 if let Some((_, removed)) = self.entries.remove(&key) {
186 self.total_bytes
187 .fetch_sub(removed.size_bytes as u64, Ordering::Relaxed);
188 self.evictions.fetch_add(1, Ordering::Relaxed);
189 }
190
191 if let Ok(mut lru) = self.lru.lock() {
192 let _ = lru.pop(&key);
193 }
194 }
195 }
196
197 pub fn get(&self, key: &CacheKey) -> Option<Arc<[GraphNodeSummary]>> {
220 if let Some(entry) = self.entries.get(key) {
221 let _ = self.policy.record_hit(key);
222 if let Ok(mut lru) = self.lru.lock() {
224 lru.get_or_insert(key.clone(), || ());
225 }
226
227 self.hits.fetch_add(1, Ordering::Relaxed);
228 Some(Arc::clone(&entry.summaries))
229 } else {
230 self.misses.fetch_add(1, Ordering::Relaxed);
231 None
232 }
233 }
234
235 pub fn insert(&self, key: CacheKey, summaries: Vec<GraphNodeSummary>) {
250 let entry = CacheEntry::new(summaries);
251 let entry_size = entry.size_bytes as u64;
252 let key_for_lru = key.clone();
253
254 if let Some((_, old_entry)) = self.entries.remove(&key) {
256 self.total_bytes
257 .fetch_sub(old_entry.size_bytes as u64, Ordering::Relaxed);
258 self.policy.invalidate(&key);
259 }
260
261 if matches!(
262 self.policy.admit(&key, entry_size),
263 CacheAdmission::Rejected
264 ) {
265 log::debug!(
266 "cache policy {:?} rejected entry {:?} ({} bytes)",
267 self.policy.kind(),
268 &key,
269 entry_size
270 );
271 return;
272 }
273
274 self.entries.insert(key, entry);
276 self.total_bytes.fetch_add(entry_size, Ordering::Relaxed);
277
278 if let Ok(mut lru) = self.lru.lock() {
280 lru.put(key_for_lru, ());
281 }
282
283 self.handle_policy_evictions();
284
285 if self.total_bytes.load(Ordering::Relaxed) > self.max_bytes {
287 self.evict_lru();
288 }
289 }
290
291 fn evict_lru(&self) {
296 let Ok(mut lru) = self.lru.lock() else {
298 log::warn!("Failed to acquire LRU lock for eviction");
299 return;
300 };
301
302 let mut current_size = self.total_bytes.load(Ordering::Relaxed);
303
304 while current_size > self.max_bytes {
306 let Some((key, ())) = lru.pop_lru() else {
308 break; };
310
311 if let Some((_, removed)) = self.entries.remove(&key) {
313 current_size = current_size.saturating_sub(removed.size_bytes as u64);
314 self.total_bytes
315 .fetch_sub(removed.size_bytes as u64, Ordering::Relaxed);
316 self.evictions.fetch_add(1, Ordering::Relaxed);
317 self.policy.invalidate(&key);
318
319 log::debug!(
320 "Evicted cache entry: {} ({} bytes)",
321 key,
322 removed.size_bytes
323 );
324 }
325 }
326 }
327
328 pub fn clear(&self) {
336 self.entries.clear();
337 self.total_bytes.store(0, Ordering::Relaxed);
338 self.hits.store(0, Ordering::Relaxed);
339 self.misses.store(0, Ordering::Relaxed);
340 self.evictions.store(0, Ordering::Relaxed);
341 self.policy.reset();
342
343 if let Ok(mut lru) = self.lru.lock() {
344 lru.clear();
345 }
346
347 log::debug!("Cache cleared");
348 }
349
350 pub fn stats(&self) -> CacheStats {
359 CacheStats {
360 entry_count: self.entries.len(),
361 total_bytes: self.total_bytes.load(Ordering::Relaxed),
362 max_bytes: self.max_bytes,
363 hits: self.hits.load(Ordering::Relaxed),
364 misses: self.misses.load(Ordering::Relaxed),
365 evictions: self.evictions.load(Ordering::Relaxed),
366 policy: self.policy.stats(),
367 }
368 }
369}
370
/// Point-in-time snapshot of cache counters, produced by
/// `CacheStorage::stats`.
#[derive(Debug, Clone, Copy, Default)]
pub struct CacheStats {
    /// Number of entries currently stored.
    pub entry_count: usize,

    /// Estimated bytes held across all entries.
    pub total_bytes: u64,

    /// Configured byte cap.
    pub max_bytes: u64,

    /// Lookups that found an entry.
    pub hits: usize,

    /// Lookups that found nothing.
    pub misses: usize,

    /// Entries removed by policy or LRU eviction.
    pub evictions: usize,

    /// Metrics reported by the admission/eviction policy.
    pub policy: CachePolicyMetrics,
}
395
396impl CacheStats {
397 fn usize_to_f64(value: usize) -> f64 {
398 #[allow(clippy::cast_precision_loss)]
399 {
400 value as f64
401 }
402 }
403
404 fn u64_to_f64(value: u64) -> f64 {
405 #[allow(clippy::cast_precision_loss)]
406 {
407 value as f64
408 }
409 }
410
411 #[must_use]
415 pub fn hit_rate(&self) -> f64 {
416 let total = self.hits + self.misses;
417 if total == 0 {
418 0.0
419 } else {
420 Self::usize_to_f64(self.hits) / Self::usize_to_f64(total)
421 }
422 }
423
424 #[must_use]
428 pub fn utilization(&self) -> f64 {
429 if self.max_bytes == 0 {
430 0.0
431 } else {
432 Self::u64_to_f64(self.total_bytes) / Self::u64_to_f64(self.max_bytes)
433 }
434 }
435}
436
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cache::policy::{CachePolicyConfig, CachePolicyKind};
    use crate::graph::unified::node::NodeKind;
    use crate::hash::Blake3Hash;
    use approx::assert_abs_diff_eq;
    use std::path::{Path, PathBuf};
    use std::sync::Arc;
    use std::thread;

    // Builds a CacheKey for tests. NOTE(review): the hash is the first byte
    // of `name` repeated 32 times, so names sharing a first letter share a
    // hash — presumably CacheKey also keys on the path so entries stay
    // distinct; confirm against CacheKey::from_raw_path.
    fn make_test_key(name: &str, lang: &str) -> CacheKey {
        let hash = Blake3Hash::from_bytes([name.as_bytes()[0]; 32]);
        CacheKey::from_raw_path(PathBuf::from(name), lang, hash)
    }

    // Builds a minimal single-function summary named `name`.
    fn make_test_summary(name: &str) -> GraphNodeSummary {
        GraphNodeSummary::new(
            Arc::from(name),
            NodeKind::Function,
            Arc::from(Path::new("test.rs")),
            1,
            0,
            1,
            10,
        )
    }

    // A fresh store reports zeroed counters and the configured cap.
    #[test]
    fn test_storage_new() {
        let storage = CacheStorage::new(1024);
        let stats = storage.stats();

        assert_eq!(stats.entry_count, 0);
        assert_eq!(stats.total_bytes, 0);
        assert_eq!(stats.max_bytes, 1024);
        assert_eq!(stats.hits, 0);
        assert_eq!(stats.misses, 0);
    }

    // Round-trip: an inserted entry is retrievable and counted as one hit.
    #[test]
    fn test_storage_insert_and_get() {
        let storage = CacheStorage::new(10 * 1024);

        let key = make_test_key("file.rs", "rust");
        let summaries = vec![make_test_summary("test_fn")];

        storage.insert(key.clone(), summaries.clone());

        let retrieved = storage.get(&key).unwrap();
        assert_eq!(retrieved.len(), 1);
        assert_eq!(retrieved[0].name.as_ref(), "test_fn");

        let stats = storage.stats();
        assert_eq!(stats.hits, 1);
        assert_eq!(stats.misses, 0);
        assert_eq!(stats.entry_count, 1);
    }

    // A lookup of an absent key returns None and increments the miss counter.
    #[test]
    fn test_storage_miss() {
        let storage = CacheStorage::new(10 * 1024);

        let key = make_test_key("file.rs", "rust");

        assert!(storage.get(&key).is_none());

        let stats = storage.stats();
        assert_eq!(stats.hits, 0);
        assert_eq!(stats.misses, 1);
    }

    // Re-inserting the same key replaces the previous value entirely.
    #[test]
    fn test_storage_update() {
        let storage = CacheStorage::new(10 * 1024);

        let key = make_test_key("file.rs", "rust");
        let summaries1 = vec![make_test_summary("fn1")];
        let summaries2 = vec![make_test_summary("fn2"), make_test_summary("fn3")];

        storage.insert(key.clone(), summaries1);

        storage.insert(key.clone(), summaries2.clone());

        let retrieved = storage.get(&key).unwrap();
        assert_eq!(retrieved.len(), 2);
        assert_eq!(retrieved[0].name.as_ref(), "fn2");
    }

    // clear() drops entries and zeroes the byte/entry counters.
    #[test]
    fn test_storage_clear() {
        let storage = CacheStorage::new(10 * 1024);

        let key = make_test_key("file.rs", "rust");
        storage.insert(key.clone(), vec![make_test_summary("test")]);

        assert!(storage.get(&key).is_some());

        storage.clear();

        assert!(storage.get(&key).is_none());
        let stats = storage.stats();
        assert_eq!(stats.entry_count, 0);
        assert_eq!(stats.total_bytes, 0);
    }

    // With a tiny 100-byte cap, inserting 10 entries must trigger eviction
    // and keep the byte total at or under the cap.
    #[test]
    fn test_storage_eviction() {
        let storage = CacheStorage::new(100);

        for i in 0..10 {
            let key = make_test_key(&format!("file{i}.rs"), "rust");
            let summaries = vec![make_test_summary(&format!("fn{i}"))];
            storage.insert(key, summaries);
        }

        let stats = storage.stats();

        assert!(stats.evictions > 0, "Expected evictions, got 0");
        assert!(
            stats.entry_count < 10,
            "Expected < 10 entries due to eviction"
        );
        assert!(
            stats.total_bytes <= 100,
            "Cache size should be under cap, got {}",
            stats.total_bytes
        );
    }

    // A recently accessed key should not be evicted before a never-accessed
    // older key; asserted as the implication key2-present => key1-present.
    #[test]
    fn test_storage_lru_order() {
        let storage = CacheStorage::new(80);

        let key1 = make_test_key("file1.rs", "rust");
        let key2 = make_test_key("file2.rs", "rust");
        let key3 = make_test_key("file3.rs", "rust");

        storage.insert(key1.clone(), vec![make_test_summary("fn1")]);
        storage.insert(key2.clone(), vec![make_test_summary("fn2")]);
        storage.insert(key3.clone(), vec![make_test_summary("fn3")]);

        // Touch key1 so it becomes most-recently-used.
        storage.get(&key1);

        // Flood the cache to force evictions of the oldest keys.
        for i in 4..20 {
            let key = make_test_key(&format!("file{i}.rs"), "rust");
            storage.insert(key, vec![make_test_summary(&format!("fn{i}"))]);
        }

        let stats = storage.stats();

        assert!(
            stats.evictions > 0,
            "Expected evictions with small cache, got 0"
        );

        let key1_present = storage.get(&key1).is_some();
        let key2_present = storage.get(&key2).is_some();

        assert!(
            key1_present || !key2_present,
            "LRU violation: older key2 survived but recently accessed key1 didn't"
        );
    }

    // 10 threads each insert then read back their own key; with a large cap
    // no evictions occur, so exactly 10 entries and 10 hits remain.
    #[test]
    fn test_concurrent_insert_and_get() {
        let storage = Arc::new(CacheStorage::new(10 * 1024));
        let mut handles = vec![];

        for i in 0..10 {
            let storage = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                let key = make_test_key(&format!("file{i}.rs"), "rust");
                let summaries = vec![make_test_summary(&format!("fn{i}"))];

                storage.insert(key.clone(), summaries.clone());

                let retrieved = storage.get(&key).expect("Should retrieve inserted value");
                assert_eq!(retrieved.len(), 1);
                assert_eq!(retrieved[0].name.as_ref(), &format!("fn{i}"));
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let stats = storage.stats();
        assert_eq!(stats.entry_count, 10);
        assert_eq!(stats.hits, 10);
    }

    // 20 concurrent inserts against a 100-byte cap: evictions must fire and
    // the final byte total must respect the cap.
    #[test]
    fn test_concurrent_eviction() {
        let storage = Arc::new(CacheStorage::new(100));
        let mut handles = vec![];

        for i in 0..20 {
            let storage = Arc::clone(&storage);
            let handle = thread::spawn(move || {
                let key = make_test_key(&format!("file{i}.rs"), "rust");
                let summaries = vec![make_test_summary(&format!("fn{i}"))];
                storage.insert(key, summaries);
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let stats = storage.stats();

        assert!(
            stats.evictions > 0,
            "Expected evictions with small cache and concurrent inserts"
        );

        assert!(
            stats.total_bytes <= 100,
            "Cache size should be under cap, got {}",
            stats.total_bytes
        );

        assert!(stats.entry_count > 0, "Should have entries after eviction");
        assert!(
            stats.entry_count < 20,
            "Should have evicted some of 20 entries"
        );
    }

    // hit_rate is hits / (hits + misses): 75 / 100 = 0.75.
    #[test]
    fn test_cache_stats_hit_rate() {
        let stats = CacheStats {
            entry_count: 10,
            total_bytes: 1000,
            max_bytes: 2000,
            hits: 75,
            misses: 25,
            evictions: 0,
            policy: CachePolicyMetrics::default(),
        };

        assert_abs_diff_eq!(stats.hit_rate(), 0.75, epsilon = 1e-10);
    }

    // utilization is total_bytes / max_bytes: 1000 / 2000 = 0.5.
    #[test]
    fn test_cache_stats_utilization() {
        let stats = CacheStats {
            entry_count: 10,
            total_bytes: 1000,
            max_bytes: 2000,
            hits: 0,
            misses: 0,
            evictions: 0,
            policy: CachePolicyMetrics::default(),
        };

        assert_abs_diff_eq!(stats.utilization(), 0.5, epsilon = 1e-10);
    }

    // Zero lookups and zero usage yield 0.0 ratios, not NaN or a panic.
    #[test]
    fn test_cache_stats_empty() {
        let stats = CacheStats {
            entry_count: 0,
            total_bytes: 0,
            max_bytes: 1000,
            hits: 0,
            misses: 0,
            evictions: 0,
            policy: CachePolicyMetrics::default(),
        };

        assert_abs_diff_eq!(stats.hit_rate(), 0.0, epsilon = 1e-10);
        assert_abs_diff_eq!(stats.utilization(), 0.0, epsilon = 1e-10);
    }

    // Warm one hot key, then stream 50 cold one-shot keys: the TinyLFU
    // policy is expected to reject at least some of the cold inserts.
    #[test]
    fn test_tiny_lfu_rejects_cold_workload() {
        let storage =
            CacheStorage::with_policy(&CachePolicyConfig::new(CachePolicyKind::TinyLfu, 1024, 0.2));

        let hot_key = make_test_key("hot.rs", "rust");
        storage.insert(hot_key.clone(), vec![make_test_summary("hot_fn")]);
        for _ in 0..8 {
            assert!(storage.get(&hot_key).is_some());
        }

        for i in 0..50 {
            let key = make_test_key(&format!("cold{i}.rs"), "rust");
            storage.insert(key.clone(), vec![make_test_summary(&format!("cold{i}"))]);
            let _ = storage.get(&key);
        }

        let stats = storage.stats();
        assert!(
            stats.policy.lfu_rejects > 0,
            "expected TinyLFU policy to reject some cold inserts"
        );
    }
}