use super::key::CacheKey;
use dashmap::DashMap;
use std::sync::{
atomic::{AtomicU64, Ordering},
Arc, Mutex,
};
/// A fully rendered audio clip held in the cache, together with the
/// bookkeeping the cache needs for sizing and recency tracking.
#[derive(Debug)]
pub struct CachedSample {
    pub samples: Arc<Vec<f32>>,
    pub sample_rate: f32,
    pub duration: f32,
    pub reference_frequency: f32,
    // Payload size, computed once at construction (len * 4 bytes).
    size_bytes: usize,
    // Logical-clock stamp of the most recent access; written by the cache.
    last_access: AtomicU64,
}

impl Clone for CachedSample {
    /// Hand-written `Clone`: `AtomicU64` is not `Clone`, so the access
    /// stamp is snapshotted and re-wrapped in a fresh atomic. The PCM
    /// payload is shared via `Arc`, so cloning is cheap.
    fn clone(&self) -> Self {
        let stamp = self.last_access.load(Ordering::Relaxed);
        CachedSample {
            samples: Arc::clone(&self.samples),
            sample_rate: self.sample_rate,
            duration: self.duration,
            reference_frequency: self.reference_frequency,
            size_bytes: self.size_bytes,
            last_access: AtomicU64::new(stamp),
        }
    }
}

impl CachedSample {
    /// Wraps rendered PCM data. The byte size is computed once here; the
    /// access stamp starts at zero and is overwritten by the cache on insert.
    pub fn new(samples: Vec<f32>, sample_rate: f32, duration: f32, reference_frequency: f32) -> Self {
        let byte_len = std::mem::size_of::<f32>() * samples.len();
        CachedSample {
            samples: Arc::new(samples),
            sample_rate,
            duration,
            reference_frequency,
            size_bytes: byte_len,
            last_access: AtomicU64::new(0),
        }
    }

    /// Payload size in bytes.
    pub fn size_bytes(&self) -> usize {
        self.size_bytes
    }

    /// Payload size in mebibytes.
    pub fn size_mb(&self) -> f32 {
        self.size_bytes as f32 / (1024.0 * 1024.0)
    }
}
/// Tunable limits governing what the cache admits and how large it may grow.
#[derive(Debug, Clone)]
pub struct CachePolicy {
    pub max_size_mb: usize,
    pub min_cache_duration_ms: f32,
}

impl Default for CachePolicy {
    /// Defaults: 500 MB byte budget; samples shorter than 100 ms are
    /// considered too cheap to cache.
    fn default() -> Self {
        CachePolicy {
            max_size_mb: 500,
            min_cache_duration_ms: 100.0,
        }
    }
}
/// Thread-safe, approximately-LRU cache of rendered audio samples.
///
/// Lookups go through a concurrent `DashMap`; the running byte total lives
/// in a separate `Mutex`-guarded struct because the map cannot maintain a
/// cross-entry aggregate atomically. Recency is tracked with a monotonically
/// increasing generation counter stamped onto entries on access.
#[derive(Debug)]
pub struct SampleCache {
    // Admission/size limits; set via the `with_*` builders, then fixed.
    policy: CachePolicy,
    // Concurrent key -> sample map.
    cache: DashMap<CacheKey, CachedSample>,
    // Monotonic logical clock; each access takes a fresh value to stamp
    // `CachedSample::last_access` for eviction ordering.
    generation: AtomicU64,
    // Aggregate bookkeeping updated alongside map mutations.
    metadata: Mutex<CacheMetadata>,
    // Hit/miss/eviction/insertion counters (atomic, lock-free).
    stats: CacheStats,
}

/// Aggregate state guarded by `SampleCache::metadata`.
#[derive(Debug)]
struct CacheMetadata {
    // Sum of `size_bytes` over all currently cached entries.
    total_size_bytes: usize,
}
/// Monotonic cache counters, safe to bump from any thread.
#[derive(Debug, Default)]
pub struct CacheStats {
    pub hits: AtomicU64,
    pub misses: AtomicU64,
    pub evictions: AtomicU64,
    pub insertions: AtomicU64,
}

impl CacheStats {
    /// Fraction of lookups that hit, in `[0.0, 1.0]`.
    /// Returns `0.0` when no lookups have been recorded yet.
    pub fn hit_rate(&self) -> f32 {
        let hit_count = self.hits.load(Ordering::Relaxed);
        let total = hit_count + self.misses.load(Ordering::Relaxed);
        match total {
            0 => 0.0,
            _ => hit_count as f32 / total as f32,
        }
    }

    /// Copies the counters into a plain (non-atomic) value. The four loads
    /// are independent, so the snapshot is not a single atomic cut.
    pub fn snapshot(&self) -> CacheStatsSnapshot {
        let [hits, misses, evictions, insertions] =
            [&self.hits, &self.misses, &self.evictions, &self.insertions]
                .map(|counter| counter.load(Ordering::Relaxed));
        CacheStatsSnapshot {
            hits,
            misses,
            evictions,
            insertions,
        }
    }
}

/// Plain-data copy of `CacheStats` taken at a point in time.
#[derive(Debug, Clone)]
pub struct CacheStatsSnapshot {
    pub hits: u64,
    pub misses: u64,
    pub evictions: u64,
    pub insertions: u64,
}
impl SampleCache {
    /// Creates an empty cache with the default `CachePolicy`
    /// (500 MB cap, 100 ms minimum duration).
    pub fn new() -> Self {
        Self {
            policy: CachePolicy::default(),
            cache: DashMap::new(),
            // Starts at 1; every entry is re-stamped with a fresh value on
            // insert and on each hit, so the initial 0 in a new
            // `CachedSample` never survives inside the map.
            generation: AtomicU64::new(1),
            metadata: Mutex::new(CacheMetadata {
                total_size_bytes: 0,
            }),
            stats: CacheStats::default(),
        }
    }

    /// Builder-style override of the byte budget (in MB). Does not evict
    /// retroactively if the cache already exceeds the new limit.
    pub fn with_max_size_mb(mut self, max_mb: usize) -> Self {
        self.policy.max_size_mb = max_mb;
        self
    }

    /// Builder-style override of the minimum duration (milliseconds) a
    /// sample must have to be admitted by `insert`.
    pub fn with_min_duration_ms(mut self, min_ms: f32) -> Self {
        self.policy.min_cache_duration_ms = min_ms;
        self
    }

    /// Looks up a sample, recording a hit or miss and refreshing the
    /// entry's LRU stamp. Returns a clone of the entry (cheap: the PCM
    /// payload is `Arc`-shared, not copied).
    pub fn get(&self, key: &CacheKey) -> Option<CachedSample> {
        if let Some(entry) = self.cache.get(key) {
            self.stats.hits.fetch_add(1, Ordering::Relaxed);
            // Stamp the entry with a fresh generation so `evict_oldest`
            // treats it as recently used.
            let gen = self.generation.fetch_add(1, Ordering::Relaxed);
            entry.last_access.store(gen, Ordering::Relaxed);
            Some(entry.clone())
        } else {
            self.stats.misses.fetch_add(1, Ordering::Relaxed);
            None
        }
    }

    /// Inserts a sample, evicting least-recently-used entries first when
    /// the byte budget would be exceeded. Samples shorter than the policy
    /// minimum are silently dropped (and not counted as insertions).
    pub fn insert(&self, key: CacheKey, sample: CachedSample) {
        // `duration` is in seconds; the policy threshold is in milliseconds.
        if sample.duration < self.policy.min_cache_duration_ms / 1000.0 {
            return; }
        let sample_size = sample.size_bytes();
        let max_bytes = self.policy.max_size_mb * 1024 * 1024;
        // Stamp now so the new entry is the most recently used.
        let gen = self.generation.fetch_add(1, Ordering::Relaxed);
        sample.last_access.store(gen, Ordering::Relaxed);
        // Check under the metadata lock, but run eviction outside it:
        // `evict_oldest` (and `size_bytes`) take the same lock, so holding
        // it across the loop would self-deadlock.
        let needs_eviction = {
            let metadata = self.metadata.lock().unwrap();
            metadata.total_size_bytes + sample_size > max_bytes
        };
        if needs_eviction {
            // NOTE(review): between the check above and each loop pass,
            // other threads may insert or evict concurrently; the loop
            // re-reads the size each iteration, so the budget is enforced
            // best-effort rather than strictly. An entry larger than the
            // whole budget will still be inserted after draining the cache.
            while self.size_bytes() + sample_size > max_bytes && !self.cache.is_empty() {
                self.evict_oldest();
            }
        }
        if let Some(old_sample) = self.cache.insert(key, sample) {
            // Replacing an existing entry under the same key: swap the old
            // size out of the running total and the new size in.
            let mut metadata = self.metadata.lock().unwrap();
            metadata.total_size_bytes = metadata.total_size_bytes
                .saturating_sub(old_sample.size_bytes())
                .saturating_add(sample_size);
        } else {
            let mut metadata = self.metadata.lock().unwrap();
            metadata.total_size_bytes += sample_size;
        }
        self.stats.insertions.fetch_add(1, Ordering::Relaxed);
    }

    /// Removes the entry with the smallest (oldest) access stamp.
    /// O(n) scan over the map — acceptable while entry counts stay small.
    fn evict_oldest(&self) {
        // The iterator guard is released before `remove` is called (the
        // chain ends by copying out the key), avoiding a DashMap deadlock.
        let oldest = self.cache.iter()
            .min_by_key(|entry| entry.last_access.load(Ordering::Relaxed))
            .map(|entry| *entry.key());
        if let Some(key) = oldest {
            // The entry may have been removed concurrently, hence the inner check.
            if let Some((_, sample)) = self.cache.remove(&key) {
                let mut metadata = self.metadata.lock().unwrap();
                metadata.total_size_bytes = metadata.total_size_bytes.saturating_sub(sample.size_bytes());
                self.stats.evictions.fetch_add(1, Ordering::Relaxed);
            }
        }
    }

    /// Drops all entries and resets the byte total. Statistics are retained.
    pub fn clear(&self) {
        self.cache.clear();
        let mut metadata = self.metadata.lock().unwrap();
        metadata.total_size_bytes = 0;
    }

    /// Current total payload size in bytes (tracked, not recomputed).
    pub fn size_bytes(&self) -> usize {
        self.metadata.lock().unwrap().total_size_bytes
    }

    /// Current total payload size in mebibytes.
    pub fn size_mb(&self) -> f32 {
        self.size_bytes() as f32 / (1024.0 * 1024.0)
    }

    /// Number of entries currently cached.
    pub fn entry_count(&self) -> usize {
        self.cache.len()
    }

    /// Borrow of the live (atomic) statistics counters.
    pub fn stats(&self) -> &CacheStats {
        &self.stats
    }

    /// Prints a human-readable summary of cache state and statistics to stdout.
    pub fn print_stats(&self) {
        let stats = self.stats.snapshot();
        println!("\n📊 Sample Cache Statistics:");
        println!(" Entries: {}", self.entry_count());
        println!(" Size: {:.2} MB / {} MB", self.size_mb(), self.policy.max_size_mb);
        println!(" Hits: {}", stats.hits);
        println!(" Misses: {}", stats.misses);
        println!(" Hit rate: {:.1}%", self.stats.hit_rate() * 100.0);
        println!(" Evictions: {}", stats.evictions);
        println!(" Insertions: {}", stats.insertions);
    }
}
impl Default for SampleCache {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a silent clip of the requested length for cache tests.
    fn make_test_sample(duration_sec: f32, sample_rate: f32) -> CachedSample {
        let len = (duration_sec * sample_rate) as usize;
        CachedSample::new(vec![0.0f32; len], sample_rate, duration_sec, 261.63)
    }

    #[test]
    fn test_cache_insert_and_get() {
        let cache = SampleCache::new();
        let key = CacheKey::new(12345);
        cache.insert(key, make_test_sample(1.0, 44100.0));
        assert!(cache.get(&key).is_some());
        assert_eq!(cache.entry_count(), 1);
        let stats = cache.stats().snapshot();
        assert_eq!(stats.hits, 1);
        assert_eq!(stats.misses, 0);
    }

    #[test]
    fn test_cache_miss() {
        let cache = SampleCache::new();
        assert!(cache.get(&CacheKey::new(12345)).is_none());
        let stats = cache.stats().snapshot();
        assert_eq!(stats.hits, 0);
        assert_eq!(stats.misses, 1);
    }

    #[test]
    fn test_lru_eviction() {
        let cache = SampleCache::new().with_max_size_mb(1);
        // Five 1-second samples (~0.17 MB each) fit under the 1 MB budget.
        for id in 1..=5 {
            cache.insert(CacheKey::new(id), make_test_sample(1.0, 44100.0));
        }
        assert!(cache.size_mb() < 1.0);
        // The sixth pushes past the budget and must evict the oldest (key 1).
        cache.insert(CacheKey::new(6), make_test_sample(1.0, 44100.0));
        assert!(cache.get(&CacheKey::new(1)).is_none());
        assert!(cache.get(&CacheKey::new(6)).is_some());
        let stats = cache.stats().snapshot();
        assert!(stats.evictions > 0);
    }

    #[test]
    fn test_min_duration_filter() {
        let cache = SampleCache::new().with_min_duration_ms(100.0);
        // 50 ms is below the 100 ms admission threshold: silently dropped.
        cache.insert(CacheKey::new(1), make_test_sample(0.05, 44100.0));
        assert_eq!(cache.entry_count(), 0);
        let stats = cache.stats().snapshot();
        assert_eq!(stats.insertions, 0);
    }

    #[test]
    fn test_clear() {
        let cache = SampleCache::new();
        cache.insert(CacheKey::new(1), make_test_sample(1.0, 44100.0));
        cache.insert(CacheKey::new(2), make_test_sample(1.0, 44100.0));
        assert_eq!(cache.entry_count(), 2);
        cache.clear();
        assert_eq!(cache.entry_count(), 0);
        assert_eq!(cache.size_bytes(), 0);
    }
}