impl<K, V> AdaptiveCache<K, V>
where
    K: Clone + Eq + std::hash::Hash + Send + Sync + 'static,
    V: Clone + Send + Sync + 'static,
{
    /// Creates an adaptive cache with three empty tiers and zeroed statistics.
    ///
    /// `tier_stats` is pre-seeded with an entry for every `CacheTier` so later
    /// lookups by tier never miss.
    #[must_use]
    pub fn new(config: AdvancedCacheConfig) -> Self {
        let mut tier_stats = FxHashMap::default();
        tier_stats.insert(CacheTier::L1, TierStats::default());
        tier_stats.insert(CacheTier::L2, TierStats::default());
        tier_stats.insert(CacheTier::L3, TierStats::default());
        Self {
            config,
            l1_cache: Arc::new(RwLock::new(FxHashMap::default())),
            l2_cache: Arc::new(RwLock::new(FxHashMap::default())),
            l3_cache: Arc::new(RwLock::new(FxHashMap::default())),
            access_patterns: Arc::new(RwLock::new(FxHashMap::default())),
            stats: Arc::new(RwLock::new(AdaptiveCacheStats {
                tier_stats,
                ..Default::default()
            })),
            // NOTE(review): 0.8 is the value passed to CachePredictor::new;
            // presumably a confidence threshold -- confirm against the
            // predictor's documentation.
            predictor: Arc::new(super::advanced_strategies_predictor::CachePredictor::new(0.8)),
        }
    }

    /// Looks `key` up in L1, then L2, then L3.
    ///
    /// On an L2/L3 hit the entry may be promoted to a hotter tier based on its
    /// access pattern (L3 entries with frequency > 0.7 go to L1, > 0.3 to L2).
    /// Records per-tier hit latency, or a miss when no tier holds the key.
    /// Returns `None` on a miss.
    pub async fn get(&self, key: &K) -> Option<Arc<V>> {
        let start = Instant::now();
        if let Some(entry) = self.get_from_tier(key, CacheTier::L1) {
            self.record_hit(CacheTier::L1, start.elapsed());
            self.update_access_pattern(key);
            return Some(entry.value);
        }
        if let Some(entry) = self.get_from_tier(key, CacheTier::L2) {
            self.record_hit(CacheTier::L2, start.elapsed());
            if self.should_promote(&entry.pattern) {
                // Promotion is best-effort; a failure must not fail the read.
                let _ = self.promote_to_l1(key, &entry).await;
            }
            self.update_access_pattern(key);
            return Some(entry.value);
        }
        if let Some(entry) = self.get_from_tier(key, CacheTier::L3) {
            self.record_hit(CacheTier::L3, start.elapsed());
            if self.should_promote(&entry.pattern) {
                // Hotter entries skip straight to L1; warm ones stop at L2.
                if entry.pattern.frequency > 0.7 {
                    let _ = self.promote_to_l1(key, &entry).await;
                } else if entry.pattern.frequency > 0.3 {
                    let _ = self.promote_to_l2(key, &entry).await;
                }
            }
            self.update_access_pattern(key);
            return Some(entry.value);
        }
        self.record_miss();
        None
    }

    /// Inserts `value` under `key`, choosing the initial tier from the key's
    /// access history and the value's size, and stamping an expiration for
    /// that tier.
    ///
    /// # Errors
    /// Propagates any error from the tier-specific insert.
    pub async fn put(&self, key: K, value: V) -> Result<()> {
        let start = Instant::now();
        let value_arc = Arc::new(value);
        // NOTE(review): this is the shallow stack size of V only; heap data
        // owned by V (e.g. Vec/String contents) is not counted -- confirm
        // whether tier sizing should use a deep-size estimate instead.
        let size = std::mem::size_of::<V>();
        let tier = self.determine_initial_tier(&key, size);
        let entry = AdaptiveCacheEntry {
            value: value_arc,
            pattern: self.get_or_create_pattern(&key),
            size,
            tier,
            created_at: Utc::now(),
            expires_at: self.calculate_expiration(tier),
        };
        match tier {
            CacheTier::L1 => self.insert_l1(key, entry).await?,
            CacheTier::L2 => self.insert_l2(key, entry).await?,
            CacheTier::L3 => self.insert_l3(key, entry).await?,
        }
        self.record_insert_time(start.elapsed());
        Ok(())
    }

    /// Removes `key` from every tier (an entry may exist in more than one
    /// tier after promotion) and returns the first removed value, preferring
    /// the hottest tier.
    pub async fn remove(&self, key: &K) -> Option<Arc<V>> {
        let l1_removed = self.l1_cache.write().remove(key);
        let l2_removed = self.l2_cache.write().remove(key);
        let l3_removed = self.l3_cache.write().remove(key);
        l1_removed
            .or(l2_removed)
            .or(l3_removed)
            .map(|entry| entry.value)
    }

    /// Empties all tiers and access patterns and zeroes the per-tier
    /// hit/miss/eviction counters.
    ///
    /// # Errors
    /// Currently infallible; returns `Ok(())` for interface stability.
    pub async fn clear(&self) -> Result<()> {
        self.l1_cache.write().clear();
        self.l2_cache.write().clear();
        self.l3_cache.write().clear();
        self.access_patterns.write().clear();
        let mut stats = self.stats.write();
        for tier_stats in stats.tier_stats.values_mut() {
            tier_stats.hits.store(0, Ordering::Relaxed);
            tier_stats.misses.store(0, Ordering::Relaxed);
            tier_stats.evictions.store(0, Ordering::Relaxed);
        }
        Ok(())
    }

    /// Returns a statistics snapshot.
    ///
    /// TODO: build a real snapshot from `self.stats`. The previous
    /// implementation acquired the stats read lock, discarded the guard
    /// unused, and returned defaults; the pointless lock acquisition (pure
    /// writer contention) is removed here, while the default-valued return
    /// is kept unchanged for compatibility until a proper snapshot exists.
    #[must_use]
    pub fn get_stats(&self) -> AdaptiveCacheStats {
        AdaptiveCacheStats {
            tier_stats: FxHashMap::default(),
            ..Default::default()
        }
    }

    /// Warms the cache for `warm_keys`, counting keys the predictor produced
    /// a value for, and records the total warming time and count.
    ///
    /// # Errors
    /// Currently infallible; returns `Ok(())`-wrapped count for interface
    /// stability.
    pub async fn warm_cache(&self, warm_keys: Vec<K>) -> Result<usize> {
        let start = Instant::now();
        let mut warmed_count = 0;
        for key in warm_keys {
            if let Some(_predicted_value) = self.predictor.predict_value(&key) {
                warmed_count += 1;
            }
        }
        let warming_time = start.elapsed();
        // Single lock scope: the previous code acquired the write lock twice
        // back-to-back, leaving a window where time and count disagreed.
        {
            let mut stats = self.stats.write();
            stats.warming_stats.total_warming_time = warming_time;
            stats
                .warming_stats
                .files_warmed
                .store(warmed_count, Ordering::Relaxed);
        }
        info!(
            "Cache warming completed: {} entries in {:?}",
            warmed_count, warming_time
        );
        Ok(warmed_count)
    }

    /// Runs one round of background maintenance (expired-entry cleanup,
    /// layout optimization, global pattern refresh) and bumps the cleanup
    /// counter. No-op when background cleanup is disabled in the config.
    ///
    /// # Errors
    /// Propagates errors from cleanup or layout optimization.
    pub async fn background_maintenance(&self) -> Result<()> {
        if !self.config.performance_config.background_cleanup {
            return Ok(());
        }
        self.cleanup_expired_entries().await?;
        self.optimize_cache_layout().await?;
        self.update_global_patterns();
        self.stats
            .write()
            .performance
            .cleanup_operations
            .fetch_add(1, Ordering::Relaxed);
        Ok(())
    }
}