mod entry;
mod state;
mod types;
pub use types::{AdaptiveCacheConfig, AdaptiveCacheMetrics};
use self::entry::AdaptiveCacheEntry;
use self::state::AdaptiveCacheState;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
use tokio::task::JoinHandle;
use tokio::time::{Duration as TokioDuration, interval};
use tracing::{debug, info};
use uuid::Uuid;
/// Async cache with adaptive per-entry TTLs: entry lifetimes are adjusted on
/// access via `AdaptiveCacheEntry::record_access` (see the `entry` module),
/// bounded by the configured min/max TTLs.
pub struct AdaptiveCache<V: Clone + Send + Sync + 'static> {
// Tuning knobs: TTL bounds, hot/cold thresholds, capacity, cleanup interval.
config: AdaptiveCacheConfig,
// Entry map + LRU queue + metrics, shared with the background cleanup task.
state: Arc<RwLock<AdaptiveCacheState<V>>>,
// Background cleanup task handle, if one was spawned; aborted on drop.
cleanup_task: Option<JoinHandle<()>>,
}
impl<V: Clone + Send + Sync + 'static> AdaptiveCache<V> {
    /// Creates a cache from `config` and, when background cleanup is enabled
    /// with a non-zero interval, spawns the periodic cleanup task.
    pub fn new(config: AdaptiveCacheConfig) -> Self {
        let state = Arc::new(RwLock::new(AdaptiveCacheState::<V>::new()));
        // Guard the interval: `tokio::time::interval` panics on a zero period.
        let cleanup_task = if config.enable_background_cleanup && config.cleanup_interval_secs > 0 {
            Some(Self::start_cleanup_task(
                Arc::clone(&state),
                config.cleanup_interval_secs,
                // Thread the configured thresholds into the task so it does
                // not need to hard-code them.
                config.hot_threshold,
                config.cold_threshold,
            ))
        } else {
            None
        };
        info!(
            "Initialized adaptive cache: default_ttl={}s, min_ttl={}s, max_ttl={}s, hot={}, cold={}",
            config.default_ttl.as_secs(),
            config.min_ttl.as_secs(),
            config.max_ttl.as_secs(),
            config.hot_threshold,
            config.cold_threshold
        );
        Self {
            config,
            state,
            cleanup_task,
        }
    }

    /// Records an access outcome for `id` and returns whether it counts as a
    /// hit.
    ///
    /// * `hit == true`: refreshes the entry's adaptive TTL and LRU position.
    ///   If the entry has meanwhile expired it is removed and the call is
    ///   counted (and returned) as a miss.
    /// * `hit == false`: counts a miss and, when `value` is provided, inserts
    ///   it, evicting the least-recently-used entry if the cache is full.
    pub async fn record_access(&self, id: Uuid, hit: bool, value: Option<V>) -> bool {
        let now = Instant::now();
        let mut state = self.state.write().await;
        if hit {
            if let Some(entry) = state.entries.get_mut(&id) {
                if entry.is_expired(now) {
                    // Lazy expiration: the caller saw a hit, but the TTL has
                    // elapsed — treat it as a miss and drop the entry.
                    debug!("Cache entry expired on access: {}", id);
                    state.metrics.base.expirations += 1;
                    state.metrics.base.misses += 1;
                    state.remove_entry(&id);
                    state.update_metrics(self.config.hot_threshold, self.config.cold_threshold);
                    return false;
                }
                entry.record_access(now, &self.config);
                // Move the id to the back of the queue (most recently used).
                state.lru_queue.retain(|&qid| qid != id);
                state.lru_queue.push_back(id);
                state.metrics.base.hits += 1;
                state.update_metrics(self.config.hot_threshold, self.config.cold_threshold);
                true
            } else {
                // Caller believed it was a hit but the entry is gone.
                state.metrics.base.misses += 1;
                state.update_metrics(self.config.hot_threshold, self.config.cold_threshold);
                false
            }
        } else {
            state.metrics.base.misses += 1;
            if let Some(v) = value {
                // Evict only when inserting a genuinely new id into a full
                // cache; replacing an existing id does not grow the map.
                if !state.entries.contains_key(&id) && state.entries.len() >= self.config.max_size {
                    if let Some(oldest_id) = state.lru_queue.pop_front() {
                        // Count the eviction only if the entry was actually
                        // present (the queue could hold a stale id).
                        if state.entries.remove(&oldest_id).is_some() {
                            state.metrics.base.evictions += 1;
                            debug!("Evicted LRU entry: {}", oldest_id);
                        }
                    }
                }
                let entry = AdaptiveCacheEntry::new(v, self.config.default_ttl);
                state.entries.insert(id, entry);
                // De-duplicate before pushing so re-inserting an existing id
                // never leaves the id twice in the LRU queue.
                state.lru_queue.retain(|&qid| qid != id);
                state.lru_queue.push_back(id);
            }
            state.update_metrics(self.config.hot_threshold, self.config.cold_threshold);
            false
        }
    }

    /// Returns a clone of the cached value for `id` without touching access
    /// statistics, TTL, or LRU order.
    ///
    /// NOTE(review): unlike `get_and_record`/`contains`, this does not check
    /// expiration and can return a value whose TTL has elapsed — confirm that
    /// this peek semantics is intentional.
    pub async fn get(&self, id: Uuid) -> Option<V> {
        let state = self.state.read().await;
        state.entries.get(&id).map(|entry| entry.value.clone())
    }

    /// Combined lookup: returns the value for `id` if present and not
    /// expired, recording a hit (with TTL/metrics refresh) or a miss.
    pub async fn get_and_record(&self, id: Uuid) -> Option<V> {
        let now = Instant::now();
        let mut state = self.state.write().await;
        if let Some(entry) = state.entries.get_mut(&id) {
            if !entry.is_expired(now) {
                let value = entry.value.clone();
                entry.record_access(now, &self.config);
                state.metrics.base.hits += 1;
                state.update_metrics(self.config.hot_threshold, self.config.cold_threshold);
                return Some(value);
            }
        }
        // Absent or expired: count a miss. Expired entries are left for the
        // cleanup paths to remove.
        state.metrics.base.misses += 1;
        state.update_metrics(self.config.hot_threshold, self.config.cold_threshold);
        None
    }

    /// Removes the entry for `id` (no-op if absent) and refreshes metrics.
    pub async fn remove(&self, id: Uuid) {
        let mut state = self.state.write().await;
        state.remove_entry(&id);
        state.update_metrics(self.config.hot_threshold, self.config.cold_threshold);
    }

    /// Returns `true` if `id` is cached and its TTL has not elapsed.
    pub async fn contains(&self, id: Uuid) -> bool {
        let now = Instant::now();
        let state = self.state.read().await;
        if let Some(entry) = state.entries.get(&id) {
            !entry.is_expired(now)
        } else {
            false
        }
    }

    /// Returns the access count for `id`, or `None` if it is not cached.
    pub async fn access_count(&self, id: Uuid) -> Option<usize> {
        let state = self.state.read().await;
        state.entries.get(&id).map(|entry| entry.access_count())
    }

    /// Returns the current (adaptive) TTL for `id`, or `None` if not cached.
    pub async fn ttl(&self, id: Uuid) -> Option<Duration> {
        let state = self.state.read().await;
        state.entries.get(&id).map(|entry| entry.ttl())
    }

    /// Returns a snapshot of the cache metrics.
    pub async fn get_metrics(&self) -> AdaptiveCacheMetrics {
        let state = self.state.read().await;
        state.metrics.clone()
    }

    /// Clears the cache (delegates to `AdaptiveCacheState::clear`).
    pub async fn clear(&self) {
        let mut state = self.state.write().await;
        state.clear();
    }

    /// Removes every expired entry immediately, returning how many were
    /// removed. Expiration counters and hot/cold metrics are updated.
    pub async fn cleanup_expired(&self) -> usize {
        let mut state = self.state.write().await;
        let count = Self::purge_expired(&mut state, Instant::now());
        state.update_metrics(self.config.hot_threshold, self.config.cold_threshold);
        if count > 0 {
            debug!("Cleaned up {} expired cache entries", count);
        }
        count
    }

    /// Number of entries currently classified as hot.
    pub async fn hot_count(&self) -> usize {
        let state = self.state.read().await;
        state.metrics.hot_item_count
    }

    /// Number of entries currently classified as cold.
    pub async fn cold_count(&self) -> usize {
        let state = self.state.read().await;
        state.metrics.cold_item_count
    }

    /// Number of entries currently stored (including not-yet-purged expired
    /// ones).
    pub async fn len(&self) -> usize {
        let state = self.state.read().await;
        state.entries.len()
    }

    /// Returns `true` when the cache holds no entries.
    pub async fn is_empty(&self) -> bool {
        self.len().await == 0
    }

    /// Removes all entries whose TTL has elapsed as of `now`, bumping the
    /// expiration counter for each, and returns the number removed.
    /// Shared by `cleanup_expired` and the background cleanup task so the
    /// expiry check cannot drift between the two (both defer to
    /// `AdaptiveCacheEntry::is_expired`, the same predicate used on reads).
    fn purge_expired(state: &mut AdaptiveCacheState<V>, now: Instant) -> usize {
        // Collect ids first: we cannot mutate the map while iterating it.
        let expired_ids: Vec<Uuid> = state
            .entries
            .iter()
            .filter(|(_, entry)| entry.is_expired(now))
            .map(|(id, _)| *id)
            .collect();
        let count = expired_ids.len();
        for id in expired_ids {
            state.remove_entry(&id);
            state.metrics.base.expirations += 1;
        }
        count
    }

    /// Spawns a task that purges expired entries every `interval_secs`
    /// seconds. `hot_threshold`/`cold_threshold` are the configured metric
    /// thresholds, passed in because the task has no access to `self.config`
    /// (previously they were hard-coded as 10/2 here, diverging from the
    /// configuration used everywhere else).
    fn start_cleanup_task(
        state: Arc<RwLock<AdaptiveCacheState<V>>>,
        interval_secs: u64,
        hot_threshold: usize,
        cold_threshold: usize,
    ) -> JoinHandle<()> {
        tokio::spawn(async move {
            let mut ticker = interval(TokioDuration::from_secs(interval_secs));
            loop {
                ticker.tick().await;
                let count = {
                    // Scope the write guard so it is released before logging.
                    let mut state_guard = state.write().await;
                    let count = Self::purge_expired(&mut state_guard, Instant::now());
                    state_guard.update_metrics(hot_threshold, cold_threshold);
                    count
                };
                if count > 0 {
                    debug!("Background cleanup removed {} expired entries", count);
                }
            }
        })
    }

    /// Stops the background cleanup task, if one is running. Idempotent.
    pub fn stop_cleanup(&mut self) {
        if let Some(task) = self.cleanup_task.take() {
            task.abort();
        }
    }
}
impl<V: Clone + Send + Sync + 'static> Drop for AdaptiveCache<V> {
    /// Aborts the background cleanup task (if any) so it cannot outlive the
    /// cache.
    fn drop(&mut self) {
        self.stop_cleanup();
    }
}
impl From<AdaptiveCacheConfig> for super::super::CacheConfig {
    /// Converts an adaptive cache configuration into the basic cache
    /// configuration, carrying over every shared setting.
    fn from(config: AdaptiveCacheConfig) -> Self {
        Self {
            // Propagate the configured capacity instead of a hard-coded 1000,
            // which silently ignored `config.max_size`.
            max_size: config.max_size,
            default_ttl_secs: config.default_ttl.as_secs(),
            cleanup_interval_secs: config.cleanup_interval_secs,
            enable_background_cleanup: config.enable_background_cleanup,
        }
    }
}