use crate::CacheStats;
use crate::multi_tier::CacheKey;
use std::collections::VecDeque;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Shared, bounded log of (key, access-time) pairs used for pattern detection.
type AccessHistory = Arc<RwLock<VecDeque<(CacheKey, chrono::DateTime<chrono::Utc>)>>>;
/// Shared, bounded log of timestamped cache-statistics snapshots.
type StatsHistory = Arc<RwLock<VecDeque<(chrono::DateTime<chrono::Utc>, CacheStats)>>>;
/// A single timestamped sample in a metric time series.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct TimeSeriesPoint {
/// When the sample was taken (UTC).
pub timestamp: chrono::DateTime<chrono::Utc>,
/// Sampled value; units depend on the metric (e.g. percent for hit rate,
/// bytes for cache size).
pub value: f64,
}
/// Rolling time-series metrics derived from cache statistics snapshots.
#[derive(Debug, Clone, Default)]
pub struct CacheMetrics {
/// Hit-rate samples, in percent (0-100).
pub hit_rate: Vec<TimeSeriesPoint>,
/// Miss-rate samples, in percent (computed as 100 - hit rate).
pub miss_rate: Vec<TimeSeriesPoint>,
/// Eviction-rate samples. NOTE(review): not populated by `record_stats`
/// in this file — presumably fed by an external caller via `add_point`.
pub eviction_rate: Vec<TimeSeriesPoint>,
/// Cache size samples, in bytes.
pub cache_size: Vec<TimeSeriesPoint>,
/// Average-latency samples. NOTE(review): not populated by `record_stats`
/// in this file — units defined by whoever calls `add_point`.
pub avg_latency: Vec<TimeSeriesPoint>,
}
impl CacheMetrics {
    /// Creates an empty set of metric series.
    pub fn new() -> Self {
        Self::default()
    }

    /// Appends a now-timestamped sample to the series selected by
    /// `metric_type`.
    pub fn add_point(&mut self, metric_type: MetricType, value: f64) {
        let point = TimeSeriesPoint {
            timestamp: chrono::Utc::now(),
            value,
        };
        match metric_type {
            MetricType::HitRate => self.hit_rate.push(point),
            MetricType::MissRate => self.miss_rate.push(point),
            MetricType::EvictionRate => self.eviction_rate.push(point),
            MetricType::CacheSize => self.cache_size.push(point),
            MetricType::AvgLatency => self.avg_latency.push(point),
        }
    }

    /// Drops the oldest points so every series holds at most `keep_last`
    /// entries.
    pub fn trim(&mut self, keep_last: usize) {
        // One loop over all series replaces five copy-pasted drain blocks.
        for series in [
            &mut self.hit_rate,
            &mut self.miss_rate,
            &mut self.eviction_rate,
            &mut self.cache_size,
            &mut self.avg_latency,
        ] {
            if series.len() > keep_last {
                let excess = series.len() - keep_last;
                series.drain(0..excess);
            }
        }
    }
}
/// Selects which series in `CacheMetrics` a data point is appended to.
#[derive(Debug, Clone, Copy)]
pub enum MetricType {
HitRate,
MissRate,
EvictionRate,
CacheSize,
AvgLatency,
}
/// An access pattern detected from the recorded key-access history.
#[derive(Debug, Clone)]
pub struct AccessPattern {
/// The kind of pattern that was detected.
pub pattern_type: PatternType,
/// Detector confidence, in the range 0.0..=1.0.
pub confidence: f64,
/// Human-readable explanation of the pattern.
pub description: String,
}
/// Categories of cache access patterns the analyzer can report.
/// Only `Sequential` and `TemporalLocality` are currently produced by
/// `analyze_patterns`; the others are reserved for future detectors.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PatternType {
Sequential,
Random,
TemporalLocality,
SpatialLocality,
Periodic,
}
impl std::fmt::Display for PatternType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
PatternType::Sequential => write!(f, "Sequential"),
PatternType::Random => write!(f, "Random"),
PatternType::TemporalLocality => write!(f, "Temporal Locality"),
PatternType::SpatialLocality => write!(f, "Spatial Locality"),
PatternType::Periodic => write!(f, "Periodic"),
}
}
}
/// A tuning suggestion produced by analyzing recent cache statistics.
#[derive(Debug, Clone)]
pub struct CacheRecommendation {
/// What kind of change is being suggested.
pub recommendation_type: RecommendationType,
/// Heuristic estimate of the expected improvement (fixed per
/// recommendation kind in `generate_recommendations`).
pub expected_improvement: f64,
/// Human-readable justification for the suggestion.
pub rationale: String,
}
/// Kinds of configuration changes the analyzer can recommend.
/// Only `IncreaseSize` and `ChangeEvictionPolicy` are currently produced
/// by `generate_recommendations`; the others are reserved.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RecommendationType {
IncreaseSize,
DecreaseSize,
ChangeEvictionPolicy,
EnablePrefetching,
AdjustCompression,
EnableDistributed,
}
impl std::fmt::Display for RecommendationType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RecommendationType::IncreaseSize => write!(f, "Increase Cache Size"),
RecommendationType::DecreaseSize => write!(f, "Decrease Cache Size"),
RecommendationType::ChangeEvictionPolicy => write!(f, "Change Eviction Policy"),
RecommendationType::EnablePrefetching => write!(f, "Enable Prefetching"),
RecommendationType::AdjustCompression => write!(f, "Adjust Compression"),
RecommendationType::EnableDistributed => write!(f, "Enable Distributed Caching"),
}
}
}
/// An unusual event detected in recent cache statistics.
#[derive(Debug, Clone)]
pub struct Anomaly {
/// What kind of anomaly was detected.
pub anomaly_type: AnomalyType,
/// Relative severity: for hit-rate drops, the fractional drop below
/// baseline; for eviction spikes, a fixed heuristic value (0.8).
pub severity: f64,
/// When the offending stats snapshot was recorded.
pub timestamp: chrono::DateTime<chrono::Utc>,
/// Human-readable description of the anomaly.
pub description: String,
}
/// Categories of anomalies the detector can report.
/// Only `HitRateDrop` and `EvictionSpike` are currently produced by
/// `detect_anomalies`; the others are reserved.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AnomalyType {
HitRateDrop,
EvictionSpike,
LatencySpike,
Thrashing,
}
impl std::fmt::Display for AnomalyType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
AnomalyType::HitRateDrop => write!(f, "Hit Rate Drop"),
AnomalyType::EvictionSpike => write!(f, "Eviction Spike"),
AnomalyType::LatencySpike => write!(f, "Latency Spike"),
AnomalyType::Thrashing => write!(f, "Cache Thrashing"),
}
}
}
/// Collects cache access and statistics history and derives patterns,
/// recommendations, and anomalies from it. All state is behind async
/// locks, so a single instance can be shared across tasks.
pub struct CacheAnalytics {
/// Accumulated metric time series.
metrics: Arc<RwLock<CacheMetrics>>,
/// Bounded FIFO of (key, timestamp) accesses.
access_history: AccessHistory,
/// Maximum number of entries kept in `access_history`.
max_history: usize,
/// Bounded FIFO of timestamped stats snapshots.
stats_history: StatsHistory,
/// Maximum number of entries kept in `stats_history`.
max_stats_history: usize,
}
impl CacheAnalytics {
    /// Creates a collector with default bounds: the most recent 10 000
    /// access records and 1 000 stats snapshots are retained.
    pub fn new() -> Self {
        Self {
            metrics: Arc::new(RwLock::new(CacheMetrics::new())),
            access_history: Arc::new(RwLock::new(VecDeque::new())),
            max_history: 10000,
            stats_history: Arc::new(RwLock::new(VecDeque::new())),
            max_stats_history: 1000,
        }
    }

    /// Appends a key access, timestamped now, evicting the oldest entry
    /// once `max_history` is reached (bounded FIFO).
    pub async fn record_access(&self, key: CacheKey) {
        let mut history = self.access_history.write().await;
        if history.len() >= self.max_history {
            history.pop_front();
        }
        history.push_back((key, chrono::Utc::now()));
    }

    /// Stores a stats snapshot and folds it into the metric time series
    /// (hit rate, miss rate, cache size). Each series is trimmed to the
    /// most recent 1 000 points.
    pub async fn record_stats(&self, stats: CacheStats) {
        // Capture the derived Copy values up front so `stats` can be moved
        // into the history without cloning the whole struct.
        let hit_rate = stats.hit_rate();
        let bytes_stored = stats.bytes_stored as f64;
        {
            let mut history = self.stats_history.write().await;
            if history.len() >= self.max_stats_history {
                history.pop_front();
            }
            history.push_back((chrono::Utc::now(), stats));
        } // release the history lock before taking the metrics lock
        let mut metrics = self.metrics.write().await;
        metrics.add_point(MetricType::HitRate, hit_rate);
        metrics.add_point(MetricType::MissRate, 100.0 - hit_rate);
        metrics.add_point(MetricType::CacheSize, bytes_stored);
        metrics.trim(1000);
    }

    /// Analyzes the recorded access history and returns every pattern whose
    /// detector confidence exceeds 0.5. Returns an empty list when fewer
    /// than 10 accesses have been recorded.
    pub async fn analyze_patterns(&self) -> Vec<AccessPattern> {
        let history = self.access_history.read().await;
        let mut patterns = Vec::new();
        if history.len() < 10 {
            return patterns;
        }
        let sequential_confidence = self.detect_sequential(&history);
        if sequential_confidence > 0.5 {
            patterns.push(AccessPattern {
                pattern_type: PatternType::Sequential,
                confidence: sequential_confidence,
                description: "Keys are accessed in sequential order".to_string(),
            });
        }
        let temporal_confidence = self.detect_temporal_locality(&history);
        if temporal_confidence > 0.5 {
            patterns.push(AccessPattern {
                pattern_type: PatternType::TemporalLocality,
                confidence: temporal_confidence,
                description: "Recently accessed keys are frequently reaccessed".to_string(),
            });
        }
        patterns
    }

    /// Fraction (0.0..=1.0) of consecutive access pairs whose keys are
    /// strictly increasing; values near 1.0 indicate a sequential scan.
    fn detect_sequential(
        &self,
        history: &VecDeque<(CacheKey, chrono::DateTime<chrono::Utc>)>,
    ) -> f64 {
        // Walk adjacent pairs directly instead of collecting the whole
        // deque into a Vec just to call `windows(2)` (avoids an O(n)
        // allocation + copy per call).
        let mut sequential_count = 0usize;
        let mut total_comparisons = 0usize;
        let mut iter = history.iter();
        if let Some(mut prev) = iter.next() {
            for current in iter {
                total_comparisons += 1;
                if prev.0 < current.0 {
                    sequential_count += 1;
                }
                prev = current;
            }
        }
        if total_comparisons > 0 {
            sequential_count as f64 / total_comparisons as f64
        } else {
            0.0
        }
    }

    /// Fraction (0.0..=1.0) of accesses past the first `window_size` whose
    /// key was also accessed within the previous 10 entries and 60 seconds.
    fn detect_temporal_locality(
        &self,
        history: &VecDeque<(CacheKey, chrono::DateTime<chrono::Utc>)>,
    ) -> f64 {
        let window_size = 10;
        let time_threshold = chrono::Duration::seconds(60);
        let mut reaccess_count = 0;
        let mut total_count = 0;
        for i in window_size..history.len() {
            total_count += 1;
            let (key, ts) = &history[i];
            // Look back over the preceding window for a recent hit on the
            // same key; one match is enough. (`i - window_size` cannot
            // underflow: the loop starts at `window_size`.)
            for (prev_key, prev_ts) in history.range(i - window_size..i) {
                if key == prev_key && (*ts - *prev_ts) < time_threshold {
                    reaccess_count += 1;
                    break;
                }
            }
        }
        if total_count > 0 {
            reaccess_count as f64 / total_count as f64
        } else {
            0.0
        }
    }

    /// Suggests configuration changes based on the 10 most recent stats
    /// snapshots. Returns an empty list when fewer than 10 snapshots exist.
    pub async fn generate_recommendations(&self) -> Vec<CacheRecommendation> {
        let stats_history = self.stats_history.read().await;
        let mut recommendations = Vec::new();
        if stats_history.len() < 10 {
            return recommendations;
        }
        let recent_stats: Vec<_> = stats_history
            .iter()
            .rev()
            .take(10)
            .map(|(_, s)| s)
            .collect();
        let avg_hit_rate: f64 =
            recent_stats.iter().map(|s| s.hit_rate()).sum::<f64>() / recent_stats.len() as f64;
        // A sustained hit rate under 50% suggests the working set does not
        // fit in the cache.
        if avg_hit_rate < 50.0 {
            recommendations.push(CacheRecommendation {
                recommendation_type: RecommendationType::IncreaseSize,
                expected_improvement: 20.0,
                rationale: format!(
                    "Hit rate is low ({:.1}%). Increasing cache size may improve performance.",
                    avg_hit_rate
                ),
            });
        }
        let avg_evictions: f64 = recent_stats.iter().map(|s| s.evictions as f64).sum::<f64>()
            / recent_stats.len() as f64;
        // Frequent evictions hint that the eviction policy is churning
        // entries that are still useful.
        if avg_evictions > 10.0 {
            recommendations.push(CacheRecommendation {
                recommendation_type: RecommendationType::ChangeEvictionPolicy,
                expected_improvement: 15.0,
                rationale: format!(
                    "High eviction rate ({:.1} per snapshot). Consider ARC or LFU policy.",
                    avg_evictions
                ),
            });
        }
        recommendations
    }

    /// Compares the 5 most recent snapshots against a baseline built from
    /// the 10 snapshots before them and reports hit-rate drops (more than
    /// 30% below baseline) and eviction spikes (more than 100 evictions in
    /// a snapshot). Requires at least 20 recorded snapshots.
    pub async fn detect_anomalies(&self) -> Vec<Anomaly> {
        let stats_history = self.stats_history.read().await;
        let mut anomalies = Vec::new();
        if stats_history.len() < 20 {
            return anomalies;
        }
        // Baseline: snapshots 6..=15 counting back from the newest.
        let baseline_stats: Vec<_> = stats_history
            .iter()
            .rev()
            .skip(5)
            .take(10)
            .map(|(_, s)| s)
            .collect();
        let baseline_hit_rate: f64 =
            baseline_stats.iter().map(|s| s.hit_rate()).sum::<f64>() / baseline_stats.len() as f64;
        let recent_stats: Vec<_> = stats_history.iter().rev().take(5).collect();
        for (ts, stats) in recent_stats {
            let hit_rate = stats.hit_rate();
            if hit_rate < baseline_hit_rate * 0.7 {
                anomalies.push(Anomaly {
                    anomaly_type: AnomalyType::HitRateDrop,
                    severity: (baseline_hit_rate - hit_rate) / baseline_hit_rate,
                    timestamp: *ts,
                    description: format!(
                        "Hit rate dropped from {:.1}% to {:.1}%",
                        baseline_hit_rate, hit_rate
                    ),
                });
            }
            if stats.evictions > 100 {
                anomalies.push(Anomaly {
                    anomaly_type: AnomalyType::EvictionSpike,
                    severity: 0.8,
                    timestamp: *ts,
                    description: format!("Eviction spike: {} evictions", stats.evictions),
                });
            }
        }
        anomalies
    }

    /// Returns a clone of the accumulated metric time series.
    pub async fn metrics(&self) -> CacheMetrics {
        self.metrics.read().await.clone()
    }

    /// Discards all recorded history and metrics.
    pub async fn clear(&self) {
        self.access_history.write().await.clear();
        self.stats_history.write().await.clear();
        *self.metrics.write().await = CacheMetrics::new();
    }
}
impl Default for CacheAnalytics {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_cache_analytics() {
        let analytics = CacheAnalytics::new();
        // Record a handful of distinct key accesses.
        for idx in 0..20 {
            analytics.record_access(format!("key{}", idx)).await;
        }
        // One healthy snapshot must produce at least one hit-rate point.
        analytics
            .record_stats(CacheStats {
                hits: 80,
                misses: 20,
                evictions: 5,
                bytes_stored: 1024 * 1024,
                item_count: 100,
            })
            .await;
        let metrics = analytics.metrics().await;
        assert!(!metrics.hit_rate.is_empty());
    }

    #[tokio::test]
    async fn test_pattern_analysis() {
        let analytics = CacheAnalytics::new();
        // Zero-padded keys compare in increasing order, so this forms a
        // sequential scan the analyzer should detect.
        for idx in 0..50 {
            analytics.record_access(format!("key{:03}", idx)).await;
        }
        assert!(!analytics.analyze_patterns().await.is_empty());
    }

    #[tokio::test]
    async fn test_recommendations() {
        let analytics = CacheAnalytics::new();
        // Sustained low hit rate and high evictions should trigger advice.
        for _ in 0..15 {
            analytics
                .record_stats(CacheStats {
                    hits: 30,
                    misses: 70,
                    evictions: 15,
                    bytes_stored: 1024 * 1024,
                    item_count: 100,
                })
                .await;
        }
        assert!(!analytics.generate_recommendations().await.is_empty());
    }

    #[tokio::test]
    async fn test_anomaly_detection() {
        let analytics = CacheAnalytics::new();
        // Establish a healthy baseline...
        for _ in 0..15 {
            analytics
                .record_stats(CacheStats {
                    hits: 80,
                    misses: 20,
                    evictions: 2,
                    bytes_stored: 1024 * 1024,
                    item_count: 100,
                })
                .await;
        }
        // ...followed by a hit-rate collapse plus an eviction spike.
        for _ in 0..5 {
            analytics
                .record_stats(CacheStats {
                    hits: 30,
                    misses: 70,
                    evictions: 150,
                    bytes_stored: 1024 * 1024,
                    item_count: 100,
                })
                .await;
        }
        assert!(!analytics.detect_anomalies().await.is_empty());
    }
}