use crate::{
cache::{
models::{AdvancedModelCache, ModelCacheConfig, ModelCacheStats},
results::{ResultCacheConfig, ResultCacheStats, SynthesisResultCache},
},
error::{Result, VoirsError},
traits::{CacheStats, ModelCache},
};
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
path::PathBuf,
sync::{Arc, RwLock},
time::{Duration, Instant, SystemTime},
};
use tokio::{sync::Notify, time::interval};
use tracing::{debug, error, info, warn};
/// Central coordinator for VoiRS caching: owns the model and synthesis-result
/// caches together with their monitoring, maintenance, and metrics machinery.
#[allow(dead_code)] // some fields are held only for background tasks / future use
pub struct CacheManager {
    /// Cache for loaded models.
    model_cache: Arc<AdvancedModelCache>,
    /// Cache for synthesis results.
    result_cache: Arc<SynthesisResultCache>,
    /// Configuration the manager was created with.
    config: CacheManagerConfig,
    /// On-disk directory layout shared by both caches.
    cache_dirs: CacheDirs,
    /// Combined statistics snapshot.
    /// NOTE(review): no writer for this field is visible in this file -- confirm.
    stats: Arc<RwLock<CombinedCacheStats>>,
    /// Health-check history and active alerts.
    health_monitor: Arc<CacheHealthMonitor>,
    /// Shutdown signalling and join handles for spawned background tasks.
    task_controller: Arc<BackgroundTaskController>,
    /// Rolling buffer of collected metric points.
    metrics_collector: Arc<CacheMetricsCollector>,
}
/// Top-level configuration for [`CacheManager`], combining per-cache settings
/// with global, monitoring, and maintenance options.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct CacheManagerConfig {
    /// Settings for the model cache.
    pub model_cache: ModelCacheConfig,
    /// Settings for the synthesis-result cache.
    pub result_cache: ResultCacheConfig,
    /// Cross-cache settings (memory budget, warming, dedup, ...).
    pub global_settings: GlobalCacheSettings,
    /// Health and performance monitoring settings.
    pub monitoring: MonitoringConfig,
    /// Background maintenance settings.
    pub maintenance: MaintenanceConfig,
}
/// Settings that apply to the caching subsystem as a whole.
///
/// NOTE(review): several of these flags are not read anywhere in this file;
/// confirm they are consumed by the individual cache implementations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GlobalCacheSettings {
    /// Coordinate sizing/eviction decisions across the caches.
    pub enable_coordination: bool,
    /// Total memory budget for all caches combined, in megabytes.
    pub max_total_memory_mb: usize,
    /// Pre-warm caches during [`CacheManager::new`].
    pub enable_startup_warming: bool,
    /// Allow caches to resize themselves based on load.
    pub enable_adaptive_sizing: bool,
    /// Fraction of the memory budget (0.0-1.0) at which memory-pressure
    /// handling kicks in.
    pub memory_pressure_threshold: f64,
    /// Compress cached payloads.
    pub enable_compression: bool,
    /// Encrypt cached payloads at rest.
    pub enable_encryption: bool,
    /// Deduplicate identical cached payloads.
    pub enable_deduplication: bool,
}
impl Default for GlobalCacheSettings {
    /// Conservative defaults: 2 GiB total budget, pressure handling at 85%,
    /// compression and deduplication on, encryption off.
    fn default() -> Self {
        Self {
            enable_coordination: true,
            max_total_memory_mb: 2048,
            enable_startup_warming: true,
            enable_adaptive_sizing: true,
            memory_pressure_threshold: 0.85,
            enable_compression: true,
            // Off by default: encryption costs CPU and is opt-in.
            enable_encryption: false,
            enable_deduplication: true,
        }
    }
}
/// Configuration for cache health and performance monitoring.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MonitoringConfig {
    /// Run the periodic health-check background task.
    pub enable_health_monitoring: bool,
    /// Interval between health checks, in seconds.
    pub health_check_interval_seconds: u64,
    /// Run the periodic metrics-collection background task.
    pub enable_performance_monitoring: bool,
    /// Interval between metrics samples, in seconds.
    pub metrics_interval_seconds: u64,
    /// Raise alerts when thresholds are crossed.
    /// NOTE(review): no alert-evaluation code is visible in this file.
    pub enable_alerting: bool,
    /// Threshold values used for alerting.
    pub alert_thresholds: AlertThresholds,
    /// Emit verbose per-operation logs.
    pub enable_detailed_logging: bool,
}
impl Default for MonitoringConfig {
fn default() -> Self {
Self {
enable_health_monitoring: true,
health_check_interval_seconds: 60,
enable_performance_monitoring: true,
metrics_interval_seconds: 30,
enable_alerting: true,
alert_thresholds: AlertThresholds::default(),
enable_detailed_logging: false,
}
}
}
/// Threshold values that trigger cache alerts.
///
/// NOTE(review): thresholds are stored but not evaluated anywhere in this
/// file; confirm where alerting actually happens.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertThresholds {
    /// Memory usage fraction (0.0-1.0) considered too high.
    pub high_memory_usage: f64,
    /// Hit rate below this is considered poor (scale: presumably 0.0-1.0,
    /// default 0.5 -- TODO confirm against the 0-100 hit rates used elsewhere).
    pub low_hit_rate: f64,
    /// Eviction rate considered excessive.
    pub high_eviction_rate: f64,
    /// Error rate considered excessive.
    pub high_error_rate: f64,
    /// Response time above this many milliseconds is considered slow.
    pub slow_response_time_ms: u64,
}
impl Default for AlertThresholds {
fn default() -> Self {
Self {
high_memory_usage: 0.9,
low_hit_rate: 0.5,
high_eviction_rate: 10.0,
high_error_rate: 5.0,
slow_response_time_ms: 1000,
}
}
}
/// Configuration for the periodic background maintenance task.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MaintenanceConfig {
    /// Spawn the periodic maintenance task at startup.
    pub enable_auto_maintenance: bool,
    /// Interval between maintenance cycles, in seconds.
    pub maintenance_interval_seconds: u64,
    /// Remove expired entries during maintenance.
    pub cleanup_expired: bool,
    /// Defragment on-disk storage (not read in this file -- TODO confirm).
    pub defragment_storage: bool,
    /// Optimize storage layout (not read in this file -- TODO confirm).
    pub optimize_layout: bool,
    /// Refresh cached statistics (not read in this file -- TODO confirm).
    pub update_statistics: bool,
    /// Preferred hour of day (0-23) for heavy maintenance, if any.
    pub maintenance_window_hour: Option<u8>,
    /// Upper bound on a single maintenance run, in seconds.
    pub max_maintenance_duration_seconds: u64,
}
impl Default for MaintenanceConfig {
    /// Defaults: hourly maintenance, every step enabled, preferred window at
    /// hour 2, each run capped at 5 minutes.
    fn default() -> Self {
        Self {
            enable_auto_maintenance: true,
            maintenance_interval_seconds: 3600,
            cleanup_expired: true,
            defragment_storage: true,
            optimize_layout: true,
            update_statistics: true,
            maintenance_window_hour: Some(2),
            max_maintenance_duration_seconds: 300,
        }
    }
}
/// On-disk directory layout for all cache data.
#[derive(Debug, Clone)]
pub struct CacheDirs {
    /// Root directory containing all the subdirectories below.
    pub base_dir: PathBuf,
    /// `<base>/models` -- persisted model cache.
    pub model_cache_dir: PathBuf,
    /// `<base>/results` -- persisted synthesis results.
    pub result_cache_dir: PathBuf,
    /// `<base>/temp` -- scratch space.
    pub temp_dir: PathBuf,
    /// `<base>/metadata` -- cache metadata.
    pub metadata_dir: PathBuf,
}
impl CacheDirs {
    /// Build the standard cache layout under `base_dir` and create every
    /// directory up front, failing fast on the first I/O error.
    ///
    /// # Errors
    /// Returns a cache error if any directory cannot be created.
    pub fn new(base_dir: PathBuf) -> Result<Self> {
        let dirs = Self {
            model_cache_dir: base_dir.join("models"),
            result_cache_dir: base_dir.join("results"),
            temp_dir: base_dir.join("temp"),
            metadata_dir: base_dir.join("metadata"),
            base_dir,
        };
        for dir in [
            &dirs.base_dir,
            &dirs.model_cache_dir,
            &dirs.result_cache_dir,
            &dirs.temp_dir,
            &dirs.metadata_dir,
        ] {
            std::fs::create_dir_all(dir).map_err(|e| {
                VoirsError::cache_error(format!("Failed to create cache directory {dir:?}: {e}"))
            })?;
        }
        Ok(dirs)
    }
}
/// Aggregated statistics across both caches plus global, health, and
/// performance roll-ups.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CombinedCacheStats {
    /// Statistics from the model cache.
    pub model_stats: ModelCacheStats,
    /// Statistics from the result cache.
    pub result_stats: ResultCacheStats,
    /// Cross-cache aggregates.
    pub global_stats: GlobalCacheStats,
    /// Latest health scores.
    pub health_metrics: HealthMetrics,
    /// Latest performance measurements.
    pub performance_metrics: PerformanceMetrics,
    /// When this snapshot was produced (`UNIX_EPOCH` = never).
    pub last_updated: SystemTime,
}
impl Default for CombinedCacheStats {
fn default() -> Self {
Self {
model_stats: ModelCacheStats::default(),
result_stats: ResultCacheStats::default(),
global_stats: GlobalCacheStats::default(),
health_metrics: HealthMetrics::default(),
performance_metrics: PerformanceMetrics::default(),
last_updated: SystemTime::UNIX_EPOCH,
}
}
}
/// Cross-cache aggregate statistics.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct GlobalCacheStats {
    /// Combined memory usage of both caches, in bytes.
    pub total_memory_usage_bytes: usize,
    /// Combined memory usage, in whole megabytes.
    pub total_memory_usage_mb: usize,
    /// Combined entry count of both caches.
    pub total_entries: usize,
    /// Mean of the per-cache hit rates, percentage scale (0-100).
    pub overall_hit_rate: f64,
    /// `100 - overall_hit_rate`.
    pub overall_miss_rate: f64,
    /// Placeholder value (hard-coded in `get_combined_stats`).
    pub memory_efficiency: f64,
    /// Placeholder value (hard-coded in `get_combined_stats`).
    pub coordination_overhead_ms: f64,
    /// Placeholder value (hard-coded in `get_combined_stats`).
    pub deduplication_savings_bytes: usize,
    /// Placeholder value (hard-coded in `get_combined_stats`).
    pub compression_ratio: f64,
}
/// Health scores for the cache system; each score ranges from 0.0 (bad)
/// to 1.0 (healthy).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealthMetrics {
    /// Overall blended health score.
    pub overall_health_score: f64,
    /// Memory-related health.
    pub memory_health: f64,
    /// Performance-related health.
    pub performance_health: f64,
    /// Error-rate-related health.
    pub error_rate_health: f64,
    /// Cross-cache consistency score.
    pub consistency_score: f64,
    /// Timestamp of the last health check (`UNIX_EPOCH` = never).
    pub last_health_check: SystemTime,
}
impl Default for HealthMetrics {
fn default() -> Self {
Self {
overall_health_score: 1.0,
memory_health: 1.0,
performance_health: 1.0,
error_rate_health: 1.0,
consistency_score: 1.0,
last_health_check: SystemTime::UNIX_EPOCH,
}
}
}
/// Performance measurements for the cache subsystem.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    /// Mean response time in milliseconds.
    pub avg_response_time_ms: f64,
    /// 95th-percentile response time in milliseconds.
    pub p95_response_time_ms: f64,
    /// Overall operation throughput (operations per second).
    pub throughput_ops_per_sec: f64,
    /// Cache-operation throughput (operations per second).
    pub cache_ops_per_sec: f64,
    /// Per-task performance figures, keyed by task name.
    pub background_task_performance: HashMap<String, f64>,
    /// System resource usage attributed to caching.
    pub resource_utilization: ResourceUtilization,
}
/// System resource usage attributed to the cache subsystem.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ResourceUtilization {
    /// CPU usage, percent.
    pub cpu_usage_percent: f64,
    /// Memory usage, percent.
    pub memory_usage_percent: f64,
    /// Disk I/O rate, megabytes per second.
    pub disk_io_rate_mbps: f64,
    /// Network I/O rate, megabytes per second.
    pub network_io_rate_mbps: f64,
}
/// Tracks health-check history and active alerts for the cache system.
pub struct CacheHealthMonitor {
    /// Most recent health-check results (capped at 100 in `perform_health_check`).
    health_history: Arc<RwLock<Vec<HealthCheckResult>>>,
    /// Alerts that have fired and not yet been resolved.
    active_alerts: Arc<RwLock<Vec<CacheAlert>>>,
    // Retained for threshold-driven alerting; not read in this file yet.
    #[allow(dead_code)]
    config: MonitoringConfig,
}
/// Outcome of a single health check.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealthCheckResult {
    /// When the check ran.
    pub timestamp: SystemTime,
    /// Overall score for this check (0.0-1.0).
    pub health_score: f64,
    /// Per-component scores, keyed by component name (e.g. "model_cache").
    pub component_scores: HashMap<String, f64>,
    /// Detected issues (currently always empty -- see `perform_health_check`).
    pub issues: Vec<HealthIssue>,
    /// Human-readable recommendations derived from the scores.
    pub recommendations: Vec<String>,
}
/// A single problem detected by a health check.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealthIssue {
    /// How severe the issue is.
    pub severity: IssueSeverity,
    /// What kind of issue it is.
    pub category: IssueCategory,
    /// Human-readable description.
    pub description: String,
    /// Component the issue was observed in.
    pub component: String,
    /// Observed value of the offending metric.
    pub metric_value: f64,
    /// Threshold the metric violated.
    pub threshold: f64,
}
/// Severity levels for health issues and alerts, from most to least urgent.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum IssueSeverity {
    Critical,
    High,
    Medium,
    Low,
    Info,
}
/// Broad classification of a health issue.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum IssueCategory {
    Memory,
    Performance,
    Consistency,
    Configuration,
    Resource,
    Other,
}
/// An alert raised by the cache health monitor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheAlert {
    /// Unique identifier for the alert.
    pub id: String,
    /// When the alert fired.
    pub timestamp: SystemTime,
    /// Severity of the underlying issue.
    pub severity: IssueSeverity,
    /// Human-readable alert message.
    pub message: String,
    /// Component the alert relates to.
    pub component: String,
    /// When the alert was resolved, if it has been.
    pub resolved_at: Option<SystemTime>,
}
/// Coordinates the lifecycle of spawned background tasks.
pub struct BackgroundTaskController {
    /// Broadcast used to tell all tasks to shut down.
    shutdown_notify: Arc<Notify>,
    /// Join handles collected at spawn time, awaited during shutdown.
    task_handles: Arc<RwLock<Vec<tokio::task::JoinHandle<()>>>>,
    /// Per-task bookkeeping, keyed by task name.
    task_status: Arc<RwLock<HashMap<String, TaskStatus>>>,
}
/// Runtime bookkeeping for a single background task.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskStatus {
    /// Task name (doubles as the map key).
    pub name: String,
    /// Whether the task loop is still running.
    pub running: bool,
    /// When the task last completed a cycle.
    pub last_execution: Option<SystemTime>,
    /// Number of completed cycles.
    pub execution_count: u64,
    /// Number of cycles that returned an error.
    pub error_count: u64,
    /// Running mean of cycle duration, in milliseconds.
    pub avg_duration_ms: f64,
}
/// Rolling buffer of collected cache metrics.
pub struct CacheMetricsCollector {
    /// Metric points, capped at 1000 in `collect_metrics`.
    metrics: Arc<RwLock<Vec<MetricPoint>>>,
    // Retained for future sampling configuration; not read in this file yet.
    #[allow(dead_code)]
    config: MonitoringConfig,
}
/// A single timestamped metric sample.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricPoint {
    /// When the sample was taken.
    pub timestamp: SystemTime,
    /// Metric name, e.g. "model_cache_hit_rate".
    pub name: String,
    /// Sampled value.
    pub value: f64,
    /// Free-form labels, e.g. `cache_type` = "model" | "result".
    pub tags: HashMap<String, String>,
}
impl CacheManager {
/// Create a cache manager rooted at `base_cache_dir`.
///
/// Builds the directory layout, constructs both caches, then optionally
/// starts background tasks and performs startup warming.
///
/// # Errors
/// Fails if cache directories cannot be created, either cache fails to
/// initialize, or background tasks cannot be registered.
pub async fn new(config: CacheManagerConfig, base_cache_dir: PathBuf) -> Result<Self> {
    let cache_dirs = CacheDirs::new(base_cache_dir)?;
    let model_cache = Arc::new(AdvancedModelCache::new(
        config.model_cache.clone(),
        Some(cache_dirs.model_cache_dir.clone()),
    )?);
    let result_cache = Arc::new(SynthesisResultCache::new(
        config.result_cache.clone(),
        Some(cache_dirs.result_cache_dir.clone()),
    )?);
    let health_monitor = Arc::new(CacheHealthMonitor {
        health_history: Arc::new(RwLock::new(Vec::new())),
        active_alerts: Arc::new(RwLock::new(Vec::new())),
        config: config.monitoring.clone(),
    });
    let task_controller = Arc::new(BackgroundTaskController {
        shutdown_notify: Arc::new(Notify::new()),
        task_handles: Arc::new(RwLock::new(Vec::new())),
        task_status: Arc::new(RwLock::new(HashMap::new())),
    });
    let metrics_collector = Arc::new(CacheMetricsCollector {
        metrics: Arc::new(RwLock::new(Vec::new())),
        config: config.monitoring.clone(),
    });
    let cache_manager = Self {
        model_cache,
        result_cache,
        config: config.clone(),
        cache_dirs,
        stats: Arc::new(RwLock::new(CombinedCacheStats::default())),
        health_monitor,
        task_controller,
        metrics_collector,
    };
    // Fix: previously background tasks were started only when
    // `enable_auto_maintenance` was set, which silently skipped the health
    // monitoring and metrics tasks even when their own flags were enabled.
    // `start_background_tasks` still checks each feature flag individually.
    let wants_background_tasks = config.maintenance.enable_auto_maintenance
        || config.monitoring.enable_health_monitoring
        || config.monitoring.enable_performance_monitoring;
    if wants_background_tasks {
        cache_manager.start_background_tasks().await?;
    }
    if config.global_settings.enable_startup_warming {
        cache_manager.perform_startup_warming().await?;
    }
    info!("Cache manager initialized successfully");
    Ok(cache_manager)
}
/// Shared handle to the model cache.
pub fn model_cache(&self) -> Arc<AdvancedModelCache> {
    Arc::clone(&self.model_cache)
}
/// Shared handle to the synthesis-result cache.
pub fn result_cache(&self) -> Arc<SynthesisResultCache> {
    Arc::clone(&self.result_cache)
}
/// Spawn the background tasks enabled by configuration and store their
/// join handles so `shutdown` can await them later.
///
/// # Errors
/// Returns an internal error if the task-handle lock is poisoned.
async fn start_background_tasks(&self) -> Result<()> {
    let mut task_handles = Vec::new();
    if self.config.maintenance.enable_auto_maintenance {
        let maintenance_handle = self.start_maintenance_task().await;
        task_handles.push(maintenance_handle);
    }
    if self.config.monitoring.enable_health_monitoring {
        let health_handle = self.start_health_monitoring_task().await;
        task_handles.push(health_handle);
    }
    if self.config.monitoring.enable_performance_monitoring {
        let metrics_handle = self.start_metrics_collection_task().await;
        task_handles.push(metrics_handle);
    }
    let task_count = task_handles.len();
    {
        // Scoped so the write lock drops before logging.
        let mut stored_handles = self.task_controller.task_handles.write().map_err(|e| {
            VoirsError::internal("cache_management", format!("Lock poisoned: {e}"))
        })?;
        stored_handles.extend(task_handles);
    }
    info!("Started {} background tasks", task_count);
    Ok(())
}
/// Spawn the periodic maintenance task.
///
/// The task registers itself under the key "maintenance" in `task_status`,
/// then loops: on every interval tick it runs one maintenance cycle and
/// updates its bookkeeping; on `shutdown_notify` it exits and marks itself
/// as no longer running.
async fn start_maintenance_task(&self) -> tokio::task::JoinHandle<()> {
    let model_cache = Arc::clone(&self.model_cache);
    let result_cache = Arc::clone(&self.result_cache);
    let config = self.config.maintenance.clone();
    let shutdown_notify = Arc::clone(&self.task_controller.shutdown_notify);
    let task_status = Arc::clone(&self.task_controller.task_status);
    tokio::spawn(async move {
        let mut interval = interval(Duration::from_secs(config.maintenance_interval_seconds));
        {
            // Register this task. If the status lock is poisoned there is no
            // reliable bookkeeping, so abort the task entirely.
            let Ok(mut status) = task_status.write() else {
                error!("Failed to acquire task status lock for maintenance task");
                return;
            };
            status.insert(
                "maintenance".to_string(),
                TaskStatus {
                    name: "maintenance".to_string(),
                    running: true,
                    last_execution: None,
                    execution_count: 0,
                    error_count: 0,
                    avg_duration_ms: 0.0,
                },
            );
        }
        loop {
            tokio::select! {
                _ = interval.tick() => {
                    let start_time = Instant::now();
                    if let Err(e) = Self::perform_maintenance_cycle(&model_cache, &result_cache, &config).await {
                        error!("Maintenance cycle failed: {}", e);
                        if let Ok(mut status) = task_status.write() {
                            if let Some(task_status) = status.get_mut("maintenance") {
                                task_status.error_count += 1;
                            }
                        }
                    }
                    let duration = start_time.elapsed().as_millis() as f64;
                    // Skip bookkeeping (but keep running) if the lock is poisoned.
                    let Ok(mut status) = task_status.write() else {
                        continue;
                    };
                    if let Some(task_status) = status.get_mut("maintenance") {
                        task_status.last_execution = Some(SystemTime::now());
                        task_status.execution_count += 1;
                        // Incremental running mean: previous total plus this
                        // run, divided by the new count. `execution_count` was
                        // just incremented, so it is always >= 1 here.
                        let total_duration = task_status.avg_duration_ms * (task_status.execution_count - 1) as f64 + duration;
                        task_status.avg_duration_ms = total_duration / task_status.execution_count as f64;
                    }
                }
                _ = shutdown_notify.notified() => {
                    info!("Maintenance task shutting down");
                    break;
                }
            }
        }
        // Mark the task stopped (best-effort if the lock is poisoned).
        if let Ok(mut status) = task_status.write() {
            if let Some(task_status) = status.get_mut("maintenance") {
                task_status.running = false;
            }
        }
    })
}
/// Spawn the periodic health-check task.
///
/// Runs `perform_health_check` on every interval tick until the shared
/// shutdown notification fires. Check failures are logged, not fatal.
async fn start_health_monitoring_task(&self) -> tokio::task::JoinHandle<()> {
    let model_cache = Arc::clone(&self.model_cache);
    let result_cache = Arc::clone(&self.result_cache);
    let health_monitor = Arc::clone(&self.health_monitor);
    let config = self.config.monitoring.clone();
    let shutdown_notify = Arc::clone(&self.task_controller.shutdown_notify);
    tokio::spawn(async move {
        let mut interval = interval(Duration::from_secs(config.health_check_interval_seconds));
        loop {
            tokio::select! {
                _ = interval.tick() => {
                    if let Err(e) = Self::perform_health_check(&model_cache, &result_cache, &health_monitor, &config).await {
                        error!("Health check failed: {}", e);
                    }
                }
                _ = shutdown_notify.notified() => {
                    info!("Health monitoring task shutting down");
                    break;
                }
            }
        }
    })
}
/// Spawn the periodic metrics-collection task.
///
/// Runs `collect_metrics` on every interval tick until the shared shutdown
/// notification fires. Collection failures are logged, not fatal.
async fn start_metrics_collection_task(&self) -> tokio::task::JoinHandle<()> {
    let model_cache = Arc::clone(&self.model_cache);
    let result_cache = Arc::clone(&self.result_cache);
    let metrics_collector = Arc::clone(&self.metrics_collector);
    let config = self.config.monitoring.clone();
    let shutdown_notify = Arc::clone(&self.task_controller.shutdown_notify);
    tokio::spawn(async move {
        let mut interval = interval(Duration::from_secs(config.metrics_interval_seconds));
        loop {
            tokio::select! {
                _ = interval.tick() => {
                    if let Err(e) = Self::collect_metrics(&model_cache, &result_cache, &metrics_collector).await {
                        error!("Metrics collection failed: {}", e);
                    }
                }
                _ = shutdown_notify.notified() => {
                    info!("Metrics collection task shutting down");
                    break;
                }
            }
        }
    })
}
/// Run one maintenance cycle over both caches.
///
/// NOTE(review): model-cache maintenance is gated on `cleanup_expired`, and
/// `defragment_storage` / `optimize_layout` / `update_statistics` are never
/// consulted -- confirm whether this gating is intended.
async fn perform_maintenance_cycle(
    model_cache: &AdvancedModelCache,
    result_cache: &SynthesisResultCache,
    config: &MaintenanceConfig,
) -> Result<()> {
    debug!("Starting maintenance cycle");
    if config.cleanup_expired {
        // Drop expired synthesis results, then let the model cache do its
        // own housekeeping.
        let expired_results = result_cache.cleanup_expired().await?;
        if expired_results > 0 {
            debug!("Cleaned up {} expired results", expired_results);
        }
        model_cache.perform_maintenance().await?;
    }
    debug!("Maintenance cycle completed");
    Ok(())
}
/// Run one health check: score each cache, derive recommendations, and
/// append the result to the monitor's bounded history.
///
/// NOTE(review): `issues` is always left empty -- issue detection against
/// `MonitoringConfig::alert_thresholds` is not implemented here.
async fn perform_health_check(
    model_cache: &AdvancedModelCache,
    result_cache: &SynthesisResultCache,
    health_monitor: &CacheHealthMonitor,
    _config: &MonitoringConfig,
) -> Result<()> {
    let mut component_scores = HashMap::new();
    let issues = Vec::new();
    // Score each cache from its basic statistics.
    let model_stats = model_cache.stats();
    let model_health = Self::calculate_cache_health(&model_stats);
    component_scores.insert("model_cache".to_string(), model_health);
    let result_stats = result_cache.stats();
    let result_health = Self::calculate_cache_health(&result_stats.basic_stats);
    component_scores.insert("result_cache".to_string(), result_health);
    // Overall score: unweighted mean of the two component scores.
    let health_score = (model_health + result_health) / 2.0;
    let recommendations = Self::generate_health_recommendations(
        &model_stats,
        &result_stats.basic_stats,
        model_health,
        result_health,
        health_score,
    );
    let health_result = HealthCheckResult {
        timestamp: SystemTime::now(),
        health_score,
        component_scores,
        issues,
        recommendations,
    };
    {
        // Best-effort append; a poisoned lock silently drops this sample.
        if let Ok(mut history) = health_monitor.health_history.write() {
            history.push(health_result);
            // Bound the history to the 100 most recent checks.
            if history.len() > 100 {
                history.remove(0);
            }
        }
    }
    debug!("Health check completed: score = {:.2}", health_score);
    Ok(())
}
/// Derive a 0.0-1.0 health score from basic cache statistics.
///
/// `hit_rate` is treated as a percentage (0-100), consistent with the
/// `< 50.0` / `< 40.0` comparisons in `generate_health_recommendations`,
/// and normalized to 0-1. The memory component is a coarse placeholder:
/// any non-zero usage scores 0.8, an empty cache scores 1.0.
fn calculate_cache_health(stats: &CacheStats) -> f64 {
    let hit_rate_score = stats.hit_rate / 100.0;
    // NOTE(review): heuristic only -- does not consider configured limits.
    let memory_score = if stats.memory_usage_bytes > 0 { 0.8 } else { 1.0 };
    // Equal-weight average; already f64, so the previous `as f64` cast
    // was redundant and has been removed.
    (hit_rate_score + memory_score) / 2.0
}
/// Translate health scores and raw statistics into human-readable advice.
///
/// Recommendations are appended in a fixed order: overall health first,
/// then model-cache findings, then result-cache findings, then entry-count
/// findings, and finally an "all good" note when overall health is high.
fn generate_health_recommendations(
    model_stats: &CacheStats,
    result_stats: &CacheStats,
    model_health: f64,
    result_health: f64,
    overall_health: f64,
) -> Vec<String> {
    let mut recommendations: Vec<String> = Vec::new();
    let mut add = |msg: &str| recommendations.push(msg.to_string());
    if overall_health < 0.5 {
        add("Critical: Cache system health is poor. Consider restarting the cache manager.");
    } else if overall_health < 0.7 {
        add("Warning: Cache system health is degraded. Monitor closely and consider optimization.");
    }
    if model_health < 0.6 {
        add("Model cache performance is poor. Consider increasing cache size or clearing outdated models.");
    }
    if model_stats.hit_rate < 50.0 {
        add("Model cache hit rate is low. Consider preloading frequently used models.");
    }
    if model_stats.memory_usage_bytes > 1_000_000_000 {
        add("Model cache is using high memory. Consider reducing cache size or clearing unused models.");
    }
    if result_health < 0.6 {
        add("Result cache performance is poor. Consider increasing cache size or adjusting retention policies.");
    }
    if result_stats.hit_rate < 40.0 {
        add("Result cache hit rate is low. Consider extending cache retention time or optimizing cache keys.");
    }
    if result_stats.memory_usage_bytes > 2_000_000_000 {
        add("Result cache is using high memory. Consider reducing cache size or implementing more aggressive cleanup.");
    }
    if model_stats.total_entries > 1000 {
        add("Model cache has many entries. Consider implementing cache pruning to improve performance.");
    }
    if result_stats.total_entries > 10000 {
        add("Result cache has many entries. Consider implementing time-based expiration for better performance.");
    }
    if overall_health > 0.8 {
        add("Cache system is performing well. Current configuration is optimal.");
    }
    recommendations
}
async fn collect_metrics(
model_cache: &AdvancedModelCache,
result_cache: &SynthesisResultCache,
metrics_collector: &CacheMetricsCollector,
) -> Result<()> {
let timestamp = SystemTime::now();
let mut metrics = Vec::new();
let model_stats = model_cache.stats();
metrics.push(MetricPoint {
timestamp,
name: "model_cache_hit_rate".to_string(),
value: model_stats.hit_rate as f64,
tags: [("cache_type".to_string(), "model".to_string())]
.iter()
.cloned()
.collect(),
});
metrics.push(MetricPoint {
timestamp,
name: "model_cache_memory_usage".to_string(),
value: model_stats.memory_usage_bytes as f64,
tags: [("cache_type".to_string(), "model".to_string())]
.iter()
.cloned()
.collect(),
});
let result_stats = result_cache.stats();
metrics.push(MetricPoint {
timestamp,
name: "result_cache_hit_rate".to_string(),
value: result_stats.basic_stats.hit_rate as f64,
tags: [("cache_type".to_string(), "result".to_string())]
.iter()
.cloned()
.collect(),
});
metrics.push(MetricPoint {
timestamp,
name: "result_cache_memory_usage".to_string(),
value: result_stats.basic_stats.memory_usage_bytes as f64,
tags: [("cache_type".to_string(), "result".to_string())]
.iter()
.cloned()
.collect(),
});
{
if let Ok(mut stored_metrics) = metrics_collector.metrics.write() {
stored_metrics.extend(metrics);
let len = stored_metrics.len();
if len > 1000 {
stored_metrics.drain(0..len - 1000);
}
}
}
Ok(())
}
/// Pre-warm the model cache with the default model set.
///
/// Warming failures are logged as warnings and never abort startup.
async fn perform_startup_warming(&self) -> Result<()> {
    info!("Starting cache warming");
    let common_models: Vec<String> = ["default_g2p", "default_acoustic", "default_vocoder"]
        .iter()
        .map(|name| name.to_string())
        .collect();
    if let Err(e) = self.model_cache.warm_cache(common_models).await {
        warn!("Model cache warming failed: {}", e);
    }
    info!("Cache warming completed");
    Ok(())
}
/// Snapshot aggregated statistics across both caches.
///
/// Hit/miss rates are on a 0-100 percentage scale. Several global fields
/// (`memory_efficiency`, `coordination_overhead_ms`, ...) are placeholder
/// constants until real measurement exists.
pub async fn get_combined_stats(&self) -> CombinedCacheStats {
    let model_stats = self.model_cache.stats();
    let result_stats = self.result_cache.stats();
    let total_memory_usage_bytes =
        model_stats.memory_usage_bytes + result_stats.basic_stats.memory_usage_bytes;
    let total_entries = model_stats.total_entries + result_stats.basic_stats.total_entries;
    // Unweighted mean of the two hit rates; 0 when both caches are empty.
    // NOTE(review): an entry-weighted mean may be more representative -- confirm intent.
    let overall_hit_rate = if total_entries > 0 {
        (model_stats.hit_rate + result_stats.basic_stats.hit_rate) / 2.0
    } else {
        0.0
    };
    let global_stats = GlobalCacheStats {
        total_memory_usage_bytes,
        total_memory_usage_mb: total_memory_usage_bytes / (1024 * 1024),
        total_entries,
        // Already f64 -- the previous `as f64` casts were redundant.
        overall_hit_rate,
        overall_miss_rate: 100.0 - overall_hit_rate,
        // Placeholder constants below -- not yet measured.
        memory_efficiency: 0.85,
        coordination_overhead_ms: 0.1,
        deduplication_savings_bytes: 0,
        compression_ratio: 1.2,
    };
    CombinedCacheStats {
        model_stats: ModelCacheStats {
            basic_stats: model_stats,
            ..Default::default()
        },
        result_stats,
        global_stats,
        health_metrics: HealthMetrics::default(),
        performance_metrics: PerformanceMetrics::default(),
        last_updated: SystemTime::now(),
    }
}
/// Empty both caches. Model cache is cleared first; an error from either
/// cache aborts the operation.
pub async fn clear_all(&self) -> Result<()> {
    info!("Clearing all caches");
    self.model_cache.clear().await?;
    self.result_cache.clear().await?;
    info!("All caches cleared");
    Ok(())
}
/// Gracefully stop all background tasks and wait for them to finish.
///
/// Notifies every task via `shutdown_notify`, then awaits each stored join
/// handle. A poisoned handle lock is logged and treated as success (best
/// effort during teardown).
pub async fn shutdown(&self) -> Result<()> {
    info!("Shutting down cache manager");
    self.task_controller.shutdown_notify.notify_waiters();
    let task_handles = {
        let Ok(mut handles) = self.task_controller.task_handles.write() else {
            warn!("Failed to acquire task handles lock during shutdown");
            return Ok(());
        };
        // Take ownership so the std lock is released before `.await`ing
        // the handles below.
        std::mem::take(&mut *handles)
    };
    for handle in task_handles {
        if let Err(e) = handle.await {
            warn!("Background task failed to shutdown cleanly: {}", e);
        }
    }
    info!("Cache manager shutdown completed");
    Ok(())
}
/// Recent health-check results; empty if the lock is poisoned.
pub fn get_health_status(&self) -> Vec<HealthCheckResult> {
    match self.health_monitor.health_history.read() {
        Ok(history) => history.clone(),
        Err(_) => Vec::new(),
    }
}
/// Currently active alerts; empty if the lock is poisoned.
pub fn get_active_alerts(&self) -> Vec<CacheAlert> {
    match self.health_monitor.active_alerts.read() {
        Ok(alerts) => alerts.clone(),
        Err(_) => Vec::new(),
    }
}
/// Collected metric points; empty if the lock is poisoned.
pub fn get_performance_metrics(&self) -> Vec<MetricPoint> {
    match self.metrics_collector.metrics.read() {
        Ok(metrics) => metrics.clone(),
        Err(_) => Vec::new(),
    }
}
/// Snapshot of background-task status; empty if the lock is poisoned.
pub fn get_task_status(&self) -> HashMap<String, TaskStatus> {
    match self.task_controller.task_status.read() {
        Ok(status) => status.clone(),
        Err(_) => HashMap::new(),
    }
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Config with every background feature disabled so tests stay
    /// deterministic and spawn no tasks. Extracted to remove the 4-line
    /// setup previously duplicated in all three tests (and the clippy
    /// `field_reassign_with_default` pattern that came with it).
    fn quiet_config() -> CacheManagerConfig {
        let mut config = CacheManagerConfig::default();
        config.maintenance.enable_auto_maintenance = false;
        config.monitoring.enable_health_monitoring = false;
        config.monitoring.enable_performance_monitoring = false;
        config.global_settings.enable_startup_warming = false;
        config
    }

    #[tokio::test]
    async fn test_cache_manager_creation() {
        let temp_dir = TempDir::new().unwrap();
        let manager = CacheManager::new(quiet_config(), temp_dir.path().to_path_buf())
            .await
            .unwrap();
        // A freshly created manager has no cached entries.
        let stats = manager.get_combined_stats().await;
        assert_eq!(stats.global_stats.total_entries, 0);
    }

    #[tokio::test]
    async fn test_cache_manager_shutdown() {
        let temp_dir = TempDir::new().unwrap();
        let manager = CacheManager::new(quiet_config(), temp_dir.path().to_path_buf())
            .await
            .unwrap();
        // Shutdown must succeed even when no background tasks were started.
        manager.shutdown().await.unwrap();
    }

    #[tokio::test]
    async fn test_combined_stats() {
        let temp_dir = TempDir::new().unwrap();
        let manager = CacheManager::new(quiet_config(), temp_dir.path().to_path_buf())
            .await
            .unwrap();
        // Hit rate is reported on a 0-100 percentage scale.
        let stats = manager.get_combined_stats().await;
        assert!(stats.global_stats.overall_hit_rate >= 0.0);
        assert!(stats.global_stats.overall_hit_rate <= 100.0);
    }
}