use crate::config::RetentionConfig;
use crate::database::AnalyticsDatabase;
use crate::error::Result;
use std::sync::Arc;
use tokio::time::{interval, Duration};
use tracing::{error, info};
/// Background service that periodically deletes expired analytics data
/// (minute/hour aggregates and error events) according to a retention policy.
pub struct RetentionService {
/// Database handle the cleanup queries run against.
db: AnalyticsDatabase,
/// Retention windows (in days) and the cleanup schedule.
config: RetentionConfig,
}
impl RetentionService {
/// Creates a new retention service over the given database and policy.
#[must_use]
pub const fn new(db: AnalyticsDatabase, config: RetentionConfig) -> Self {
Self { db, config }
}

/// Runs the periodic cleanup loop; never returns.
///
/// The first cleanup pass runs immediately (tokio's `interval` yields its
/// first tick at once), then repeats every `cleanup_interval_hours`.
/// Cleanup errors are logged and the loop continues.
pub async fn start(self: Arc<Self>) {
info!("Starting data retention service");
// Clamp the period to at least 1 second: `tokio::time::interval`
// panics on a zero duration, which a misconfigured
// `cleanup_interval_hours == 0` would otherwise produce.
let interval_seconds = (u64::from(self.config.cleanup_interval_hours) * 3600).max(1);
let mut interval = interval(Duration::from_secs(interval_seconds));
loop {
interval.tick().await;
if let Err(e) = self.run_cleanup().await {
error!("Error running data cleanup: {e}");
}
}
}

/// Performs one cleanup pass: prunes each table past its retention
/// window, then vacuums the database.
///
/// # Errors
///
/// Propagates the first database error encountered; later stages are
/// skipped once an earlier one fails.
async fn run_cleanup(&self) -> Result<()> {
info!("Running analytics data cleanup");
let deleted = self.db.cleanup_minute_aggregates(self.config.minute_aggregates_days).await?;
info!("Deleted {deleted} old minute aggregates");
let deleted = self.db.cleanup_hour_aggregates(self.config.hour_aggregates_days).await?;
info!("Deleted {deleted} old hour aggregates");
let deleted = self.db.cleanup_error_events(self.config.error_events_days).await?;
info!("Deleted {deleted} old error events");
self.db.vacuum().await?;
info!("Data cleanup completed successfully");
Ok(())
}

/// Runs a single cleanup pass on demand, outside the periodic schedule.
///
/// # Errors
///
/// Returns any error from the underlying cleanup.
pub async fn trigger_cleanup(&self) -> Result<()> {
self.run_cleanup().await
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::Path;

/// A manually triggered cleanup against a fresh in-memory database
/// should complete without error.
#[tokio::test]
async fn test_retention_service_creation() {
let retention = RetentionConfig::default();
let database = AnalyticsDatabase::new(Path::new(":memory:")).await.unwrap();
database.run_migrations().await.unwrap();
RetentionService::new(database, retention)
.trigger_cleanup()
.await
.unwrap();
}
}