use chrono::Utc;
use std::fs;
use std::path::PathBuf;
use tracing::{error, info, warn};
/// Guards persisted command history against destructive writes: takes a
/// timestamped backup before each write, validates that a write does not
/// wipe or drastically shrink the history, and can recover the contents of
/// the most recent backup.
pub struct HistoryProtection {
    /// Directory holding `history_backup_*.json` snapshot files.
    backup_dir: PathBuf,
    /// The shrink-ratio check in `validate_write` only applies above this
    /// many existing entries.
    min_entries_threshold: usize,
}
impl HistoryProtection {
    /// Maximum number of backup files retained after cleanup.
    const MAX_BACKUPS: usize = 10;

    /// Creates the protection layer rooted at `backup_dir`, creating the
    /// directory if it does not exist yet.
    ///
    /// Directory-creation failure is logged rather than propagated so that
    /// construction never fails; a later `backup_before_write` will then
    /// report its own error when the write fails.
    #[must_use]
    pub fn new(backup_dir: PathBuf) -> Self {
        if !backup_dir.exists() {
            if let Err(e) = fs::create_dir_all(&backup_dir) {
                // Previously swallowed with `let _ =`; surface it instead.
                warn!("Failed to create backup dir {:?}: {}", backup_dir, e);
            }
        }
        Self {
            backup_dir,
            min_entries_threshold: 5,
        }
    }

    /// Snapshots `current_data` into a timestamped backup file, then prunes
    /// old backups down to `MAX_BACKUPS`.
    ///
    /// NOTE(review): two backups taken within the same second with the same
    /// entry count share a filename, so the later one overwrites the
    /// earlier — tolerable since their contents are then near-identical.
    pub fn backup_before_write(&self, current_data: &str, entry_count: usize) {
        let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
        let backup_file = self.backup_dir.join(format!(
            "history_backup_{timestamp}_entries_{entry_count}.json"
        ));
        if let Err(e) = fs::write(&backup_file, current_data) {
            error!("Failed to create history backup: {}", e);
        } else {
            info!(
                "Created history backup: {:?} with {} entries",
                backup_file, entry_count
            );
        }
        self.cleanup_old_backups();
    }

    /// Decides whether a pending history write is safe.
    ///
    /// Returns `false` (blocking the write) when:
    /// - the new history is empty while the old one was not;
    /// - the history would shrink to less than half its previous size
    ///   (only enforced above `min_entries_threshold` entries);
    /// - the serialized payload is implausibly small (< 50 bytes) while
    ///   entries previously existed.
    ///
    /// A reduction of more than 5 entries that passes the checks above is
    /// allowed but logged as a warning.
    pub fn validate_write(&self, old_entries: usize, new_entries: usize, new_data: &str) -> bool {
        if new_entries == 0 && old_entries > 0 {
            error!(
                "BLOCKED: Attempted to write empty history (had {} entries)",
                old_entries
            );
            return false;
        }
        if new_entries < old_entries / 2 && old_entries > self.min_entries_threshold {
            error!(
                "BLOCKED: History would shrink too much ({} -> {})",
                old_entries, new_entries
            );
            return false;
        }
        if new_data.len() < 50 && old_entries > 0 {
            error!("BLOCKED: History data too small ({} bytes)", new_data.len());
            return false;
        }
        if new_entries < old_entries && (old_entries - new_entries) > 5 {
            warn!(
                "History reduction detected: {} -> {} entries",
                old_entries, new_entries
            );
        }
        true
    }

    /// Lists all `history_backup_*` files in `backup_dir`, sorted
    /// oldest-first by modification time. Shared by cleanup and recovery so
    /// the two listings cannot drift apart.
    fn sorted_backups(&self) -> Vec<fs::DirEntry> {
        let mut backups: Vec<_> = match fs::read_dir(&self.backup_dir) {
            Ok(entries) => entries
                .filter_map(std::result::Result::ok)
                .filter(|e| {
                    e.file_name()
                        .to_string_lossy()
                        .starts_with("history_backup_")
                })
                .collect(),
            // An unreadable directory means there is nothing to prune/recover.
            Err(_) => return Vec::new(),
        };
        // Files whose mtime cannot be read sort first (None < Some), so they
        // are pruned earliest.
        backups.sort_by_key(|e| e.metadata().ok().and_then(|m| m.modified().ok()));
        backups
    }

    /// Removes the oldest backups until at most `MAX_BACKUPS` remain.
    fn cleanup_old_backups(&self) {
        let backups = self.sorted_backups();
        if backups.len() > Self::MAX_BACKUPS {
            for backup in backups.iter().take(backups.len() - Self::MAX_BACKUPS) {
                if let Err(e) = fs::remove_file(backup.path()) {
                    warn!("Failed to remove old backup: {}", e);
                }
            }
        }
    }

    /// Returns the contents of the most recent backup, or `None` when no
    /// backup exists or the latest one cannot be read.
    pub fn recover_from_backup(&self) -> Option<String> {
        let backups = self.sorted_backups();
        let latest = backups.last()?;
        match fs::read_to_string(latest.path()) {
            Ok(content) => {
                info!("Recovered history from backup: {:?}", latest.path());
                Some(content)
            }
            Err(e) => {
                error!("Failed to read backup: {}", e);
                None
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// An empty write must be rejected when entries existed, but is fine on
    /// a fresh (empty) history.
    #[test]
    fn test_never_write_empty() {
        let dir = TempDir::new().unwrap();
        let guard = HistoryProtection::new(dir.path().to_path_buf());
        // 10 entries -> 0 entries: blocked.
        assert!(!guard.validate_write(10, 0, ""));
        // 0 -> 0: nothing to lose, allowed.
        assert!(guard.validate_write(0, 0, ""));
    }

    /// Shrinking below half the previous size is blocked; a moderate
    /// reduction with a plausible payload passes.
    #[test]
    fn test_prevent_massive_shrink() {
        let dir = TempDir::new().unwrap();
        let guard = HistoryProtection::new(dir.path().to_path_buf());
        assert!(!guard.validate_write(100, 40, "some data"));
        let payload = r#"[{"command": "SELECT * FROM users", "timestamp": "2025-01-01"}]"#;
        assert!(guard.validate_write(100, 80, payload));
    }
}