// sql_cli/history_protection.rs
use chrono::Utc;
2use std::fs;
3use std::path::PathBuf;
4use tracing::{error, info, warn};
5
/// Guards the query-history file against destructive writes by taking
/// timestamped backups before each write and validating that a write does
/// not wipe or drastically shrink the stored history.
pub struct HistoryProtection {
    // Directory where timestamped `history_backup_*.json` files are kept.
    backup_dir: PathBuf,
    // Shrink protection only kicks in once the existing history holds more
    // than this many entries (small histories may legitimately fluctuate).
    min_entries_threshold: usize,
}
11
12impl HistoryProtection {
13 pub fn new(backup_dir: PathBuf) -> Self {
14 if !backup_dir.exists() {
16 let _ = fs::create_dir_all(&backup_dir);
17 }
18
19 Self {
20 backup_dir,
21 min_entries_threshold: 5, }
23 }
24
25 pub fn backup_before_write(&self, current_data: &str, entry_count: usize) {
27 let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
28 let backup_file = self.backup_dir.join(format!(
29 "history_backup_{}_entries_{}.json",
30 timestamp, entry_count
31 ));
32
33 if let Err(e) = fs::write(&backup_file, current_data) {
34 error!("Failed to create history backup: {}", e);
35 } else {
36 info!(
37 "Created history backup: {:?} with {} entries",
38 backup_file, entry_count
39 );
40 }
41
42 self.cleanup_old_backups();
44 }
45
46 pub fn validate_write(&self, old_entries: usize, new_entries: usize, new_data: &str) -> bool {
48 if new_entries == 0 && old_entries > 0 {
50 error!(
51 "BLOCKED: Attempted to write empty history (had {} entries)",
52 old_entries
53 );
54 return false;
55 }
56
57 if new_entries < old_entries / 2 && old_entries > self.min_entries_threshold {
59 error!(
60 "BLOCKED: History would shrink too much ({} -> {})",
61 old_entries, new_entries
62 );
63 return false;
64 }
65
66 if new_data.len() < 50 && old_entries > 0 {
68 error!("BLOCKED: History data too small ({} bytes)", new_data.len());
69 return false;
70 }
71
72 if new_entries < old_entries && (old_entries - new_entries) > 5 {
74 warn!(
75 "History reduction detected: {} -> {} entries",
76 old_entries, new_entries
77 );
78 }
79
80 true
81 }
82
83 fn cleanup_old_backups(&self) {
85 if let Ok(entries) = fs::read_dir(&self.backup_dir) {
86 let mut backups: Vec<_> = entries
87 .filter_map(|e| e.ok())
88 .filter(|e| {
89 e.file_name()
90 .to_string_lossy()
91 .starts_with("history_backup_")
92 })
93 .collect();
94
95 backups.sort_by_key(|e| e.metadata().ok().and_then(|m| m.modified().ok()));
97
98 if backups.len() > 10 {
100 for backup in backups.iter().take(backups.len() - 10) {
101 if let Err(e) = fs::remove_file(backup.path()) {
102 warn!("Failed to remove old backup: {}", e);
103 }
104 }
105 }
106 }
107 }
108
109 pub fn recover_from_backup(&self) -> Option<String> {
111 if let Ok(entries) = fs::read_dir(&self.backup_dir) {
112 let mut backups: Vec<_> = entries
113 .filter_map(|e| e.ok())
114 .filter(|e| {
115 e.file_name()
116 .to_string_lossy()
117 .starts_with("history_backup_")
118 })
119 .collect();
120
121 backups.sort_by_key(|e| e.metadata().ok().and_then(|m| m.modified().ok()));
123
124 if let Some(latest) = backups.last() {
126 match fs::read_to_string(latest.path()) {
127 Ok(content) => {
128 info!("Recovered history from backup: {:?}", latest.path());
129 return Some(content);
130 }
131 Err(e) => {
132 error!("Failed to read backup: {}", e);
133 }
134 }
135 }
136 }
137 None
138 }
139}
140
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    // Builds a protection instance backed by a throwaway directory. The
    // TempDir is returned alongside it so the directory outlives the test.
    fn fixture() -> (TempDir, HistoryProtection) {
        let dir = TempDir::new().unwrap();
        let guard = HistoryProtection::new(dir.path().to_path_buf());
        (dir, guard)
    }

    #[test]
    fn test_never_write_empty() {
        let (_dir, guard) = fixture();

        // Replacing a populated history with nothing must be rejected.
        assert!(!guard.validate_write(10, 0, ""));

        // An empty write over an already-empty history is acceptable.
        assert!(guard.validate_write(0, 0, ""));
    }

    #[test]
    fn test_prevent_massive_shrink() {
        let (_dir, guard) = fixture();

        // Losing more than half the entries (100 -> 40) is blocked.
        assert!(!guard.validate_write(100, 40, "some data"));

        // A moderate reduction (100 -> 80) with a plausible payload passes.
        let valid_data = r#"[{"command": "SELECT * FROM users", "timestamp": "2025-01-01"}]"#;
        assert!(guard.validate_write(100, 80, valid_data));
    }
}