// sql_cli/history_protection.rs

1use chrono::Utc;
2use std::fs;
3use std::path::PathBuf;
4use tracing::{error, info, warn};
5
/// Protection layer for history to prevent data loss
///
/// Keeps timestamped `history_backup_*.json` copies of the history file in a
/// backup directory and vetoes writes that look like data loss (empty or
/// heavily shrunken payloads). See `validate_write` for the exact rules.
pub struct HistoryProtection {
    // Directory where timestamped backup files are written and pruned.
    backup_dir: PathBuf,
    // Entry count above which the 50%-shrink rule in `validate_write` applies.
    min_entries_threshold: usize,
}
11
12impl HistoryProtection {
13    pub fn new(backup_dir: PathBuf) -> Self {
14        // Create backup directory if it doesn't exist
15        if !backup_dir.exists() {
16            let _ = fs::create_dir_all(&backup_dir);
17        }
18
19        Self {
20            backup_dir,
21            min_entries_threshold: 5, // Never allow history to shrink below 5 entries
22        }
23    }
24
25    /// Create a backup of current history before any write operation
26    pub fn backup_before_write(&self, current_data: &str, entry_count: usize) {
27        let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
28        let backup_file = self.backup_dir.join(format!(
29            "history_backup_{}_entries_{}.json",
30            timestamp, entry_count
31        ));
32
33        if let Err(e) = fs::write(&backup_file, current_data) {
34            error!("Failed to create history backup: {}", e);
35        } else {
36            info!(
37                "Created history backup: {:?} with {} entries",
38                backup_file, entry_count
39            );
40        }
41
42        // Keep only last 10 backups
43        self.cleanup_old_backups();
44    }
45
46    /// Validate that new history data is safe to write
47    pub fn validate_write(&self, old_entries: usize, new_entries: usize, new_data: &str) -> bool {
48        // Rule 1: Never write empty history
49        if new_entries == 0 && old_entries > 0 {
50            error!(
51                "BLOCKED: Attempted to write empty history (had {} entries)",
52                old_entries
53            );
54            return false;
55        }
56
57        // Rule 2: Never shrink by more than 50% (unless deduplication)
58        if new_entries < old_entries / 2 && old_entries > self.min_entries_threshold {
59            error!(
60                "BLOCKED: History would shrink too much ({} -> {})",
61                old_entries, new_entries
62            );
63            return false;
64        }
65
66        // Rule 3: Never write if data is suspiciously small
67        if new_data.len() < 50 && old_entries > 0 {
68            error!("BLOCKED: History data too small ({} bytes)", new_data.len());
69            return false;
70        }
71
72        // Rule 4: Warn if significant reduction
73        if new_entries < old_entries && (old_entries - new_entries) > 5 {
74            warn!(
75                "History reduction detected: {} -> {} entries",
76                old_entries, new_entries
77            );
78        }
79
80        true
81    }
82
83    /// Clean up old backups, keeping only the most recent ones
84    fn cleanup_old_backups(&self) {
85        if let Ok(entries) = fs::read_dir(&self.backup_dir) {
86            let mut backups: Vec<_> = entries
87                .filter_map(|e| e.ok())
88                .filter(|e| {
89                    e.file_name()
90                        .to_string_lossy()
91                        .starts_with("history_backup_")
92                })
93                .collect();
94
95            // Sort by modification time
96            backups.sort_by_key(|e| e.metadata().ok().and_then(|m| m.modified().ok()));
97
98            // Remove all but the last 10
99            if backups.len() > 10 {
100                for backup in backups.iter().take(backups.len() - 10) {
101                    if let Err(e) = fs::remove_file(backup.path()) {
102                        warn!("Failed to remove old backup: {}", e);
103                    }
104                }
105            }
106        }
107    }
108
109    /// Attempt to recover from the most recent backup
110    pub fn recover_from_backup(&self) -> Option<String> {
111        if let Ok(entries) = fs::read_dir(&self.backup_dir) {
112            let mut backups: Vec<_> = entries
113                .filter_map(|e| e.ok())
114                .filter(|e| {
115                    e.file_name()
116                        .to_string_lossy()
117                        .starts_with("history_backup_")
118                })
119                .collect();
120
121            // Sort by modification time (newest last)
122            backups.sort_by_key(|e| e.metadata().ok().and_then(|m| m.modified().ok()));
123
124            // Try to read the most recent backup
125            if let Some(latest) = backups.last() {
126                match fs::read_to_string(latest.path()) {
127                    Ok(content) => {
128                        info!("Recovered history from backup: {:?}", latest.path());
129                        return Some(content);
130                    }
131                    Err(e) => {
132                        error!("Failed to read backup: {}", e);
133                    }
134                }
135            }
136        }
137        None
138    }
139}
140
141#[cfg(test)]
142mod tests {
143    use super::*;
144    use tempfile::TempDir;
145
146    #[test]
147    fn test_never_write_empty() {
148        let temp_dir = TempDir::new().unwrap();
149        let protection = HistoryProtection::new(temp_dir.path().to_path_buf());
150
151        // Should block empty write when we had entries
152        assert!(!protection.validate_write(10, 0, ""));
153
154        // Should allow if we never had entries
155        assert!(protection.validate_write(0, 0, ""));
156    }
157
158    #[test]
159    fn test_prevent_massive_shrink() {
160        let temp_dir = TempDir::new().unwrap();
161        let protection = HistoryProtection::new(temp_dir.path().to_path_buf());
162
163        // Should block if shrinking by more than 50%
164        assert!(!protection.validate_write(100, 40, "some data"));
165
166        // Should allow reasonable shrink (deduplication)
167        // Need at least 50 chars of data
168        let valid_data = r#"[{"command": "SELECT * FROM users", "timestamp": "2025-01-01"}]"#;
169        assert!(protection.validate_write(100, 80, valid_data));
170    }
171}