// amaters_core/storage/backup.rs
1//! Backup and restore functionality for the storage engine
2//!
3//! Provides full backup creation, restoration, integrity verification,
4//! and backup lifecycle management. Backups are stored as direct file
5//! copies (no compression/archiving) with CRC32 checksums for integrity.
6
7use crate::error::{AmateRSError, ErrorContext, Result};
8use chrono::{DateTime, Utc};
9use serde::{Deserialize, Serialize};
10use std::fs;
11use std::io::Read;
12use std::path::{Path, PathBuf};
13
/// Backup metadata persisted alongside backup data.
///
/// Written as pretty-printed JSON to `metadata.json` inside the backup's
/// directory, and read back when listing, verifying, or restoring.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupMetadata {
    /// Unique identifier for this backup (also the name of its subdirectory
    /// under the backup root)
    pub backup_id: String,
    /// Timestamp when the backup was created
    pub created_at: DateTime<Utc>,
    /// Source directory that was backed up
    pub source_dir: PathBuf,
    /// Total number of files in the backup
    pub total_files: usize,
    /// Total size in bytes of all backed-up files
    pub total_bytes: u64,
    /// CRC32 checksum of all files, covering relative paths as well as file
    /// contents, computed over files in sorted order for determinism
    pub checksum: u32,
    /// Type of backup (full or incremental)
    pub backup_type: BackupType,
    /// Software version at time of backup (taken from CARGO_PKG_VERSION)
    pub version: String,
}
34
35/// Type of backup
36#[derive(Debug, Clone, Serialize, Deserialize)]
37pub enum BackupType {
38    /// Full backup containing all data
39    Full,
40    /// Incremental backup relative to a base backup
41    Incremental {
42        /// ID of the base backup this increment builds upon
43        base_backup_id: String,
44    },
45}
46
/// Manages backup creation, restoration, and lifecycle.
///
/// Each backup lives in its own subdirectory of `backup_dir`, named by the
/// backup's ID, containing a `data/` tree and a `metadata.json` file.
pub struct BackupManager {
    /// Root directory where all backups are stored
    backup_dir: PathBuf,
}
52
53impl BackupManager {
54    /// Create a new BackupManager with the given backup storage directory.
55    ///
56    /// Creates the directory if it does not exist.
57    pub fn new(backup_dir: impl AsRef<Path>) -> Result<Self> {
58        let backup_dir = backup_dir.as_ref().to_path_buf();
59        fs::create_dir_all(&backup_dir).map_err(|e| {
60            AmateRSError::IoError(ErrorContext::new(format!(
61                "Failed to create backup directory '{}': {}",
62                backup_dir.display(),
63                e
64            )))
65        })?;
66        Ok(Self { backup_dir })
67    }
68
69    /// Create a full backup of the source data directory.
70    ///
71    /// Copies all files recursively from `source_dir` into a new backup
72    /// subdirectory, calculates a CRC32 checksum, and writes metadata.
73    pub fn create_backup(&self, source_dir: &Path) -> Result<BackupMetadata> {
74        if !source_dir.exists() {
75            return Err(AmateRSError::ValidationError(ErrorContext::new(format!(
76                "Source directory '{}' does not exist",
77                source_dir.display()
78            ))));
79        }
80
81        let backup_id = uuid::Uuid::new_v4().to_string();
82        let backup_path = self.backup_dir.join(&backup_id);
83        let data_path = backup_path.join("data");
84
85        fs::create_dir_all(&data_path).map_err(|e| {
86            AmateRSError::IoError(ErrorContext::new(format!(
87                "Failed to create backup data directory: {}",
88                e
89            )))
90        })?;
91
92        let (total_files, total_bytes) = copy_dir_recursive(source_dir, &data_path)?;
93        let checksum = calculate_dir_checksum(&data_path)?;
94
95        let metadata = BackupMetadata {
96            backup_id: backup_id.clone(),
97            created_at: Utc::now(),
98            source_dir: source_dir.to_path_buf(),
99            total_files,
100            total_bytes,
101            checksum,
102            backup_type: BackupType::Full,
103            version: env!("CARGO_PKG_VERSION").to_string(),
104        };
105
106        let metadata_path = backup_path.join("metadata.json");
107        let metadata_json = serde_json::to_string_pretty(&metadata).map_err(|e| {
108            AmateRSError::SerializationError(ErrorContext::new(format!(
109                "Failed to serialize backup metadata: {}",
110                e
111            )))
112        })?;
113        fs::write(&metadata_path, metadata_json).map_err(|e| {
114            AmateRSError::IoError(ErrorContext::new(format!(
115                "Failed to write backup metadata: {}",
116                e
117            )))
118        })?;
119
120        Ok(metadata)
121    }
122
123    /// Restore a backup to the given target directory.
124    ///
125    /// Verifies backup integrity before restoring. If `target_dir` already
126    /// exists, it is cleared first.
127    pub fn restore_backup(&self, backup_id: &str, target_dir: &Path) -> Result<BackupMetadata> {
128        let backup_path = self.backup_dir.join(backup_id);
129        if !backup_path.exists() {
130            return Err(AmateRSError::ValidationError(ErrorContext::new(format!(
131                "Backup '{}' does not exist",
132                backup_id
133            ))));
134        }
135
136        let metadata = self.load_metadata(backup_id)?;
137
138        // Verify integrity before restoring
139        if !self.verify_backup(backup_id)? {
140            return Err(AmateRSError::StorageIntegrity(ErrorContext::new(format!(
141                "Backup '{}' failed integrity check",
142                backup_id
143            ))));
144        }
145
146        // Clear target directory if it exists
147        if target_dir.exists() {
148            fs::remove_dir_all(target_dir).map_err(|e| {
149                AmateRSError::IoError(ErrorContext::new(format!(
150                    "Failed to clear target directory '{}': {}",
151                    target_dir.display(),
152                    e
153                )))
154            })?;
155        }
156
157        fs::create_dir_all(target_dir).map_err(|e| {
158            AmateRSError::IoError(ErrorContext::new(format!(
159                "Failed to create target directory '{}': {}",
160                target_dir.display(),
161                e
162            )))
163        })?;
164
165        let data_path = backup_path.join("data");
166        copy_dir_recursive(&data_path, target_dir)?;
167
168        // Verify restored data matches backup checksum
169        let restored_checksum = calculate_dir_checksum(target_dir)?;
170        if restored_checksum != metadata.checksum {
171            return Err(AmateRSError::StorageIntegrity(ErrorContext::new(format!(
172                "Restored data checksum mismatch: expected {}, got {}",
173                metadata.checksum, restored_checksum
174            ))));
175        }
176
177        Ok(metadata)
178    }
179
180    /// List all available backups sorted by creation time (newest first).
181    pub fn list_backups(&self) -> Result<Vec<BackupMetadata>> {
182        let mut backups = Vec::new();
183
184        let entries = fs::read_dir(&self.backup_dir).map_err(|e| {
185            AmateRSError::IoError(ErrorContext::new(format!(
186                "Failed to read backup directory: {}",
187                e
188            )))
189        })?;
190
191        for entry in entries {
192            let entry = entry.map_err(|e| {
193                AmateRSError::IoError(ErrorContext::new(format!(
194                    "Failed to read directory entry: {}",
195                    e
196                )))
197            })?;
198
199            let path = entry.path();
200            if path.is_dir() {
201                let metadata_path = path.join("metadata.json");
202                if metadata_path.exists() {
203                    match self.load_metadata_from_path(&metadata_path) {
204                        Ok(meta) => backups.push(meta),
205                        Err(_) => {
206                            // Skip directories without valid metadata
207                            continue;
208                        }
209                    }
210                }
211            }
212        }
213
214        // Sort by creation time, newest first
215        backups.sort_by_key(|b| std::cmp::Reverse(b.created_at));
216
217        Ok(backups)
218    }
219
220    /// Delete a backup and all its data.
221    pub fn delete_backup(&self, backup_id: &str) -> Result<()> {
222        let backup_path = self.backup_dir.join(backup_id);
223        if !backup_path.exists() {
224            return Err(AmateRSError::ValidationError(ErrorContext::new(format!(
225                "Backup '{}' does not exist",
226                backup_id
227            ))));
228        }
229
230        fs::remove_dir_all(&backup_path).map_err(|e| {
231            AmateRSError::IoError(ErrorContext::new(format!(
232                "Failed to delete backup '{}': {}",
233                backup_id, e
234            )))
235        })?;
236
237        Ok(())
238    }
239
240    /// Verify backup integrity by recalculating the CRC32 checksum.
241    ///
242    /// Returns `true` if the checksum matches, `false` otherwise.
243    pub fn verify_backup(&self, backup_id: &str) -> Result<bool> {
244        let backup_path = self.backup_dir.join(backup_id);
245        if !backup_path.exists() {
246            return Err(AmateRSError::ValidationError(ErrorContext::new(format!(
247                "Backup '{}' does not exist",
248                backup_id
249            ))));
250        }
251
252        let metadata = self.load_metadata(backup_id)?;
253        let data_path = backup_path.join("data");
254
255        if !data_path.exists() {
256            return Ok(metadata.total_files == 0 && metadata.checksum == 0);
257        }
258
259        let current_checksum = calculate_dir_checksum(&data_path)?;
260        Ok(current_checksum == metadata.checksum)
261    }
262
263    /// Get the total size in bytes of a backup (data files only).
264    pub fn backup_size(&self, backup_id: &str) -> Result<u64> {
265        let backup_path = self.backup_dir.join(backup_id);
266        if !backup_path.exists() {
267            return Err(AmateRSError::ValidationError(ErrorContext::new(format!(
268                "Backup '{}' does not exist",
269                backup_id
270            ))));
271        }
272
273        let data_path = backup_path.join("data");
274        if !data_path.exists() {
275            return Ok(0);
276        }
277
278        calculate_dir_size(&data_path)
279    }
280
281    /// Load backup metadata from the standard location.
282    fn load_metadata(&self, backup_id: &str) -> Result<BackupMetadata> {
283        let metadata_path = self.backup_dir.join(backup_id).join("metadata.json");
284        self.load_metadata_from_path(&metadata_path)
285    }
286
287    /// Load backup metadata from an arbitrary path.
288    fn load_metadata_from_path(&self, path: &Path) -> Result<BackupMetadata> {
289        let content = fs::read_to_string(path).map_err(|e| {
290            AmateRSError::IoError(ErrorContext::new(format!(
291                "Failed to read metadata file '{}': {}",
292                path.display(),
293                e
294            )))
295        })?;
296
297        serde_json::from_str(&content).map_err(|e| {
298            AmateRSError::SerializationError(ErrorContext::new(format!(
299                "Failed to deserialize backup metadata: {}",
300                e
301            )))
302        })
303    }
304}
305
306/// Copy a directory recursively from `src` to `dst`.
307///
308/// Returns `(file_count, total_bytes)` of all files copied.
309fn copy_dir_recursive(src: &Path, dst: &Path) -> Result<(usize, u64)> {
310    let mut file_count = 0usize;
311    let mut total_bytes = 0u64;
312
313    if !src.exists() {
314        return Ok((0, 0));
315    }
316
317    fs::create_dir_all(dst).map_err(|e| {
318        AmateRSError::IoError(ErrorContext::new(format!(
319            "Failed to create directory '{}': {}",
320            dst.display(),
321            e
322        )))
323    })?;
324
325    let entries = fs::read_dir(src).map_err(|e| {
326        AmateRSError::IoError(ErrorContext::new(format!(
327            "Failed to read directory '{}': {}",
328            src.display(),
329            e
330        )))
331    })?;
332
333    for entry in entries {
334        let entry = entry.map_err(|e| {
335            AmateRSError::IoError(ErrorContext::new(format!(
336                "Failed to read directory entry: {}",
337                e
338            )))
339        })?;
340
341        let src_path = entry.path();
342        let file_name = entry.file_name();
343        let dst_path = dst.join(&file_name);
344
345        if src_path.is_dir() {
346            let (sub_files, sub_bytes) = copy_dir_recursive(&src_path, &dst_path)?;
347            file_count += sub_files;
348            total_bytes += sub_bytes;
349        } else if src_path.is_file() {
350            let bytes = fs::copy(&src_path, &dst_path).map_err(|e| {
351                AmateRSError::IoError(ErrorContext::new(format!(
352                    "Failed to copy '{}' -> '{}': {}",
353                    src_path.display(),
354                    dst_path.display(),
355                    e
356                )))
357            })?;
358            file_count += 1;
359            total_bytes += bytes;
360        }
361    }
362
363    Ok((file_count, total_bytes))
364}
365
366/// Calculate CRC32 checksum of all files in a directory.
367///
368/// Files are processed in sorted order (by relative path) for determinism.
369fn calculate_dir_checksum(dir: &Path) -> Result<u32> {
370    let mut paths = collect_file_paths(dir, dir)?;
371    paths.sort();
372
373    let mut hasher = crc32fast::Hasher::new();
374
375    for relative_path in &paths {
376        let full_path = dir.join(relative_path);
377
378        // Include the relative path in the checksum for structural integrity
379        hasher.update(relative_path.to_string_lossy().as_bytes());
380
381        let mut file = fs::File::open(&full_path).map_err(|e| {
382            AmateRSError::IoError(ErrorContext::new(format!(
383                "Failed to open file '{}' for checksum: {}",
384                full_path.display(),
385                e
386            )))
387        })?;
388
389        let mut buffer = [0u8; 8192];
390        loop {
391            let bytes_read = file.read(&mut buffer).map_err(|e| {
392                AmateRSError::IoError(ErrorContext::new(format!(
393                    "Failed to read file '{}' for checksum: {}",
394                    full_path.display(),
395                    e
396                )))
397            })?;
398
399            if bytes_read == 0 {
400                break;
401            }
402
403            hasher.update(&buffer[..bytes_read]);
404        }
405    }
406
407    Ok(hasher.finalize())
408}
409
410/// Collect all file paths relative to `base_dir` under `dir`.
411fn collect_file_paths(dir: &Path, base_dir: &Path) -> Result<Vec<PathBuf>> {
412    let mut paths = Vec::new();
413
414    if !dir.exists() {
415        return Ok(paths);
416    }
417
418    let entries = fs::read_dir(dir).map_err(|e| {
419        AmateRSError::IoError(ErrorContext::new(format!(
420            "Failed to read directory '{}': {}",
421            dir.display(),
422            e
423        )))
424    })?;
425
426    for entry in entries {
427        let entry = entry.map_err(|e| {
428            AmateRSError::IoError(ErrorContext::new(format!(
429                "Failed to read directory entry: {}",
430                e
431            )))
432        })?;
433
434        let path = entry.path();
435
436        if path.is_dir() {
437            let sub_paths = collect_file_paths(&path, base_dir)?;
438            paths.extend(sub_paths);
439        } else if path.is_file() {
440            let relative = path.strip_prefix(base_dir).map_err(|e| {
441                AmateRSError::ValidationError(ErrorContext::new(format!(
442                    "Failed to compute relative path: {}",
443                    e
444                )))
445            })?;
446            paths.push(relative.to_path_buf());
447        }
448    }
449
450    Ok(paths)
451}
452
453/// Calculate total size of all files in a directory recursively.
454fn calculate_dir_size(dir: &Path) -> Result<u64> {
455    let mut total = 0u64;
456
457    let entries = fs::read_dir(dir).map_err(|e| {
458        AmateRSError::IoError(ErrorContext::new(format!(
459            "Failed to read directory '{}': {}",
460            dir.display(),
461            e
462        )))
463    })?;
464
465    for entry in entries {
466        let entry = entry.map_err(|e| {
467            AmateRSError::IoError(ErrorContext::new(format!(
468                "Failed to read directory entry: {}",
469                e
470            )))
471        })?;
472
473        let path = entry.path();
474        if path.is_dir() {
475            total += calculate_dir_size(&path)?;
476        } else if path.is_file() {
477            let meta = fs::metadata(&path).map_err(|e| {
478                AmateRSError::IoError(ErrorContext::new(format!(
479                    "Failed to get file metadata '{}': {}",
480                    path.display(),
481                    e
482                )))
483            })?;
484            total += meta.len();
485        }
486    }
487
488    Ok(total)
489}
490
491/// Verify that a directory's contents match an expected CRC32 checksum.
492pub fn verify_directory(dir: &Path, expected_checksum: u32) -> Result<bool> {
493    let actual = calculate_dir_checksum(dir)?;
494    Ok(actual == expected_checksum)
495}
496
#[cfg(test)]
mod tests {
    use super::*;

    /// Create a unique temp directory for a test.
    ///
    /// Uses a UUID leaf component so parallel test runs never collide;
    /// errors during setup/cleanup are deliberately ignored (`.ok()`).
    fn test_dir(name: &str) -> PathBuf {
        let dir = std::env::temp_dir()
            .join("amaters_backup_tests")
            .join(name)
            .join(uuid::Uuid::new_v4().to_string());
        if dir.exists() {
            fs::remove_dir_all(&dir).ok();
        }
        fs::create_dir_all(&dir).ok();
        dir
    }

    /// Populate a directory with sample files for testing:
    /// two files at the root plus one nested file (3 files total).
    fn populate_source(dir: &Path) -> Result<()> {
        fs::create_dir_all(dir.join("subdir")).map_err(|e| {
            AmateRSError::IoError(ErrorContext::new(format!("populate_source: {}", e)))
        })?;

        fs::write(dir.join("file1.dat"), b"hello world").map_err(|e| {
            AmateRSError::IoError(ErrorContext::new(format!("populate_source: {}", e)))
        })?;

        fs::write(dir.join("file2.dat"), b"test data 1234567890").map_err(|e| {
            AmateRSError::IoError(ErrorContext::new(format!("populate_source: {}", e)))
        })?;

        fs::write(dir.join("subdir").join("nested.dat"), b"nested content").map_err(|e| {
            AmateRSError::IoError(ErrorContext::new(format!("populate_source: {}", e)))
        })?;

        Ok(())
    }

    #[test]
    fn test_create_full_backup() -> Result<()> {
        let root = test_dir("create_full");
        let source = root.join("source");
        let backups = root.join("backups");

        populate_source(&source)?;
        let manager = BackupManager::new(&backups)?;
        let meta = manager.create_backup(&source)?;

        // populate_source creates exactly 3 files
        assert_eq!(meta.total_files, 3);
        assert!(meta.total_bytes > 0);
        assert!(matches!(meta.backup_type, BackupType::Full));

        // Verify backup directory exists with data and metadata
        let backup_path = backups.join(&meta.backup_id);
        assert!(backup_path.join("data").exists());
        assert!(backup_path.join("metadata.json").exists());
        assert!(backup_path.join("data").join("file1.dat").exists());
        assert!(
            backup_path
                .join("data")
                .join("subdir")
                .join("nested.dat")
                .exists()
        );

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_restore_backup() -> Result<()> {
        let root = test_dir("restore");
        let source = root.join("source");
        let backups = root.join("backups");
        let restored = root.join("restored");

        populate_source(&source)?;
        let manager = BackupManager::new(&backups)?;
        let meta = manager.create_backup(&source)?;

        let restored_meta = manager.restore_backup(&meta.backup_id, &restored)?;
        assert_eq!(restored_meta.backup_id, meta.backup_id);

        // Verify restored files match original
        let original_content = fs::read(source.join("file1.dat"))
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("read: {}", e))))?;
        let restored_content = fs::read(restored.join("file1.dat"))
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("read: {}", e))))?;
        assert_eq!(original_content, restored_content);

        // Nested files must survive the round trip too
        let nested_original = fs::read(source.join("subdir").join("nested.dat"))
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("read: {}", e))))?;
        let nested_restored = fs::read(restored.join("subdir").join("nested.dat"))
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("read: {}", e))))?;
        assert_eq!(nested_original, nested_restored);

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_list_backups() -> Result<()> {
        let root = test_dir("list");
        let source = root.join("source");
        let backups = root.join("backups");

        populate_source(&source)?;
        let manager = BackupManager::new(&backups)?;

        // Create multiple backups
        let _meta1 = manager.create_backup(&source)?;
        let _meta2 = manager.create_backup(&source)?;
        let _meta3 = manager.create_backup(&source)?;

        let list = manager.list_backups()?;
        assert_eq!(list.len(), 3);

        // Should be sorted newest first (>= because timestamps may be equal
        // when backups are created in quick succession)
        assert!(list[0].created_at >= list[1].created_at);
        assert!(list[1].created_at >= list[2].created_at);

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_delete_backup() -> Result<()> {
        let root = test_dir("delete");
        let source = root.join("source");
        let backups = root.join("backups");

        populate_source(&source)?;
        let manager = BackupManager::new(&backups)?;
        let meta = manager.create_backup(&source)?;

        assert_eq!(manager.list_backups()?.len(), 1);

        manager.delete_backup(&meta.backup_id)?;

        assert_eq!(manager.list_backups()?.len(), 0);

        // Deleting non-existent backup should error
        let result = manager.delete_backup("nonexistent");
        assert!(result.is_err());

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_verify_backup() -> Result<()> {
        let root = test_dir("verify");
        let source = root.join("source");
        let backups = root.join("backups");

        populate_source(&source)?;
        let manager = BackupManager::new(&backups)?;
        let meta = manager.create_backup(&source)?;

        // Should pass verification
        assert!(manager.verify_backup(&meta.backup_id)?);

        // Corrupt a file and verify should fail
        let corrupt_path = backups.join(&meta.backup_id).join("data").join("file1.dat");
        fs::write(&corrupt_path, b"corrupted!")
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("write: {}", e))))?;

        assert!(!manager.verify_backup(&meta.backup_id)?);

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_backup_with_data() -> Result<()> {
        let root = test_dir("with_data");
        let source = root.join("source");
        let backups = root.join("backups");
        let restored = root.join("restored");

        // Create source with binary data simulating SSTable/WAL content
        fs::create_dir_all(source.join("wal"))
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("mkdir: {}", e))))?;
        fs::create_dir_all(source.join("sstables").join("L0"))
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("mkdir: {}", e))))?;

        let wal_data: Vec<u8> = (0..256).map(|i| (i % 256) as u8).collect();
        fs::write(source.join("wal").join("000001.wal"), &wal_data)
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("write: {}", e))))?;

        let sst_data: Vec<u8> = (0..1024).map(|i| ((i * 7) % 256) as u8).collect();
        fs::write(
            source.join("sstables").join("L0").join("table_001.sst"),
            &sst_data,
        )
        .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("write: {}", e))))?;

        let manager = BackupManager::new(&backups)?;
        let meta = manager.create_backup(&source)?;

        assert_eq!(meta.total_files, 2);

        // Restore and verify binary content byte-for-byte
        manager.restore_backup(&meta.backup_id, &restored)?;

        let restored_wal = fs::read(restored.join("wal").join("000001.wal"))
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("read: {}", e))))?;
        assert_eq!(restored_wal, wal_data);

        let restored_sst = fs::read(restored.join("sstables").join("L0").join("table_001.sst"))
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("read: {}", e))))?;
        assert_eq!(restored_sst, sst_data);

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_backup_metadata_serialization() -> Result<()> {
        let meta = BackupMetadata {
            backup_id: "test-id-123".to_string(),
            created_at: Utc::now(),
            source_dir: PathBuf::from("/tmp/source"),
            total_files: 42,
            total_bytes: 123456,
            checksum: 0xDEAD_BEEF,
            backup_type: BackupType::Full,
            version: "0.2.0".to_string(),
        };

        let json = serde_json::to_string(&meta).map_err(|e| {
            AmateRSError::SerializationError(ErrorContext::new(format!("serialize: {}", e)))
        })?;

        let deserialized: BackupMetadata = serde_json::from_str(&json).map_err(|e| {
            AmateRSError::SerializationError(ErrorContext::new(format!("deserialize: {}", e)))
        })?;

        assert_eq!(deserialized.backup_id, meta.backup_id);
        assert_eq!(deserialized.total_files, meta.total_files);
        assert_eq!(deserialized.total_bytes, meta.total_bytes);
        assert_eq!(deserialized.checksum, meta.checksum);
        assert!(matches!(deserialized.backup_type, BackupType::Full));

        // Test incremental variant (struct-update reuses the fields above)
        let incremental_meta = BackupMetadata {
            backup_type: BackupType::Incremental {
                base_backup_id: "base-123".to_string(),
            },
            ..meta
        };

        let json2 = serde_json::to_string(&incremental_meta).map_err(|e| {
            AmateRSError::SerializationError(ErrorContext::new(format!("serialize: {}", e)))
        })?;

        let deser2: BackupMetadata = serde_json::from_str(&json2).map_err(|e| {
            AmateRSError::SerializationError(ErrorContext::new(format!("deserialize: {}", e)))
        })?;

        if let BackupType::Incremental { base_backup_id } = &deser2.backup_type {
            assert_eq!(base_backup_id, "base-123");
        } else {
            return Err(AmateRSError::ValidationError(ErrorContext::new(
                "Expected Incremental backup type",
            )));
        }

        Ok(())
    }

    #[test]
    fn test_backup_empty_database() -> Result<()> {
        let root = test_dir("empty_db");
        let source = root.join("source");
        let backups = root.join("backups");
        let restored = root.join("restored");

        // Create an empty source directory
        fs::create_dir_all(&source)
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("mkdir: {}", e))))?;

        let manager = BackupManager::new(&backups)?;
        let meta = manager.create_backup(&source)?;

        assert_eq!(meta.total_files, 0);
        assert_eq!(meta.total_bytes, 0);

        // Verify and restore empty backup
        assert!(manager.verify_backup(&meta.backup_id)?);
        manager.restore_backup(&meta.backup_id, &restored)?;

        assert!(restored.exists());

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_restore_to_existing_directory() -> Result<()> {
        let root = test_dir("restore_existing");
        let source = root.join("source");
        let backups = root.join("backups");
        let target = root.join("target");

        populate_source(&source)?;

        // Pre-create target with different content
        fs::create_dir_all(&target)
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("mkdir: {}", e))))?;
        fs::write(target.join("old_file.txt"), b"old content")
            .map_err(|e| AmateRSError::IoError(ErrorContext::new(format!("write: {}", e))))?;

        let manager = BackupManager::new(&backups)?;
        let meta = manager.create_backup(&source)?;

        // Restore should clear existing content
        manager.restore_backup(&meta.backup_id, &target)?;

        // Old file should be gone
        assert!(!target.join("old_file.txt").exists());

        // New files should be present
        assert!(target.join("file1.dat").exists());
        assert!(target.join("file2.dat").exists());
        assert!(target.join("subdir").join("nested.dat").exists());

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_backup_size() -> Result<()> {
        let root = test_dir("backup_size");
        let source = root.join("source");
        let backups = root.join("backups");

        populate_source(&source)?;
        let manager = BackupManager::new(&backups)?;
        let meta = manager.create_backup(&source)?;

        // Reported size must agree with the metadata recorded at creation
        let size = manager.backup_size(&meta.backup_id)?;
        assert_eq!(size, meta.total_bytes);

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_verify_directory_helper() -> Result<()> {
        let root = test_dir("verify_dir");
        let source = root.join("source");

        populate_source(&source)?;

        // Matching checksum passes; any other value fails
        let checksum = calculate_dir_checksum(&source)?;
        assert!(verify_directory(&source, checksum)?);
        assert!(!verify_directory(&source, checksum.wrapping_add(1))?);

        fs::remove_dir_all(&root).ok();
        Ok(())
    }

    #[test]
    fn test_restore_nonexistent_backup() {
        let root = test_dir("restore_nonexistent");
        let backups = root.join("backups");
        let target = root.join("target");

        let manager = BackupManager::new(&backups).expect("BackupManager creation should succeed");
        let result = manager.restore_backup("does-not-exist", &target);
        assert!(result.is_err());

        fs::remove_dir_all(&root).ok();
    }

    #[test]
    fn test_backup_nonexistent_source() {
        let root = test_dir("backup_nonexistent_source");
        let backups = root.join("backups");

        let manager = BackupManager::new(&backups).expect("BackupManager creation should succeed");
        let result = manager.create_backup(Path::new("/nonexistent/path/that/does/not/exist"));
        assert!(result.is_err());

        fs::remove_dir_all(&root).ok();
    }
}