// torsh_package/src/backup.rs
1//! Backup and Recovery System
2//!
3//! This module provides comprehensive backup and recovery capabilities for packages,
4//! including automated backup scheduling, multiple backup strategies, integrity
5//! verification, point-in-time recovery, and disaster recovery procedures.
6//!
7//! # Features
8//!
9//! - **Backup Strategies**: Full, incremental, and differential backups
10//! - **Automated Scheduling**: Configurable backup schedules with retention policies
11//! - **Integrity Verification**: SHA-256 checksums and backup validation
12//! - **Point-in-Time Recovery**: Restore packages to specific timestamps
13//! - **Backup Rotation**: Automatic cleanup based on retention policies
14//! - **Multiple Destinations**: Support for local, cloud, and distributed backups
15//! - **Compression**: Optional backup compression for storage efficiency
16//! - **Encryption**: Optional backup encryption for security
17//! - **Recovery Testing**: Validate backup integrity through test restores
18//!
19//! # Examples
20//!
21//! ```rust
22//! use torsh_package::backup::{BackupManager, BackupConfig, BackupStrategy, RetentionPolicy};
23//! use std::path::PathBuf;
24//!
25//! // Create backup manager
26//! let config = BackupConfig {
27//!     destination: PathBuf::from("/backups"),
28//!     strategy: BackupStrategy::Incremental,
29//!     compression: true,
30//!     encryption: false,
31//!     retention: RetentionPolicy::KeepLast(7),
32//! };
33//!
34//! let mut manager = BackupManager::new(config);
35//!
36//! // Create a backup
37//! let backup_id = manager.create_backup("my-package", "1.0.0", b"package data").unwrap();
38//!
39//! // Restore from backup
40//! let restored = manager.restore_backup(&backup_id).unwrap();
41//! ```
42
43use chrono::{DateTime, Duration as ChronoDuration, Utc};
44use serde::{Deserialize, Serialize};
45use sha2::{Digest, Sha256};
46use std::collections::HashMap;
47use std::path::PathBuf;
48use torsh_core::error::TorshError;
49
/// Backup strategy type
///
/// Determines how much data each backup captures and which parent backup
/// (if any) it chains off — see how `create_backup` selects the parent.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum BackupStrategy {
    /// Full backup of entire package (standalone; no parent backup)
    Full,
    /// Incremental backup (only changes since the most recent backup of any strategy)
    Incremental,
    /// Differential backup (changes since the last *full* backup)
    Differential,
}
60
/// Retention policy for backup rotation
///
/// Evaluated by `apply_retention_policy` after every successful backup.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum RetentionPolicy {
    /// Keep backups for specified number of days (older ones are deleted)
    KeepDays(u32),
    /// Keep last N backups per package (across all versions of that package)
    KeepLast(usize),
    /// Keep all backups (no rotation)
    KeepAll,
    /// Custom Grandfather-Father-Son retention with daily, weekly, monthly tiers
    Custom {
        /// Number of daily backups to keep
        daily: u32,
        /// Number of weekly backups to keep
        weekly: u32,
        /// Number of monthly backups to keep
        monthly: u32,
    },
}
80
/// Backup destination type
///
/// NOTE(review): `BackupConfig.destination` is currently a plain `PathBuf`
/// and does not reference this enum — confirm whether the config is meant
/// to use `BackupDestination` for cloud targets.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum BackupDestination {
    /// Local filesystem path
    Local(PathBuf),
    /// S3-compatible storage
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region
        region: String,
        /// Object path within bucket
        path: String,
    },
    /// Google Cloud Storage
    Gcs {
        /// GCS bucket name
        bucket: String,
        /// Object path within bucket
        path: String,
    },
    /// Azure Blob Storage
    Azure {
        /// Azure container name
        container: String,
        /// Blob path within container
        path: String,
    },
}
110
/// Backup configuration
///
/// Passed to [`BackupManager::new`]; all fields apply to every backup the
/// manager subsequently creates.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupConfig {
    /// Backup destination (local path; see also `BackupDestination` for cloud targets)
    pub destination: PathBuf,
    /// Backup strategy used for each new backup
    pub strategy: BackupStrategy,
    /// Enable gzip compression of backup payloads
    pub compression: bool,
    /// Enable encryption of backup payloads (applied after compression)
    pub encryption: bool,
    /// Retention policy applied after every successful backup
    pub retention: RetentionPolicy,
}
125
/// Backup metadata
///
/// Recorded per backup at creation time and used by restore/verify.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupMetadata {
    /// Unique backup identifier (`<package>-<version>-<uuid>`)
    pub backup_id: String,
    /// Package ID
    pub package_id: String,
    /// Package version
    pub version: String,
    /// Backup strategy used
    pub strategy: BackupStrategy,
    /// Backup creation timestamp
    pub created_at: DateTime<Utc>,
    /// Size in bytes (uncompressed)
    pub size_bytes: u64,
    /// Compressed size in bytes (`None` when compression is disabled)
    pub compressed_size_bytes: Option<u64>,
    /// SHA-256 checksum of the raw input data (before compression/encryption)
    pub checksum: String,
    /// Parent backup ID (for incremental/differential; `None` for full backups)
    pub parent_backup_id: Option<String>,
    /// Compression enabled
    pub compressed: bool,
    /// Encryption enabled
    pub encrypted: bool,
    /// Additional free-form metadata
    pub metadata: HashMap<String, String>,
}
154
/// Backup verification result
///
/// Produced by [`BackupManager::verify_backup`]; `success` is true only
/// when no errors were recorded.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerificationResult {
    /// Backup ID
    pub backup_id: String,
    /// Verification successful (no errors at all)
    pub success: bool,
    /// Checksum of the restored payload matches metadata
    pub checksum_valid: bool,
    /// Backup payload could be loaded from storage
    pub readable: bool,
    /// Stored payload size matches metadata
    pub size_valid: bool,
    /// Verification errors (empty on success)
    pub errors: Vec<String>,
    /// Verification timestamp
    pub verified_at: DateTime<Utc>,
}
173
/// Recovery point for point-in-time recovery
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecoveryPoint {
    /// Recovery point ID (UUID v4)
    pub id: String,
    /// Package ID
    pub package_id: String,
    /// Package version
    pub version: String,
    /// Timestamp of recovery point
    pub timestamp: DateTime<Utc>,
    /// Backup IDs needed for recovery, ordered oldest-first (root of the
    /// parent chain first, latest backup last)
    pub backup_chain: Vec<String>,
    /// Human-readable description
    pub description: String,
}
190
/// Backup statistics
///
/// Recomputed from scratch after every backup creation and deletion.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BackupStatistics {
    /// Total backups currently tracked
    pub total_backups: usize,
    /// Full backups
    pub full_backups: usize,
    /// Incremental backups
    pub incremental_backups: usize,
    /// Differential backups
    pub differential_backups: usize,
    /// Total storage used (bytes, uncompressed sizes)
    pub total_storage_bytes: u64,
    /// Compressed storage used (bytes; only backups with compression count)
    pub compressed_storage_bytes: u64,
    /// Compression ratio (compressed / total, 0.0 - 1.0)
    pub compression_ratio: f64,
    /// Oldest backup timestamp
    pub oldest_backup: Option<DateTime<Utc>>,
    /// Newest backup timestamp
    pub newest_backup: Option<DateTime<Utc>>,
    /// Failed backups
    /// NOTE(review): never incremented by the current code — confirm whether
    /// failure tracking is planned or the field should be removed.
    pub failed_backups: usize,
}
215
/// Backup manager
///
/// Manages package backups including creation, verification, restoration,
/// and automated rotation based on retention policies.
pub struct BackupManager {
    /// Backup configuration (fixed at construction)
    config: BackupConfig,
    /// Backup metadata by backup ID
    backups: HashMap<String, BackupMetadata>,
    /// Recovery points (append-only)
    recovery_points: Vec<RecoveryPoint>,
    /// Statistics, recomputed after each create/delete
    statistics: BackupStatistics,
    /// Mock storage for backup data (in production, this would be on disk/cloud)
    backup_data: HashMap<String, Vec<u8>>,
}
232
233impl BackupManager {
234    /// Create a new backup manager
235    pub fn new(config: BackupConfig) -> Self {
236        Self {
237            config,
238            backups: HashMap::new(),
239            recovery_points: Vec::new(),
240            statistics: BackupStatistics::default(),
241            backup_data: HashMap::new(),
242        }
243    }
244
245    /// Create a backup
246    pub fn create_backup(
247        &mut self,
248        package_id: &str,
249        version: &str,
250        data: &[u8],
251    ) -> Result<String, TorshError> {
252        let backup_id = self.generate_backup_id(package_id, version);
253        let created_at = Utc::now();
254
255        // Calculate checksum
256        let checksum = self.calculate_checksum(data);
257
258        // Determine parent backup for incremental/differential
259        let parent_backup_id = match self.config.strategy {
260            BackupStrategy::Full => None,
261            BackupStrategy::Incremental => self.get_last_backup_id(package_id, version),
262            BackupStrategy::Differential => self.get_last_full_backup_id(package_id, version),
263        };
264
265        // Compress if enabled
266        let (final_data, compressed_size) = if self.config.compression {
267            let compressed = self.compress_data(data)?;
268            let size = compressed.len() as u64;
269            (compressed, Some(size))
270        } else {
271            (data.to_vec(), None)
272        };
273
274        // Encrypt if enabled
275        let final_data = if self.config.encryption {
276            self.encrypt_data(&final_data)?
277        } else {
278            final_data
279        };
280
281        // Store backup (mock implementation)
282        self.store_backup(&backup_id, &final_data)?;
283
284        // Create metadata
285        let metadata = BackupMetadata {
286            backup_id: backup_id.clone(),
287            package_id: package_id.to_string(),
288            version: version.to_string(),
289            strategy: self.config.strategy,
290            created_at,
291            size_bytes: data.len() as u64,
292            compressed_size_bytes: compressed_size,
293            checksum,
294            parent_backup_id,
295            compressed: self.config.compression,
296            encrypted: self.config.encryption,
297            metadata: HashMap::new(),
298        };
299
300        self.backups.insert(backup_id.clone(), metadata);
301
302        // Update statistics
303        self.update_statistics_after_backup();
304
305        // Apply retention policy
306        self.apply_retention_policy()?;
307
308        Ok(backup_id)
309    }
310
311    /// Restore from backup
312    pub fn restore_backup(&self, backup_id: &str) -> Result<Vec<u8>, TorshError> {
313        let metadata = self.backups.get(backup_id).ok_or_else(|| {
314            TorshError::InvalidArgument(format!("Backup {} not found", backup_id))
315        })?;
316
317        // Load backup data (mock implementation)
318        let mut data = self.load_backup(backup_id)?;
319
320        // Decrypt if needed
321        if metadata.encrypted {
322            data = self.decrypt_data(&data)?;
323        }
324
325        // Decompress if needed
326        if metadata.compressed {
327            data = self.decompress_data(&data)?;
328        }
329
330        // Verify checksum
331        let checksum = self.calculate_checksum(&data);
332        if checksum != metadata.checksum {
333            return Err(TorshError::RuntimeError(
334                "Backup checksum mismatch".to_string(),
335            ));
336        }
337
338        // Handle incremental/differential restoration
339        if let Some(parent_id) = &metadata.parent_backup_id {
340            let parent_data = self.restore_backup(parent_id)?;
341            data = self.merge_backup_data(&parent_data, &data)?;
342        }
343
344        Ok(data)
345    }
346
347    /// Verify backup integrity
348    pub fn verify_backup(&self, backup_id: &str) -> VerificationResult {
349        let metadata = match self.backups.get(backup_id) {
350            Some(m) => m,
351            None => {
352                return VerificationResult {
353                    backup_id: backup_id.to_string(),
354                    success: false,
355                    checksum_valid: false,
356                    readable: false,
357                    size_valid: false,
358                    errors: vec!["Backup not found".to_string()],
359                    verified_at: Utc::now(),
360                }
361            }
362        };
363
364        let mut errors = Vec::new();
365        let mut checksum_valid = false;
366        let mut readable = false;
367        let mut size_valid = false;
368
369        // Try to load backup
370        match self.load_backup(backup_id) {
371            Ok(data) => {
372                readable = true;
373
374                // Check size
375                let expected_size = if metadata.compressed {
376                    metadata
377                        .compressed_size_bytes
378                        .unwrap_or(metadata.size_bytes)
379                } else {
380                    metadata.size_bytes
381                };
382
383                if data.len() as u64 == expected_size {
384                    size_valid = true;
385                } else {
386                    errors.push(format!(
387                        "Size mismatch: expected {}, got {}",
388                        expected_size,
389                        data.len()
390                    ));
391                }
392
393                // Verify checksum (need to decompress/decrypt first)
394                match self.restore_backup(backup_id) {
395                    Ok(restored) => {
396                        let checksum = self.calculate_checksum(&restored);
397                        if checksum == metadata.checksum {
398                            checksum_valid = true;
399                        } else {
400                            errors.push("Checksum mismatch".to_string());
401                        }
402                    }
403                    Err(e) => {
404                        errors.push(format!("Restoration failed: {}", e));
405                    }
406                }
407            }
408            Err(e) => {
409                errors.push(format!("Failed to load backup: {}", e));
410            }
411        }
412
413        let success = errors.is_empty();
414
415        VerificationResult {
416            backup_id: backup_id.to_string(),
417            success,
418            checksum_valid,
419            readable,
420            size_valid,
421            errors,
422            verified_at: Utc::now(),
423        }
424    }
425
426    /// Create a recovery point
427    pub fn create_recovery_point(
428        &mut self,
429        package_id: &str,
430        version: &str,
431        description: String,
432    ) -> Result<String, TorshError> {
433        let id = uuid::Uuid::new_v4().to_string();
434
435        // Find all backups in the chain
436        let backup_chain = self.build_backup_chain(package_id, version)?;
437
438        let recovery_point = RecoveryPoint {
439            id: id.clone(),
440            package_id: package_id.to_string(),
441            version: version.to_string(),
442            timestamp: Utc::now(),
443            backup_chain,
444            description,
445        };
446
447        self.recovery_points.push(recovery_point);
448
449        Ok(id)
450    }
451
452    /// Restore to recovery point
453    pub fn restore_to_recovery_point(
454        &self,
455        recovery_point_id: &str,
456    ) -> Result<Vec<u8>, TorshError> {
457        let recovery_point = self
458            .recovery_points
459            .iter()
460            .find(|rp| rp.id == recovery_point_id)
461            .ok_or_else(|| {
462                TorshError::InvalidArgument(format!(
463                    "Recovery point {} not found",
464                    recovery_point_id
465                ))
466            })?;
467
468        // Restore from the last backup in the chain
469        if let Some(last_backup) = recovery_point.backup_chain.last() {
470            self.restore_backup(last_backup)
471        } else {
472            Err(TorshError::InvalidArgument(
473                "Recovery point has no backups".to_string(),
474            ))
475        }
476    }
477
478    /// List all backups for a package
479    pub fn list_backups(&self, package_id: &str) -> Vec<&BackupMetadata> {
480        self.backups
481            .values()
482            .filter(|m| m.package_id == package_id)
483            .collect()
484    }
485
486    /// Get backup statistics
487    pub fn get_statistics(&self) -> &BackupStatistics {
488        &self.statistics
489    }
490
491    /// Delete a backup
492    pub fn delete_backup(&mut self, backup_id: &str) -> Result<(), TorshError> {
493        self.backups.remove(backup_id).ok_or_else(|| {
494            TorshError::InvalidArgument(format!("Backup {} not found", backup_id))
495        })?;
496
497        // Remove backup data
498        self.backup_data.remove(backup_id);
499
500        // Update statistics
501        self.update_statistics();
502
503        Ok(())
504    }
505
506    /// Apply retention policy
507    pub fn apply_retention_policy(&mut self) -> Result<(), TorshError> {
508        let now = Utc::now();
509        let mut to_delete = Vec::new();
510
511        match self.config.retention {
512            RetentionPolicy::KeepDays(days) => {
513                let cutoff = now - ChronoDuration::days(days as i64);
514                for (id, metadata) in &self.backups {
515                    if metadata.created_at < cutoff {
516                        to_delete.push(id.clone());
517                    }
518                }
519            }
520            RetentionPolicy::KeepLast(count) => {
521                // Group backups by package
522                let mut by_package: HashMap<String, Vec<&BackupMetadata>> = HashMap::new();
523                for metadata in self.backups.values() {
524                    by_package
525                        .entry(metadata.package_id.clone())
526                        .or_insert_with(Vec::new)
527                        .push(metadata);
528                }
529
530                for backups in by_package.values_mut() {
531                    // Sort by creation time (newest first)
532                    backups.sort_by(|a, b| b.created_at.cmp(&a.created_at));
533
534                    // Mark old backups for deletion
535                    for metadata in backups.iter().skip(count) {
536                        to_delete.push(metadata.backup_id.clone());
537                    }
538                }
539            }
540            RetentionPolicy::KeepAll => {
541                // No deletions
542            }
543            RetentionPolicy::Custom {
544                daily,
545                weekly,
546                monthly,
547            } => {
548                // Implement GFS (Grandfather-Father-Son) retention
549                self.apply_gfs_retention(daily, weekly, monthly, &mut to_delete);
550            }
551        }
552
553        // Delete marked backups
554        for backup_id in to_delete {
555            self.delete_backup(&backup_id)?;
556        }
557
558        Ok(())
559    }
560
561    // Private helper methods
562
563    fn generate_backup_id(&self, package_id: &str, version: &str) -> String {
564        // Use UUID to ensure uniqueness even for rapid backups
565        format!(
566            "{}-{}-{}",
567            package_id,
568            version,
569            uuid::Uuid::new_v4().to_string()
570        )
571    }
572
573    fn calculate_checksum(&self, data: &[u8]) -> String {
574        let mut hasher = Sha256::new();
575        hasher.update(data);
576        format!("{:x}", hasher.finalize())
577    }
578
579    fn compress_data(&self, data: &[u8]) -> Result<Vec<u8>, TorshError> {
580        use flate2::write::GzEncoder;
581        use flate2::Compression;
582        use std::io::Write;
583
584        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
585        encoder
586            .write_all(data)
587            .map_err(|e| TorshError::RuntimeError(e.to_string()))?;
588        encoder
589            .finish()
590            .map_err(|e| TorshError::RuntimeError(e.to_string()))
591    }
592
593    fn decompress_data(&self, data: &[u8]) -> Result<Vec<u8>, TorshError> {
594        use flate2::read::GzDecoder;
595        use std::io::Read;
596
597        let mut decoder = GzDecoder::new(data);
598        let mut decompressed = Vec::new();
599        decoder
600            .read_to_end(&mut decompressed)
601            .map_err(|e| TorshError::RuntimeError(e.to_string()))?;
602        Ok(decompressed)
603    }
604
605    fn encrypt_data(&self, data: &[u8]) -> Result<Vec<u8>, TorshError> {
606        // Mock encryption (in production, use proper encryption)
607        Ok(data.to_vec())
608    }
609
610    fn decrypt_data(&self, data: &[u8]) -> Result<Vec<u8>, TorshError> {
611        // Mock decryption (in production, use proper decryption)
612        Ok(data.to_vec())
613    }
614
615    fn store_backup(&mut self, backup_id: &str, data: &[u8]) -> Result<(), TorshError> {
616        // Mock storage (in production, write to destination)
617        self.backup_data
618            .insert(backup_id.to_string(), data.to_vec());
619        Ok(())
620    }
621
622    fn load_backup(&self, backup_id: &str) -> Result<Vec<u8>, TorshError> {
623        // Mock loading (in production, read from destination)
624        self.backup_data.get(backup_id).cloned().ok_or_else(|| {
625            TorshError::InvalidArgument(format!("Backup data {} not found", backup_id))
626        })
627    }
628
629    fn merge_backup_data(&self, _base: &[u8], delta: &[u8]) -> Result<Vec<u8>, TorshError> {
630        // Mock merge (in production, apply delta)
631        Ok(delta.to_vec())
632    }
633
634    fn get_last_backup_id(&self, package_id: &str, version: &str) -> Option<String> {
635        self.backups
636            .values()
637            .filter(|m| m.package_id == package_id && m.version == version)
638            .max_by_key(|m| m.created_at)
639            .map(|m| m.backup_id.clone())
640    }
641
642    fn get_last_full_backup_id(&self, package_id: &str, version: &str) -> Option<String> {
643        self.backups
644            .values()
645            .filter(|m| {
646                m.package_id == package_id
647                    && m.version == version
648                    && m.strategy == BackupStrategy::Full
649            })
650            .max_by_key(|m| m.created_at)
651            .map(|m| m.backup_id.clone())
652    }
653
654    fn build_backup_chain(
655        &self,
656        package_id: &str,
657        version: &str,
658    ) -> Result<Vec<String>, TorshError> {
659        let mut chain = Vec::new();
660
661        // Find latest backup
662        if let Some(latest) = self
663            .backups
664            .values()
665            .filter(|m| m.package_id == package_id && m.version == version)
666            .max_by_key(|m| m.created_at)
667        {
668            chain.push(latest.backup_id.clone());
669
670            // Follow parent chain
671            let mut current = latest;
672            while let Some(parent_id) = &current.parent_backup_id {
673                chain.push(parent_id.clone());
674                current = self.backups.get(parent_id).ok_or_else(|| {
675                    TorshError::InvalidArgument(format!("Parent backup {} not found", parent_id))
676                })?;
677            }
678        }
679
680        chain.reverse();
681        Ok(chain)
682    }
683
684    fn update_statistics_after_backup(&mut self) {
685        self.update_statistics();
686    }
687
688    fn update_statistics(&mut self) {
689        let mut stats = BackupStatistics::default();
690
691        stats.total_backups = self.backups.len();
692
693        for metadata in self.backups.values() {
694            match metadata.strategy {
695                BackupStrategy::Full => stats.full_backups += 1,
696                BackupStrategy::Incremental => stats.incremental_backups += 1,
697                BackupStrategy::Differential => stats.differential_backups += 1,
698            }
699
700            stats.total_storage_bytes += metadata.size_bytes;
701            if let Some(compressed) = metadata.compressed_size_bytes {
702                stats.compressed_storage_bytes += compressed;
703            }
704
705            if stats.oldest_backup.is_none() || Some(metadata.created_at) < stats.oldest_backup {
706                stats.oldest_backup = Some(metadata.created_at);
707            }
708
709            if stats.newest_backup.is_none() || Some(metadata.created_at) > stats.newest_backup {
710                stats.newest_backup = Some(metadata.created_at);
711            }
712        }
713
714        if stats.total_storage_bytes > 0 {
715            stats.compression_ratio =
716                stats.compressed_storage_bytes as f64 / stats.total_storage_bytes as f64;
717        }
718
719        self.statistics = stats;
720    }
721
722    fn apply_gfs_retention(
723        &self,
724        _daily: u32,
725        _weekly: u32,
726        _monthly: u32,
727        _to_delete: &mut Vec<String>,
728    ) {
729        // Mock GFS implementation (in production, implement proper GFS logic)
730    }
731}
732
#[cfg(test)]
mod tests {
    use super::*;

    /// Shared fixture: full backups, compression on, encryption off,
    /// keep-last-5 retention.
    fn create_test_config() -> BackupConfig {
        BackupConfig {
            destination: std::env::temp_dir().join("backups"),
            strategy: BackupStrategy::Full,
            compression: true,
            encryption: false,
            retention: RetentionPolicy::KeepLast(5),
        }
    }

    #[test]
    fn test_backup_manager_creation() {
        let config = create_test_config();
        let manager = BackupManager::new(config);
        let stats = manager.get_statistics();
        // A fresh manager starts with zeroed statistics.
        assert_eq!(stats.total_backups, 0);
    }

    #[test]
    fn test_create_backup() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let data = b"test package data";
        let backup_id = manager.create_backup("test-pkg", "1.0.0", data).unwrap();

        assert!(!backup_id.is_empty());
        // Statistics are refreshed as part of create_backup.
        let stats = manager.get_statistics();
        assert_eq!(stats.total_backups, 1);
        assert_eq!(stats.full_backups, 1);
    }

    #[test]
    fn test_restore_backup() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let data = b"test package data";
        let backup_id = manager.create_backup("test-pkg", "1.0.0", data).unwrap();

        // Round trip through compression must reproduce the raw input.
        let restored = manager.restore_backup(&backup_id).unwrap();
        assert_eq!(restored, data);
    }

    #[test]
    fn test_list_backups() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        manager.create_backup("pkg1", "1.0.0", b"data1").unwrap();
        manager.create_backup("pkg1", "2.0.0", b"data2").unwrap();
        manager.create_backup("pkg2", "1.0.0", b"data3").unwrap();

        // Listing is by package ID across all versions.
        let backups = manager.list_backups("pkg1");
        assert_eq!(backups.len(), 2);

        let backups = manager.list_backups("pkg2");
        assert_eq!(backups.len(), 1);
    }

    #[test]
    fn test_verify_backup() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let data = b"test package data";
        let backup_id = manager.create_backup("test-pkg", "1.0.0", data).unwrap();

        // A freshly created backup must verify cleanly.
        let result = manager.verify_backup(&backup_id);
        assert!(result.success);
        assert!(result.readable);
    }

    #[test]
    fn test_delete_backup() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let backup_id = manager.create_backup("test-pkg", "1.0.0", b"data").unwrap();

        assert_eq!(manager.get_statistics().total_backups, 1);

        // Deletion removes the backup and refreshes statistics.
        manager.delete_backup(&backup_id).unwrap();
        assert_eq!(manager.get_statistics().total_backups, 0);
    }

    #[test]
    fn test_retention_policy_keep_last() {
        let mut config = create_test_config();
        config.retention = RetentionPolicy::KeepLast(3);
        let mut manager = BackupManager::new(config);

        // Create 5 backups; retention runs after each create_backup.
        for i in 0..5 {
            manager
                .create_backup("test-pkg", "1.0.0", format!("data{}", i).as_bytes())
                .unwrap();
        }

        // Should only keep last 3
        assert_eq!(manager.get_statistics().total_backups, 3);
    }

    #[test]
    fn test_incremental_backup() {
        let mut config = create_test_config();
        config.strategy = BackupStrategy::Incremental;
        let mut manager = BackupManager::new(config);

        // Create full backup first (in a separate, full-strategy manager).
        let mut config2 = create_test_config();
        config2.strategy = BackupStrategy::Full;
        let mut manager2 = BackupManager::new(config2);
        let full_id = manager2
            .create_backup("test-pkg", "1.0.0", b"base data")
            .unwrap();

        // Copy only the *metadata* to the incremental manager — enough for
        // parent lookup; the payload stays in manager2, so this chain is not
        // restorable here (the test only inspects metadata).
        if let Some(metadata) = manager2.backups.get(&full_id) {
            manager.backups.insert(full_id.clone(), metadata.clone());
        }

        // Create incremental backup; it should chain off the copied full backup.
        let inc_id = manager
            .create_backup("test-pkg", "1.0.0", b"delta data")
            .unwrap();

        let metadata = manager.backups.get(&inc_id).unwrap();
        assert_eq!(metadata.strategy, BackupStrategy::Incremental);
        assert!(metadata.parent_backup_id.is_some());
    }

    #[test]
    fn test_create_recovery_point() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        manager.create_backup("test-pkg", "1.0.0", b"data").unwrap();

        let rp_id = manager
            .create_recovery_point("test-pkg", "1.0.0", "Before update".to_string())
            .unwrap();

        assert!(!rp_id.is_empty());
        assert_eq!(manager.recovery_points.len(), 1);
    }

    #[test]
    fn test_backup_statistics() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let data = b"test data with some content";
        manager.create_backup("pkg1", "1.0.0", data).unwrap();
        manager.create_backup("pkg2", "1.0.0", data).unwrap();

        let stats = manager.get_statistics();
        assert_eq!(stats.total_backups, 2);
        assert_eq!(stats.full_backups, 2);
        assert!(stats.total_storage_bytes > 0);
        assert!(stats.newest_backup.is_some());
        assert!(stats.oldest_backup.is_some());
    }
}