1use chrono::{DateTime, Duration as ChronoDuration, Utc};
44use serde::{Deserialize, Serialize};
45use sha2::{Digest, Sha256};
46use std::collections::HashMap;
47use std::path::PathBuf;
48use torsh_core::error::TorshError;
49
/// Strategy used when creating a backup.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum BackupStrategy {
    /// Complete, self-contained copy of the payload.
    Full,
    /// Delta relative to the most recent backup (full or incremental).
    Incremental,
    /// Delta relative to the most recent *full* backup.
    Differential,
}
60
/// Policy controlling which backups survive `apply_retention_policy`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum RetentionPolicy {
    /// Keep only backups created within the last N days.
    KeepDays(u32),
    /// Keep only the N most recent backups per package.
    KeepLast(usize),
    /// Never prune automatically.
    KeepAll,
    /// Grandfather-father-son tiered retention (keep counts per tier).
    /// NOTE(review): currently a no-op — `apply_gfs_retention` is an empty stub.
    Custom {
        /// Number of daily backups to keep.
        daily: u32,
        /// Number of weekly backups to keep.
        weekly: u32,
        /// Number of monthly backups to keep.
        monthly: u32,
    },
}
80
/// Where backup data may be written.
/// NOTE(review): not referenced by `BackupConfig` (which uses a plain
/// `PathBuf`) nor by `BackupManager` — confirm whether this type is still
/// needed or is a planned extension point.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum BackupDestination {
    /// Local filesystem directory.
    Local(PathBuf),
    /// Amazon S3 object storage.
    S3 {
        /// Bucket name.
        bucket: String,
        /// AWS region of the bucket.
        region: String,
        /// Key prefix within the bucket.
        path: String,
    },
    /// Google Cloud Storage.
    Gcs {
        /// Bucket name.
        bucket: String,
        /// Object prefix within the bucket.
        path: String,
    },
    /// Azure Blob Storage.
    Azure {
        /// Container name.
        container: String,
        /// Blob prefix within the container.
        path: String,
    },
}
110
/// Configuration controlling how backups are created and retained.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupConfig {
    /// Target directory for backup storage.
    /// NOTE(review): never read by the in-memory implementation below —
    /// confirm whether on-disk persistence is planned.
    pub destination: PathBuf,
    /// Backup strategy (full / incremental / differential).
    pub strategy: BackupStrategy,
    /// Gzip-compress payloads before storing.
    pub compression: bool,
    /// Run payloads through the encryption step (currently a pass-through).
    pub encryption: bool,
    /// Pruning policy applied after every backup.
    pub retention: RetentionPolicy,
}
125
/// Record describing a single stored backup.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupMetadata {
    /// Unique id, format `<package>-<version>-<uuid>`.
    pub backup_id: String,
    /// Package this backup belongs to.
    pub package_id: String,
    /// Package version that was backed up.
    pub version: String,
    /// Strategy in effect when the backup was created.
    pub strategy: BackupStrategy,
    /// Creation timestamp (UTC).
    pub created_at: DateTime<Utc>,
    /// Size of the raw (pre-compression, pre-encryption) payload.
    pub size_bytes: u64,
    /// Size after compression; `None` when stored uncompressed.
    pub compressed_size_bytes: Option<u64>,
    /// Lowercase hex SHA-256 of the raw payload.
    pub checksum: String,
    /// Parent backup id for incremental/differential backups; `None` for full.
    pub parent_backup_id: Option<String>,
    /// Whether the stored bytes are gzip-compressed.
    pub compressed: bool,
    /// Whether the stored bytes went through the encryption step.
    pub encrypted: bool,
    /// Free-form key/value annotations (currently always empty at creation).
    pub metadata: HashMap<String, String>,
}
154
/// Outcome of `BackupManager::verify_backup`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerificationResult {
    /// Id of the backup that was verified.
    pub backup_id: String,
    /// True only when no errors were collected.
    pub success: bool,
    /// Restored payload reproduced the recorded checksum.
    pub checksum_valid: bool,
    /// Stored bytes could be loaded.
    pub readable: bool,
    /// Stored byte length matched the recorded size.
    pub size_valid: bool,
    /// Human-readable descriptions of each failed check.
    pub errors: Vec<String>,
    /// When the verification ran (UTC).
    pub verified_at: DateTime<Utc>,
}
173
/// Named snapshot of a package's backup chain at a point in time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecoveryPoint {
    /// Unique recovery-point id (UUID v4).
    pub id: String,
    /// Package covered by this recovery point.
    pub package_id: String,
    /// Package version covered by this recovery point.
    pub version: String,
    /// When the recovery point was recorded (UTC).
    pub timestamp: DateTime<Utc>,
    /// Backup ids oldest-first; the last entry is the newest backup.
    pub backup_chain: Vec<String>,
    /// Caller-supplied label, e.g. "Before update".
    pub description: String,
}
190
/// Aggregate statistics over all stored backups; recomputed after each
/// create/delete by `BackupManager::update_statistics`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BackupStatistics {
    /// Number of backups currently tracked.
    pub total_backups: usize,
    /// Count of `BackupStrategy::Full` backups.
    pub full_backups: usize,
    /// Count of `BackupStrategy::Incremental` backups.
    pub incremental_backups: usize,
    /// Count of `BackupStrategy::Differential` backups.
    pub differential_backups: usize,
    /// Sum of raw payload sizes.
    pub total_storage_bytes: u64,
    /// Sum of compressed sizes (compressed backups only).
    pub compressed_storage_bytes: u64,
    /// `compressed_storage_bytes / total_storage_bytes`; 0.0 when empty.
    pub compression_ratio: f64,
    /// Earliest `created_at` among tracked backups.
    pub oldest_backup: Option<DateTime<Utc>>,
    /// Latest `created_at` among tracked backups.
    pub newest_backup: Option<DateTime<Utc>>,
    /// NOTE(review): never written anywhere in this file — confirm who is
    /// expected to populate it.
    pub failed_backups: usize,
}
215
/// In-memory backup manager: creates, verifies, restores, and prunes backups
/// according to a `BackupConfig`.
pub struct BackupManager {
    /// Strategy, compression, encryption, and retention settings.
    config: BackupConfig,
    /// Metadata for every stored backup, keyed by backup id.
    backups: HashMap<String, BackupMetadata>,
    /// Named recovery points capturing backup chains.
    recovery_points: Vec<RecoveryPoint>,
    /// Aggregates recomputed after each create/delete.
    statistics: BackupStatistics,
    /// Stored (possibly compressed/encrypted) bytes, keyed by backup id.
    /// NOTE(review): data lives in memory only; `config.destination` is never
    /// written to — confirm whether durable storage is planned.
    backup_data: HashMap<String, Vec<u8>>,
}
232
233impl BackupManager {
234 pub fn new(config: BackupConfig) -> Self {
236 Self {
237 config,
238 backups: HashMap::new(),
239 recovery_points: Vec::new(),
240 statistics: BackupStatistics::default(),
241 backup_data: HashMap::new(),
242 }
243 }
244
245 pub fn create_backup(
247 &mut self,
248 package_id: &str,
249 version: &str,
250 data: &[u8],
251 ) -> Result<String, TorshError> {
252 let backup_id = self.generate_backup_id(package_id, version);
253 let created_at = Utc::now();
254
255 let checksum = self.calculate_checksum(data);
257
258 let parent_backup_id = match self.config.strategy {
260 BackupStrategy::Full => None,
261 BackupStrategy::Incremental => self.get_last_backup_id(package_id, version),
262 BackupStrategy::Differential => self.get_last_full_backup_id(package_id, version),
263 };
264
265 let (final_data, compressed_size) = if self.config.compression {
267 let compressed = self.compress_data(data)?;
268 let size = compressed.len() as u64;
269 (compressed, Some(size))
270 } else {
271 (data.to_vec(), None)
272 };
273
274 let final_data = if self.config.encryption {
276 self.encrypt_data(&final_data)?
277 } else {
278 final_data
279 };
280
281 self.store_backup(&backup_id, &final_data)?;
283
284 let metadata = BackupMetadata {
286 backup_id: backup_id.clone(),
287 package_id: package_id.to_string(),
288 version: version.to_string(),
289 strategy: self.config.strategy,
290 created_at,
291 size_bytes: data.len() as u64,
292 compressed_size_bytes: compressed_size,
293 checksum,
294 parent_backup_id,
295 compressed: self.config.compression,
296 encrypted: self.config.encryption,
297 metadata: HashMap::new(),
298 };
299
300 self.backups.insert(backup_id.clone(), metadata);
301
302 self.update_statistics_after_backup();
304
305 self.apply_retention_policy()?;
307
308 Ok(backup_id)
309 }
310
    /// Restores the plaintext payload of `backup_id`.
    ///
    /// Reverses the storage pipeline (decrypt, then decompress), validates the
    /// checksum against the recorded metadata, and — for incremental or
    /// differential backups — recursively restores the parent chain and merges
    /// this backup's delta on top of it.
    ///
    /// # Errors
    /// Fails if the backup (or any parent in the chain) is unknown, if the
    /// stored bytes cannot be loaded, or if the checksum does not match.
    pub fn restore_backup(&self, backup_id: &str) -> Result<Vec<u8>, TorshError> {
        let metadata = self.backups.get(backup_id).ok_or_else(|| {
            TorshError::InvalidArgument(format!("Backup {} not found", backup_id))
        })?;

        let mut data = self.load_backup(backup_id)?;

        // Undo transformations in reverse order of application: encryption was
        // applied last at backup time, so it is removed first.
        if metadata.encrypted {
            data = self.decrypt_data(&data)?;
        }

        if metadata.compressed {
            data = self.decompress_data(&data)?;
        }

        // The recorded checksum was taken over this backup's own raw payload,
        // so it is validated before any parent merge.
        let checksum = self.calculate_checksum(&data);
        if checksum != metadata.checksum {
            return Err(TorshError::RuntimeError(
                "Backup checksum mismatch".to_string(),
            ));
        }

        // Non-full backups are deltas: rebuild the base from the parent chain
        // and merge. NOTE(review): `merge_backup_data` currently returns the
        // delta unchanged — confirm the intended delta format.
        if let Some(parent_id) = &metadata.parent_backup_id {
            let parent_data = self.restore_backup(parent_id)?;
            data = self.merge_backup_data(&parent_data, &data)?;
        }

        Ok(data)
    }
346
    /// Verifies a backup's integrity without mutating any state.
    ///
    /// Checks three independent properties: the stored bytes are loadable
    /// (`readable`), their length matches the recorded size (`size_valid`),
    /// and a full restore reproduces the recorded checksum (`checksum_valid`).
    /// `success` is true only when no errors were collected.
    pub fn verify_backup(&self, backup_id: &str) -> VerificationResult {
        let metadata = match self.backups.get(backup_id) {
            Some(m) => m,
            None => {
                // Unknown id: report a total failure rather than erroring out.
                return VerificationResult {
                    backup_id: backup_id.to_string(),
                    success: false,
                    checksum_valid: false,
                    readable: false,
                    size_valid: false,
                    errors: vec!["Backup not found".to_string()],
                    verified_at: Utc::now(),
                }
            }
        };

        let mut errors = Vec::new();
        let mut checksum_valid = false;
        let mut readable = false;
        let mut size_valid = false;

        match self.load_backup(backup_id) {
            Ok(data) => {
                readable = true;

                // Stored bytes are post-compression, so compare against the
                // compressed size when one was recorded.
                // NOTE(review): with a real (size-changing) cipher this check
                // would need the encrypted size; it holds today because
                // `encrypt_data` is a pass-through — confirm before wiring in
                // real encryption.
                let expected_size = if metadata.compressed {
                    metadata
                        .compressed_size_bytes
                        .unwrap_or(metadata.size_bytes)
                } else {
                    metadata.size_bytes
                };

                if data.len() as u64 == expected_size {
                    size_valid = true;
                } else {
                    errors.push(format!(
                        "Size mismatch: expected {}, got {}",
                        expected_size,
                        data.len()
                    ));
                }

                // End-to-end check: a full restore (including any parent
                // chain merge) must reproduce the recorded checksum.
                match self.restore_backup(backup_id) {
                    Ok(restored) => {
                        let checksum = self.calculate_checksum(&restored);
                        if checksum == metadata.checksum {
                            checksum_valid = true;
                        } else {
                            errors.push("Checksum mismatch".to_string());
                        }
                    }
                    Err(e) => {
                        errors.push(format!("Restoration failed: {}", e));
                    }
                }
            }
            Err(e) => {
                errors.push(format!("Failed to load backup: {}", e));
            }
        }

        let success = errors.is_empty();

        VerificationResult {
            backup_id: backup_id.to_string(),
            success,
            checksum_valid,
            readable,
            size_valid,
            errors,
            verified_at: Utc::now(),
        }
    }
425
426 pub fn create_recovery_point(
428 &mut self,
429 package_id: &str,
430 version: &str,
431 description: String,
432 ) -> Result<String, TorshError> {
433 let id = uuid::Uuid::new_v4().to_string();
434
435 let backup_chain = self.build_backup_chain(package_id, version)?;
437
438 let recovery_point = RecoveryPoint {
439 id: id.clone(),
440 package_id: package_id.to_string(),
441 version: version.to_string(),
442 timestamp: Utc::now(),
443 backup_chain,
444 description,
445 };
446
447 self.recovery_points.push(recovery_point);
448
449 Ok(id)
450 }
451
452 pub fn restore_to_recovery_point(
454 &self,
455 recovery_point_id: &str,
456 ) -> Result<Vec<u8>, TorshError> {
457 let recovery_point = self
458 .recovery_points
459 .iter()
460 .find(|rp| rp.id == recovery_point_id)
461 .ok_or_else(|| {
462 TorshError::InvalidArgument(format!(
463 "Recovery point {} not found",
464 recovery_point_id
465 ))
466 })?;
467
468 if let Some(last_backup) = recovery_point.backup_chain.last() {
470 self.restore_backup(last_backup)
471 } else {
472 Err(TorshError::InvalidArgument(
473 "Recovery point has no backups".to_string(),
474 ))
475 }
476 }
477
478 pub fn list_backups(&self, package_id: &str) -> Vec<&BackupMetadata> {
480 self.backups
481 .values()
482 .filter(|m| m.package_id == package_id)
483 .collect()
484 }
485
    /// Returns the aggregate statistics last computed over all stored backups.
    pub fn get_statistics(&self) -> &BackupStatistics {
        &self.statistics
    }
490
491 pub fn delete_backup(&mut self, backup_id: &str) -> Result<(), TorshError> {
493 self.backups.remove(backup_id).ok_or_else(|| {
494 TorshError::InvalidArgument(format!("Backup {} not found", backup_id))
495 })?;
496
497 self.backup_data.remove(backup_id);
499
500 self.update_statistics();
502
503 Ok(())
504 }
505
506 pub fn apply_retention_policy(&mut self) -> Result<(), TorshError> {
508 let now = Utc::now();
509 let mut to_delete = Vec::new();
510
511 match self.config.retention {
512 RetentionPolicy::KeepDays(days) => {
513 let cutoff = now - ChronoDuration::days(days as i64);
514 for (id, metadata) in &self.backups {
515 if metadata.created_at < cutoff {
516 to_delete.push(id.clone());
517 }
518 }
519 }
520 RetentionPolicy::KeepLast(count) => {
521 let mut by_package: HashMap<String, Vec<&BackupMetadata>> = HashMap::new();
523 for metadata in self.backups.values() {
524 by_package
525 .entry(metadata.package_id.clone())
526 .or_insert_with(Vec::new)
527 .push(metadata);
528 }
529
530 for backups in by_package.values_mut() {
531 backups.sort_by(|a, b| b.created_at.cmp(&a.created_at));
533
534 for metadata in backups.iter().skip(count) {
536 to_delete.push(metadata.backup_id.clone());
537 }
538 }
539 }
540 RetentionPolicy::KeepAll => {
541 }
543 RetentionPolicy::Custom {
544 daily,
545 weekly,
546 monthly,
547 } => {
548 self.apply_gfs_retention(daily, weekly, monthly, &mut to_delete);
550 }
551 }
552
553 for backup_id in to_delete {
555 self.delete_backup(&backup_id)?;
556 }
557
558 Ok(())
559 }
560
561 fn generate_backup_id(&self, package_id: &str, version: &str) -> String {
564 format!(
566 "{}-{}-{}",
567 package_id,
568 version,
569 uuid::Uuid::new_v4().to_string()
570 )
571 }
572
573 fn calculate_checksum(&self, data: &[u8]) -> String {
574 let mut hasher = Sha256::new();
575 hasher.update(data);
576 format!("{:x}", hasher.finalize())
577 }
578
579 fn compress_data(&self, data: &[u8]) -> Result<Vec<u8>, TorshError> {
580 use flate2::write::GzEncoder;
581 use flate2::Compression;
582 use std::io::Write;
583
584 let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
585 encoder
586 .write_all(data)
587 .map_err(|e| TorshError::RuntimeError(e.to_string()))?;
588 encoder
589 .finish()
590 .map_err(|e| TorshError::RuntimeError(e.to_string()))
591 }
592
593 fn decompress_data(&self, data: &[u8]) -> Result<Vec<u8>, TorshError> {
594 use flate2::read::GzDecoder;
595 use std::io::Read;
596
597 let mut decoder = GzDecoder::new(data);
598 let mut decompressed = Vec::new();
599 decoder
600 .read_to_end(&mut decompressed)
601 .map_err(|e| TorshError::RuntimeError(e.to_string()))?;
602 Ok(decompressed)
603 }
604
    /// Placeholder encryption: currently an identity transform.
    /// NOTE(review): bytes are returned unmodified, so `encryption: true`
    /// provides no confidentiality yet — confirm this stub is intentional
    /// before relying on the `encrypted` metadata flag.
    fn encrypt_data(&self, data: &[u8]) -> Result<Vec<u8>, TorshError> {
        Ok(data.to_vec())
    }
609
    /// Placeholder decryption: identity transform mirroring `encrypt_data`.
    /// NOTE(review): must stay the exact inverse of `encrypt_data` once real
    /// encryption is implemented.
    fn decrypt_data(&self, data: &[u8]) -> Result<Vec<u8>, TorshError> {
        Ok(data.to_vec())
    }
614
615 fn store_backup(&mut self, backup_id: &str, data: &[u8]) -> Result<(), TorshError> {
616 self.backup_data
618 .insert(backup_id.to_string(), data.to_vec());
619 Ok(())
620 }
621
622 fn load_backup(&self, backup_id: &str) -> Result<Vec<u8>, TorshError> {
623 self.backup_data.get(backup_id).cloned().ok_or_else(|| {
625 TorshError::InvalidArgument(format!("Backup data {} not found", backup_id))
626 })
627 }
628
    /// Placeholder merge: returns the delta unchanged, ignoring the base.
    /// NOTE(review): a real incremental/differential restore would apply the
    /// delta on top of `_base`; as written, restoring a chained backup yields
    /// only the most recent delta — confirm the intended delta format.
    fn merge_backup_data(&self, _base: &[u8], delta: &[u8]) -> Result<Vec<u8>, TorshError> {
        Ok(delta.to_vec())
    }
633
634 fn get_last_backup_id(&self, package_id: &str, version: &str) -> Option<String> {
635 self.backups
636 .values()
637 .filter(|m| m.package_id == package_id && m.version == version)
638 .max_by_key(|m| m.created_at)
639 .map(|m| m.backup_id.clone())
640 }
641
642 fn get_last_full_backup_id(&self, package_id: &str, version: &str) -> Option<String> {
643 self.backups
644 .values()
645 .filter(|m| {
646 m.package_id == package_id
647 && m.version == version
648 && m.strategy == BackupStrategy::Full
649 })
650 .max_by_key(|m| m.created_at)
651 .map(|m| m.backup_id.clone())
652 }
653
654 fn build_backup_chain(
655 &self,
656 package_id: &str,
657 version: &str,
658 ) -> Result<Vec<String>, TorshError> {
659 let mut chain = Vec::new();
660
661 if let Some(latest) = self
663 .backups
664 .values()
665 .filter(|m| m.package_id == package_id && m.version == version)
666 .max_by_key(|m| m.created_at)
667 {
668 chain.push(latest.backup_id.clone());
669
670 let mut current = latest;
672 while let Some(parent_id) = ¤t.parent_backup_id {
673 chain.push(parent_id.clone());
674 current = self.backups.get(parent_id).ok_or_else(|| {
675 TorshError::InvalidArgument(format!("Parent backup {} not found", parent_id))
676 })?;
677 }
678 }
679
680 chain.reverse();
681 Ok(chain)
682 }
683
    /// Hook invoked after each successful backup; currently just delegates to
    /// the full recomputation in `update_statistics`.
    fn update_statistics_after_backup(&mut self) {
        self.update_statistics();
    }
687
688 fn update_statistics(&mut self) {
689 let mut stats = BackupStatistics::default();
690
691 stats.total_backups = self.backups.len();
692
693 for metadata in self.backups.values() {
694 match metadata.strategy {
695 BackupStrategy::Full => stats.full_backups += 1,
696 BackupStrategy::Incremental => stats.incremental_backups += 1,
697 BackupStrategy::Differential => stats.differential_backups += 1,
698 }
699
700 stats.total_storage_bytes += metadata.size_bytes;
701 if let Some(compressed) = metadata.compressed_size_bytes {
702 stats.compressed_storage_bytes += compressed;
703 }
704
705 if stats.oldest_backup.is_none() || Some(metadata.created_at) < stats.oldest_backup {
706 stats.oldest_backup = Some(metadata.created_at);
707 }
708
709 if stats.newest_backup.is_none() || Some(metadata.created_at) > stats.newest_backup {
710 stats.newest_backup = Some(metadata.created_at);
711 }
712 }
713
714 if stats.total_storage_bytes > 0 {
715 stats.compression_ratio =
716 stats.compressed_storage_bytes as f64 / stats.total_storage_bytes as f64;
717 }
718
719 self.statistics = stats;
720 }
721
    /// Grandfather-father-son retention selection.
    /// NOTE(review): unimplemented stub — `RetentionPolicy::Custom` currently
    /// never marks anything for deletion. Parameters are the per-tier keep
    /// counts; an implementation should push expired backup ids into
    /// `_to_delete`.
    fn apply_gfs_retention(
        &self,
        _daily: u32,
        _weekly: u32,
        _monthly: u32,
        _to_delete: &mut Vec<String>,
    ) {
    }
731}
732
#[cfg(test)]
mod tests {
    use super::*;

    // Shared fixture: full strategy, compression on, encryption off,
    // keep the last 5 backups.
    fn create_test_config() -> BackupConfig {
        BackupConfig {
            destination: std::env::temp_dir().join("backups"),
            strategy: BackupStrategy::Full,
            compression: true,
            encryption: false,
            retention: RetentionPolicy::KeepLast(5),
        }
    }

    // A fresh manager starts with no backups recorded.
    #[test]
    fn test_backup_manager_creation() {
        let config = create_test_config();
        let manager = BackupManager::new(config);
        let stats = manager.get_statistics();
        assert_eq!(stats.total_backups, 0);
    }

    // Creating one backup yields a non-empty id and updates the counters.
    #[test]
    fn test_create_backup() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let data = b"test package data";
        let backup_id = manager.create_backup("test-pkg", "1.0.0", data).unwrap();

        assert!(!backup_id.is_empty());
        let stats = manager.get_statistics();
        assert_eq!(stats.total_backups, 1);
        assert_eq!(stats.full_backups, 1);
    }

    // Round trip: restore must reproduce the original bytes exactly.
    #[test]
    fn test_restore_backup() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let data = b"test package data";
        let backup_id = manager.create_backup("test-pkg", "1.0.0", data).unwrap();

        let restored = manager.restore_backup(&backup_id).unwrap();
        assert_eq!(restored, data);
    }

    // Listing filters by package id (all versions included).
    #[test]
    fn test_list_backups() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        manager.create_backup("pkg1", "1.0.0", b"data1").unwrap();
        manager.create_backup("pkg1", "2.0.0", b"data2").unwrap();
        manager.create_backup("pkg2", "1.0.0", b"data3").unwrap();

        let backups = manager.list_backups("pkg1");
        assert_eq!(backups.len(), 2);

        let backups = manager.list_backups("pkg2");
        assert_eq!(backups.len(), 1);
    }

    // A freshly created backup passes verification.
    #[test]
    fn test_verify_backup() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let data = b"test package data";
        let backup_id = manager.create_backup("test-pkg", "1.0.0", data).unwrap();

        let result = manager.verify_backup(&backup_id);
        assert!(result.success);
        assert!(result.readable);
    }

    // Deleting removes the backup from the statistics.
    #[test]
    fn test_delete_backup() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let backup_id = manager.create_backup("test-pkg", "1.0.0", b"data").unwrap();

        assert_eq!(manager.get_statistics().total_backups, 1);

        manager.delete_backup(&backup_id).unwrap();
        assert_eq!(manager.get_statistics().total_backups, 0);
    }

    // KeepLast(3): after 5 backups, only 3 remain (retention runs on create).
    #[test]
    fn test_retention_policy_keep_last() {
        let mut config = create_test_config();
        config.retention = RetentionPolicy::KeepLast(3);
        let mut manager = BackupManager::new(config);

        for i in 0..5 {
            manager
                .create_backup("test-pkg", "1.0.0", format!("data{}", i).as_bytes())
                .unwrap();
        }

        assert_eq!(manager.get_statistics().total_backups, 3);
    }

    // An incremental backup created after a seeded full backup records the
    // full backup as its parent.
    #[test]
    fn test_incremental_backup() {
        let mut config = create_test_config();
        config.strategy = BackupStrategy::Incremental;
        let mut manager = BackupManager::new(config);

        // Seed a full backup via a second manager, then copy its metadata in
        // so the incremental manager can find a parent.
        let mut config2 = create_test_config();
        config2.strategy = BackupStrategy::Full;
        let mut manager2 = BackupManager::new(config2);
        let full_id = manager2
            .create_backup("test-pkg", "1.0.0", b"base data")
            .unwrap();

        if let Some(metadata) = manager2.backups.get(&full_id) {
            manager.backups.insert(full_id.clone(), metadata.clone());
        }

        let inc_id = manager
            .create_backup("test-pkg", "1.0.0", b"delta data")
            .unwrap();

        let metadata = manager.backups.get(&inc_id).unwrap();
        assert_eq!(metadata.strategy, BackupStrategy::Incremental);
        assert!(metadata.parent_backup_id.is_some());
    }

    // Recovery points get non-empty ids and are recorded on the manager.
    #[test]
    fn test_create_recovery_point() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        manager.create_backup("test-pkg", "1.0.0", b"data").unwrap();

        let rp_id = manager
            .create_recovery_point("test-pkg", "1.0.0", "Before update".to_string())
            .unwrap();

        assert!(!rp_id.is_empty());
        assert_eq!(manager.recovery_points.len(), 1);
    }

    // Statistics aggregate across packages: counts, sizes, and timestamps.
    #[test]
    fn test_backup_statistics() {
        let config = create_test_config();
        let mut manager = BackupManager::new(config);

        let data = b"test data with some content";
        manager.create_backup("pkg1", "1.0.0", data).unwrap();
        manager.create_backup("pkg2", "1.0.0", data).unwrap();

        let stats = manager.get_statistics();
        assert_eq!(stats.total_backups, 2);
        assert_eq!(stats.full_backups, 2);
        assert!(stats.total_storage_bytes > 0);
        assert!(stats.newest_backup.is_some());
        assert!(stats.oldest_backup.is_some());
    }
}