#![allow(dead_code, unused_imports, unused_variables, clippy::ptr_arg)]
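//! Backup management for the upgrade subsystem: creates gzip-compressed
//! tar archives of the application binary, configuration, and data,
//! verifies archive integrity via size and SHA-256 checks, and restores
//! archives with a restore point for rollback.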
use super::{ApplicationVersion, UpgradeConfig};
use anyhow::Result;
use chrono::{DateTime, Utc};
use flate2::{Compression, read::GzDecoder, write::GzEncoder};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::fs::{self, File};
use std::io::Read;
use std::path::{Path, PathBuf};
use tar::{Archive, Builder};
use tracing::{debug, error, info, warn};
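/// Metadata persisted (as JSON, alongside the archives) for each backup.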
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupMetadata {
pub id: String,
pub version: ApplicationVersion,
pub created_at: DateTime<Utc>,
pub backup_type: BackupType,
pub file_path: PathBuf,
pub compressed_size: u64,
pub uncompressed_size: u64,
pub checksum: String,
pub compression_method: CompressionMethod,
pub includes_config: bool,
pub includes_data: bool,
pub description: String,
}
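/// Scope of a backup: everything, configuration only, data only, or an
/// explicit set of paths.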
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BackupType {
Full,
ConfigOnly,
DataOnly,
Custom { paths: Vec<PathBuf> },
}
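/// Compression applied to the tar archive. Only `Gzip` and `None` are
/// implemented; `Zstd` is reserved for future use.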
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CompressionMethod {
None,
Gzip,
Zstd,
}
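/// Creates, verifies, restores, lists, and prunes application backups
/// stored under `config.backup_dir`.
///
/// Minimal usage sketch (marked `ignore` because the crate path of this
/// module is assumed, not shown here):
///
/// ```ignore
/// let config = UpgradeConfig::default();
/// let manager = BackupManager::new(&config)?;
/// let archive = manager.create_backup().await?;
/// println!("backup written to {:?}", archive);
/// ```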
pub struct BackupManager {
config: UpgradeConfig,
backup_dir: PathBuf,
metadata_file: PathBuf,
}
impl BackupManager {
pub fn new(config: &UpgradeConfig) -> Result<Self> {
let backup_dir = config.backup_dir.clone();
fs::create_dir_all(&backup_dir)?;
let metadata_file = backup_dir.join("backup_metadata.json");
Ok(Self {
config: config.clone(),
backup_dir,
metadata_file,
})
}
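    /// Create a full pre-upgrade backup (binary, config, and data),
    /// persist its metadata, and prune backups past the retention limit.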
pub async fn create_backup(&self) -> Result<PathBuf> {
info!("Creating full application backup");
let backup_id = self.generate_backup_id();
let current_version = ApplicationVersion::current();
let paths_to_backup = self.get_backup_paths(BackupType::Full)?;
let backup_filename = format!(
"inferno_backup_{}_{}.tar.gz",
current_version.to_string().replace('.', "_"),
backup_id
);
let backup_path = self.backup_dir.join(&backup_filename);
let (compressed_size, uncompressed_size, checksum) = self
.create_compressed_archive(&paths_to_backup, &backup_path)
.await?;
let metadata = BackupMetadata {
id: backup_id,
version: current_version,
created_at: Utc::now(),
backup_type: BackupType::Full,
file_path: backup_path.clone(),
compressed_size,
uncompressed_size,
checksum,
compression_method: CompressionMethod::Gzip,
includes_config: true,
includes_data: true,
description: "Pre-upgrade full backup".to_string(),
};
self.save_backup_metadata(&metadata).await?;
self.cleanup_old_backups().await?;
info!("Backup created successfully: {:?}", backup_path);
Ok(backup_path)
}
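    /// Create a backup limited to `backup_type`, tagged with a
    /// caller-supplied description.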
pub async fn create_selective_backup(
&self,
backup_type: BackupType,
description: String,
) -> Result<PathBuf> {
info!("Creating selective backup: {:?}", backup_type);
let backup_id = self.generate_backup_id();
let current_version = ApplicationVersion::current();
let paths_to_backup = self.get_backup_paths(backup_type.clone())?;
let backup_filename = format!(
"inferno_selective_{}_{}.tar.gz",
current_version.to_string().replace('.', "_"),
backup_id
);
let backup_path = self.backup_dir.join(&backup_filename);
let (compressed_size, uncompressed_size, checksum) = self
.create_compressed_archive(&paths_to_backup, &backup_path)
.await?;
let (includes_config, includes_data) = match &backup_type {
BackupType::Full => (true, true),
BackupType::ConfigOnly => (true, false),
BackupType::DataOnly => (false, true),
            BackupType::Custom { .. } => (true, true),
        };
let metadata = BackupMetadata {
id: backup_id,
version: current_version,
created_at: Utc::now(),
backup_type,
file_path: backup_path.clone(),
compressed_size,
uncompressed_size,
checksum,
compression_method: CompressionMethod::Gzip,
includes_config,
includes_data,
description,
};
self.save_backup_metadata(&metadata).await?;
self.cleanup_old_backups().await?;
info!("Selective backup created successfully: {:?}", backup_path);
Ok(backup_path)
}
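    /// Restore from the archive at `backup_path`. The backup is verified
    /// first, and a restore point of the current executable is taken so a
    /// failed restore can be recovered from manually.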
pub async fn restore_backup(&self, backup_path: &PathBuf) -> Result<()> {
info!("Restoring from backup: {:?}", backup_path);
if !backup_path.exists() {
return Err(anyhow::anyhow!("Backup file not found: {:?}", backup_path));
}
let metadata = self.get_backup_metadata_by_path(backup_path).await?;
self.verify_backup_integrity(&metadata).await?;
let restore_point = self.create_restore_point().await?;
match self.perform_restore(&metadata).await {
Ok(_) => {
info!("Backup restored successfully");
if let Err(e) = fs::remove_file(&restore_point) {
warn!("Failed to cleanup restore point: {}", e);
}
Ok(())
}
Err(e) => {
error!("Restore failed: {}", e);
warn!("Restore point preserved at: {:?}", restore_point);
Err(e)
}
}
}
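    /// List all known backups, newest first.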
pub async fn list_backups(&self) -> Result<Vec<BackupMetadata>> {
        let mut backups = self.load_all_backup_metadata().await?;
backups.sort_by(|a, b| b.created_at.cmp(&a.created_at));
Ok(backups)
}
pub async fn get_backup_by_id(&self, backup_id: &str) -> Result<Option<BackupMetadata>> {
let all_backups = self.list_backups().await?;
Ok(all_backups.into_iter().find(|b| b.id == backup_id))
}
pub async fn delete_backup(&self, backup_id: &str) -> Result<()> {
info!("Deleting backup: {}", backup_id);
if let Some(metadata) = self.get_backup_by_id(backup_id).await? {
if metadata.file_path.exists() {
fs::remove_file(&metadata.file_path)?;
debug!("Removed backup file: {:?}", metadata.file_path);
}
self.remove_backup_metadata(backup_id).await?;
info!("Backup deleted successfully: {}", backup_id);
} else {
warn!("Backup not found: {}", backup_id);
}
Ok(())
}
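    /// Verify a backup on disk: the file must exist, match its recorded
    /// size and SHA-256 checksum, and (for implemented formats) contain a
    /// readable tar archive.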
pub async fn verify_backup_integrity(&self, metadata: &BackupMetadata) -> Result<()> {
debug!("Verifying backup integrity: {}", metadata.id);
if !metadata.file_path.exists() {
return Err(anyhow::anyhow!(
"Backup file missing: {:?}",
metadata.file_path
));
}
let file_size = fs::metadata(&metadata.file_path)?.len();
if file_size != metadata.compressed_size {
return Err(anyhow::anyhow!(
"Backup file size mismatch: expected {}, got {}",
metadata.compressed_size,
file_size
));
}
let calculated_checksum = self.calculate_file_checksum(&metadata.file_path).await?;
if calculated_checksum != metadata.checksum {
return Err(anyhow::anyhow!(
"Backup checksum mismatch: expected {}, got {}",
metadata.checksum,
calculated_checksum
));
}
match metadata.compression_method {
CompressionMethod::Gzip => {
let file = File::open(&metadata.file_path)?;
let decoder = GzDecoder::new(file);
let mut archive = Archive::new(decoder);
for entry in archive.entries()? {
let entry = entry?;
debug!("Archive entry: {:?}", entry.path()?);
}
}
CompressionMethod::None => {
let file = File::open(&metadata.file_path)?;
let mut archive = Archive::new(file);
for entry in archive.entries()? {
let entry = entry?;
debug!("Archive entry: {:?}", entry.path()?);
}
}
CompressionMethod::Zstd => {
warn!("Zstd verification not yet implemented");
}
}
debug!("Backup integrity verification passed");
Ok(())
}
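    /// Summarize backup storage: count, total compressed bytes, and the
    /// oldest and newest backup timestamps.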
pub async fn get_storage_stats(&self) -> Result<BackupStorageStats> {
let backups = self.list_backups().await?;
let total_backups = backups.len();
let total_size = backups.iter().map(|b| b.compressed_size).sum();
let oldest_backup = backups.iter().map(|b| b.created_at).min();
let newest_backup = backups.iter().map(|b| b.created_at).max();
Ok(BackupStorageStats {
total_backups,
total_size_bytes: total_size,
oldest_backup,
newest_backup,
backup_dir: self.backup_dir.clone(),
})
}
async fn cleanup_old_backups(&self) -> Result<()> {
        let mut backups = self.list_backups().await?;
        let max_backups = self.config.max_backups as usize;
        if backups.len() <= max_backups {
            return Ok(());
        }
        // Oldest first; everything beyond the retention limit is removed.
        backups.sort_by(|a, b| a.created_at.cmp(&b.created_at));
        let excess = backups.len() - max_backups;
        for backup in backups.into_iter().take(excess) {
            info!(
                "Cleaning up old backup: {} ({})",
                backup.id, backup.created_at
            );
            if let Err(e) = self.delete_backup(&backup.id).await {
                warn!("Failed to delete old backup {}: {}", backup.id, e);
            }
        }
Ok(())
}
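    /// Generate a short backup identifier from a random UUID. Truncating
    /// to 8 characters keeps filenames readable; collisions are unlikely
    /// at typical backup counts, though not impossible.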
fn generate_backup_id(&self) -> String {
use uuid::Uuid;
Uuid::new_v4().to_string()[..8].to_string()
}
fn get_backup_paths(&self, backup_type: BackupType) -> Result<Vec<PathBuf>> {
let current_exe = std::env::current_exe()?;
let app_dir = current_exe
.parent()
.ok_or_else(|| anyhow::anyhow!("Cannot determine application directory"))?;
match backup_type {
BackupType::Full => {
let mut paths = vec![current_exe.clone()];
if let Some(home) = dirs::home_dir() {
let config_paths = vec![
home.join(".inferno.toml"),
home.join(".config/inferno/config.toml"),
];
for path in config_paths {
if path.exists() {
paths.push(path);
}
}
}
                if let Some(data_dir) = self.config.download_dir.parent() {
if data_dir.exists() {
paths.push(data_dir.to_path_buf());
}
}
Ok(paths)
}
BackupType::ConfigOnly => {
let mut paths = vec![];
if let Some(home) = dirs::home_dir() {
let config_paths = vec![
home.join(".inferno.toml"),
home.join(".config/inferno/config.toml"),
];
for path in config_paths {
if path.exists() {
paths.push(path);
}
}
}
Ok(paths)
}
BackupType::DataOnly => {
let mut paths = vec![];
                if let Some(data_dir) = self.config.download_dir.parent() {
if data_dir.exists() {
paths.push(data_dir.to_path_buf());
}
}
Ok(paths)
}
BackupType::Custom { paths } => Ok(paths),
}
}
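    /// Write `paths` into a gzip-compressed tar archive at `output_path`,
    /// returning `(compressed_size, uncompressed_size, checksum)`. Single
    /// files are stored under their file name; directories are stored
    /// relative to their parent.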
async fn create_compressed_archive(
&self,
paths: &[PathBuf],
output_path: &PathBuf,
) -> Result<(u64, u64, String)> {
let file = File::create(output_path)?;
let encoder = GzEncoder::new(file, Compression::default());
let mut archive = Builder::new(encoder);
let mut uncompressed_size = 0u64;
for path in paths {
if path.is_file() {
debug!("Adding file to backup: {:?}", path);
let file_size = fs::metadata(path)?.len();
uncompressed_size += file_size;
let relative_path = path
.file_name()
.ok_or_else(|| anyhow::anyhow!("Invalid file path: {:?}", path))?;
archive.append_file(relative_path, &mut File::open(path)?)?;
} else if path.is_dir() {
debug!("Adding directory to backup: {:?}", path);
                // Archive entries relative to the directory's parent so the
                // directory itself sits at the archive root.
                let base = path.parent().unwrap_or(path);
                self.add_directory_to_archive(&mut archive, path, base, &mut uncompressed_size)?;
}
}
        // Finish the tar stream, then the gzip stream, so all bytes are on
        // disk before the size and checksum are computed.
        let encoder = archive.into_inner()?;
        encoder.finish()?;
let compressed_size = fs::metadata(output_path)?.len();
let checksum = self.calculate_file_checksum(output_path).await?;
Ok((compressed_size, uncompressed_size, checksum))
}
    fn add_directory_to_archive(
        &self,
        archive: &mut Builder<GzEncoder<File>>,
        dir_path: &Path,
        base: &Path,
        uncompressed_size: &mut u64,
    ) -> Result<()> {
        for entry in fs::read_dir(dir_path)? {
            let entry = entry?;
            let path = entry.path();
            if path.is_file() {
                let file_size = fs::metadata(&path)?.len();
                *uncompressed_size += file_size;
                // Keep entries relative to the backup root (`base`) so nested
                // directories unpack with their full structure intact.
                let relative_path = path.strip_prefix(base)?;
                archive.append_file(relative_path, &mut File::open(&path)?)?;
            } else if path.is_dir() {
                self.add_directory_to_archive(archive, &path, base, uncompressed_size)?;
            }
}
Ok(())
}
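    /// Unpack the backup into a temporary directory, then move files into
    /// place, so a failure while unpacking cannot touch live files.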
async fn perform_restore(&self, metadata: &BackupMetadata) -> Result<()> {
info!("Performing restore from backup: {}", metadata.id);
let file = File::open(&metadata.file_path)?;
match metadata.compression_method {
CompressionMethod::Gzip => {
let decoder = GzDecoder::new(file);
let mut archive = Archive::new(decoder);
let temp_dir = tempfile::TempDir::new()?;
archive.unpack(temp_dir.path())?;
self.move_restored_files(temp_dir.path()).await?;
}
CompressionMethod::None => {
let mut archive = Archive::new(file);
let temp_dir = tempfile::TempDir::new()?;
archive.unpack(temp_dir.path())?;
self.move_restored_files(temp_dir.path()).await?;
}
CompressionMethod::Zstd => {
return Err(anyhow::anyhow!("Zstd decompression not yet implemented"));
}
}
Ok(())
}
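    /// Move files unpacked into `temp_dir` back to their live locations.
    /// Note that overwriting a running executable in place can fail on
    /// some platforms (e.g. ETXTBSY on Linux).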
async fn move_restored_files(&self, temp_dir: &Path) -> Result<()> {
for entry in fs::read_dir(temp_dir)? {
let entry = entry?;
let source_path = entry.path();
if let Some(filename) = source_path.file_name() {
if filename == "inferno" || filename == "inferno.exe" {
let current_exe = std::env::current_exe()?;
#[cfg(target_os = "windows")]
{
let backup_exe = current_exe.with_extension("exe.old");
                        fs::rename(&current_exe, &backup_exe)?;
                    }
                    fs::copy(&source_path, &current_exe)?;
                    #[cfg(unix)]
                    {
                        use std::os::unix::fs::PermissionsExt;
                        let mut perms = fs::metadata(&current_exe)?.permissions();
                        perms.set_mode(0o755);
                        fs::set_permissions(&current_exe, perms)?;
}
} else if filename.to_string_lossy().contains("config") {
if let Some(home) = dirs::home_dir() {
let config_path = home.join(".inferno.toml");
fs::copy(&source_path, &config_path)?;
}
}
}
}
Ok(())
}
async fn create_restore_point(&self) -> Result<PathBuf> {
let restore_point_path = self.backup_dir.join(format!(
"restore_point_{}.tar.gz",
chrono::Utc::now().timestamp()
));
let current_exe = std::env::current_exe()?;
let paths = vec![current_exe];
        self.create_compressed_archive(&paths, &restore_point_path)
            .await?;
Ok(restore_point_path)
}
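    /// Stream the file through SHA-256 on a blocking thread so hashing
    /// large archives does not stall the async runtime.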
async fn calculate_file_checksum(&self, file_path: &PathBuf) -> Result<String> {
let file_path = file_path.clone();
tokio::task::spawn_blocking(move || {
let mut file = File::open(&file_path)?;
let mut hasher = Sha256::new();
let mut buffer = [0; 8192];
loop {
let bytes_read = file.read(&mut buffer)?;
if bytes_read == 0 {
break;
}
hasher.update(&buffer[..bytes_read]);
}
let hash = hasher.finalize();
Ok(format!("{:x}", hash))
})
.await?
}
async fn save_backup_metadata(&self, metadata: &BackupMetadata) -> Result<()> {
let mut all_metadata = self.load_all_backup_metadata().await.unwrap_or_default();
all_metadata.push(metadata.clone());
let json_data = serde_json::to_string_pretty(&all_metadata)?;
fs::write(&self.metadata_file, json_data)?;
Ok(())
}
async fn load_all_backup_metadata(&self) -> Result<Vec<BackupMetadata>> {
if !self.metadata_file.exists() {
return Ok(vec![]);
}
let json_data = fs::read_to_string(&self.metadata_file)?;
let metadata: Vec<BackupMetadata> = serde_json::from_str(&json_data)?;
Ok(metadata)
}
async fn get_backup_metadata_by_path(&self, backup_path: &PathBuf) -> Result<BackupMetadata> {
let all_metadata = self.load_all_backup_metadata().await?;
all_metadata
.into_iter()
.find(|m| m.file_path == *backup_path)
.ok_or_else(|| anyhow::anyhow!("Backup metadata not found for path: {:?}", backup_path))
}
async fn remove_backup_metadata(&self, backup_id: &str) -> Result<()> {
let mut all_metadata = self.load_all_backup_metadata().await?;
all_metadata.retain(|m| m.id != backup_id);
let json_data = serde_json::to_string_pretty(&all_metadata)?;
fs::write(&self.metadata_file, json_data)?;
Ok(())
}
}
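/// Aggregate statistics over all backups in `backup_dir`.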
#[derive(Debug, Clone)]
pub struct BackupStorageStats {
pub total_backups: usize,
pub total_size_bytes: u64,
pub oldest_backup: Option<DateTime<Utc>>,
pub newest_backup: Option<DateTime<Utc>>,
pub backup_dir: PathBuf,
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::TempDir;
    fn create_test_config() -> UpgradeConfig {
        // Persist the temp directory: `TempDir` deletes its path on drop,
        // which would leave the config pointing at a removed directory.
        let backup_dir = TempDir::new().unwrap().into_path();
        UpgradeConfig {
            backup_dir,
            max_backups: 3,
            ..Default::default()
        }
    }
#[tokio::test]
async fn test_backup_manager_creation() {
let config = create_test_config();
let manager = BackupManager::new(&config);
assert!(manager.is_ok());
}
#[test]
fn test_backup_id_generation() {
let config = create_test_config();
let manager = BackupManager::new(&config).unwrap();
let id1 = manager.generate_backup_id();
let id2 = manager.generate_backup_id();
assert_ne!(id1, id2);
assert_eq!(id1.len(), 8);
}
#[tokio::test]
async fn test_backup_paths() {
let config = create_test_config();
let manager = BackupManager::new(&config).unwrap();
let full_paths = manager.get_backup_paths(BackupType::Full).unwrap();
assert!(!full_paths.is_empty());
let config_paths = manager.get_backup_paths(BackupType::ConfigOnly).unwrap();
println!("Config paths: {:?}", config_paths);
}
#[tokio::test]
async fn test_storage_stats() {
let config = create_test_config();
let manager = BackupManager::new(&config).unwrap();
let stats = manager.get_storage_stats().await.unwrap();
assert_eq!(stats.total_backups, 0);
assert_eq!(stats.total_size_bytes, 0);
}
}