use crate::config::BackupConfig;
use crate::error::{UpgradeError, UpgradeResult};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use sublime_standard_tools::filesystem::AsyncFileSystem;
/// Manages on-disk backups of workspace files for upgrade operations.
///
/// Generic over the filesystem so callers (and tests) can substitute their
/// own [`AsyncFileSystem`] implementation.
/// NOTE(review): `derive(Debug, Clone)` imposes `F: Debug + Clone` bounds on
/// the generated impls — confirm every filesystem used here satisfies both.
#[derive(Debug, Clone)]
pub struct BackupManager<F: AsyncFileSystem> {
    // Workspace root; backed-up files are stored under the backup directory
    // using their path relative to this root.
    workspace_root: PathBuf,
    // Backup policy: enabled flag, backup directory name, retention settings
    // (`max_backups`, `keep_after_success`).
    config: BackupConfig,
    // Filesystem abstraction used for all reads, writes, and deletions.
    fs: F,
}
/// Persisted record describing a single backup.
///
/// Serialized (via serde) into the collection stored at `metadata.json`
/// inside the backup directory; field names are part of the on-disk schema.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct BackupMetadata {
    // Unique backup id: "<timestamp>-<operation>" (see `generate_backup_id`).
    pub id: String,
    // UTC creation time of the backup.
    pub created_at: DateTime<Utc>,
    // Free-form name of the operation that triggered the backup.
    pub operation: String,
    // Absolute (normalized) paths of the files that were backed up.
    pub files: Vec<PathBuf>,
    // `false` on creation; flipped to `true` via `mark_success` once the
    // operation the backup protects has completed.
    pub success: bool,
}
/// On-disk container for all backup records (the root of `metadata.json`).
///
/// Kept newest-first: `add_backup_metadata` inserts at index 0, and both
/// `restore_last_backup` and `cleanup_old_backups` rely on that ordering.
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)]
struct BackupMetadataCollection {
    // Newest-first list of backups.
    backups: Vec<BackupMetadata>,
}
impl<F: AsyncFileSystem> BackupManager<F> {
    /// Creates a manager rooted at `workspace_root`, storing backups under
    /// the directory named by `config.backup_dir` and performing all I/O
    /// through `fs`.
    #[must_use]
    pub fn new(workspace_root: PathBuf, config: BackupConfig, fs: F) -> Self {
        Self { workspace_root, config, fs }
    }

    /// Copies `files` into a fresh backup directory (mirroring each file's
    /// workspace-relative layout), records metadata for the backup (with
    /// `success: false` until `mark_success` is called), prunes old backups
    /// per the retention policy, and returns the new backup id.
    ///
    /// # Errors
    /// - `BackupFailed` if backups are disabled in config, or any directory
    ///   creation / file read / file write fails.
    /// - `FileSystemError` if a requested file does not exist or cannot be
    ///   expressed relative to the workspace root.
    pub async fn create_backup(&self, files: &[PathBuf], operation: &str) -> UpgradeResult<String> {
        if !self.config.enabled {
            return Err(UpgradeError::BackupFailed {
                path: self.backup_dir(),
                reason: "Backups are disabled in configuration".to_string(),
            });
        }
        let backup_id = self.generate_backup_id(operation);
        let backup_path = self.normalize_fs_path(&self.backup_path(&backup_id));
        self.fs.create_dir_all(&backup_path).await.map_err(|e| UpgradeError::BackupFailed {
            path: backup_path.clone(),
            reason: format!("Failed to create backup directory: {}", e),
        })?;
        let mut backed_up_files = Vec::new();
        for file in files {
            // Relative inputs are resolved against the workspace root first.
            let absolute_path = self.normalize_fs_path(&self.resolve_path(file));
            let exists = self.fs.exists(&absolute_path).await;
            if !exists {
                return Err(UpgradeError::FileSystemError {
                    path: absolute_path,
                    reason: "File does not exist".to_string(),
                });
            }
            let relative = if absolute_path.is_absolute() {
                // Compare with forward slashes on Windows so `strip_prefix`
                // is not defeated by mixed separators.
                let normalize = |p: &Path| -> PathBuf {
                    #[cfg(windows)]
                    {
                        PathBuf::from(p.to_string_lossy().replace('\\', "/"))
                    }
                    #[cfg(not(windows))]
                    {
                        p.to_path_buf()
                    }
                };
                let normalized_workspace = normalize(&self.workspace_root);
                let normalized_file = normalize(&absolute_path);
                match normalized_file.strip_prefix(&normalized_workspace) {
                    Ok(rel) => rel.to_path_buf(),
                    Err(_) => {
                        // Lexical strip failed; retry with canonicalized
                        // paths — presumably to cope with symlinks or
                        // differing path spellings (TODO confirm). Either
                        // canonicalize failure falls back to the original
                        // path rather than erroring.
                        let canonical_workspace = self
                            .workspace_root
                            .canonicalize()
                            .unwrap_or_else(|_| self.workspace_root.clone());
                        let canonical_file =
                            absolute_path.canonicalize().unwrap_or_else(|_| absolute_path.clone());
                        let normalized_canonical_workspace = normalize(&canonical_workspace);
                        let normalized_canonical_file = normalize(&canonical_file);
                        normalized_canonical_file
                            .strip_prefix(&normalized_canonical_workspace)
                            .map_err(|_| UpgradeError::FileSystemError {
                                path: absolute_path.clone(),
                                reason: format!(
                                    "File is not within workspace. File: {:?}, Workspace: {:?}",
                                    normalized_canonical_file, normalized_canonical_workspace
                                ),
                            })?
                            .to_path_buf()
                    }
                }
            } else {
                // NOTE(review): this branch is reachable only when
                // `workspace_root` itself is relative (resolve_path joined it
                // and the result is still not absolute); the un-stripped path
                // is then used as the layout inside the backup dir — confirm
                // workspace_root is always absolute in practice.
                absolute_path.clone()
            };
            let target = self.normalize_fs_path(&backup_path.join(&relative));
            if let Some(parent) = target.parent() {
                self.fs.create_dir_all(parent).await.map_err(|e| UpgradeError::BackupFailed {
                    path: parent.to_path_buf(),
                    reason: format!("Failed to create parent directory: {}", e),
                })?;
            }
            let content = self.fs.read_file(&absolute_path).await.map_err(|e| {
                UpgradeError::BackupFailed {
                    path: absolute_path.clone(),
                    reason: format!("Failed to read file: {}", e),
                }
            })?;
            self.fs.write_file(&target, &content).await.map_err(|e| {
                UpgradeError::BackupFailed {
                    path: target.clone(),
                    reason: format!("Failed to write backup file: {}", e),
                }
            })?;
            backed_up_files.push(absolute_path);
        }
        let metadata = BackupMetadata {
            id: backup_id.clone(),
            created_at: Utc::now(),
            operation: operation.to_string(),
            files: backed_up_files,
            // Flipped to true later via `mark_success`.
            success: false,
        };
        self.add_backup_metadata(metadata).await?;
        self.cleanup_old_backups().await?;
        Ok(backup_id)
    }

    /// Restores every file recorded in backup `backup_id` back to its
    /// original workspace location, creating parent directories as needed.
    ///
    /// # Errors
    /// - `NoBackup` if the backup directory or its metadata entry is missing.
    /// - `RollbackFailed` if a recorded path is outside the workspace or any
    ///   read/write fails.
    pub async fn restore_backup(&self, backup_id: &str) -> UpgradeResult<()> {
        let backup_path = self.normalize_fs_path(&self.backup_path(backup_id));
        let exists = self.fs.exists(&backup_path).await;
        if !exists {
            return Err(UpgradeError::NoBackup { path: backup_path });
        }
        let metadata = self.get_backup_metadata(backup_id).await?;
        for file_path in &metadata.files {
            // NOTE(review): unlike create_backup, this strips the raw
            // (non-normalized, non-canonicalized) workspace root from the
            // stored path. On Windows the stored paths are slash-normalized
            // while `workspace_root` may not be, so this could fail where
            // create_backup succeeded — confirm and consider sharing the
            // same normalization fallback.
            let relative = file_path.strip_prefix(&self.workspace_root).map_err(|_| {
                UpgradeError::RollbackFailed {
                    reason: format!("File path not within workspace: {}", file_path.display()),
                }
            })?;
            let backup_file = self.normalize_fs_path(&backup_path.join(relative));
            let target_file = self.normalize_fs_path(file_path);
            if let Some(parent) = target_file.parent() {
                let parent_normalized = self.normalize_fs_path(parent);
                self.fs.create_dir_all(&parent_normalized).await.map_err(|e| {
                    UpgradeError::RollbackFailed {
                        reason: format!("Failed to create parent directory: {}", e),
                    }
                })?;
            }
            let content = self.fs.read_file(&backup_file).await.map_err(|e| {
                UpgradeError::RollbackFailed {
                    reason: format!("Failed to read backup file {}: {}", backup_file.display(), e),
                }
            })?;
            self.fs.write_file(&target_file, &content).await.map_err(|e| {
                UpgradeError::RollbackFailed {
                    reason: format!("Failed to restore file {}: {}", target_file.display(), e),
                }
            })?;
        }
        Ok(())
    }

    /// Restores the most recent backup.
    ///
    /// # Errors
    /// `NoBackup` if no backups exist; otherwise whatever `restore_backup`
    /// returns.
    pub async fn restore_last_backup(&self) -> UpgradeResult<()> {
        let backups = self.list_backups().await?;
        if backups.is_empty() {
            return Err(UpgradeError::NoBackup { path: self.backup_dir() });
        }
        // Metadata is kept newest-first (see add_backup_metadata), so index 0
        // is the latest backup.
        let last_backup = &backups[0];
        self.restore_backup(&last_backup.id).await
    }

    /// Returns all backup records, newest first.
    pub async fn list_backups(&self) -> UpgradeResult<Vec<BackupMetadata>> {
        let collection = self.load_metadata_collection().await?;
        Ok(collection.backups)
    }

    /// Deletes the backup directory for `backup_id` and removes its metadata
    /// record.
    ///
    /// # Errors
    /// `NoBackup` if the directory is missing; `FileSystemError` if removal
    /// fails.
    pub async fn delete_backup(&self, backup_id: &str) -> UpgradeResult<()> {
        let backup_path = self.normalize_fs_path(&self.backup_path(backup_id));
        let exists = self.fs.exists(&backup_path).await;
        if !exists {
            return Err(UpgradeError::NoBackup { path: backup_path });
        }
        self.fs.remove(&backup_path).await.map_err(|e| UpgradeError::FileSystemError {
            path: backup_path.clone(),
            reason: format!("Failed to delete backup: {}", e),
        })?;
        self.remove_backup_metadata(backup_id).await?;
        Ok(())
    }

    /// Marks backup `backup_id` as successful in the metadata store.
    ///
    /// Successful backups become eligible for deletion on the next cleanup
    /// when `config.keep_after_success` is false.
    ///
    /// # Errors
    /// `NoBackup` if no metadata record matches `backup_id`.
    pub async fn mark_success(&self, backup_id: &str) -> UpgradeResult<()> {
        let mut collection = self.load_metadata_collection().await?;
        let backup = collection
            .backups
            .iter_mut()
            .find(|b| b.id == backup_id)
            .ok_or_else(|| UpgradeError::NoBackup { path: self.backup_path(backup_id) })?;
        backup.success = true;
        self.save_metadata_collection(&collection).await?;
        Ok(())
    }

    /// Applies the retention policy: drops successful backups (unless
    /// `keep_after_success`), then trims the oldest remaining backups down to
    /// `config.max_backups`. Directory deletions are best-effort; metadata is
    /// always updated.
    pub async fn cleanup_old_backups(&self) -> UpgradeResult<()> {
        let mut collection = self.load_metadata_collection().await?;
        let mut to_remove_all: Vec<String> = Vec::new();
        if !self.config.keep_after_success {
            let to_remove: Vec<String> =
                collection.backups.iter().filter(|b| b.success).map(|b| b.id.clone()).collect();
            to_remove_all.extend(to_remove);
        }
        // Safe subtraction: every id collected above came from `backups`.
        let remaining_after_success = collection.backups.len() - to_remove_all.len();
        if remaining_after_success > self.config.max_backups {
            let to_remove_count = remaining_after_success - self.config.max_backups;
            // The list is newest-first, so `.rev()` walks oldest-first and
            // `take` removes the oldest surplus entries.
            let to_remove: Vec<String> = collection
                .backups
                .iter()
                .rev()
                .filter(|b| !to_remove_all.contains(&b.id))
                .take(to_remove_count)
                .map(|b| b.id.clone())
                .collect();
            to_remove_all.extend(to_remove);
        }
        for backup_id in &to_remove_all {
            let backup_path = self.normalize_fs_path(&self.backup_path(backup_id));
            if self.fs.exists(&backup_path).await {
                // Deliberate best-effort: a failed delete must not abort
                // cleanup; the metadata entry is still dropped below.
                let _ = self.fs.remove(&backup_path).await;
            }
        }
        // O(n*m) scan is acceptable here; backup lists are small by policy.
        collection.backups.retain(|b| !to_remove_all.contains(&b.id));
        self.save_metadata_collection(&collection).await?;
        Ok(())
    }

    // Root directory that holds all backups and metadata.json.
    fn backup_dir(&self) -> PathBuf {
        self.workspace_root.join(&self.config.backup_dir)
    }

    // Directory for one specific backup.
    fn backup_path(&self, backup_id: &str) -> PathBuf {
        self.backup_dir().join(backup_id)
    }

    // Location of the serialized BackupMetadataCollection.
    fn metadata_path(&self) -> PathBuf {
        self.backup_dir().join("metadata.json")
    }

    // Builds a backup id from the current UTC time (millisecond precision,
    // `%3f`) and the operation name.
    // NOTE(review): two backups for the same operation within the same
    // millisecond would collide — confirm callers cannot do that.
    fn generate_backup_id(&self, operation: &str) -> String {
        let now = Utc::now();
        format!("{}-{}", now.format("%Y-%m-%dT%H-%M-%S-%3f"), operation)
    }

    // Interprets relative paths as workspace-relative; absolute paths pass
    // through unchanged.
    fn resolve_path(&self, path: &Path) -> PathBuf {
        if path.is_absolute() { path.to_path_buf() } else { self.workspace_root.join(path) }
    }

    // On Windows, rewrites backslashes to forward slashes (lossy on non-UTF-8
    // paths) so paths handed to `fs` are separator-consistent; no-op
    // elsewhere.
    fn normalize_fs_path(&self, path: &Path) -> PathBuf {
        #[cfg(windows)]
        {
            PathBuf::from(path.to_string_lossy().replace('\\', "/"))
        }
        #[cfg(not(windows))]
        {
            path.to_path_buf()
        }
    }

    // Loads metadata.json, returning an empty collection when the file does
    // not exist yet.
    //
    // Errors: FileSystemError on read failure; BackupCorrupted on a JSON
    // parse failure.
    async fn load_metadata_collection(&self) -> UpgradeResult<BackupMetadataCollection> {
        let metadata_path = self.normalize_fs_path(&self.metadata_path());
        let exists = self.fs.exists(&metadata_path).await;
        if !exists {
            return Ok(BackupMetadataCollection::default());
        }
        let content = self.fs.read_file_string(&metadata_path).await.map_err(|e| {
            UpgradeError::FileSystemError {
                path: metadata_path.clone(),
                reason: format!("Failed to read metadata: {}", e),
            }
        })?;
        serde_json::from_str(&content).map_err(|e| UpgradeError::BackupCorrupted {
            path: metadata_path,
            reason: format!("Failed to parse metadata: {}", e),
        })
    }

    // Serializes the collection to pretty JSON and writes metadata.json,
    // creating the backup directory first if needed.
    async fn save_metadata_collection(
        &self,
        collection: &BackupMetadataCollection,
    ) -> UpgradeResult<()> {
        let metadata_path = self.normalize_fs_path(&self.metadata_path());
        let backup_dir = self.normalize_fs_path(&self.backup_dir());
        self.fs.create_dir_all(&backup_dir).await.map_err(|e| UpgradeError::BackupFailed {
            path: backup_dir,
            reason: format!("Failed to create backup directory: {}", e),
        })?;
        let content =
            serde_json::to_string_pretty(collection).map_err(|e| UpgradeError::BackupFailed {
                path: metadata_path.clone(),
                reason: format!("Failed to serialize metadata: {}", e),
            })?;
        self.fs.write_file_string(&metadata_path, &content).await.map_err(|e| {
            UpgradeError::BackupFailed {
                path: metadata_path,
                reason: format!("Failed to write metadata: {}", e),
            }
        })?;
        Ok(())
    }

    // Prepends a record (index 0 = newest) and persists the collection.
    async fn add_backup_metadata(&self, metadata: BackupMetadata) -> UpgradeResult<()> {
        let mut collection = self.load_metadata_collection().await?;
        collection.backups.insert(0, metadata);
        self.save_metadata_collection(&collection).await
    }

    // Removes the record with the given id (no-op if absent) and persists.
    async fn remove_backup_metadata(&self, backup_id: &str) -> UpgradeResult<()> {
        let mut collection = self.load_metadata_collection().await?;
        collection.backups.retain(|b| b.id != backup_id);
        self.save_metadata_collection(&collection).await
    }

    // Fetches the record for `backup_id`, or NoBackup if none matches.
    async fn get_backup_metadata(&self, backup_id: &str) -> UpgradeResult<BackupMetadata> {
        let collection = self.load_metadata_collection().await?;
        collection
            .backups
            .into_iter()
            .find(|b| b.id == backup_id)
            .ok_or_else(|| UpgradeError::NoBackup { path: self.backup_path(backup_id) })
    }
}
// Unit tests live in the adjacent `tests` module file (compiled only for
// test builds).
#[cfg(test)]
mod tests;