1use crate::{
2 container::DockerManager,
3 database::{BackupRecord, BackupStatus, BackupType, Database},
4 error::DuckError,
5};
6use anyhow::Result;
7use chrono::Utc;
8use flate2::Compression;
9use flate2::read::GzDecoder;
10use flate2::write::GzEncoder;
11use std::path::{Path, PathBuf};
12use std::{fs::File, sync::Arc};
13use tar::Archive;
14use tar::Builder;
15use tracing::{debug, error, info, warn};
16use walkdir::WalkDir;
17
/// Creates and restores gzip-compressed tar backups of service data.
///
/// Each backup is written into `storage_dir` and recorded in the database;
/// restore operations stop the Docker-managed services before replacing
/// data directories and optionally start them again afterwards.
#[derive(Debug, Clone)]
pub struct BackupManager {
    // Directory where backup archives (*.tar.gz) are written.
    storage_dir: PathBuf,
    // Database handle used to persist and look up backup records.
    database: Arc<Database>,
    // Docker manager used to stop/start services around restores.
    docker_manager: Arc<DockerManager>,
}
25
/// Parameters for creating a single backup archive.
#[derive(Debug, Clone)]
pub struct BackupOptions {
    // Manual vs. pre-upgrade backup; encoded into the archive filename.
    pub backup_type: BackupType,
    // Service version embedded in the filename and the database record.
    pub service_version: String,
    // Working directory. NOTE(review): not read by `create_backup` in this
    // file — confirm whether callers rely on it before removing.
    pub work_dir: PathBuf,
    // Files/directories to include; nonexistent paths are logged and skipped.
    pub source_paths: Vec<PathBuf>,
    // gzip level passed to `flate2::Compression::new` (0 = none, 9 = best).
    pub compression_level: u32,
}
40
/// Parameters for a restore operation.
/// NOTE(review): not referenced by any method visible in this file —
/// confirm callers before changing or removing.
#[derive(Debug, Clone)]
pub struct RestoreOptions {
    // Directory the backup should be unpacked into.
    pub target_dir: PathBuf,
    // Presumably allows overwriting existing files at the target when true
    // — TODO confirm against the code that consumes this struct.
    pub force_overwrite: bool,
}
49
50impl BackupManager {
51 pub fn new(
53 storage_dir: PathBuf,
54 database: Arc<Database>,
55 docker_manager: Arc<DockerManager>,
56 ) -> Result<Self> {
57 if !storage_dir.exists() {
58 std::fs::create_dir_all(&storage_dir)?;
59 }
60
61 Ok(Self {
62 storage_dir,
63 database,
64 docker_manager,
65 })
66 }
67
68 pub async fn create_backup(&self, options: BackupOptions) -> Result<BackupRecord> {
70 let need_backup_paths = options.source_paths;
72
73 let timestamp = Utc::now().format("%Y-%m-%d_%H-%M-%S");
75 let backup_type_str = match options.backup_type {
76 BackupType::Manual => "manual",
77 BackupType::PreUpgrade => "pre-upgrade",
78 };
79
80 let backup_filename = format!(
81 "backup_{}_v{}_{}.tar.gz",
82 backup_type_str, options.service_version, timestamp
83 );
84
85 let backup_path = self.storage_dir.join(&backup_filename);
86
87 info!("Starting to create backup: {}", backup_path.display());
88
89 match self
91 .perform_backup(&need_backup_paths, &backup_path, options.compression_level)
92 .await
93 {
94 Ok(_) => {
95 info!("Backup created successfully: {}", backup_path.display());
96
97 let record_id = self
99 .database
100 .create_backup_record(
101 backup_path.to_string_lossy().to_string(),
102 options.service_version,
103 options.backup_type,
104 BackupStatus::Completed,
105 )
106 .await?;
107
108 self.database
110 .get_backup_by_id(record_id)
111 .await?
112 .ok_or_else(|| anyhow::anyhow!("Cannot get the backup record just created"))
113 }
114 Err(e) => {
115 error!("Backup creation failed: {}", e);
116
117 self.database
119 .create_backup_record(
120 backup_path.to_string_lossy().to_string(),
121 options.service_version,
122 options.backup_type,
123 BackupStatus::Failed,
124 )
125 .await?;
126
127 Err(e)
128 }
129 }
130 }
131
    /// Writes a gzip-compressed tar archive of `source_paths` to `backup_path`.
    ///
    /// Single files are archived under their own (normalized) path;
    /// directories are walked recursively and their files stored under the
    /// directory's base name. Source paths that do not exist are logged and
    /// skipped by design. The archive work runs on a blocking thread so the
    /// async runtime is not stalled.
    async fn perform_backup(
        &self,
        source_paths: &[PathBuf],
        backup_path: &Path,
        compression_level: u32,
    ) -> Result<()> {
        // Make sure the destination directory exists before creating the file.
        if let Some(parent) = backup_path.parent() {
            tokio::fs::create_dir_all(parent).await?;
        }

        // Owned copies so the data can move into the blocking task.
        let source_paths = source_paths.to_vec();
        let backup_path = backup_path.to_path_buf();

        tokio::task::spawn_blocking(move || {
            let file = File::create(&backup_path)?;
            let compression = Compression::new(compression_level);
            let encoder = GzEncoder::new(file, compression);
            let mut archive = Builder::new(encoder);

            for source_path in &source_paths {
                if source_path.is_file() {
                    // Single file: stored without a base-directory prefix.
                    add_file_to_archive(&mut archive, source_path, None)?;
                } else if source_path.is_dir() {
                    let dir_name = source_path
                        .file_name()
                        .ok_or_else(|| anyhow::anyhow!("Cannot get directory name"))?
                        .to_string_lossy()
                        .to_string();

                    for entry in WalkDir::new(source_path) {
                        let entry = entry.map_err(|e| anyhow::anyhow!("Failed to traverse directory: {e}"))?;
                        let path = entry.path();

                        if path.is_file() {
                            // Stored as "<dir_name>/<path relative to source_path>".
                            add_file_to_archive(
                                &mut archive,
                                path,
                                Some((source_path, &dir_name)),
                            )?;
                        }
                    }
                } else {
                    // Missing sources are tolerated, not treated as errors.
                    info!("File or directory does not exist, no need to backup: {}", source_path.display());
                }
            }

            // Flush the tar footer and the gzip stream.
            archive
                .finish()
                .map_err(|e| anyhow::anyhow!("Failed to finish archive: {e}"))?;

            Ok::<(), anyhow::Error>(())
        })
        .await??;

        Ok(())
    }
199
200 pub async fn restore_data_from_backup_with_exculde(
202 &self,
203 backup_id: i64,
204 target_dir: &Path,
205 auto_start_service: bool,
206 dirs_to_exculde: &[&str],
207 ) -> Result<()> {
208 let backup_record = self
210 .database
211 .get_backup_by_id(backup_id)
212 .await?
213 .ok_or_else(|| anyhow::anyhow!("Backup record does not exist: {backup_id}"))?;
214
215 let backup_path = PathBuf::from(&backup_record.file_path);
216 if !backup_path.exists() {
217 return Err(anyhow::anyhow!("Backup file does not exist: {}", backup_path.display()));
218 }
219
220 info!("Starting intelligent data restore: {}", backup_path.display());
221 info!("Target directory: {}", target_dir.display());
222
223 info!("Stopping services...");
225 self.docker_manager.stop_services().await?;
226
227 self.clear_data_directories(target_dir, dirs_to_exculde)
229 .await?;
230
231 self.perform_restore(&backup_path, target_dir, dirs_to_exculde)
233 .await?;
234
235 if auto_start_service {
237 info!("Data restore completed, starting services...");
238 self.docker_manager.start_services().await?;
239 info!("Data restored and started successfully: {}", target_dir.display());
240 } else {
241 info!("Data restore completed, skipping service start (controlled by parent process)");
242 info!("Data restored successfully: {}", target_dir.display());
243 }
244
245 Ok(())
246 }
247
248 pub async fn restore_data_directory_only(
250 &self,
251 backup_id: i64,
252 target_dir: &Path,
253 auto_start_service: bool,
254 dirs_to_restore: &[&str],
255 ) -> Result<()> {
256 let backup_record = self
258 .database
259 .get_backup_by_id(backup_id)
260 .await?
261 .ok_or_else(|| anyhow::anyhow!("Backup record does not exist: {backup_id}"))?;
262
263 let backup_path = PathBuf::from(&backup_record.file_path);
264 if !backup_path.exists() {
265 return Err(anyhow::anyhow!("Backup file does not exist: {}", backup_path.display()));
266 }
267
268 info!("Starting data directory restore: {}", backup_path.display());
269 info!("Target directory: {}", target_dir.display());
270
271 info!("Stopping services...");
273 self.docker_manager.stop_services().await?;
274
275 self.clear_data_directory_only(target_dir).await?;
277
278 self.perform_selective_restore(&backup_path, target_dir, dirs_to_restore)
280 .await?;
281
282 if auto_start_service {
284 info!("Data directory restore completed, starting services...");
285 self.docker_manager.start_services().await?;
286 info!("Data directory restored and started successfully: {}", target_dir.display());
287 } else {
288 info!("Data directory restore completed, skipping service start (controlled by parent process)");
289 info!("Data directory restored successfully: {}", target_dir.display());
290 }
291
292 Ok(())
293 }
294
295 async fn clear_data_directories(
297 &self,
298 docker_dir: &Path,
299 dirs_to_exculde: &[&str],
300 ) -> Result<()> {
301 let mut data_dirs_to_clear: Vec<String> = vec!["data".to_string(), "app".to_string()];
302 data_dirs_to_clear.retain(|dir| !dirs_to_exculde.contains(&dir.as_str()));
304
305 for dir_name in data_dirs_to_clear.iter() {
306 let dir_path = docker_dir.join(dir_name);
307 if dir_path.exists() {
308 info!("Cleaning data directory: {}", dir_path.display());
309 self.force_remove_directory(&dir_path).await?;
310 }
311 }
312
313 info!("Data directory cleanup completed, config files preserved");
314 Ok(())
315 }
316
    /// Best-effort recursive deletion of `path`.
    ///
    /// Symlinks are unlinked without following them (their targets are
    /// untouched). Failures to remove individual files or subdirectories are
    /// logged as warnings rather than aborting the whole cleanup; only
    /// errors from reading directory entries or unlinking symlinks
    /// propagate as `Err`.
    async fn force_remove_directory(&self, path: &Path) -> Result<()> {
        if !path.exists() {
            return Ok(());
        }

        info!("Force cleaning directory: {}", path.display());

        // A symlinked "directory" is just the link itself — remove and stop.
        if path.is_symlink() {
            info!("Removing symbolic link: {}", path.display());
            tokio::fs::remove_file(path).await?;
            return Ok(());
        }

        let mut entries = match tokio::fs::read_dir(path).await {
            Ok(entries) => entries,
            Err(e) => {
                // Can't enumerate entries: fall back to a blanket remove_dir_all.
                warn!("Failed to read directory: {} - {}", path.display(), e);
                return tokio::fs::remove_dir_all(path)
                    .await
                    .map_err(|e| anyhow::anyhow!("Failed to delete directory: {} - {}", path.display(), e));
            }
        };

        while let Some(entry) = entries.next_entry().await? {
            let entry_path = entry.path();

            if entry_path.is_symlink() {
                info!("Removing symbolic link: {}", entry_path.display());
                tokio::fs::remove_file(&entry_path).await?;
            } else if entry_path.is_dir() {
                // Box::pin is required for recursion inside an async fn.
                Box::pin(self.force_remove_directory(&entry_path)).await?;

                // The recursive call may already have removed it; NotFound is fine.
                if let Err(e) = tokio::fs::remove_dir(&entry_path).await {
                    if e.kind() != std::io::ErrorKind::NotFound {
                        warn!("Failed to remove empty directory: {} - {}", entry_path.display(), e);
                    }
                }
            } else {
                // Regular file: remove, tolerating concurrent disappearance.
                if let Err(e) = tokio::fs::remove_file(&entry_path).await {
                    if e.kind() != std::io::ErrorKind::NotFound {
                        warn!("Failed to remove file: {} - {}", entry_path.display(), e);
                    }
                }
            }
        }

        // Finally remove the (now ideally empty) directory itself.
        if let Err(e) = tokio::fs::remove_dir(path).await {
            if e.kind() != std::io::ErrorKind::NotFound {
                warn!("Failed to remove root directory: {} - {}", path.display(), e);
            }
        }

        Ok(())
    }
378
    /// Removes only `<docker_dir>/data`; the `app` directory and config
    /// files are deliberately left in place.
    async fn clear_data_directory_only(&self, docker_dir: &Path) -> Result<()> {
        let data_dir = docker_dir.join("data");
        if data_dir.exists() {
            info!("Cleaning data directory: {}", data_dir.display());
            tokio::fs::remove_dir_all(&data_dir).await?;
        }

        info!("Data directory cleanup completed, app directory and config files preserved");
        Ok(())
    }
390
391 async fn perform_selective_restore(
393 &self,
394 backup_path: &Path,
395 target_dir: &Path,
396 dirs_to_restore: &[&str],
397 ) -> Result<()> {
398 use flate2::read::GzDecoder;
399 use std::fs::File;
400 use tar::Archive;
401
402 tokio::fs::create_dir_all(target_dir).await?;
404
405 let backup_path = backup_path.to_path_buf();
406 let target_dir = target_dir.to_path_buf();
407 let dirs_to_restore: Vec<String> = dirs_to_restore.iter().map(|s| s.to_string()).collect();
408
409 tokio::task::spawn_blocking(move || {
411 let file = File::open(&backup_path)?;
412 let decoder = GzDecoder::new(file);
413 let mut archive = Archive::new(decoder);
414
415 for entry in archive.entries()? {
417 let mut entry =
418 entry.map_err(|e| DuckError::Backup(format!("Failed to read archive entry: {e}")))?;
419
420 let entry_path = entry
422 .path()
423 .map_err(|e| DuckError::Backup(format!("Failed to get entry path: {e}")))?;
424 let entry_path_str = entry_path.to_string_lossy();
425
426 let should_restore = dirs_to_restore
428 .iter()
429 .any(|dir| entry_path_str.starts_with(&format!("{dir}/")));
430
431 if should_restore {
432 let target_path = target_dir.join(&*entry_path);
434
435 if let Some(parent) = target_path.parent() {
437 std::fs::create_dir_all(parent)?;
438 }
439
440 entry.unpack(&target_path).map_err(|e| {
442 DuckError::Backup(format!("Failed to unpack file {}: {e}", target_path.display()))
443 })?;
444
445 debug!("Restoring file: {}", target_path.display());
446 }
447 }
448
449 Ok::<(), DuckError>(())
450 })
451 .await??;
452
453 Ok(())
454 }
455
456 async fn perform_restore(
458 &self,
459 backup_path: &Path,
460 target_dir: &Path,
461 dirs_to_exculde: &[&str],
462 ) -> Result<()> {
463 tokio::fs::create_dir_all(target_dir).await?;
465
466 let backup_path = backup_path.to_path_buf();
467 let target_dir = target_dir.to_path_buf();
468 let dirs_to_exclude: Vec<String> = dirs_to_exculde.iter().map(|s| s.to_string()).collect();
469
470 tokio::task::spawn_blocking(move || {
472 let file = File::open(&backup_path)?;
473 let decoder = GzDecoder::new(file);
474 let mut archive = Archive::new(decoder);
475
476 let mut debug_dirs = std::collections::HashSet::new();
477
478 for entry in archive.entries()? {
480 let mut entry =
481 entry.map_err(|e| DuckError::Backup(format!("Failed to read archive entry: {e}")))?;
482
483 let entry_path = entry
485 .path()
486 .map_err(|e| DuckError::Backup(format!("Failed to get entry path: {e}")))?;
487 let entry_path_str = entry_path.to_string_lossy();
488
489 let path_components: Vec<&str> = entry_path_str.split('/').collect();
491
492 let should_exclude = if !path_components.is_empty() {
494 let first_level_dir = path_components[0];
495 debug_dirs.insert(first_level_dir.to_string());
496
497 dirs_to_exclude
498 .iter()
499 .any(|dir| dir.as_str() == first_level_dir)
500 } else {
501 false };
503
504 if !should_exclude {
505 let target_path = target_dir.join(&*entry_path);
507
508 if let Some(parent) = target_path.parent() {
510 std::fs::create_dir_all(parent)?;
511 }
512
513 entry.unpack(&target_path).map_err(|e| {
515 DuckError::Backup(format!("Failed to unpack file {}: {e}", target_path.display()))
516 })?;
517
518 debug!("Restoring file: {}", target_path.display());
519 }
520 }
521
522 debug!("Test log, restore directories: {:?}", debug_dirs);
523
524 Ok::<(), DuckError>(())
525 })
526 .await??;
527
528 Ok(())
529 }
530
    /// Returns all backup records stored in the database.
    pub async fn list_backups(&self) -> Result<Vec<BackupRecord>> {
        self.database.get_all_backups().await
    }
535
536 pub async fn delete_backup(&self, backup_id: i64) -> Result<()> {
538 let backup_record = self
540 .database
541 .get_backup_by_id(backup_id)
542 .await?
543 .ok_or_else(|| DuckError::Backup(format!("Backup record does not exist: {backup_id}")))?;
544
545 let backup_path = PathBuf::from(&backup_record.file_path);
546
547 if backup_path.exists() {
549 tokio::fs::remove_file(&backup_path).await?;
550 info!("Deleting backup file: {}", backup_path.display());
551 }
552
553 self.database.delete_backup_record(backup_id).await?;
555
556 Ok(())
557 }
558
559 pub async fn migrate_storage_directory(&self, new_storage_dir: &Path) -> Result<()> {
561 if new_storage_dir == self.storage_dir {
562 return Ok(()); }
564
565 info!(
566 "Starting to migrate backup storage directory: {} -> {}",
567 self.storage_dir.display(),
568 new_storage_dir.display()
569 );
570
571 tokio::fs::create_dir_all(new_storage_dir).await?;
573
574 let backups = self.list_backups().await?;
576
577 for backup in backups {
578 let old_path = PathBuf::from(&backup.file_path);
579 if old_path.exists() {
580 let filename = old_path
581 .file_name()
582 .ok_or_else(|| DuckError::Backup("Cannot get backup filename".to_string()))?;
583 let new_path = new_storage_dir.join(filename);
584
585 tokio::fs::rename(&old_path, &new_path).await?;
587 info!(
588 "Migrating backup file: {} -> {}",
589 old_path.display(),
590 new_path.display()
591 );
592
593 self.database
595 .update_backup_file_path(backup.id, new_path.to_string_lossy().to_string())
596 .await?;
597 }
598 }
599
600 info!("Backup storage directory migration completed");
601 Ok(())
602 }
603
    /// Returns the directory where backup archives are stored.
    pub fn get_storage_dir(&self) -> &Path {
        &self.storage_dir
    }
608
609 pub async fn estimate_backup_size(&self, source_dir: &Path) -> Result<u64> {
611 let source_dir = source_dir.to_path_buf();
612
613 let total_size = tokio::task::spawn_blocking(move || {
614 let mut total = 0u64;
615
616 for entry in WalkDir::new(&source_dir).into_iter().flatten() {
617 if entry.path().is_file() {
618 if let Ok(metadata) = entry.metadata() {
619 total += metadata.len();
620 }
621 }
622 }
623
624 total
625 })
626 .await?;
627
628 Ok(total_size / 2)
630 }
631}
632
633fn add_file_to_archive(
635 archive: &mut Builder<GzEncoder<File>>,
636 file_path: &Path,
637 base_info: Option<(&Path, &str)>,
638) -> Result<()> {
639 let archive_path = if let Some((base_dir, dir_name)) = base_info {
640 let relative_path = file_path
642 .strip_prefix(base_dir)
643 .map_err(|e| DuckError::Backup(format!("Failed to calculate relative path: {e}")))?;
644
645 if cfg!(windows) {
647 format!(
648 "{}/{}",
649 dir_name,
650 relative_path.display().to_string().replace('\\', "/")
651 )
652 } else {
653 format!("{}/{}", dir_name, relative_path.display())
654 }
655 } else {
656 let path_str = file_path.to_string_lossy().to_string();
658
659 let path_str = if cfg!(windows) {
661 path_str.replace('\\', "/")
662 } else {
663 path_str
664 };
665
666 if path_str.starts_with("./") {
668 path_str[2..].to_string()
669 } else {
670 path_str
671 }
672 };
673
674 debug!(
675 "添加文件到归档: {} -> {}",
676 file_path.display(),
677 archive_path
678 );
679
680 archive
681 .append_path_with_name(file_path, archive_path)
682 .map_err(|e| DuckError::Backup(format!("Failed to add file to archive: {e}")))?;
683
684 Ok(())
685}