1use anyhow::{Context, Result};
2use rayon::prelude::*;
3use std::path::PathBuf;
4use std::sync::atomic::{AtomicUsize, Ordering};
5use std::sync::Arc;
6use walkdir::WalkDir;
7
8use super::copy_engine::CopyEngine;
9use super::filter::FileFilter;
10use super::incremental::{BackupType, IncrementalBackupEngine};
11use super::integrity::IntegrityChecker;
12use super::pipeline::{PipelineConfig, ProcessingPipeline};
13use super::{Config, Priority, Target, TargetType};
14use crate::compression::CompressionType;
15use crate::crypto::{EncryptionConfig, KeyManager};
16use crate::i18n::{get_message, MessageKey};
17use crate::security::{safe_join, AuditEvent, AuditLog};
18use crate::ui::progress::BackupProgress;
19
/// Aggregate outcome of a single backup run.
#[derive(Debug)]
pub struct BackupResult {
    /// Number of files selected for backup (after filtering and, when
    /// incremental, change detection).
    pub total_files: usize,
    /// Files copied/processed successfully.
    pub successful: usize,
    /// Files that failed to copy or process.
    pub failed: usize,
    /// Total bytes written to the backup destination.
    pub total_bytes: u64,
    /// One human-readable message per failed file.
    pub errors: Vec<String>,
    /// Name of the timestamped backup directory (e.g. `backup_20240101_120000`).
    pub backup_name: String,
}
59
60impl BackupResult {
61 fn new() -> Self {
62 Self {
63 total_files: 0,
64 successful: 0,
65 failed: 0,
66 total_bytes: 0,
67 errors: Vec::new(),
68 backup_name: String::new(),
69 }
70 }
71}
72
/// Orchestrates a backup run: target selection, file collection, optional
/// compression/encryption, parallel copying, and integrity bookkeeping.
// Allowed: the bools are independent feature toggles set via the builder
// methods, not a hidden state machine.
#[allow(clippy::struct_excessive_bools)]
pub struct BackupRunner {
    // Backup targets and destination settings.
    config: Config,
    // When true, `run` only lists what would be copied.
    dry_run: bool,
    // Show progress spinner/bar during collection and copying.
    show_progress: bool,
    // Encrypt file contents via the processing pipeline.
    enable_encryption: bool,
    // Password used for master-key derivation when encryption is enabled.
    password: Option<String>,
    // Compression algorithm for the processing pipeline.
    compression_type: CompressionType,
    // Compression level passed to the chosen algorithm.
    compression_level: i32,
    // Record per-file hashes for later integrity verification.
    verify_integrity: bool,
    // Best-effort audit trail; `None` when initialization failed.
    audit_log: Option<AuditLog>,
    // Prefer an incremental backup over a full one when possible.
    incremental: bool,
    // UI language for progress/status messages.
    lang: crate::i18n::Language,
}
114
115impl BackupRunner {
116 #[must_use]
136 pub fn new(config: Config, dry_run: bool) -> Self {
137 let audit_log = AuditLog::new()
139 .map_err(|e| eprintln!("警告: 監査ログの初期化に失敗しました: {e}"))
140 .ok();
141
142 Self {
143 config,
144 dry_run,
145 show_progress: true, enable_encryption: false,
147 password: None,
148 compression_type: CompressionType::Zstd,
149 compression_level: 3,
150 verify_integrity: true, audit_log,
152 incremental: false,
153 lang: crate::i18n::Language::detect(),
154 }
155 }
156
    /// Enables or disables progress display (default: enabled).
    #[must_use]
    pub fn with_progress(mut self, show_progress: bool) -> Self {
        self.show_progress = show_progress;
        self
    }

    /// Turns on encryption, using `password` for master-key derivation.
    #[must_use]
    pub fn with_encryption(mut self, password: String) -> Self {
        self.enable_encryption = true;
        self.password = Some(password);
        self
    }

    /// Selects the compression algorithm and level (default: zstd, level 3).
    #[must_use]
    pub fn with_compression(mut self, compression_type: CompressionType, level: i32) -> Self {
        self.compression_type = compression_type;
        self.compression_level = level;
        self
    }

    /// Enables or disables per-file integrity hash recording (default: enabled).
    #[must_use]
    pub fn with_verification(mut self, verify: bool) -> Self {
        self.verify_integrity = verify;
        self
    }

    /// Requests an incremental backup instead of a full one (default: full).
    #[must_use]
    pub fn with_incremental(mut self, incremental: bool) -> Self {
        self.incremental = incremental;
        self
    }

    /// Overrides the auto-detected UI language.
    #[must_use]
    pub fn with_language(mut self, lang: crate::i18n::Language) -> Self {
        self.lang = lang;
        self
    }
218
219 pub fn run(
259 &mut self,
260 priority_filter: Option<&Priority>,
261 category_filter: Option<&str>,
262 ) -> Result<BackupResult> {
263 let user = AuditLog::current_user();
264 let target_desc = format!("priority={priority_filter:?}, category={category_filter:?}");
265
266 if let Some(ref mut audit_log) = self.audit_log {
268 let _ = audit_log
269 .log(AuditEvent::backup_started(&target_desc, &user))
270 .map_err(|e| eprintln!("警告: 監査ログの記録に失敗しました: {e}"));
271 }
272
273 let mut targets: Vec<&Target> = if let Some(priority) = priority_filter {
275 self.config.filter_by_priority(priority)
276 } else {
277 self.config.targets.iter().collect()
278 };
279
280 if let Some(category) = category_filter {
282 targets.retain(|t| t.category == category);
283 }
284
285 if targets.is_empty() {
286 return Ok(BackupResult::new());
287 }
288
289 let dest_base = &self.config.backup.destination;
291 let now = chrono::Local::now();
292 let timestamp = now.format("%Y%m%d_%H%M%S");
293 let backup_name = format!("backup_{timestamp}");
294 let backup_base = dest_base.join(&backup_name);
295
296 let (_key_manager, master_key, encryption_salt) =
298 if self.enable_encryption && self.password.is_some() {
299 let km = KeyManager::default();
300 let password = self.password.as_ref().ok_or_else(|| {
301 anyhow::anyhow!("暗号化が有効ですがパスワードが設定されていません")
302 })?;
303 let (mk, salt) = km
304 .create_master_key(password)
305 .context("マスターキー生成失敗")?;
306 (Some(km), Some(Arc::new(mk)), Some(salt))
307 } else {
308 (None, None, None)
309 };
310
311 let mut all_files: Vec<(PathBuf, PathBuf)> = Vec::new();
313
314 let collection_spinner = if self.show_progress {
316 let spinner = BackupProgress::new_spinner();
317 spinner.set_message("Collecting backup target files...");
318 Some(spinner)
319 } else {
320 None
321 };
322
323 for target in &targets {
324 let category = target.category.clone();
327 let backup_dir = backup_base.join(&category);
328
329 std::fs::create_dir_all(&backup_dir)
331 .context("バックアップディレクトリ作成失敗: backup_dir.display()".to_string())?;
332
333 let filter = if !target.exclude_patterns.is_empty() {
335 match FileFilter::new(&target.exclude_patterns) {
336 Ok(f) => Some(f),
337 Err(e) => {
338 eprintln!("警告: 除外パターンの処理に失敗: {e}");
339 None
340 }
341 }
342 } else {
343 None
344 };
345
346 match target.target_type {
347 TargetType::File => {
348 if target.path.exists() {
349 if let Some(ref f) = filter {
351 if f.should_exclude(&target.path) {
352 continue;
353 }
354 }
355
356 if let Ok(metadata) = target.path.metadata() {
358 let file_size = metadata.len();
359 const LARGE_FILE_THRESHOLD: u64 = 100 * 1024 * 1024 * 1024; if file_size > LARGE_FILE_THRESHOLD {
362 eprintln!(
363 "⚠️ 警告: 大容量ファイル検出 ({}GB): {:?}",
364 file_size / (1024 * 1024 * 1024),
365 target.path
366 );
367 eprintln!(" メモリ不足のリスクがあります。処理を続行しますが、システム監視を推奨します。");
368 }
369 }
370
371 if let Some(file_name) = target.path.file_name() {
373 match safe_join(&backup_dir, std::path::Path::new(file_name)) {
375 Ok(dest) => all_files.push((target.path.clone(), dest)),
376 Err(e) => eprintln!("警告: ファイルパス処理エラー: {e}"),
377 }
378 }
379 }
380 }
381 TargetType::Directory => {
382 let base_path = target.path.parent().unwrap_or(&target.path);
385
386 for entry in WalkDir::new(&target.path)
387 .into_iter()
388 .filter_map(std::result::Result::ok)
389 {
390 if entry.file_type().is_file() {
391 let source = entry.path().to_path_buf();
392
393 if let Ok(metadata) = entry.metadata() {
395 let file_size = metadata.len();
396 const LARGE_FILE_THRESHOLD: u64 = 100 * 1024 * 1024 * 1024; if file_size > LARGE_FILE_THRESHOLD {
399 eprintln!(
400 "⚠️ 警告: 大容量ファイル検出 ({}GB): {:?}",
401 file_size / (1024 * 1024 * 1024),
402 source
403 );
404 eprintln!(" メモリ不足のリスクがあります。処理を続行しますが、システム監視を推奨します。");
405 }
406 }
407
408 match source.strip_prefix(base_path) {
410 Ok(relative) => {
411 if let Some(ref f) = filter {
413 if f.should_exclude(relative) {
414 continue;
415 }
416 }
417
418 match safe_join(&backup_dir, relative) {
420 Ok(dest) => all_files.push((source, dest)),
421 Err(e) => {
422 eprintln!("警告: パストラバーサル検出、スキップ: {e}")
423 }
424 }
425 }
426 Err(e) => {
427 eprintln!("警告: パスのstrip_prefixに失敗: {e}");
428 }
429 }
430 }
431 }
432 }
433 }
434 }
435
436 if let Some(spinner) = collection_spinner {
438 spinner.finish(&format!(
439 "{} {}",
440 all_files.len(),
441 get_message(MessageKey::FilesDetected, self.lang)
442 ));
443 }
444
445 let inc_engine = IncrementalBackupEngine::new(dest_base.clone());
447 let backup_type = if self.incremental {
448 inc_engine.determine_backup_type()?
449 } else {
450 BackupType::Full
451 };
452
453 let (actual_backup_type, parent_backup_name, files_to_backup) =
455 if backup_type == BackupType::Incremental {
456 match inc_engine.load_previous_metadata() {
457 Ok(previous_metadata) => {
458 println!(
459 "{}",
460 get_message(MessageKey::IncrementalBackupMode, self.lang)
461 );
462
463 let files_with_relative: Vec<(PathBuf, PathBuf)> = all_files
465 .iter()
466 .filter_map(|(source, dest)| {
467 dest.strip_prefix(&backup_base)
468 .ok()
469 .map(|rel| (rel.to_path_buf(), source.clone()))
470 })
471 .collect();
472
473 let changed_files_relative = inc_engine
474 .detect_changed_files(&files_with_relative, &previous_metadata)?;
475
476 let changed_files: Vec<(PathBuf, PathBuf)> = changed_files_relative
478 .iter()
479 .filter_map(|(_relative_path, source_path)| {
480 all_files
481 .iter()
482 .find(|(src, _)| src == source_path)
483 .cloned()
484 })
485 .collect();
486
487 let parent_name = inc_engine.get_previous_backup_name()?;
488 println!(
489 " {}: {parent_name:?}",
490 get_message(MessageKey::PreviousBackupLabel, self.lang)
491 );
492 println!(
493 " {}: {}/{}",
494 get_message(MessageKey::ChangedFilesLabel, self.lang),
495 changed_files.len(),
496 all_files.len()
497 );
498
499 (BackupType::Incremental, parent_name, changed_files)
500 }
501 Err(e) => {
502 let error_msg = e.to_string();
504 if error_msg.contains("前回のバックアップが見つかりません")
505 || error_msg.contains("前回のバックアップメタデータ読み込み失敗")
506 {
507 println!("{}", get_message(MessageKey::NoBackupsFound, self.lang));
509 } else {
510 eprintln!("{}", get_message(MessageKey::FullBackupFallback, self.lang));
512 eprintln!(
513 "{}: {e}",
514 get_message(MessageKey::MetadataLoadFailed, self.lang)
515 );
516 }
517 println!("{}", get_message(MessageKey::FullBackupMode, self.lang));
518 (BackupType::Full, None, all_files.clone())
519 }
520 }
521 } else {
522 if self.incremental {
524 println!("{}", get_message(MessageKey::NoBackupsFound, self.lang));
525 }
526 println!("{}", get_message(MessageKey::FullBackupMode, self.lang));
527 (BackupType::Full, None, all_files.clone())
528 };
529
530 let total_files = files_to_backup.len();
531
532 if self.dry_run {
533 println!(
534 "{}",
535 get_message(MessageKey::DryRunMode, self.lang)
536 .replace("{}", &total_files.to_string())
537 );
538 for (source, dest) in &files_to_backup {
539 println!(" {} → {}", source.display(), dest.display());
540 }
541 return Ok(BackupResult {
542 total_files,
543 successful: 0,
544 failed: 0,
545 total_bytes: 0,
546 errors: Vec::new(),
547 backup_name,
548 });
549 }
550
551 let pipeline = if self.enable_encryption || self.compression_type != CompressionType::None {
553 let mut compression_config = match self.compression_type {
555 CompressionType::Zstd => crate::compression::CompressionConfig::zstd_default(),
556 CompressionType::Gzip => crate::compression::CompressionConfig::gzip_default(),
557 CompressionType::None => crate::compression::CompressionConfig::none(),
558 };
559 compression_config.level = self.compression_level;
560
561 let mut config = PipelineConfig::default()
562 .with_compression(self.compression_type, compression_config);
563
564 if self.enable_encryption {
565 config = config.with_encryption(EncryptionConfig::default());
566 }
567
568 Some(Arc::new(ProcessingPipeline::new(config)))
569 } else {
570 None
571 };
572
573 let progress = if self.show_progress {
575 Some(Arc::new(BackupProgress::with_language(
576 total_files as u64,
577 self.lang,
578 )))
579 } else {
580 None
581 };
582
583 let copy_engine = Arc::new(CopyEngine::new());
585
586 let integrity_checker = if self.verify_integrity {
588 Some(Arc::new(std::sync::Mutex::new(IntegrityChecker::new())))
589 } else {
590 None
591 };
592
593 let success_count = AtomicUsize::new(0);
595 let failed_count = AtomicUsize::new(0);
596 let total_bytes = AtomicUsize::new(0);
597
598 let errors: Vec<String> = files_to_backup
599 .par_iter()
600 .filter_map(|(source, dest)| {
601 if let Some(ref pb) = progress {
603 if let Some(file_name) = source.file_name() {
604 pb.set_message(&format!("処理中: {file_name:?}"));
605 }
606 }
607
608 let relative_path = dest.strip_prefix(&backup_base).ok();
610
611 if let Some(parent) = dest.parent() {
613 if let Err(e) = std::fs::create_dir_all(parent) {
614 failed_count.fetch_add(1, Ordering::Relaxed);
615 if let Some(ref pb) = progress {
616 pb.inc(1);
617 }
618 return Some(format!("ディレクトリ作成失敗 parent.display(): {e}"));
619 }
620 }
621
622 let copy_result = if let Some(ref pipeline) = pipeline {
624 match pipeline.process_file(
626 source,
627 master_key.as_ref().map(std::convert::AsRef::as_ref),
628 encryption_salt,
629 ) {
630 Ok(processed) => {
631 match std::fs::write(dest, &processed.data) {
633 Ok(_) => {
634 success_count.fetch_add(1, Ordering::Relaxed);
635 total_bytes.fetch_add(
636 processed.metadata.final_size as usize,
637 Ordering::Relaxed,
638 );
639 if let Some(ref pb) = progress {
640 pb.inc(1);
641 }
642 Ok(())
643 }
644 Err(e) => {
645 failed_count.fetch_add(1, Ordering::Relaxed);
646 if let Some(ref pb) = progress {
647 pb.inc(1);
648 }
649 Err(format!("書き込み失敗 dest.display(): {e}"))
650 }
651 }
652 }
653 Err(e) => {
654 failed_count.fetch_add(1, Ordering::Relaxed);
655 if let Some(ref pb) = progress {
656 pb.inc(1);
657 }
658 Err(format!("処理失敗 source.display(): {e}"))
659 }
660 }
661 } else {
662 match copy_engine.copy_file(source, dest) {
664 Ok(bytes) => {
665 success_count.fetch_add(1, Ordering::Relaxed);
666 total_bytes.fetch_add(bytes as usize, Ordering::Relaxed);
667 if let Some(ref pb) = progress {
668 pb.inc(1);
669 }
670 Ok(())
671 }
672 Err(e) => {
673 failed_count.fetch_add(1, Ordering::Relaxed);
674 if let Some(ref pb) = progress {
675 pb.inc(1);
676 }
677 Err(format!("コピー失敗 source.display(): {e}"))
678 }
679 }
680 };
681
682 if copy_result.is_ok() {
684 if let Some(ref checker) = integrity_checker {
685 if let Some(rel_path) = relative_path {
686 if let Ok(mut guard) = checker.lock() {
687 if let Ok(hash) = guard.compute_hash(source) {
688 guard.add_file_hash(rel_path.to_path_buf(), hash);
689 }
690 }
691 }
692 }
693 }
694
695 copy_result.err()
696 })
697 .collect();
698
699 if let Some(pb) = progress {
701 let failed = failed_count.load(Ordering::Relaxed);
702 if failed == 0 {
703 pb.finish(get_message(MessageKey::BackupComplete, self.lang));
704 } else {
705 pb.finish(&format!(
706 "{} ({} {})",
707 get_message(MessageKey::BackupCompleteWithFailures, self.lang)
708 .replace("(失敗あり)", "")
709 .replace("(有失败)", "")
710 .replace("(有失敗)", "")
711 .replace("(with failures)", ""),
712 failed,
713 get_message(MessageKey::FailedLabel, self.lang)
714 ));
715 }
716 }
717
718 if let Some(ref checker) = integrity_checker {
720 if let Ok(mut guard) = checker.lock() {
721 guard.metadata.backup_type = actual_backup_type;
723 guard.metadata.parent_backup = parent_backup_name;
724 guard.metadata.changed_files = files_to_backup
725 .iter()
726 .filter_map(|(_, dest)| {
727 dest.strip_prefix(&backup_base)
728 .ok()
729 .map(std::path::Path::to_path_buf)
730 })
731 .collect();
732
733 if actual_backup_type == BackupType::Incremental {
736 for (source, dest) in &all_files {
737 if let Ok(rel_path) = dest.strip_prefix(&backup_base) {
738 if !guard.metadata.file_hashes.contains_key(rel_path) {
740 if let Ok(hash) = guard.compute_hash(source) {
741 guard.add_file_hash(rel_path.to_path_buf(), hash);
742 }
743 }
744 }
745 }
746 }
747
748 if let Err(e) = guard.save_metadata(&backup_base) {
749 eprintln!("警告: 整合性メタデータの保存に失敗しました: {e}");
750 }
751 }
752 }
753
754 let result = BackupResult {
755 total_files,
756 successful: success_count.load(Ordering::Relaxed),
757 failed: failed_count.load(Ordering::Relaxed),
758 total_bytes: total_bytes.load(Ordering::Relaxed) as u64,
759 errors,
760 backup_name,
761 };
762
763 let success = result.failed == 0;
765 if let Err(e) = super::BackupHistory::save(&super::BackupHistory::new(
766 backup_base.clone(),
767 result.total_files,
768 result.total_bytes,
769 success,
770 self.compression_type != CompressionType::None,
771 self.enable_encryption,
772 )) {
773 eprintln!("履歴保存失敗: {e}");
774 }
775
776 if let Some(ref mut audit_log) = self.audit_log {
778 let metadata = serde_json::json!({
779 "total_files": result.total_files,
780 "successful": result.successful,
781 "failed": result.failed,
782 "total_bytes": result.total_bytes,
783 "backup_name": result.backup_name,
784 });
785
786 let event = if success {
787 AuditEvent::backup_completed(&target_desc, &user, metadata)
788 } else {
789 AuditEvent::backup_failed(
790 &target_desc,
791 &user,
792 format!("{}件のファイルでエラーが発生しました", result.failed),
793 )
794 };
795
796 let _ = audit_log
797 .log(event)
798 .map_err(|e| eprintln!("警告: 監査ログの記録に失敗しました: {e}"));
799 }
800
801 Ok(result)
802 }
803}
804
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::File;
    use std::io::Write;
    use tempfile::TempDir;

    /// A real (non-dry-run) backup of a single file copies it and reports
    /// one successful file with a non-zero byte count.
    #[test]
    fn test_backup_single_file() {
        let temp = TempDir::new().unwrap();
        let source_path = temp.path().join("test.txt");
        let mut source_file = File::create(&source_path).unwrap();
        source_file.write_all(b"test content").unwrap();

        let mut config = Config::default();
        config.add_target(Target::new(
            source_path.clone(),
            Priority::High,
            "test".to_string(),
        ));
        config.backup.destination = temp.path().join("backups");

        let mut runner = BackupRunner::new(config, false);
        let result = runner.run(None, None).unwrap();

        assert_eq!(result.total_files, 1);
        assert_eq!(result.successful, 1);
        assert_eq!(result.failed, 0);
        assert!(result.total_bytes > 0);
    }

    /// Dry-run mode counts the candidate files but copies nothing.
    #[test]
    fn test_backup_dry_run() {
        let temp = TempDir::new().unwrap();
        let source_path = temp.path().join("test.txt");
        File::create(&source_path).unwrap();

        let mut config = Config::default();
        config.add_target(Target::new(
            source_path.clone(),
            Priority::High,
            "test".to_string(),
        ));
        config.backup.destination = temp.path().join("backups");

        let mut runner = BackupRunner::new(config, true);
        let result = runner.run(None, None).unwrap();

        assert_eq!(result.total_files, 1);
        assert_eq!(result.successful, 0);
        assert_eq!(result.total_bytes, 0);
    }
}