1use crate::Result;
24use crate::cli::output::{OutputMode, active_mode, emit_success};
25use crate::cli::{ApplyArgs, CacheArgs, ClearArgs, ClearType, RollbackArgs, StatusArgs};
26use crate::config::ConfigService;
27use crate::core::lock::acquire_subx_lock;
28use crate::core::matcher::cache::CacheData;
29use crate::core::matcher::engine::{FileRelocationMode, MatchConfig, apply_cached_operations};
30use crate::core::matcher::journal::{
31 JournalData, JournalEntry, JournalEntryStatus, JournalOperationType,
32};
33use crate::error::SubXError;
34use serde::Serialize;
35use std::io::IsTerminal;
36use std::path::{Path, PathBuf};
37use std::time::{SystemTime, UNIX_EPOCH};
38
/// Machine-readable error descriptor attached to per-item results in JSON
/// output (see [`CacheApplyItem`]).
#[derive(Debug, Serialize)]
pub struct CacheItemError {
    /// Broad error category (from `SubXError::category`).
    pub category: String,
    /// Stable machine-readable code (from `SubXError::machine_code`).
    pub code: String,
    /// Human-readable message (from `SubXError::user_friendly_message`).
    pub message: String,
}
57
impl CacheItemError {
    /// Builds a serializable descriptor from a [`SubXError`], capturing its
    /// category, machine code, and user-friendly message.
    fn from_error(err: &SubXError) -> Self {
        Self {
            category: err.category().to_string(),
            code: err.machine_code().to_string(),
            message: err.user_friendly_message(),
        }
    }
}
67
/// One stale snapshot entry reported by `cache status`: a file that changed
/// (or disappeared) since the cache was created.
#[derive(Debug, Serialize)]
pub struct StaleFileInfo {
    /// Path of the file recorded in the snapshot.
    pub path: String,
    /// Why the file is considered stale (e.g. missing, size/mtime changed).
    pub reason: String,
}
76
/// JSON payload emitted by `cache status`.
///
/// Fields that only make sense when a cache file actually exists are
/// `Option` and omitted from the serialized output when `None`.
#[derive(Debug, Serialize)]
pub struct CacheStatusPayload {
    /// Filesystem path of the cache file.
    pub path: String,
    /// Whether the cache file exists on disk.
    pub exists: bool,
    /// Whether the operation journal file exists on disk.
    pub journal_present: bool,
    /// Number of match operations stored in the cache (0 when no cache).
    pub total: u64,
    /// Journal entries still pending.
    pub pending: u64,
    /// Journal entries already applied (completed).
    pub applied: u64,
    /// Size of the cache file in bytes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size_bytes: Option<u64>,
    /// Cache creation time as a Unix timestamp (seconds).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_at: Option<u64>,
    /// Seconds elapsed since the cache was created.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub age_seconds: Option<u64>,
    /// Cache format version string.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cache_version: Option<String>,
    /// AI model that produced the cached match operations.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ai_model: Option<String>,
    /// Number of cached match operations.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub operation_count: Option<usize>,
    /// Config hash recorded when the cache was created.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config_hash: Option<String>,
    /// Config hash computed from the current configuration.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub current_config_hash: Option<String>,
    /// Whether the cached and current config hashes agree.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config_hash_match: Option<bool>,
    /// Snapshot health: "empty", "valid", or "stale" (see `describe_snapshot`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub snapshot_status: Option<&'static str>,
    /// Details for each stale file when `snapshot_status` is "stale".
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stale_files: Option<Vec<StaleFileInfo>>,
}
130
/// JSON payload emitted by `cache clear`.
#[derive(Debug, Serialize)]
pub struct CacheClearPayload {
    /// Total number of files removed (0, 1, or 2).
    pub removed: u64,
    /// Which clear variant ran: "cache", "journal", or "all".
    pub kind: &'static str,
    /// Path of the cache file that was targeted.
    pub cache_path: String,
    /// Whether the cache file existed and was removed.
    pub cache_removed: bool,
    /// Path of the journal file that was targeted.
    pub journal_path: String,
    /// Whether the journal file existed and was removed.
    pub journal_removed: bool,
}
151
/// JSON payload emitted by `cache rollback`.
#[derive(Debug, Serialize)]
pub struct CacheRollbackPayload {
    /// Number of journal entries that were rolled back.
    pub rolled_back: u64,
}
158
/// Per-operation result reported by `cache apply` in JSON mode.
#[derive(Debug, Serialize)]
pub struct CacheApplyItem {
    /// Identifier of the operation (the subtitle file path).
    pub id: String,
    /// "ok" when applied, "error" when the operation failed.
    pub status: &'static str,
    /// Error details, present only when `status` is "error".
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<CacheItemError>,
}
171
/// JSON payload emitted by `cache apply`.
#[derive(Debug, Serialize)]
pub struct CacheApplyPayload {
    /// Number of operations applied successfully.
    pub applied: u64,
    /// Number of operations that failed.
    pub failed: u64,
    /// Per-operation results (empty when there was nothing to apply).
    pub items: Vec<CacheApplyItem>,
}
186
187async fn journal_counters(path: &Path) -> (u64, u64) {
190 if !path.exists() {
191 return (0, 0);
192 }
193 match JournalData::load(path).await {
194 Ok(j) => {
195 let mut pending = 0u64;
196 let mut applied = 0u64;
197 for entry in &j.entries {
198 match entry.status {
199 JournalEntryStatus::Pending => pending += 1,
200 JournalEntryStatus::Completed => applied += 1,
201 }
202 }
203 (pending, applied)
204 }
205 Err(_) => (0, 0),
206 }
207}
208
209fn get_config_dir() -> Result<PathBuf> {
215 if let Some(xdg_config) = std::env::var_os("XDG_CONFIG_HOME") {
216 Ok(PathBuf::from(xdg_config))
217 } else {
218 dirs::config_dir().ok_or_else(|| SubXError::config("Unable to determine config directory"))
219 }
220}
221
222fn cache_path() -> Result<PathBuf> {
224 Ok(get_config_dir()?.join("subx").join("match_cache.json"))
225}
226
227fn journal_path() -> Result<PathBuf> {
229 Ok(get_config_dir()?.join("subx").join("match_journal.json"))
230}
231
232fn clear_file(path: &Path, label: &str) -> Result<bool> {
238 let json_mode = active_mode().is_json();
239 if path.exists() {
240 std::fs::remove_file(path)?;
241 if !json_mode {
242 println!("{} cleared: {}", label, path.display());
243 }
244 Ok(true)
245 } else {
246 if !json_mode {
247 println!("{} not found: {}", label, path.display());
248 }
249 Ok(false)
250 }
251}
252
253async fn execute_clear(args: &ClearArgs) -> Result<()> {
259 let _lock = acquire_subx_lock().await?;
260 let config_dir = get_config_dir()?;
261 let cache_file = config_dir.join("subx").join("match_cache.json");
262 let journal_file = config_dir.join("subx").join("match_journal.json");
263
264 let json_mode = active_mode().is_json();
265 let mut cache_removed = false;
266 let mut journal_removed = false;
267
268 match args.r#type {
269 ClearType::Cache => {
270 cache_removed = clear_file(&cache_file, "Cache")?;
271 }
272 ClearType::Journal => {
273 journal_removed = clear_file(&journal_file, "Journal")?;
274 }
275 ClearType::All => {
276 cache_removed = clear_file(&cache_file, "Cache")?;
277 journal_removed = clear_file(&journal_file, "Journal")?;
278 }
279 }
280
281 let removed = u64::from(cache_removed) + u64::from(journal_removed);
282
283 if json_mode {
284 let kind = match args.r#type {
285 ClearType::Cache => "cache",
286 ClearType::Journal => "journal",
287 ClearType::All => "all",
288 };
289 let payload = CacheClearPayload {
290 removed,
291 kind,
292 cache_path: cache_file.to_string_lossy().into_owned(),
293 cache_removed,
294 journal_path: journal_file.to_string_lossy().into_owned(),
295 journal_removed,
296 };
297 emit_success(OutputMode::Json, "cache", payload);
298 } else if removed == 0 {
299 println!("No cache files found to clear.");
300 }
301 Ok(())
302}
303
/// Hashes the pair (relocation mode debug string, backup flag) into a
/// 16-character lowercase hex digest.
///
/// The hash order (mode first, then flag) must not change: cached hashes are
/// compared against freshly computed ones to detect config drift.
fn compute_config_hash(relocation_mode_debug: &str, backup_enabled: bool) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let digest = {
        let mut state = DefaultHasher::new();
        relocation_mode_debug.hash(&mut state);
        backup_enabled.hash(&mut state);
        state.finish()
    };
    format!("{digest:016x}")
}
318
319fn current_config_hash(config_service: &dyn ConfigService) -> Result<String> {
323 let config = config_service.get_config()?;
324 Ok(compute_config_hash("None", config.general.backup_enabled))
325}
326
/// Renders a byte count as a human-readable size ("512 B", "1.5 KB", …).
/// Scaled units get one decimal place; raw bytes are printed unscaled.
fn format_size(bytes: u64) -> String {
    const KB: f64 = 1024.0;
    const MB: f64 = KB * 1024.0;
    const GB: f64 = MB * 1024.0;
    let value = bytes as f64;
    // Largest-first scan picks the biggest unit the value reaches.
    let unit = [(GB, "GB"), (MB, "MB"), (KB, "KB")]
        .iter()
        .find(|(threshold, _)| value >= *threshold);
    match unit {
        Some((divisor, label)) => format!("{:.1} {}", value / divisor, label),
        None => format!("{} B", bytes),
    }
}
343
/// Renders an age in seconds as a coarse "<n> <unit> ago" string.
///
/// Units are always plural ("1 minutes ago") — this exact wording is pinned
/// by the module's tests, so it is preserved deliberately.
fn format_age(age_secs: u64) -> String {
    const MIN: u64 = 60;
    const HOUR: u64 = 60 * MIN;
    const DAY: u64 = 24 * HOUR;
    // Largest unit the age fully reaches; integer division truncates.
    let (quantity, unit) = if age_secs >= DAY {
        (age_secs / DAY, "days")
    } else if age_secs >= HOUR {
        (age_secs / HOUR, "hours")
    } else if age_secs >= MIN {
        (age_secs / MIN, "minutes")
    } else {
        (age_secs, "seconds")
    };
    format!("{} {} ago", quantity, unit)
}
359
360fn describe_snapshot(cache: &CacheData) -> (String, &'static str) {
366 if cache.has_empty_snapshot() {
367 ("Empty (legacy cache)".to_string(), "empty")
368 } else {
369 let stale = cache.validate_snapshot();
370 if stale.is_empty() {
371 ("Valid".to_string(), "valid")
372 } else {
373 (format!("Stale ({} files changed)", stale.len()), "stale")
374 }
375 }
376}
377
/// Reports cache status: file metadata, config-hash comparison, snapshot
/// health, and journal counters.
///
/// JSON output is used when the global output mode is JSON *or* the
/// command-level `--json` flag is set. A missing cache file is not an
/// error — journal counters are still reported so pending operations
/// remain visible.
pub async fn execute_status(args: &StatusArgs, config_service: &dyn ConfigService) -> Result<()> {
    let cache_file = cache_path()?;
    let journal_file = journal_path()?;
    let json_mode = active_mode().is_json() || args.json;

    // No cache on disk: emit a minimal payload (or a one-line message).
    if !cache_file.exists() {
        let journal_present = journal_file.exists();
        let (pending, applied) = journal_counters(&journal_file).await;
        if json_mode {
            let payload = CacheStatusPayload {
                path: cache_file.to_string_lossy().into_owned(),
                exists: false,
                journal_present,
                total: 0,
                pending,
                applied,
                size_bytes: None,
                created_at: None,
                age_seconds: None,
                cache_version: None,
                ai_model: None,
                operation_count: None,
                config_hash: None,
                current_config_hash: None,
                config_hash_match: None,
                snapshot_status: None,
                stale_files: None,
            };
            emit_success(OutputMode::Json, "cache", payload);
        } else {
            println!("No cache found at {}", cache_file.display());
        }
        return Ok(());
    }

    // A cache that exists but cannot be parsed is a hard error with context.
    let cache = CacheData::load(&cache_file).map_err(|e| {
        SubXError::config(format!(
            "Failed to load cache at {}: {}",
            cache_file.display(),
            e
        ))
    })?;

    let metadata = std::fs::metadata(&cache_file)?;
    let size_bytes = metadata.len();

    // Clock problems (pre-epoch system time) degrade to age 0 instead of
    // failing the status command.
    let now_secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);
    let age_secs = now_secs.saturating_sub(cache.created_at);

    let current_hash = current_config_hash(config_service)?;
    let hash_match = current_hash == cache.config_hash;

    let (snapshot_label, snapshot_status) = describe_snapshot(&cache);
    // NOTE(review): validate_snapshot() already ran inside describe_snapshot;
    // this second call re-scans the snapshot to collect the stale entries.
    // Acceptable for a status command, but could be folded into one call.
    let stale_entries = if snapshot_status == "stale" {
        cache.validate_snapshot()
    } else {
        Vec::new()
    };
    let journal_present = journal_file.exists();
    let (pending, applied) = journal_counters(&journal_file).await;
    let total = cache.match_operations.len() as u64;

    if json_mode {
        let stale_files: Vec<StaleFileInfo> = stale_entries
            .iter()
            .map(|s| StaleFileInfo {
                path: s.path.clone(),
                reason: s.reason.clone(),
            })
            .collect();
        let payload = CacheStatusPayload {
            path: cache_file.to_string_lossy().into_owned(),
            exists: true,
            journal_present,
            total,
            pending,
            applied,
            size_bytes: Some(size_bytes),
            created_at: Some(cache.created_at),
            age_seconds: Some(age_secs),
            cache_version: Some(cache.cache_version.clone()),
            ai_model: Some(cache.ai_model_used.clone()),
            operation_count: Some(cache.match_operations.len()),
            config_hash: Some(cache.config_hash.clone()),
            current_config_hash: Some(current_hash),
            config_hash_match: Some(hash_match),
            snapshot_status: Some(snapshot_status),
            stale_files: Some(stale_files),
        };
        emit_success(OutputMode::Json, "cache", payload);
    } else {
        // Human-readable report.
        let config_line = if hash_match {
            "✓ (matches current)".to_string()
        } else {
            format!("✗ (differs from current: {})", current_hash)
        };
        let journal_line = if journal_present {
            "Present"
        } else {
            "Not found"
        };

        println!("Cache Status");
        println!("============");
        println!("Path: {}", cache_file.display());
        println!("Size: {}", format_size(size_bytes));
        println!("Age: {}", format_age(age_secs));
        println!("Cache version: {}", cache.cache_version);
        println!("AI model: {}", cache.ai_model_used);
        println!("Operations: {}", cache.match_operations.len());
        println!("Config hash: {}", cache.config_hash);
        println!("Config match: {}", config_line);
        println!("Snapshot: {}", snapshot_label);
        println!("Journal: {}", journal_line);
    }

    Ok(())
}
526
527pub async fn execute_apply(args: &ApplyArgs, config_service: &dyn ConfigService) -> Result<()> {
555 let _lock = acquire_subx_lock().await?;
556 let json_mode = active_mode().is_json();
557
558 let cache_file = cache_path()?;
559 if !cache_file.exists() {
560 if json_mode {
561 emit_success(
563 OutputMode::Json,
564 "cache",
565 CacheApplyPayload {
566 applied: 0,
567 failed: 0,
568 items: Vec::new(),
569 },
570 );
571 } else {
572 println!(
573 "No cache found at {}. Run a dry-run match first.",
574 cache_file.display()
575 );
576 }
577 return Ok(());
578 }
579
580 let mut cache = CacheData::load(&cache_file).map_err(|e| {
581 SubXError::config(format!(
582 "Failed to load cache at {}: {}",
583 cache_file.display(),
584 e
585 ))
586 })?;
587
588 let config = config_service.get_config()?;
590 let apply_hash = compute_config_hash(
591 &cache.original_relocation_mode,
592 config.general.backup_enabled,
593 );
594 if apply_hash != cache.config_hash && !args.force {
595 return Err(SubXError::config(format!(
596 "Configuration has changed since the cache was created.\n\
597 Cache hash: {}\n\
598 Current hash: {}\n\
599 Use --force to bypass this check.",
600 cache.config_hash, apply_hash
601 )));
602 }
603
604 if cache.has_empty_snapshot() && !args.force {
606 return Err(SubXError::config(
607 "Cache was created without file snapshot data (legacy format).\n\
608 Cannot verify file integrity. Use --force to apply anyway."
609 .to_string(),
610 ));
611 }
612
613 if !args.force && !cache.has_empty_snapshot() {
615 let stale = cache.validate_snapshot();
616 if !stale.is_empty() {
617 let mut msg = format!(
618 "{} source file(s) have changed since the cache was created:\n",
619 stale.len()
620 );
621 for s in &stale {
622 msg.push_str(&format!(" - {} ({})\n", s.path, s.reason));
623 }
624 msg.push_str("Use --force to apply anyway.");
625 return Err(SubXError::config(msg));
626 }
627 }
628
629 if !args.force {
631 let conflicts = cache.validate_target_paths();
632 if !conflicts.is_empty() {
633 let mut msg = format!("{} target path(s) already exist:\n", conflicts.len());
634 for p in &conflicts {
635 msg.push_str(&format!(" - {}\n", p.display()));
636 }
637 msg.push_str("Use --force to apply anyway.");
638 return Err(SubXError::config(msg));
639 }
640 }
641
642 if let Some(min_conf) = args.confidence {
644 let threshold = f32::from(min_conf) / 100.0;
645 let before = cache.match_operations.len();
646 cache
647 .match_operations
648 .retain(|op| op.confidence >= threshold);
649 let after = cache.match_operations.len();
650 if before != after && !json_mode {
651 println!(
652 "Filtered {} operation(s) below {}% confidence.",
653 before - after,
654 min_conf
655 );
656 }
657 }
658
659 if cache.match_operations.is_empty() {
660 if json_mode {
661 emit_success(
662 OutputMode::Json,
663 "cache",
664 CacheApplyPayload {
665 applied: 0,
666 failed: 0,
667 items: Vec::new(),
668 },
669 );
670 } else {
671 println!("No operations to apply.");
672 }
673 return Ok(());
674 }
675
676 if !json_mode {
677 println!("Cache Apply Summary");
679 println!("===================");
680 println!("Operations: {}", cache.match_operations.len());
681 println!("AI model: {}", cache.ai_model_used);
682 println!("Relocation mode: {}", cache.original_relocation_mode);
683 println!();
684 for (i, op) in cache.match_operations.iter().enumerate() {
685 println!(
686 " {}. {} → {} (confidence: {:.0}%)",
687 i + 1,
688 op.subtitle_file,
689 op.new_subtitle_name,
690 op.confidence * 100.0
691 );
692 }
693 println!();
694 }
695
696 if !args.yes {
700 if json_mode {
701 return Err(SubXError::CommandExecution(
702 "cache apply in JSON output mode requires --yes (interactive confirmation \
703 would write to stdout and corrupt the JSON envelope)."
704 .to_string(),
705 ));
706 }
707 if !std::io::stdin().is_terminal() {
708 return Err(SubXError::config(
709 "Non-interactive terminal detected. Use --yes to skip confirmation.".to_string(),
710 ));
711 }
712 print!("Proceed with apply? [y/N] ");
713 use std::io::Write;
714 std::io::stdout().flush()?;
715 let mut input = String::new();
716 std::io::stdin().read_line(&mut input)?;
717 if !input.trim().eq_ignore_ascii_case("y") {
718 println!("Apply cancelled.");
719 return Ok(());
720 }
721 }
722
723 let config = config_service.get_config()?;
725 let relocation_mode = parse_relocation_mode(&cache.original_relocation_mode);
726 let match_config = MatchConfig {
727 confidence_threshold: 0.0,
728 max_sample_length: 2000,
729 enable_content_analysis: true,
730 backup_enabled: cache.original_backup_enabled,
731 relocation_mode,
732 conflict_resolution: crate::core::matcher::engine::ConflictResolution::Skip,
733 ai_model: cache.ai_model_used.clone(),
734 max_subtitle_bytes: config.general.max_subtitle_bytes,
735 };
736
737 if json_mode {
738 let mut items: Vec<CacheApplyItem> = Vec::with_capacity(cache.match_operations.len());
742 let mut applied = 0u64;
743 let mut failed = 0u64;
744
745 for op in &cache.match_operations {
746 let id = op.subtitle_file.clone();
747 let video_exists = std::path::Path::new(&op.video_file).exists();
748 let sub_exists = std::path::Path::new(&op.subtitle_file).exists();
749 if !video_exists || !sub_exists {
750 let missing = if !sub_exists {
751 op.subtitle_file.clone()
752 } else {
753 op.video_file.clone()
754 };
755 let err = SubXError::FileNotFound(missing);
756 items.push(CacheApplyItem {
757 id,
758 status: "error",
759 error: Some(CacheItemError::from_error(&err)),
760 });
761 failed += 1;
762 continue;
763 }
764
765 let mut single = cache.clone();
766 single.match_operations = vec![op.clone()];
767 match apply_cached_operations(&single, &match_config).await {
768 Ok(()) => {
769 applied += 1;
770 items.push(CacheApplyItem {
771 id,
772 status: "ok",
773 error: None,
774 });
775 }
776 Err(e) => {
777 failed += 1;
778 items.push(CacheApplyItem {
779 id,
780 status: "error",
781 error: Some(CacheItemError::from_error(&e)),
782 });
783 }
784 }
785 }
786
787 emit_success(
788 OutputMode::Json,
789 "cache",
790 CacheApplyPayload {
791 applied,
792 failed,
793 items,
794 },
795 );
796 } else {
797 apply_cached_operations(&cache, &match_config).await?;
798 println!("Apply complete.");
799 }
800 Ok(())
801}
802
803fn parse_relocation_mode(s: &str) -> FileRelocationMode {
805 match s {
806 "Copy" => FileRelocationMode::Copy,
807 "Move" => FileRelocationMode::Move,
808 _ => FileRelocationMode::None,
809 }
810}
811
/// Verifies that a journal entry's destination file is unchanged since the
/// operation was recorded, so a rollback won't destroy user edits.
///
/// Checks, in order: existence, size, then modification time. The mtime
/// check is skipped when the platform cannot report a modification time.
///
/// # Errors
/// Returns a config error (mentioning `--force`) when the file is missing
/// or has been modified; propagates other I/O errors as `SubXError::Io`.
fn verify_destination_integrity(entry: &JournalEntry) -> Result<()> {
    let metadata = match std::fs::metadata(&entry.destination) {
        Ok(m) => m,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            return Err(SubXError::config(format!(
                "Destination file {} no longer exists. Use --force to override.",
                entry.destination.display()
            )));
        }
        Err(e) => return Err(SubXError::Io(e)),
    };

    // Size is the cheap first-line check.
    if metadata.len() != entry.file_size {
        return Err(SubXError::config(format!(
            "Destination file {} has been modified since the operation (size differs). \
            Use --force to override.",
            entry.destination.display()
        )));
    }

    // Best-effort mtime comparison in whole seconds (matching how the
    // journal recorded it); None if the platform/filesystem can't say.
    let mtime_secs = metadata
        .modified()
        .ok()
        .and_then(|m| m.duration_since(UNIX_EPOCH).ok())
        .map(|d| d.as_secs());

    if let Some(actual) = mtime_secs {
        if actual != entry.file_mtime {
            return Err(SubXError::config(format!(
                "Destination file {} has been modified since the operation (mtime differs). \
                Use --force to override.",
                entry.destination.display()
            )));
        }
    }

    Ok(())
}
857
/// Undoes a single journal entry on disk.
///
/// - `Copied`: the destination copy is deleted (source was never touched).
/// - `Moved`/`Renamed`: the file is moved back from destination to source,
///   recreating parent directories as needed. If something now occupies the
///   original source path, the rollback refuses unless `force` is set.
///
/// Any backup file recorded for the entry is removed afterwards. Progress
/// lines are suppressed in JSON output mode.
fn rollback_entry(entry: &JournalEntry, force: bool) -> Result<()> {
    let json_mode = active_mode().is_json();
    match entry.operation_type {
        JournalOperationType::Copied => {
            std::fs::remove_file(&entry.destination)?;
            if !json_mode {
                println!("Removed copy: {}", entry.destination.display());
            }
        }
        JournalOperationType::Moved | JournalOperationType::Renamed => {
            // Don't silently overwrite whatever now lives at the source path.
            if entry.source.exists() && !force {
                return Err(SubXError::config(format!(
                    "Original source path {} already exists. \
                    Rollback would overwrite it. Use --force to override.",
                    entry.source.display()
                )));
            }
            // The source's parent directory may have been removed since the
            // original operation; recreate it (empty parent means CWD).
            if let Some(parent) = entry.source.parent() {
                if !parent.as_os_str().is_empty() {
                    std::fs::create_dir_all(parent)?;
                }
            }
            std::fs::rename(&entry.destination, &entry.source)?;
            if !json_mode {
                // \u{2190} is a left arrow: "source ← destination".
                println!(
                    "Rolled back: {} \u{2190} {}",
                    entry.source.display(),
                    entry.destination.display()
                );
            }
        }
    }

    // Clean up the backup created during the original operation, if any.
    if let Some(backup) = &entry.backup_path {
        if backup.exists() {
            std::fs::remove_file(backup)?;
            if !json_mode {
                println!("Removed backup: {}", backup.display());
            }
        }
    }

    Ok(())
}
915
/// Rolls back all completed operations recorded in the journal, newest
/// first, then deletes the journal file.
///
/// Unless `--force` is given, each destination is integrity-checked
/// (size/mtime) before being touched. The whole run is guarded by the
/// global subx lock. A missing or empty journal is reported as success
/// with `rolled_back: 0`.
pub async fn execute_rollback(args: &RollbackArgs) -> Result<()> {
    let _lock = acquire_subx_lock().await?;
    let json_mode = active_mode().is_json();

    let journal_file = journal_path()?;
    if !journal_file.exists() {
        if json_mode {
            emit_success(
                OutputMode::Json,
                "cache",
                CacheRollbackPayload { rolled_back: 0 },
            );
        } else {
            println!("No operation journal found. Nothing to rollback.");
        }
        return Ok(());
    }

    let journal = JournalData::load(&journal_file).await?;

    // Undo in reverse order so later operations are unwound before the
    // earlier ones they may depend on. Only completed entries qualify.
    let reversed: Vec<&JournalEntry> = journal
        .entries
        .iter()
        .filter(|e| e.status == JournalEntryStatus::Completed)
        .rev()
        .collect();

    if reversed.is_empty() {
        if json_mode {
            emit_success(
                OutputMode::Json,
                "cache",
                CacheRollbackPayload { rolled_back: 0 },
            );
        } else {
            println!("Journal has no completed operations to rollback.");
        }
        return Ok(());
    }

    if !json_mode {
        println!(
            "Rolling back {} operations from batch {}...",
            reversed.len(),
            journal.batch_id
        );
    }

    let mut rolled_back: u64 = 0;
    for entry in &reversed {
        // Integrity check first so we fail before mutating anything.
        if !args.force {
            verify_destination_integrity(entry)?;
        }
        rollback_entry(entry, args.force)?;
        rolled_back += 1;
    }

    // All entries undone: the journal is spent, remove it.
    std::fs::remove_file(&journal_file)?;

    if json_mode {
        emit_success(
            OutputMode::Json,
            "cache",
            CacheRollbackPayload { rolled_back },
        );
    } else {
        println!("Rollback complete. Journal deleted.");
    }
    Ok(())
}
997
/// Entry point for `subx cache` without an injected config service.
///
/// Constructs a `ProductionConfigService` on demand for the subcommands
/// that need configuration (status, apply); clear and rollback run without
/// one.
pub async fn execute(args: CacheArgs) -> Result<()> {
    match args.action {
        crate::cli::CacheAction::Clear(clear_args) => {
            execute_clear(&clear_args).await?;
        }
        crate::cli::CacheAction::Status(status_args) => {
            let config_service = crate::config::ProductionConfigService::new()?;
            execute_status(&status_args, &config_service).await?;
        }
        crate::cli::CacheAction::Apply(ref apply_args) => {
            let config_service = crate::config::ProductionConfigService::new()?;
            execute_apply(apply_args, &config_service).await?;
        }
        crate::cli::CacheAction::Rollback(rollback_args) => {
            execute_rollback(&rollback_args).await?;
        }
    }
    Ok(())
}
1024
1025pub async fn execute_with_config(
1039 args: CacheArgs,
1040 config_service: std::sync::Arc<dyn ConfigService>,
1041) -> Result<()> {
1042 match args.action {
1043 crate::cli::CacheAction::Status(status_args) => {
1044 execute_status(&status_args, config_service.as_ref()).await
1045 }
1046 crate::cli::CacheAction::Apply(apply_args) => {
1047 execute_apply(&apply_args, config_service.as_ref()).await
1048 }
1049 other => execute(CacheArgs { action: other }).await,
1050 }
1051}
1052
1053#[cfg(test)]
1054mod tests {
1055 use super::*;
1056 use crate::config::TestConfigService;
1057 use crate::core::matcher::cache::{CacheData, SnapshotItem};
1058 use crate::core::matcher::journal::{JournalEntry, JournalEntryStatus, JournalOperationType};
1059 use std::path::PathBuf;
1060 use tempfile::TempDir;
1061
1062 fn isolated_config_dir() -> (TempDir, PathBuf) {
1069 let tmp = TempDir::new().expect("tempdir");
1070 unsafe {
1071 std::env::set_var("XDG_CONFIG_HOME", tmp.path());
1072 }
1073 let subx_dir = tmp.path().join("subx");
1074 std::fs::create_dir_all(&subx_dir).expect("create subx dir");
1075 (tmp, subx_dir)
1076 }
1077
1078 fn make_journal_entry(
1081 op_type: JournalOperationType,
1082 source: PathBuf,
1083 destination: PathBuf,
1084 ) -> JournalEntry {
1085 let meta = std::fs::metadata(&destination).expect("destination must exist");
1086 let mtime = meta
1087 .modified()
1088 .unwrap()
1089 .duration_since(std::time::UNIX_EPOCH)
1090 .unwrap()
1091 .as_secs();
1092 JournalEntry {
1093 operation_type: op_type,
1094 source,
1095 destination,
1096 backup_path: None,
1097 status: JournalEntryStatus::Completed,
1098 file_size: meta.len(),
1099 file_mtime: mtime,
1100 }
1101 }
1102
1103 fn empty_snapshot_cache() -> CacheData {
1105 CacheData {
1106 cache_version: "1.0".into(),
1107 directory: "/tmp".into(),
1108 file_snapshot: vec![],
1109 match_operations: vec![],
1110 created_at: 0,
1111 ai_model_used: "test-model".into(),
1112 config_hash: "abc123".into(),
1113 original_relocation_mode: "None".into(),
1114 original_backup_enabled: false,
1115 }
1116 }
1117
1118 #[test]
1123 fn format_size_bytes() {
1124 assert_eq!(format_size(0), "0 B");
1125 assert_eq!(format_size(512), "512 B");
1126 assert_eq!(format_size(1023), "1023 B");
1127 }
1128
1129 #[test]
1130 fn format_size_kilobytes() {
1131 assert_eq!(format_size(1024), "1.0 KB");
1132 assert_eq!(format_size(2048), "2.0 KB");
1133 let just_below_mb = (1024.0 * 1024.0 - 1.0) as u64;
1135 let result = format_size(just_below_mb);
1136 assert!(result.ends_with("KB"), "expected KB, got {result}");
1137 }
1138
1139 #[test]
1140 fn format_size_megabytes() {
1141 assert_eq!(format_size(1024 * 1024), "1.0 MB");
1142 assert_eq!(format_size(5 * 1024 * 1024), "5.0 MB");
1143 let just_below_gb = (1024.0 * 1024.0 * 1024.0 - 1.0) as u64;
1145 let result = format_size(just_below_gb);
1146 assert!(result.ends_with("MB"), "expected MB, got {result}");
1147 }
1148
1149 #[test]
1150 fn format_size_gigabytes() {
1151 assert_eq!(format_size(1024 * 1024 * 1024), "1.0 GB");
1152 assert_eq!(format_size(2 * 1024 * 1024 * 1024), "2.0 GB");
1153 }
1154
1155 #[test]
1160 fn format_age_seconds() {
1161 assert_eq!(format_age(0), "0 seconds ago");
1162 assert_eq!(format_age(30), "30 seconds ago");
1163 assert_eq!(format_age(59), "59 seconds ago");
1164 }
1165
1166 #[test]
1167 fn format_age_minutes() {
1168 assert_eq!(format_age(60), "1 minutes ago");
1169 assert_eq!(format_age(90), "1 minutes ago");
1170 assert_eq!(format_age(3599), "59 minutes ago");
1171 }
1172
1173 #[test]
1174 fn format_age_hours() {
1175 assert_eq!(format_age(3600), "1 hours ago");
1176 assert_eq!(format_age(7200), "2 hours ago");
1177 assert_eq!(format_age(86399), "23 hours ago");
1178 }
1179
1180 #[test]
1181 fn format_age_days() {
1182 assert_eq!(format_age(86400), "1 days ago");
1183 assert_eq!(format_age(172800), "2 days ago");
1184 assert_eq!(format_age(604800), "7 days ago");
1185 }
1186
1187 #[test]
1192 fn compute_config_hash_is_deterministic() {
1193 let h1 = compute_config_hash("None", false);
1194 let h2 = compute_config_hash("None", false);
1195 assert_eq!(h1, h2);
1196 }
1197
1198 #[test]
1199 fn compute_config_hash_differs_for_different_modes() {
1200 let h_none = compute_config_hash("None", false);
1201 let h_copy = compute_config_hash("Copy", false);
1202 let h_move = compute_config_hash("Move", false);
1203 assert_ne!(h_none, h_copy);
1204 assert_ne!(h_none, h_move);
1205 assert_ne!(h_copy, h_move);
1206 }
1207
1208 #[test]
1209 fn compute_config_hash_differs_for_backup_flag() {
1210 let h_off = compute_config_hash("None", false);
1211 let h_on = compute_config_hash("None", true);
1212 assert_ne!(h_off, h_on);
1213 }
1214
1215 #[test]
1216 fn compute_config_hash_is_16_hex_chars() {
1217 let h = compute_config_hash("None", false);
1218 assert_eq!(h.len(), 16);
1219 assert!(h.chars().all(|c| c.is_ascii_hexdigit()));
1220 }
1221
1222 #[test]
1223 fn current_config_hash_returns_string() {
1224 let svc = TestConfigService::with_defaults();
1225 let h = current_config_hash(&svc).expect("should succeed");
1226 assert_eq!(h.len(), 16);
1227 }
1228
1229 #[test]
1234 fn parse_relocation_mode_copy() {
1235 assert!(matches!(
1236 parse_relocation_mode("Copy"),
1237 FileRelocationMode::Copy
1238 ));
1239 }
1240
1241 #[test]
1242 fn parse_relocation_mode_move() {
1243 assert!(matches!(
1244 parse_relocation_mode("Move"),
1245 FileRelocationMode::Move
1246 ));
1247 }
1248
1249 #[test]
1250 fn parse_relocation_mode_none_keyword() {
1251 assert!(matches!(
1252 parse_relocation_mode("None"),
1253 FileRelocationMode::None
1254 ));
1255 }
1256
1257 #[test]
1258 fn parse_relocation_mode_unknown_falls_back_to_none() {
1259 assert!(matches!(
1260 parse_relocation_mode("UnknownVariant"),
1261 FileRelocationMode::None
1262 ));
1263 }
1264
1265 #[test]
1270 fn describe_snapshot_empty_is_reported_as_legacy() {
1271 let cache = empty_snapshot_cache();
1272 let (label, status) = describe_snapshot(&cache);
1273 assert_eq!(status, "empty");
1274 assert!(label.contains("legacy"), "label: {label}");
1275 }
1276
1277 #[test]
1278 fn describe_snapshot_valid_when_files_match_on_disk() {
1279 let tmp = TempDir::new().unwrap();
1280 let file = tmp.path().join("video.srt");
1281 std::fs::write(&file, "content").unwrap();
1282 let meta = std::fs::metadata(&file).unwrap();
1283 let mtime = meta
1284 .modified()
1285 .unwrap()
1286 .duration_since(std::time::UNIX_EPOCH)
1287 .unwrap()
1288 .as_secs();
1289
1290 let mut cache = empty_snapshot_cache();
1291 cache.file_snapshot = vec![SnapshotItem {
1292 path: file.to_string_lossy().into_owned(),
1293 name: "video.srt".into(),
1294 size: meta.len(),
1295 mtime,
1296 file_type: "subtitle".into(),
1297 }];
1298
1299 let (label, status) = describe_snapshot(&cache);
1300 assert_eq!(status, "valid", "label: {label}");
1301 assert_eq!(label, "Valid");
1302 }
1303
1304 #[test]
1305 fn describe_snapshot_stale_when_file_missing() {
1306 let tmp = TempDir::new().unwrap();
1307 let missing = tmp.path().join("gone.srt");
1308
1309 let mut cache = empty_snapshot_cache();
1310 cache.file_snapshot = vec![SnapshotItem {
1311 path: missing.to_string_lossy().into_owned(),
1312 name: "gone.srt".into(),
1313 size: 100,
1314 mtime: 999,
1315 file_type: "subtitle".into(),
1316 }];
1317
1318 let (label, status) = describe_snapshot(&cache);
1319 assert_eq!(status, "stale", "label: {label}");
1320 assert!(label.starts_with("Stale"), "label: {label}");
1321 }
1322
1323 #[test]
1328 fn clear_file_returns_true_and_removes_existing_file() {
1329 let tmp = TempDir::new().unwrap();
1330 let target = tmp.path().join("to_delete.txt");
1331 std::fs::write(&target, "data").unwrap();
1332 assert!(target.exists());
1333
1334 let result = clear_file(&target, "Cache").expect("should succeed");
1335 assert!(result, "should return true when file existed");
1336 assert!(!target.exists(), "file should be removed");
1337 }
1338
1339 #[test]
1340 fn clear_file_returns_false_when_file_absent() {
1341 let tmp = TempDir::new().unwrap();
1342 let missing = tmp.path().join("nonexistent.txt");
1343 assert!(!missing.exists());
1344
1345 let result = clear_file(&missing, "Cache").expect("should succeed");
1346 assert!(!result, "should return false when file was absent");
1347 }
1348
1349 #[test]
1354 fn get_config_dir_uses_xdg_config_home_when_set() {
1355 let tmp = TempDir::new().unwrap();
1356 unsafe {
1357 std::env::set_var("XDG_CONFIG_HOME", tmp.path());
1358 }
1359 let dir = get_config_dir().expect("should succeed");
1360 assert_eq!(dir, tmp.path());
1361 }
1362
1363 #[test]
1364 fn cache_path_ends_with_expected_components() {
1365 let tmp = TempDir::new().unwrap();
1366 unsafe {
1367 std::env::set_var("XDG_CONFIG_HOME", tmp.path());
1368 }
1369 let p = cache_path().expect("should succeed");
1370 assert!(p.ends_with("subx/match_cache.json"));
1371 }
1372
1373 #[test]
1374 fn journal_path_ends_with_expected_components() {
1375 let tmp = TempDir::new().unwrap();
1376 unsafe {
1377 std::env::set_var("XDG_CONFIG_HOME", tmp.path());
1378 }
1379 let p = journal_path().expect("should succeed");
1380 assert!(p.ends_with("subx/match_journal.json"));
1381 }
1382
1383 #[test]
1388 fn verify_destination_integrity_ok_when_metadata_matches() {
1389 let tmp = TempDir::new().unwrap();
1390 let dst = tmp.path().join("dest.srt");
1391 std::fs::write(&dst, "hello").unwrap();
1392
1393 let entry = make_journal_entry(
1394 JournalOperationType::Copied,
1395 tmp.path().join("src.srt"),
1396 dst,
1397 );
1398
1399 verify_destination_integrity(&entry).expect("should pass integrity check");
1400 }
1401
1402 #[test]
1403 fn verify_destination_integrity_errors_when_file_missing() {
1404 let tmp = TempDir::new().unwrap();
1405 let dst = tmp.path().join("missing.srt");
1406 let entry = JournalEntry {
1409 operation_type: JournalOperationType::Copied,
1410 source: tmp.path().join("src.srt"),
1411 destination: dst,
1412 backup_path: None,
1413 status: JournalEntryStatus::Completed,
1414 file_size: 5,
1415 file_mtime: 1_700_000_000,
1416 };
1417
1418 let err = verify_destination_integrity(&entry).expect_err("should fail");
1419 let msg = format!("{err}");
1420 assert!(
1421 msg.contains("no longer exists"),
1422 "error should mention missing file: {msg}"
1423 );
1424 }
1425
1426 #[test]
1427 fn verify_destination_integrity_errors_on_size_mismatch() {
1428 let tmp = TempDir::new().unwrap();
1429 let dst = tmp.path().join("sized.srt");
1430 std::fs::write(&dst, "hello").unwrap(); let meta = std::fs::metadata(&dst).unwrap();
1433 let mtime = meta
1434 .modified()
1435 .unwrap()
1436 .duration_since(std::time::UNIX_EPOCH)
1437 .unwrap()
1438 .as_secs();
1439
1440 let entry = JournalEntry {
1441 operation_type: JournalOperationType::Copied,
1442 source: tmp.path().join("src.srt"),
1443 destination: dst,
1444 backup_path: None,
1445 status: JournalEntryStatus::Completed,
1446 file_size: 999, file_mtime: mtime,
1448 };
1449
1450 let err = verify_destination_integrity(&entry).expect_err("should fail on size mismatch");
1451 let msg = format!("{err}");
1452 assert!(
1453 msg.contains("size differs"),
1454 "error should mention size: {msg}"
1455 );
1456 }
1457
1458 #[test]
1459 fn verify_destination_integrity_errors_on_mtime_mismatch() {
1460 let tmp = TempDir::new().unwrap();
1461 let dst = tmp.path().join("mtimed.srt");
1462 std::fs::write(&dst, "hello").unwrap();
1463 let meta = std::fs::metadata(&dst).unwrap();
1464
1465 let entry = JournalEntry {
1466 operation_type: JournalOperationType::Copied,
1467 source: tmp.path().join("src.srt"),
1468 destination: dst,
1469 backup_path: None,
1470 status: JournalEntryStatus::Completed,
1471 file_size: meta.len(),
1472 file_mtime: 1, };
1474
1475 let err = verify_destination_integrity(&entry).expect_err("should fail on mtime mismatch");
1476 let msg = format!("{err}");
1477 assert!(
1478 msg.contains("mtime differs"),
1479 "error should mention mtime: {msg}"
1480 );
1481 }
1482
1483 #[test]
1488 fn rollback_entry_copied_removes_destination() {
1489 let tmp = TempDir::new().unwrap();
1490 let src = tmp.path().join("src.srt");
1491 let dst = tmp.path().join("dst.srt");
1492 std::fs::write(&src, "original").unwrap();
1493 std::fs::write(&dst, "copy").unwrap();
1494
1495 let entry = make_journal_entry(JournalOperationType::Copied, src.clone(), dst.clone());
1496 rollback_entry(&entry, false).expect("rollback copy");
1497
1498 assert!(!dst.exists(), "copy destination must be removed");
1499 assert!(src.exists(), "source must remain");
1500 }
1501
1502 #[test]
1503 fn rollback_entry_moved_restores_source() {
1504 let tmp = TempDir::new().unwrap();
1505 let src = tmp.path().join("original.srt");
1506 let dst = tmp.path().join("moved.srt");
1507 std::fs::write(&dst, "payload").unwrap();
1509
1510 let entry = make_journal_entry(JournalOperationType::Moved, src.clone(), dst.clone());
1511 rollback_entry(&entry, false).expect("rollback move");
1512
1513 assert!(src.exists(), "source must be restored");
1514 assert!(!dst.exists(), "destination must be removed");
1515 assert_eq!(std::fs::read_to_string(&src).unwrap(), "payload");
1516 }
1517
1518 #[test]
1519 fn rollback_entry_renamed_restores_source() {
1520 let tmp = TempDir::new().unwrap();
1521 let src = tmp.path().join("old_name.srt");
1522 let dst = tmp.path().join("new_name.srt");
1523 std::fs::write(&dst, "content").unwrap();
1524
1525 let entry = make_journal_entry(JournalOperationType::Renamed, src.clone(), dst.clone());
1526 rollback_entry(&entry, false).expect("rollback rename");
1527
1528 assert!(src.exists(), "original name must be restored");
1529 assert!(!dst.exists(), "new name must be gone");
1530 }
1531
1532 #[test]
1533 fn rollback_entry_moved_errors_when_source_exists_without_force() {
1534 let tmp = TempDir::new().unwrap();
1535 let src = tmp.path().join("exists.srt");
1536 let dst = tmp.path().join("dest.srt");
1537 std::fs::write(&src, "already here").unwrap();
1539 std::fs::write(&dst, "moved here").unwrap();
1540
1541 let entry = make_journal_entry(JournalOperationType::Moved, src.clone(), dst.clone());
1542 let err = rollback_entry(&entry, false).expect_err("should abort when source exists");
1543 let msg = format!("{err}");
1544 assert!(
1545 msg.contains("already exists"),
1546 "error should mention conflict: {msg}"
1547 );
1548 }
1549
1550 #[test]
1551 fn rollback_entry_moved_with_force_overwrites_existing_source() {
1552 let tmp = TempDir::new().unwrap();
1553 let src = tmp.path().join("src_force.srt");
1554 let dst = tmp.path().join("dst_force.srt");
1555 std::fs::write(&src, "old").unwrap();
1556 std::fs::write(&dst, "new content").unwrap();
1557
1558 let entry = make_journal_entry(JournalOperationType::Moved, src.clone(), dst.clone());
1559 rollback_entry(&entry, true).expect("force rollback should succeed");
1560
1561 assert!(src.exists(), "source must exist after force rollback");
1562 assert!(!dst.exists(), "destination must be gone");
1563 assert_eq!(std::fs::read_to_string(&src).unwrap(), "new content");
1564 }
1565
1566 #[test]
1567 fn rollback_entry_removes_existing_backup() {
1568 let tmp = TempDir::new().unwrap();
1569 let src = tmp.path().join("src_bak.srt");
1570 let dst = tmp.path().join("dst_bak.srt");
1571 let backup = tmp.path().join("src_bak.srt.bak");
1572 std::fs::write(&dst, "copy").unwrap();
1573 std::fs::write(&backup, "backup content").unwrap();
1574
1575 let meta = std::fs::metadata(&dst).unwrap();
1576 let mtime = meta
1577 .modified()
1578 .unwrap()
1579 .duration_since(std::time::UNIX_EPOCH)
1580 .unwrap()
1581 .as_secs();
1582 let entry = JournalEntry {
1583 operation_type: JournalOperationType::Copied,
1584 source: src,
1585 destination: dst.clone(),
1586 backup_path: Some(backup.clone()),
1587 status: JournalEntryStatus::Completed,
1588 file_size: meta.len(),
1589 file_mtime: mtime,
1590 };
1591
1592 rollback_entry(&entry, false).expect("rollback with backup");
1593 assert!(!dst.exists(), "copy destination must be removed");
1594 assert!(!backup.exists(), "backup must be deleted");
1595 }
1596
1597 #[test]
1598 fn rollback_entry_tolerates_missing_backup_file() {
1599 let tmp = TempDir::new().unwrap();
1600 let src = tmp.path().join("src_nobak.srt");
1601 let dst = tmp.path().join("dst_nobak.srt");
1602 let backup = tmp.path().join("missing_backup.srt.bak");
1603 std::fs::write(&dst, "copy").unwrap();
1604 let meta = std::fs::metadata(&dst).unwrap();
1607 let mtime = meta
1608 .modified()
1609 .unwrap()
1610 .duration_since(std::time::UNIX_EPOCH)
1611 .unwrap()
1612 .as_secs();
1613 let entry = JournalEntry {
1614 operation_type: JournalOperationType::Copied,
1615 source: src,
1616 destination: dst.clone(),
1617 backup_path: Some(backup),
1618 status: JournalEntryStatus::Completed,
1619 file_size: meta.len(),
1620 file_mtime: mtime,
1621 };
1622
1623 rollback_entry(&entry, false).expect("missing backup should not cause error");
1624 assert!(!dst.exists());
1625 }
1626
1627 #[tokio::test]
1632 async fn execute_status_no_cache_json_output_contains_exists_false() {
1633 let (_tmp, subx_dir) = isolated_config_dir();
1634 let cache_file = subx_dir.join("match_cache.json");
1635 assert!(!cache_file.exists());
1636
1637 let svc = TestConfigService::with_defaults();
1638 let args = crate::cli::StatusArgs { json: true };
1639 execute_status(&args, &svc)
1640 .await
1641 .expect("status must succeed without cache");
1642 }
1643
1644 #[tokio::test]
1645 async fn execute_status_no_cache_plain_output_is_ok() {
1646 let (_tmp, subx_dir) = isolated_config_dir();
1647 let cache_file = subx_dir.join("match_cache.json");
1648 assert!(!cache_file.exists());
1649
1650 let svc = TestConfigService::with_defaults();
1651 let args = crate::cli::StatusArgs { json: false };
1652 execute_status(&args, &svc)
1653 .await
1654 .expect("status must succeed without cache (plain)");
1655 }
1656
1657 #[tokio::test]
1658 async fn execute_status_valid_cache_plain_succeeds() {
1659 let (_tmp, subx_dir) = isolated_config_dir();
1660 let cache_file = subx_dir.join("match_cache.json");
1661
1662 let svc = TestConfigService::with_defaults();
1663 let config = svc.get_config().unwrap();
1664 let hash = compute_config_hash("None", config.general.backup_enabled);
1665
1666 let now = std::time::SystemTime::now()
1667 .duration_since(std::time::UNIX_EPOCH)
1668 .unwrap()
1669 .as_secs();
1670 let cache = serde_json::json!({
1671 "cache_version": "1.0",
1672 "directory": "/some/dir",
1673 "file_snapshot": [],
1674 "match_operations": [
1675 {
1676 "video_file": "/some/video.mkv",
1677 "subtitle_file": "/some/sub.srt",
1678 "new_subtitle_name": "video.srt",
1679 "confidence": 0.95,
1680 "reasoning": []
1681 }
1682 ],
1683 "created_at": now,
1684 "ai_model_used": "gpt-4",
1685 "config_hash": hash,
1686 "original_relocation_mode": "None",
1687 "original_backup_enabled": false,
1688 });
1689 std::fs::write(&cache_file, serde_json::to_string(&cache).unwrap()).unwrap();
1690
1691 let args = crate::cli::StatusArgs { json: false };
1692 execute_status(&args, &svc)
1693 .await
1694 .expect("status with matching hash must succeed");
1695 }
1696
1697 #[tokio::test]
1698 async fn execute_status_valid_cache_json_mode_succeeds() {
1699 let (_tmp, subx_dir) = isolated_config_dir();
1700 let cache_file = subx_dir.join("match_cache.json");
1701 let journal_file = subx_dir.join("match_journal.json");
1702
1703 let svc = TestConfigService::with_defaults();
1704 let config = svc.get_config().unwrap();
1705 let hash = compute_config_hash("None", config.general.backup_enabled);
1706
1707 let now = std::time::SystemTime::now()
1708 .duration_since(std::time::UNIX_EPOCH)
1709 .unwrap()
1710 .as_secs();
1711 let cache = serde_json::json!({
1712 "cache_version": "1.0",
1713 "directory": "/some/dir",
1714 "file_snapshot": [],
1715 "match_operations": [],
1716 "created_at": now,
1717 "ai_model_used": "gpt-4",
1718 "config_hash": hash,
1719 "original_relocation_mode": "None",
1720 "original_backup_enabled": false,
1721 });
1722 std::fs::write(&cache_file, serde_json::to_string(&cache).unwrap()).unwrap();
1723 std::fs::write(&journal_file, "{}").unwrap();
1724
1725 let args = crate::cli::StatusArgs { json: true };
1726 execute_status(&args, &svc)
1727 .await
1728 .expect("JSON status must succeed with matching hash");
1729 }
1730
1731 #[tokio::test]
1732 async fn execute_status_mismatched_hash_shows_in_plain_output() {
1733 let (_tmp, subx_dir) = isolated_config_dir();
1734 let cache_file = subx_dir.join("match_cache.json");
1735
1736 let now = std::time::SystemTime::now()
1737 .duration_since(std::time::UNIX_EPOCH)
1738 .unwrap()
1739 .as_secs();
1740 let cache = serde_json::json!({
1741 "cache_version": "1.0",
1742 "directory": "/some/dir",
1743 "file_snapshot": [],
1744 "match_operations": [],
1745 "created_at": now,
1746 "ai_model_used": "gpt-4",
1747 "config_hash": "00000000deadbeef",
1748 "original_relocation_mode": "None",
1749 "original_backup_enabled": false,
1750 });
1751 std::fs::write(&cache_file, serde_json::to_string(&cache).unwrap()).unwrap();
1752
1753 let svc = TestConfigService::with_defaults();
1754 let args = crate::cli::StatusArgs { json: false };
1755 execute_status(&args, &svc)
1757 .await
1758 .expect("status succeeds even with mismatched config hash");
1759 }
1760
1761 #[tokio::test]
1766 async fn execute_rollback_journal_with_only_pending_entries_is_noop() {
1767 use crate::core::matcher::journal::JournalData;
1768
1769 let (_tmp, subx_dir) = isolated_config_dir();
1770 let journal_file = subx_dir.join("match_journal.json");
1771
1772 let tmp2 = TempDir::new().unwrap();
1773 let dst = tmp2.path().join("file.srt");
1774 std::fs::write(&dst, "data").unwrap();
1775
1776 let pending_entry = JournalEntry {
1777 operation_type: JournalOperationType::Copied,
1778 source: tmp2.path().join("src.srt"),
1779 destination: dst.clone(),
1780 backup_path: None,
1781 status: JournalEntryStatus::Pending,
1782 file_size: 4,
1783 file_mtime: 0,
1784 };
1785
1786 let journal = JournalData {
1787 batch_id: "pending-only".into(),
1788 created_at: 0,
1789 entries: vec![pending_entry],
1790 };
1791 journal.save(&journal_file).await.expect("save journal");
1792
1793 let args = RollbackArgs { force: false };
1794 execute_rollback(&args)
1795 .await
1796 .expect("should succeed with only pending entries");
1797
1798 assert!(
1801 journal_file.exists(),
1802 "journal kept when nothing was rolled back"
1803 );
1804 assert!(dst.exists(), "pending entry destination must be untouched");
1805 }
1806
1807 #[tokio::test]
1808 async fn execute_rollback_force_skips_integrity_check() {
1809 use crate::core::matcher::journal::JournalData;
1810
1811 let (_tmp, subx_dir) = isolated_config_dir();
1812 let journal_file = subx_dir.join("match_journal.json");
1813
1814 let tmp2 = TempDir::new().unwrap();
1815 let src = tmp2.path().join("orig.srt");
1816 let dst = tmp2.path().join("copy.srt");
1817 std::fs::write(&dst, "data").unwrap();
1818
1819 let entry = JournalEntry {
1821 operation_type: JournalOperationType::Copied,
1822 source: src.clone(),
1823 destination: dst.clone(),
1824 backup_path: None,
1825 status: JournalEntryStatus::Completed,
1826 file_size: 9999, file_mtime: 9999, };
1829
1830 let journal = JournalData {
1831 batch_id: "force-batch".into(),
1832 created_at: 0,
1833 entries: vec![entry],
1834 };
1835 journal.save(&journal_file).await.expect("save journal");
1836
1837 let args = RollbackArgs { force: true };
1838 execute_rollback(&args)
1839 .await
1840 .expect("force rollback should succeed despite integrity mismatch");
1841
1842 assert!(!dst.exists(), "copy destination must be removed");
1843 assert!(!journal_file.exists(), "journal must be deleted");
1844 }
1845
1846 #[tokio::test]
1851 async fn execute_with_config_clear_journal_type_works() {
1852 use std::sync::Arc;
1853 let (_tmp, subx_dir) = isolated_config_dir();
1854 let journal_file = subx_dir.join("match_journal.json");
1855 let cache_file = subx_dir.join("match_cache.json");
1856 std::fs::write(&journal_file, "{}").unwrap();
1857 std::fs::write(&cache_file, "{}").unwrap();
1858
1859 let svc = Arc::new(TestConfigService::with_defaults());
1860 let args = CacheArgs {
1861 action: crate::cli::CacheAction::Clear(crate::cli::ClearArgs {
1862 r#type: crate::cli::ClearType::Journal,
1863 }),
1864 };
1865 execute_with_config(args, svc)
1866 .await
1867 .expect("clear journal via execute_with_config");
1868
1869 assert!(!journal_file.exists(), "journal should be removed");
1870 assert!(cache_file.exists(), "cache should remain");
1871 }
1872
1873 #[tokio::test]
1878 async fn execute_apply_confidence_filter_removes_low_confidence_ops() {
1879 use crate::cli::ApplyArgs;
1880
1881 let (_tmp, subx_dir) = isolated_config_dir();
1882 let cache_file = subx_dir.join("match_cache.json");
1883
1884 let svc = TestConfigService::with_defaults();
1885 let config = svc.get_config().unwrap();
1886 let hash = compute_config_hash("None", config.general.backup_enabled);
1887
1888 let now = std::time::SystemTime::now()
1889 .duration_since(std::time::UNIX_EPOCH)
1890 .unwrap()
1891 .as_secs();
1892
1893 let cache = serde_json::json!({
1900 "cache_version": "1.0",
1901 "directory": "/dir",
1902 "file_snapshot": [],
1903 "match_operations": [
1904 {
1905 "video_file": "/dir/v1.mkv",
1906 "subtitle_file": "/dir/s1.srt",
1907 "new_subtitle_name": "v1.srt",
1908 "confidence": 0.5,
1909 "reasoning": []
1910 }
1911 ],
1912 "created_at": now,
1913 "ai_model_used": "gpt-4",
1914 "config_hash": hash,
1915 "original_relocation_mode": "None",
1916 "original_backup_enabled": false,
1917 });
1918 std::fs::write(&cache_file, serde_json::to_string(&cache).unwrap()).unwrap();
1919
1920 let result = execute_apply(
1922 &ApplyArgs {
1923 yes: true,
1924 force: true,
1925 confidence: Some(80),
1926 },
1927 &svc,
1928 )
1929 .await;
1930 assert!(
1931 result.is_ok(),
1932 "confidence filter to empty ops should be Ok: {result:?}"
1933 );
1934 }
1935}