use crate::Result;
use crate::cli::output::{OutputMode, active_mode, emit_success};
use crate::cli::{ApplyArgs, CacheArgs, ClearArgs, ClearType, RollbackArgs, StatusArgs};
use crate::config::ConfigService;
use crate::core::lock::acquire_subx_lock;
use crate::core::matcher::cache::CacheData;
use crate::core::matcher::engine::{FileRelocationMode, MatchConfig, apply_cached_operations};
use crate::core::matcher::journal::{
JournalData, JournalEntry, JournalEntryStatus, JournalOperationType,
};
use crate::error::SubXError;
use serde::Serialize;
use std::io::IsTerminal;
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
/// Machine-readable error details attached to a per-item result in JSON
/// output mode.
#[derive(Debug, Serialize)]
pub struct CacheItemError {
    /// Error category as reported by `SubXError::category`.
    pub category: String,
    /// Stable machine-readable error code.
    pub code: String,
    /// Human-friendly description of the failure.
    pub message: String,
}
impl CacheItemError {
    /// Builds the serializable error payload from a `SubXError`.
    fn from_error(err: &SubXError) -> Self {
        Self {
            category: err.category().to_string(),
            code: err.machine_code().to_string(),
            message: err.user_friendly_message(),
        }
    }
}
/// A snapshot entry that no longer matches the on-disk file, with the
/// reason it was flagged (missing, size/mtime changed, ...).
#[derive(Debug, Serialize)]
pub struct StaleFileInfo {
    pub path: String,
    pub reason: String,
}
/// JSON payload for `cache status`. Optional fields are omitted when the
/// cache file does not exist.
#[derive(Debug, Serialize)]
pub struct CacheStatusPayload {
    /// Path of the cache file that was inspected.
    pub path: String,
    /// Whether the cache file exists on disk.
    pub exists: bool,
    /// Whether an operation journal file is present.
    pub journal_present: bool,
    /// Total number of cached match operations.
    pub total: u64,
    /// Journal entries still pending.
    pub pending: u64,
    /// Journal entries already completed.
    pub applied: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size_bytes: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_at: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub age_seconds: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cache_version: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ai_model: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub operation_count: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config_hash: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub current_config_hash: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config_hash_match: Option<bool>,
    // One of "empty" | "valid" | "stale" (see `describe_snapshot`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub snapshot_status: Option<&'static str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stale_files: Option<Vec<StaleFileInfo>>,
}
/// JSON payload for `cache clear`.
#[derive(Debug, Serialize)]
pub struct CacheClearPayload {
    /// Number of files actually deleted (0-2).
    pub removed: u64,
    /// Which clear was requested: "cache" | "journal" | "all".
    pub kind: &'static str,
    pub cache_path: String,
    pub cache_removed: bool,
    pub journal_path: String,
    pub journal_removed: bool,
}
/// JSON payload for `cache rollback`.
#[derive(Debug, Serialize)]
pub struct CacheRollbackPayload {
    pub rolled_back: u64,
}
/// Per-operation result reported by `cache apply` in JSON mode.
#[derive(Debug, Serialize)]
pub struct CacheApplyItem {
    /// Subtitle file path used as the operation identifier.
    pub id: String,
    /// "ok" or "error".
    pub status: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<CacheItemError>,
}
/// JSON payload for `cache apply`.
#[derive(Debug, Serialize)]
pub struct CacheApplyPayload {
    pub applied: u64,
    pub failed: u64,
    pub items: Vec<CacheApplyItem>,
}
/// Counts (pending, completed) entries in the journal at `path`.
///
/// A missing or unreadable journal is treated as "no operations recorded"
/// and yields `(0, 0)` — status reporting is best-effort by design.
async fn journal_counters(path: &Path) -> (u64, u64) {
    if !path.exists() {
        return (0, 0);
    }
    let Ok(journal) = JournalData::load(path).await else {
        return (0, 0);
    };
    journal
        .entries
        .iter()
        .fold((0u64, 0u64), |(pending, applied), entry| match entry.status {
            JournalEntryStatus::Pending => (pending + 1, applied),
            JournalEntryStatus::Completed => (pending, applied + 1),
        })
}
/// Resolves the base configuration directory.
///
/// Honors `XDG_CONFIG_HOME` only when it is set to a non-empty value; the
/// XDG Base Directory specification requires an empty value to be treated
/// as unset, in which case we fall back to the platform config directory.
///
/// # Errors
/// Returns a config error when no platform config directory can be found.
fn get_config_dir() -> Result<PathBuf> {
    match std::env::var_os("XDG_CONFIG_HOME") {
        // Non-empty override wins; an empty string is equivalent to unset.
        Some(xdg_config) if !xdg_config.is_empty() => Ok(PathBuf::from(xdg_config)),
        _ => dirs::config_dir()
            .ok_or_else(|| SubXError::config("Unable to determine config directory")),
    }
}
/// Location of the persisted match-operation cache file.
fn cache_path() -> Result<PathBuf> {
    Ok(get_config_dir()?.join("subx").join("match_cache.json"))
}
/// Location of the persisted operation journal file.
fn journal_path() -> Result<PathBuf> {
    Ok(get_config_dir()?.join("subx").join("match_journal.json"))
}
/// Deletes `path` if it exists. Unless JSON output mode is active, reports
/// the outcome on stdout using `label` (e.g. "Cache cleared: …").
/// Returns whether a file was actually removed.
fn clear_file(path: &Path, label: &str) -> Result<bool> {
    let quiet = active_mode().is_json();
    let existed = path.exists();
    if existed {
        std::fs::remove_file(path)?;
    }
    if !quiet {
        let verb = if existed { "cleared" } else { "not found" };
        println!("{} {}: {}", label, verb, path.display());
    }
    Ok(existed)
}
/// Removes the cache and/or journal files under the subx config directory.
///
/// Holds the global subx lock for the duration so a concurrent apply or
/// rollback cannot race the deletion. In JSON mode a `CacheClearPayload`
/// envelope is emitted instead of plain-text progress lines.
async fn execute_clear(args: &ClearArgs) -> Result<()> {
    let _lock = acquire_subx_lock().await?;
    let config_dir = get_config_dir()?;
    let cache_file = config_dir.join("subx").join("match_cache.json");
    let journal_file = config_dir.join("subx").join("match_journal.json");
    let json_mode = active_mode().is_json();
    let mut cache_removed = false;
    let mut journal_removed = false;
    // `clear_file` handles the per-file existence check and, in plain mode,
    // prints its own "cleared"/"not found" line.
    match args.r#type {
        ClearType::Cache => {
            cache_removed = clear_file(&cache_file, "Cache")?;
        }
        ClearType::Journal => {
            journal_removed = clear_file(&journal_file, "Journal")?;
        }
        ClearType::All => {
            cache_removed = clear_file(&cache_file, "Cache")?;
            journal_removed = clear_file(&journal_file, "Journal")?;
        }
    }
    let removed = u64::from(cache_removed) + u64::from(journal_removed);
    if json_mode {
        let kind = match args.r#type {
            ClearType::Cache => "cache",
            ClearType::Journal => "journal",
            ClearType::All => "all",
        };
        let payload = CacheClearPayload {
            removed,
            kind,
            cache_path: cache_file.to_string_lossy().into_owned(),
            cache_removed,
            journal_path: journal_file.to_string_lossy().into_owned(),
            journal_removed,
        };
        emit_success(OutputMode::Json, "cache", payload);
    } else if removed == 0 {
        // Plain mode already printed per-file lines; only add a summary when
        // nothing was deleted at all.
        println!("No cache files found to clear.");
    }
    Ok(())
}
/// Computes a deterministic 16-hex-digit fingerprint of the configuration
/// values that affect how cached match operations are applied.
///
/// Uses an explicit FNV-1a (64-bit) implementation instead of
/// `std::collections::hash_map::DefaultHasher`: this hash is persisted in
/// the cache file and compared across program runs, and `DefaultHasher`'s
/// algorithm is documented as unspecified and allowed to change between
/// Rust releases — which would spuriously invalidate every existing cache
/// after a toolchain upgrade.
fn compute_config_hash(relocation_mode_debug: &str, backup_enabled: bool) -> String {
    const FNV_OFFSET_BASIS: u64 = 0xcbf2_9ce4_8422_2325;
    const FNV_PRIME: u64 = 0x0000_0100_0000_01b3;
    let mut hash = FNV_OFFSET_BASIS;
    // Feed the mode string, a 0 separator, then the backup flag so the two
    // inputs cannot run together and collide.
    for byte in relocation_mode_debug
        .as_bytes()
        .iter()
        .copied()
        .chain([0u8, u8::from(backup_enabled)])
    {
        hash ^= u64::from(byte);
        hash = hash.wrapping_mul(FNV_PRIME);
    }
    format!("{:016x}", hash)
}
/// Fingerprints the *current* configuration for comparison against the
/// hash stored in the cache (used by `cache status`).
///
/// NOTE(review): the relocation mode is hard-coded to "None" here, while
/// `execute_apply` hashes `cache.original_relocation_mode` — confirm this
/// asymmetry is intentional before relying on `config_hash_match`.
fn current_config_hash(config_service: &dyn ConfigService) -> Result<String> {
    let config = config_service.get_config()?;
    Ok(compute_config_hash("None", config.general.backup_enabled))
}
/// Renders a byte count with one decimal place in binary units (KB/MB/GB),
/// or as raw bytes below 1 KiB.
fn format_size(bytes: u64) -> String {
    const KB: f64 = 1024.0;
    const MB: f64 = KB * 1024.0;
    const GB: f64 = MB * 1024.0;
    let value = bytes as f64;
    match value {
        v if v >= GB => format!("{:.1} GB", v / GB),
        v if v >= MB => format!("{:.1} MB", v / MB),
        v if v >= KB => format!("{:.1} KB", v / KB),
        _ => format!("{} B", bytes),
    }
}
/// Renders an age in seconds as a coarse "N <unit> ago" string.
/// Units are always plural ("1 minutes ago"), matching existing output.
fn format_age(age_secs: u64) -> String {
    const MIN: u64 = 60;
    const HOUR: u64 = 60 * MIN;
    const DAY: u64 = 24 * HOUR;
    // Pick the largest unit that fits, falling through to seconds.
    let (amount, unit) = if age_secs >= DAY {
        (age_secs / DAY, "days")
    } else if age_secs >= HOUR {
        (age_secs / HOUR, "hours")
    } else if age_secs >= MIN {
        (age_secs / MIN, "minutes")
    } else {
        (age_secs, "seconds")
    };
    format!("{} {} ago", amount, unit)
}
/// Classifies the cache's file snapshot for display: returns a human label
/// and a machine-readable status ("empty" | "valid" | "stale").
fn describe_snapshot(cache: &CacheData) -> (String, &'static str) {
    if cache.has_empty_snapshot() {
        // Legacy caches predate snapshot capture entirely.
        ("Empty (legacy cache)".to_string(), "empty")
    } else {
        // validate_snapshot scans the filesystem; stale entries mean files
        // changed since the cache was written.
        let stale = cache.validate_snapshot();
        if stale.is_empty() {
            ("Valid".to_string(), "valid")
        } else {
            (format!("Stale ({} files changed)", stale.len()), "stale")
        }
    }
}
/// Reports cache status: presence, size, age, config-hash comparison,
/// snapshot freshness, and journal counters — as plain text or as a JSON
/// `CacheStatusPayload` envelope.
///
/// # Errors
/// Fails when an existing cache file cannot be parsed, or when metadata or
/// configuration access fails.
pub async fn execute_status(args: &StatusArgs, config_service: &dyn ConfigService) -> Result<()> {
    let cache_file = cache_path()?;
    let journal_file = journal_path()?;
    // The subcommand's --json flag forces JSON even if the global mode is plain.
    let json_mode = active_mode().is_json() || args.json;
    if !cache_file.exists() {
        // No cache: still report journal info so callers can spot orphaned
        // journals left behind by a previous run.
        let journal_present = journal_file.exists();
        let (pending, applied) = journal_counters(&journal_file).await;
        if json_mode {
            let payload = CacheStatusPayload {
                path: cache_file.to_string_lossy().into_owned(),
                exists: false,
                journal_present,
                total: 0,
                pending,
                applied,
                size_bytes: None,
                created_at: None,
                age_seconds: None,
                cache_version: None,
                ai_model: None,
                operation_count: None,
                config_hash: None,
                current_config_hash: None,
                config_hash_match: None,
                snapshot_status: None,
                stale_files: None,
            };
            emit_success(OutputMode::Json, "cache", payload);
        } else {
            println!("No cache found at {}", cache_file.display());
        }
        return Ok(());
    }
    let cache = CacheData::load(&cache_file).map_err(|e| {
        SubXError::config(format!(
            "Failed to load cache at {}: {}",
            cache_file.display(),
            e
        ))
    })?;
    let metadata = std::fs::metadata(&cache_file)?;
    let size_bytes = metadata.len();
    // A clock before the epoch degrades to "now == 0" rather than failing.
    let now_secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);
    let age_secs = now_secs.saturating_sub(cache.created_at);
    let current_hash = current_config_hash(config_service)?;
    let hash_match = current_hash == cache.config_hash;
    // Validate the snapshot exactly once and derive the display label, the
    // machine-readable status, and the stale-file list from that single
    // filesystem scan (previously validate_snapshot ran a second time
    // whenever the snapshot was stale).
    let (snapshot_label, snapshot_status, stale_entries) = if cache.has_empty_snapshot() {
        ("Empty (legacy cache)".to_string(), "empty", Vec::new())
    } else {
        let stale = cache.validate_snapshot();
        if stale.is_empty() {
            ("Valid".to_string(), "valid", stale)
        } else {
            (format!("Stale ({} files changed)", stale.len()), "stale", stale)
        }
    };
    let journal_present = journal_file.exists();
    let (pending, applied) = journal_counters(&journal_file).await;
    let total = cache.match_operations.len() as u64;
    if json_mode {
        let stale_files: Vec<StaleFileInfo> = stale_entries
            .iter()
            .map(|s| StaleFileInfo {
                path: s.path.clone(),
                reason: s.reason.clone(),
            })
            .collect();
        let payload = CacheStatusPayload {
            path: cache_file.to_string_lossy().into_owned(),
            exists: true,
            journal_present,
            total,
            pending,
            applied,
            size_bytes: Some(size_bytes),
            created_at: Some(cache.created_at),
            age_seconds: Some(age_secs),
            cache_version: Some(cache.cache_version.clone()),
            ai_model: Some(cache.ai_model_used.clone()),
            operation_count: Some(cache.match_operations.len()),
            config_hash: Some(cache.config_hash.clone()),
            current_config_hash: Some(current_hash),
            config_hash_match: Some(hash_match),
            snapshot_status: Some(snapshot_status),
            stale_files: Some(stale_files),
        };
        emit_success(OutputMode::Json, "cache", payload);
    } else {
        let config_line = if hash_match {
            "✓ (matches current)".to_string()
        } else {
            format!("✗ (differs from current: {})", current_hash)
        };
        let journal_line = if journal_present {
            "Present"
        } else {
            "Not found"
        };
        println!("Cache Status");
        println!("============");
        println!("Path: {}", cache_file.display());
        println!("Size: {}", format_size(size_bytes));
        println!("Age: {}", format_age(age_secs));
        println!("Cache version: {}", cache.cache_version);
        println!("AI model: {}", cache.ai_model_used);
        println!("Operations: {}", cache.match_operations.len());
        println!("Config hash: {}", cache.config_hash);
        println!("Config match: {}", config_line);
        println!("Snapshot: {}", snapshot_label);
        println!("Journal: {}", journal_line);
    }
    Ok(())
}
/// Applies the cached match operations to disk.
///
/// Safety gates (each individually bypassable with `--force`):
/// 1. config hash must match the hash recorded in the cache,
/// 2. the cache must carry a file snapshot (legacy caches do not),
/// 3. no snapshot file may have changed since the cache was written,
/// 4. no target path may already exist.
///
/// `--confidence N` filters out operations below N percent. Unless `--yes`
/// is given, an interactive confirmation is required (and refused outright
/// in JSON mode, where a prompt would corrupt the JSON envelope).
/// In JSON mode operations are applied one at a time so a per-item
/// ok/error list can be reported; in plain mode they are applied in bulk.
pub async fn execute_apply(args: &ApplyArgs, config_service: &dyn ConfigService) -> Result<()> {
    // Global lock: apply must not race clear/rollback.
    let _lock = acquire_subx_lock().await?;
    let json_mode = active_mode().is_json();
    let cache_file = cache_path()?;
    if !cache_file.exists() {
        // Nothing to do — report an empty result rather than an error.
        if json_mode {
            emit_success(
                OutputMode::Json,
                "cache",
                CacheApplyPayload {
                    applied: 0,
                    failed: 0,
                    items: Vec::new(),
                },
            );
        } else {
            println!(
                "No cache found at {}. Run a dry-run match first.",
                cache_file.display()
            );
        }
        return Ok(());
    }
    let mut cache = CacheData::load(&cache_file).map_err(|e| {
        SubXError::config(format!(
            "Failed to load cache at {}: {}",
            cache_file.display(),
            e
        ))
    })?;
    let config = config_service.get_config()?;
    // Gate 1: recompute the hash from the cache's recorded relocation mode
    // plus the *current* backup setting and compare to the stored hash.
    let apply_hash = compute_config_hash(
        &cache.original_relocation_mode,
        config.general.backup_enabled,
    );
    if apply_hash != cache.config_hash && !args.force {
        return Err(SubXError::config(format!(
            "Configuration has changed since the cache was created.\n\
             Cache hash: {}\n\
             Current hash: {}\n\
             Use --force to bypass this check.",
            cache.config_hash, apply_hash
        )));
    }
    // Gate 2: legacy caches carry no snapshot, so integrity can't be checked.
    if cache.has_empty_snapshot() && !args.force {
        return Err(SubXError::config(
            "Cache was created without file snapshot data (legacy format).\n\
             Cannot verify file integrity. Use --force to apply anyway."
                .to_string(),
        ));
    }
    // Gate 3: refuse to apply when any snapshotted source file has changed.
    if !args.force && !cache.has_empty_snapshot() {
        let stale = cache.validate_snapshot();
        if !stale.is_empty() {
            let mut msg = format!(
                "{} source file(s) have changed since the cache was created:\n",
                stale.len()
            );
            for s in &stale {
                msg.push_str(&format!("  - {} ({})\n", s.path, s.reason));
            }
            msg.push_str("Use --force to apply anyway.");
            return Err(SubXError::config(msg));
        }
    }
    // Gate 4: refuse to clobber already-existing target paths.
    if !args.force {
        let conflicts = cache.validate_target_paths();
        if !conflicts.is_empty() {
            let mut msg = format!("{} target path(s) already exist:\n", conflicts.len());
            for p in &conflicts {
                msg.push_str(&format!("  - {}\n", p.display()));
            }
            msg.push_str("Use --force to apply anyway.");
            return Err(SubXError::config(msg));
        }
    }
    // Optional confidence filter: drop operations below the percent cutoff.
    if let Some(min_conf) = args.confidence {
        let threshold = f32::from(min_conf) / 100.0;
        let before = cache.match_operations.len();
        cache
            .match_operations
            .retain(|op| op.confidence >= threshold);
        let after = cache.match_operations.len();
        if before != after && !json_mode {
            println!(
                "Filtered {} operation(s) below {}% confidence.",
                before - after,
                min_conf
            );
        }
    }
    if cache.match_operations.is_empty() {
        if json_mode {
            emit_success(
                OutputMode::Json,
                "cache",
                CacheApplyPayload {
                    applied: 0,
                    failed: 0,
                    items: Vec::new(),
                },
            );
        } else {
            println!("No operations to apply.");
        }
        return Ok(());
    }
    // Plain mode: show the full plan before asking for confirmation.
    if !json_mode {
        println!("Cache Apply Summary");
        println!("===================");
        println!("Operations: {}", cache.match_operations.len());
        println!("AI model: {}", cache.ai_model_used);
        println!("Relocation mode: {}", cache.original_relocation_mode);
        println!();
        for (i, op) in cache.match_operations.iter().enumerate() {
            println!(
                "  {}. {} → {} (confidence: {:.0}%)",
                i + 1,
                op.subtitle_file,
                op.new_subtitle_name,
                op.confidence * 100.0
            );
        }
        println!();
    }
    if !args.yes {
        // A prompt would write to stdout and corrupt the JSON envelope.
        if json_mode {
            return Err(SubXError::CommandExecution(
                "cache apply in JSON output mode requires --yes (interactive confirmation \
                 would write to stdout and corrupt the JSON envelope)."
                    .to_string(),
            ));
        }
        if !std::io::stdin().is_terminal() {
            return Err(SubXError::config(
                "Non-interactive terminal detected. Use --yes to skip confirmation.".to_string(),
            ));
        }
        print!("Proceed with apply? [y/N] ");
        use std::io::Write;
        std::io::stdout().flush()?;
        let mut input = String::new();
        std::io::stdin().read_line(&mut input)?;
        // Anything other than y/Y aborts without error.
        if !input.trim().eq_ignore_ascii_case("y") {
            println!("Apply cancelled.");
            return Ok(());
        }
    }
    let config = config_service.get_config()?;
    let relocation_mode = parse_relocation_mode(&cache.original_relocation_mode);
    // Threshold is 0.0 here: filtering already happened above.
    let match_config = MatchConfig {
        confidence_threshold: 0.0,
        max_sample_length: 2000,
        enable_content_analysis: true,
        backup_enabled: cache.original_backup_enabled,
        relocation_mode,
        conflict_resolution: crate::core::matcher::engine::ConflictResolution::Skip,
        ai_model: cache.ai_model_used.clone(),
        max_subtitle_bytes: config.general.max_subtitle_bytes,
    };
    if json_mode {
        // Apply one operation at a time so each item gets its own ok/error
        // entry in the payload instead of failing the whole batch.
        let mut items: Vec<CacheApplyItem> = Vec::with_capacity(cache.match_operations.len());
        let mut applied = 0u64;
        let mut failed = 0u64;
        for op in &cache.match_operations {
            let id = op.subtitle_file.clone();
            // Pre-check both paths so a missing file yields a precise
            // FileNotFound error naming the file that is gone.
            let video_exists = std::path::Path::new(&op.video_file).exists();
            let sub_exists = std::path::Path::new(&op.subtitle_file).exists();
            if !video_exists || !sub_exists {
                let missing = if !sub_exists {
                    op.subtitle_file.clone()
                } else {
                    op.video_file.clone()
                };
                let err = SubXError::FileNotFound(missing);
                items.push(CacheApplyItem {
                    id,
                    status: "error",
                    error: Some(CacheItemError::from_error(&err)),
                });
                failed += 1;
                continue;
            }
            // Wrap the single operation in a one-element cache clone so the
            // bulk engine entry point can be reused per item.
            let mut single = cache.clone();
            single.match_operations = vec![op.clone()];
            match apply_cached_operations(&single, &match_config).await {
                Ok(()) => {
                    applied += 1;
                    items.push(CacheApplyItem {
                        id,
                        status: "ok",
                        error: None,
                    });
                }
                Err(e) => {
                    failed += 1;
                    items.push(CacheApplyItem {
                        id,
                        status: "error",
                        error: Some(CacheItemError::from_error(&e)),
                    });
                }
            }
        }
        emit_success(
            OutputMode::Json,
            "cache",
            CacheApplyPayload {
                applied,
                failed,
                items,
            },
        );
    } else {
        // Plain mode: bulk apply; the first failure aborts with an error.
        apply_cached_operations(&cache, &match_config).await?;
        println!("Apply complete.");
    }
    Ok(())
}
/// Maps the debug-formatted relocation mode stored in the cache back to the
/// engine enum; unrecognized values degrade to `None` (no relocation).
fn parse_relocation_mode(s: &str) -> FileRelocationMode {
    if s == "Copy" {
        FileRelocationMode::Copy
    } else if s == "Move" {
        FileRelocationMode::Move
    } else {
        FileRelocationMode::None
    }
}
/// Checks that the destination file of a journal entry still matches the
/// size and mtime recorded at operation time, so a rollback does not
/// destroy work done after the original operation.
///
/// # Errors
/// Config errors (all suggesting `--force`) when the file is missing, its
/// size differs, or its mtime differs; an I/O error for any other metadata
/// failure. The mtime check is skipped when the platform cannot report it.
fn verify_destination_integrity(entry: &JournalEntry) -> Result<()> {
    let metadata = std::fs::metadata(&entry.destination).map_err(|e| {
        if e.kind() == std::io::ErrorKind::NotFound {
            SubXError::config(format!(
                "Destination file {} no longer exists. Use --force to override.",
                entry.destination.display()
            ))
        } else {
            SubXError::Io(e)
        }
    })?;
    if metadata.len() != entry.file_size {
        return Err(SubXError::config(format!(
            "Destination file {} has been modified since the operation (size differs). Use --force to override.",
            entry.destination.display()
        )));
    }
    let actual_mtime = metadata
        .modified()
        .ok()
        .and_then(|m| m.duration_since(UNIX_EPOCH).ok())
        .map(|d| d.as_secs());
    if matches!(actual_mtime, Some(actual) if actual != entry.file_mtime) {
        return Err(SubXError::config(format!(
            "Destination file {} has been modified since the operation (mtime differs). Use --force to override.",
            entry.destination.display()
        )));
    }
    Ok(())
}
/// Reverts a single journal entry.
///
/// * `Copied`: deletes the copied destination file.
/// * `Moved`/`Renamed`: renames the destination back to the original
///   source path, recreating parent directories as needed.
///
/// With `force`, a `Copied` destination that has already disappeared is
/// treated as rolled back instead of aborting the batch, and an existing
/// source path for `Moved`/`Renamed` entries is overwritten. Any recorded
/// backup file is removed afterwards; a missing backup is never an error.
fn rollback_entry(entry: &JournalEntry, force: bool) -> Result<()> {
    let json_mode = active_mode().is_json();
    match entry.operation_type {
        JournalOperationType::Copied => match std::fs::remove_file(&entry.destination) {
            Ok(()) => {
                if !json_mode {
                    println!("Removed copy: {}", entry.destination.display());
                }
            }
            // Under --force the integrity check was skipped, so tolerate a
            // copy that is already gone rather than failing mid-rollback.
            Err(e) if force && e.kind() == std::io::ErrorKind::NotFound => {
                if !json_mode {
                    println!("Copy already removed: {}", entry.destination.display());
                }
            }
            Err(e) => return Err(e.into()),
        },
        JournalOperationType::Moved | JournalOperationType::Renamed => {
            if entry.source.exists() && !force {
                return Err(SubXError::config(format!(
                    "Original source path {} already exists. Rollback would overwrite it. Use --force to override.",
                    entry.source.display()
                )));
            }
            // The original directory may have been deleted since the move.
            if let Some(parent) = entry.source.parent() {
                if !parent.as_os_str().is_empty() {
                    std::fs::create_dir_all(parent)?;
                }
            }
            std::fs::rename(&entry.destination, &entry.source)?;
            if !json_mode {
                println!(
                    "Rolled back: {} \u{2190} {}",
                    entry.source.display(),
                    entry.destination.display()
                );
            }
        }
    }
    if let Some(backup) = &entry.backup_path {
        // Remove directly and tolerate NotFound instead of exists()+remove,
        // avoiding a TOCTOU race with concurrent cleanup.
        match std::fs::remove_file(backup) {
            Ok(()) => {
                if !json_mode {
                    println!("Removed backup: {}", backup.display());
                }
            }
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
            Err(e) => return Err(e.into()),
        }
    }
    Ok(())
}
/// Rolls back all completed operations recorded in the journal, newest
/// first, then deletes the journal.
///
/// Holds the global subx lock for the duration. Without `--force`, each
/// destination is verified (size/mtime) before being touched; any failure
/// aborts immediately, leaving the journal in place so a later rollback
/// can resume.
pub async fn execute_rollback(args: &RollbackArgs) -> Result<()> {
    let _lock = acquire_subx_lock().await?;
    let json_mode = active_mode().is_json();
    let journal_file = journal_path()?;
    if !journal_file.exists() {
        // Nothing recorded — report zero rather than erroring.
        if json_mode {
            emit_success(
                OutputMode::Json,
                "cache",
                CacheRollbackPayload { rolled_back: 0 },
            );
        } else {
            println!("No operation journal found. Nothing to rollback.");
        }
        return Ok(());
    }
    let journal = JournalData::load(&journal_file).await?;
    // Undo in reverse order so later operations are reverted before the
    // earlier operations they may depend on.
    let reversed: Vec<&JournalEntry> = journal
        .entries
        .iter()
        .filter(|e| e.status == JournalEntryStatus::Completed)
        .rev()
        .collect();
    if reversed.is_empty() {
        if json_mode {
            emit_success(
                OutputMode::Json,
                "cache",
                CacheRollbackPayload { rolled_back: 0 },
            );
        } else {
            println!("Journal has no completed operations to rollback.");
        }
        return Ok(());
    }
    if !json_mode {
        println!(
            "Rolling back {} operations from batch {}...",
            reversed.len(),
            journal.batch_id
        );
    }
    let mut rolled_back: u64 = 0;
    for entry in &reversed {
        // --force skips the size/mtime verification entirely.
        if !args.force {
            verify_destination_integrity(entry)?;
        }
        rollback_entry(entry, args.force)?;
        rolled_back += 1;
    }
    // All entries reverted — the journal is now obsolete.
    std::fs::remove_file(&journal_file)?;
    if json_mode {
        emit_success(
            OutputMode::Json,
            "cache",
            CacheRollbackPayload { rolled_back },
        );
    } else {
        println!("Rollback complete. Journal deleted.");
    }
    Ok(())
}
/// Entry point for the `cache` subcommand: dispatches to the action
/// handlers, constructing a production config service where one is needed.
pub async fn execute(args: CacheArgs) -> Result<()> {
    match args.action {
        crate::cli::CacheAction::Clear(clear_args) => {
            execute_clear(&clear_args).await?;
        }
        crate::cli::CacheAction::Status(status_args) => {
            let config_service = crate::config::ProductionConfigService::new()?;
            execute_status(&status_args, &config_service).await?;
        }
        crate::cli::CacheAction::Apply(ref apply_args) => {
            let config_service = crate::config::ProductionConfigService::new()?;
            execute_apply(apply_args, &config_service).await?;
        }
        crate::cli::CacheAction::Rollback(rollback_args) => {
            execute_rollback(&rollback_args).await?;
        }
    }
    Ok(())
}
/// Same dispatch as [`execute`], but with an injected config service for
/// the actions that consult configuration (status/apply); other actions
/// are delegated unchanged to [`execute`].
pub async fn execute_with_config(
    args: CacheArgs,
    config_service: std::sync::Arc<dyn ConfigService>,
) -> Result<()> {
    match args.action {
        crate::cli::CacheAction::Status(status_args) => {
            execute_status(&status_args, config_service.as_ref()).await
        }
        crate::cli::CacheAction::Apply(apply_args) => {
            execute_apply(&apply_args, config_service.as_ref()).await
        }
        other => execute(CacheArgs { action: other }).await,
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::config::TestConfigService;
use crate::core::matcher::cache::{CacheData, SnapshotItem};
use crate::core::matcher::journal::{JournalEntry, JournalEntryStatus, JournalOperationType};
use std::path::PathBuf;
use tempfile::TempDir;
// Test fixtures: isolated config dir, journal-entry builder, minimal cache.
// Points XDG_CONFIG_HOME at a fresh temp dir so tests never touch the real
// user configuration; returns the temp guard plus the subx subdirectory.
fn isolated_config_dir() -> (TempDir, PathBuf) {
    let tmp = TempDir::new().expect("tempdir");
    // set_var is unsafe (process-global mutation); acceptable in tests.
    unsafe {
        std::env::set_var("XDG_CONFIG_HOME", tmp.path());
    }
    let subx_dir = tmp.path().join("subx");
    std::fs::create_dir_all(&subx_dir).expect("create subx dir");
    (tmp, subx_dir)
}
// Builds a Completed journal entry whose size/mtime are read from the
// (required-to-exist) destination file, so integrity checks pass.
fn make_journal_entry(
    op_type: JournalOperationType,
    source: PathBuf,
    destination: PathBuf,
) -> JournalEntry {
    let meta = std::fs::metadata(&destination).expect("destination must exist");
    let mtime = meta
        .modified()
        .unwrap()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    JournalEntry {
        operation_type: op_type,
        source,
        destination,
        backup_path: None,
        status: JournalEntryStatus::Completed,
        file_size: meta.len(),
        file_mtime: mtime,
    }
}
// Minimal cache with an empty snapshot (i.e. legacy-format cache).
fn empty_snapshot_cache() -> CacheData {
    CacheData {
        cache_version: "1.0".into(),
        directory: "/tmp".into(),
        file_snapshot: vec![],
        match_operations: vec![],
        created_at: 0,
        ai_model_used: "test-model".into(),
        config_hash: "abc123".into(),
        original_relocation_mode: "None".into(),
        original_backup_enabled: false,
    }
}
// format_size / format_age / config-hash unit tests.
#[test]
fn format_size_bytes() {
    assert_eq!(format_size(0), "0 B");
    assert_eq!(format_size(512), "512 B");
    assert_eq!(format_size(1023), "1023 B");
}
#[test]
fn format_size_kilobytes() {
    assert_eq!(format_size(1024), "1.0 KB");
    assert_eq!(format_size(2048), "2.0 KB");
    // One byte below 1 MiB must still render as KB.
    let just_below_mb = (1024.0 * 1024.0 - 1.0) as u64;
    let result = format_size(just_below_mb);
    assert!(result.ends_with("KB"), "expected KB, got {result}");
}
#[test]
fn format_size_megabytes() {
    assert_eq!(format_size(1024 * 1024), "1.0 MB");
    assert_eq!(format_size(5 * 1024 * 1024), "5.0 MB");
    // One byte below 1 GiB must still render as MB.
    let just_below_gb = (1024.0 * 1024.0 * 1024.0 - 1.0) as u64;
    let result = format_size(just_below_gb);
    assert!(result.ends_with("MB"), "expected MB, got {result}");
}
#[test]
fn format_size_gigabytes() {
    assert_eq!(format_size(1024 * 1024 * 1024), "1.0 GB");
    assert_eq!(format_size(2 * 1024 * 1024 * 1024), "2.0 GB");
}
#[test]
fn format_age_seconds() {
    assert_eq!(format_age(0), "0 seconds ago");
    assert_eq!(format_age(30), "30 seconds ago");
    assert_eq!(format_age(59), "59 seconds ago");
}
#[test]
fn format_age_minutes() {
    // Output is intentionally always plural ("1 minutes ago").
    assert_eq!(format_age(60), "1 minutes ago");
    assert_eq!(format_age(90), "1 minutes ago");
    assert_eq!(format_age(3599), "59 minutes ago");
}
#[test]
fn format_age_hours() {
    assert_eq!(format_age(3600), "1 hours ago");
    assert_eq!(format_age(7200), "2 hours ago");
    assert_eq!(format_age(86399), "23 hours ago");
}
#[test]
fn format_age_days() {
    assert_eq!(format_age(86400), "1 days ago");
    assert_eq!(format_age(172800), "2 days ago");
    assert_eq!(format_age(604800), "7 days ago");
}
#[test]
fn compute_config_hash_is_deterministic() {
    let h1 = compute_config_hash("None", false);
    let h2 = compute_config_hash("None", false);
    assert_eq!(h1, h2);
}
#[test]
fn compute_config_hash_differs_for_different_modes() {
    let h_none = compute_config_hash("None", false);
    let h_copy = compute_config_hash("Copy", false);
    let h_move = compute_config_hash("Move", false);
    assert_ne!(h_none, h_copy);
    assert_ne!(h_none, h_move);
    assert_ne!(h_copy, h_move);
}
#[test]
fn compute_config_hash_differs_for_backup_flag() {
    let h_off = compute_config_hash("None", false);
    let h_on = compute_config_hash("None", true);
    assert_ne!(h_off, h_on);
}
#[test]
fn compute_config_hash_is_16_hex_chars() {
    // The hash is persisted; its textual format must stay stable.
    let h = compute_config_hash("None", false);
    assert_eq!(h.len(), 16);
    assert!(h.chars().all(|c| c.is_ascii_hexdigit()));
}
#[test]
fn current_config_hash_returns_string() {
    let svc = TestConfigService::with_defaults();
    let h = current_config_hash(&svc).expect("should succeed");
    assert_eq!(h.len(), 16);
}
// parse_relocation_mode: known variants plus the unknown-input fallback.
#[test]
fn parse_relocation_mode_copy() {
    assert!(matches!(
        parse_relocation_mode("Copy"),
        FileRelocationMode::Copy
    ));
}
#[test]
fn parse_relocation_mode_move() {
    assert!(matches!(
        parse_relocation_mode("Move"),
        FileRelocationMode::Move
    ));
}
#[test]
fn parse_relocation_mode_none_keyword() {
    assert!(matches!(
        parse_relocation_mode("None"),
        FileRelocationMode::None
    ));
}
#[test]
fn parse_relocation_mode_unknown_falls_back_to_none() {
    assert!(matches!(
        parse_relocation_mode("UnknownVariant"),
        FileRelocationMode::None
    ));
}
// describe_snapshot: empty (legacy), valid, and stale classifications.
#[test]
fn describe_snapshot_empty_is_reported_as_legacy() {
    let cache = empty_snapshot_cache();
    let (label, status) = describe_snapshot(&cache);
    assert_eq!(status, "empty");
    assert!(label.contains("legacy"), "label: {label}");
}
#[test]
fn describe_snapshot_valid_when_files_match_on_disk() {
    let tmp = TempDir::new().unwrap();
    let file = tmp.path().join("video.srt");
    std::fs::write(&file, "content").unwrap();
    let meta = std::fs::metadata(&file).unwrap();
    let mtime = meta
        .modified()
        .unwrap()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    // Snapshot mirrors the file's real size/mtime, so it must validate.
    let mut cache = empty_snapshot_cache();
    cache.file_snapshot = vec![SnapshotItem {
        path: file.to_string_lossy().into_owned(),
        name: "video.srt".into(),
        size: meta.len(),
        mtime,
        file_type: "subtitle".into(),
    }];
    let (label, status) = describe_snapshot(&cache);
    assert_eq!(status, "valid", "label: {label}");
    assert_eq!(label, "Valid");
}
#[test]
fn describe_snapshot_stale_when_file_missing() {
    let tmp = TempDir::new().unwrap();
    let missing = tmp.path().join("gone.srt");
    // Snapshot references a file that does not exist on disk.
    let mut cache = empty_snapshot_cache();
    cache.file_snapshot = vec![SnapshotItem {
        path: missing.to_string_lossy().into_owned(),
        name: "gone.srt".into(),
        size: 100,
        mtime: 999,
        file_type: "subtitle".into(),
    }];
    let (label, status) = describe_snapshot(&cache);
    assert_eq!(status, "stale", "label: {label}");
    assert!(label.starts_with("Stale"), "label: {label}");
}
// clear_file and config-path resolution tests.
#[test]
fn clear_file_returns_true_and_removes_existing_file() {
    let tmp = TempDir::new().unwrap();
    let target = tmp.path().join("to_delete.txt");
    std::fs::write(&target, "data").unwrap();
    assert!(target.exists());
    let result = clear_file(&target, "Cache").expect("should succeed");
    assert!(result, "should return true when file existed");
    assert!(!target.exists(), "file should be removed");
}
#[test]
fn clear_file_returns_false_when_file_absent() {
    let tmp = TempDir::new().unwrap();
    let missing = tmp.path().join("nonexistent.txt");
    assert!(!missing.exists());
    let result = clear_file(&missing, "Cache").expect("should succeed");
    assert!(!result, "should return false when file was absent");
}
#[test]
fn get_config_dir_uses_xdg_config_home_when_set() {
    let tmp = TempDir::new().unwrap();
    // set_var mutates process-global state; acceptable in tests.
    unsafe {
        std::env::set_var("XDG_CONFIG_HOME", tmp.path());
    }
    let dir = get_config_dir().expect("should succeed");
    assert_eq!(dir, tmp.path());
}
#[test]
fn cache_path_ends_with_expected_components() {
    let tmp = TempDir::new().unwrap();
    unsafe {
        std::env::set_var("XDG_CONFIG_HOME", tmp.path());
    }
    let p = cache_path().expect("should succeed");
    assert!(p.ends_with("subx/match_cache.json"));
}
#[test]
fn journal_path_ends_with_expected_components() {
    let tmp = TempDir::new().unwrap();
    unsafe {
        std::env::set_var("XDG_CONFIG_HOME", tmp.path());
    }
    let p = journal_path().expect("should succeed");
    assert!(p.ends_with("subx/match_journal.json"));
}
// verify_destination_integrity: pass, missing file, size drift, mtime drift.
#[test]
fn verify_destination_integrity_ok_when_metadata_matches() {
    let tmp = TempDir::new().unwrap();
    let dst = tmp.path().join("dest.srt");
    std::fs::write(&dst, "hello").unwrap();
    let entry = make_journal_entry(
        JournalOperationType::Copied,
        tmp.path().join("src.srt"),
        dst,
    );
    verify_destination_integrity(&entry).expect("should pass integrity check");
}
#[test]
fn verify_destination_integrity_errors_when_file_missing() {
    let tmp = TempDir::new().unwrap();
    let dst = tmp.path().join("missing.srt");
    // Entry is hand-built since the destination deliberately does not exist.
    let entry = JournalEntry {
        operation_type: JournalOperationType::Copied,
        source: tmp.path().join("src.srt"),
        destination: dst,
        backup_path: None,
        status: JournalEntryStatus::Completed,
        file_size: 5,
        file_mtime: 1_700_000_000,
    };
    let err = verify_destination_integrity(&entry).expect_err("should fail");
    let msg = format!("{err}");
    assert!(
        msg.contains("no longer exists"),
        "error should mention missing file: {msg}"
    );
}
#[test]
fn verify_destination_integrity_errors_on_size_mismatch() {
    let tmp = TempDir::new().unwrap();
    let dst = tmp.path().join("sized.srt");
    std::fs::write(&dst, "hello").unwrap();
    let meta = std::fs::metadata(&dst).unwrap();
    let mtime = meta
        .modified()
        .unwrap()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    // Correct mtime but wrong recorded size (999 vs actual 5 bytes).
    let entry = JournalEntry {
        operation_type: JournalOperationType::Copied,
        source: tmp.path().join("src.srt"),
        destination: dst,
        backup_path: None,
        status: JournalEntryStatus::Completed,
        file_size: 999,
        file_mtime: mtime,
    };
    let err = verify_destination_integrity(&entry).expect_err("should fail on size mismatch");
    let msg = format!("{err}");
    assert!(
        msg.contains("size differs"),
        "error should mention size: {msg}"
    );
}
#[test]
fn verify_destination_integrity_errors_on_mtime_mismatch() {
    let tmp = TempDir::new().unwrap();
    let dst = tmp.path().join("mtimed.srt");
    std::fs::write(&dst, "hello").unwrap();
    let meta = std::fs::metadata(&dst).unwrap();
    // Correct size but an obviously stale recorded mtime (epoch + 1s).
    let entry = JournalEntry {
        operation_type: JournalOperationType::Copied,
        source: tmp.path().join("src.srt"),
        destination: dst,
        backup_path: None,
        status: JournalEntryStatus::Completed,
        file_size: meta.len(),
        file_mtime: 1,
    };
    let err = verify_destination_integrity(&entry).expect_err("should fail on mtime mismatch");
    let msg = format!("{err}");
    assert!(
        msg.contains("mtime differs"),
        "error should mention mtime: {msg}"
    );
}
// rollback_entry: per-operation-type rollback behavior and --force handling.
#[test]
fn rollback_entry_copied_removes_destination() {
    let tmp = TempDir::new().unwrap();
    let src = tmp.path().join("src.srt");
    let dst = tmp.path().join("dst.srt");
    std::fs::write(&src, "original").unwrap();
    std::fs::write(&dst, "copy").unwrap();
    let entry = make_journal_entry(JournalOperationType::Copied, src.clone(), dst.clone());
    rollback_entry(&entry, false).expect("rollback copy");
    assert!(!dst.exists(), "copy destination must be removed");
    assert!(src.exists(), "source must remain");
}
#[test]
fn rollback_entry_moved_restores_source() {
    let tmp = TempDir::new().unwrap();
    let src = tmp.path().join("original.srt");
    let dst = tmp.path().join("moved.srt");
    std::fs::write(&dst, "payload").unwrap();
    let entry = make_journal_entry(JournalOperationType::Moved, src.clone(), dst.clone());
    rollback_entry(&entry, false).expect("rollback move");
    assert!(src.exists(), "source must be restored");
    assert!(!dst.exists(), "destination must be removed");
    // Content must travel back with the rename.
    assert_eq!(std::fs::read_to_string(&src).unwrap(), "payload");
}
#[test]
fn rollback_entry_renamed_restores_source() {
    let tmp = TempDir::new().unwrap();
    let src = tmp.path().join("old_name.srt");
    let dst = tmp.path().join("new_name.srt");
    std::fs::write(&dst, "content").unwrap();
    let entry = make_journal_entry(JournalOperationType::Renamed, src.clone(), dst.clone());
    rollback_entry(&entry, false).expect("rollback rename");
    assert!(src.exists(), "original name must be restored");
    assert!(!dst.exists(), "new name must be gone");
}
#[test]
fn rollback_entry_moved_errors_when_source_exists_without_force() {
    let tmp = TempDir::new().unwrap();
    let src = tmp.path().join("exists.srt");
    let dst = tmp.path().join("dest.srt");
    // Both paths exist: rollback would clobber src, so it must refuse.
    std::fs::write(&src, "already here").unwrap();
    std::fs::write(&dst, "moved here").unwrap();
    let entry = make_journal_entry(JournalOperationType::Moved, src.clone(), dst.clone());
    let err = rollback_entry(&entry, false).expect_err("should abort when source exists");
    let msg = format!("{err}");
    assert!(
        msg.contains("already exists"),
        "error should mention conflict: {msg}"
    );
}
#[test]
fn rollback_entry_moved_with_force_overwrites_existing_source() {
    let tmp = TempDir::new().unwrap();
    let src = tmp.path().join("src_force.srt");
    let dst = tmp.path().join("dst_force.srt");
    std::fs::write(&src, "old").unwrap();
    std::fs::write(&dst, "new content").unwrap();
    let entry = make_journal_entry(JournalOperationType::Moved, src.clone(), dst.clone());
    rollback_entry(&entry, true).expect("force rollback should succeed");
    assert!(src.exists(), "source must exist after force rollback");
    assert!(!dst.exists(), "destination must be gone");
    // Force overwrites: the restored file carries the destination's content.
    assert_eq!(std::fs::read_to_string(&src).unwrap(), "new content");
}
/// Rolling back a `Copied` entry must also delete its recorded backup file.
#[test]
fn rollback_entry_removes_existing_backup() {
    let dir = TempDir::new().unwrap();
    let source = dir.path().join("src_bak.srt");
    let copy = dir.path().join("dst_bak.srt");
    let backup = dir.path().join("src_bak.srt.bak");
    std::fs::write(&copy, "copy").unwrap();
    std::fs::write(&backup, "backup content").unwrap();
    // Record the copy's real size/mtime so the integrity check passes.
    let meta = std::fs::metadata(&copy).unwrap();
    let modified = meta.modified().unwrap();
    let mtime = modified
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let entry = JournalEntry {
        status: JournalEntryStatus::Completed,
        operation_type: JournalOperationType::Copied,
        source,
        destination: copy.clone(),
        backup_path: Some(backup.clone()),
        file_size: meta.len(),
        file_mtime: mtime,
    };
    rollback_entry(&entry, false).expect("rollback with backup");
    assert!(!copy.exists(), "copy destination must be removed");
    assert!(!backup.exists(), "backup must be deleted");
}
/// A `backup_path` pointing at a file that no longer exists must not make
/// the rollback fail — the destination is still removed.
#[test]
fn rollback_entry_tolerates_missing_backup_file() {
    let dir = TempDir::new().unwrap();
    let source = dir.path().join("src_nobak.srt");
    let copy = dir.path().join("dst_nobak.srt");
    std::fs::write(&copy, "copy").unwrap();
    let meta = std::fs::metadata(&copy).unwrap();
    let modified = meta.modified().unwrap();
    let mtime = modified
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let entry = JournalEntry {
        status: JournalEntryStatus::Completed,
        operation_type: JournalOperationType::Copied,
        source,
        destination: copy.clone(),
        // Deliberately references a backup file that was never written.
        backup_path: Some(dir.path().join("missing_backup.srt.bak")),
        file_size: meta.len(),
        file_mtime: mtime,
    };
    rollback_entry(&entry, false).expect("missing backup should not cause error");
    assert!(!copy.exists());
}
/// `cache status --json` on a config dir with no cache file must still succeed.
///
/// NOTE(review): the test name promises that the JSON payload reports
/// `exists: false`, but the body only asserts that `execute_status` returns
/// Ok — the emitted output is never captured or inspected. Consider capturing
/// stdout to verify the payload, or renaming the test to match what it checks.
#[tokio::test]
async fn execute_status_no_cache_json_output_contains_exists_false() {
// Fresh isolated config dir: guaranteed to contain no cache file.
let (_tmp, subx_dir) = isolated_config_dir();
let cache_file = subx_dir.join("match_cache.json");
// Precondition: status must run against a genuinely absent cache.
assert!(!cache_file.exists());
let svc = TestConfigService::with_defaults();
let args = crate::cli::StatusArgs { json: true };
execute_status(&args, &svc)
.await
.expect("status must succeed without cache");
}
/// Plain (non-JSON) `cache status` succeeds even when no cache file exists.
#[tokio::test]
async fn execute_status_no_cache_plain_output_is_ok() {
    let (_tmp, config_dir) = isolated_config_dir();
    // Precondition: the isolated dir starts without a cache file.
    assert!(!config_dir.join("match_cache.json").exists());
    let svc = TestConfigService::with_defaults();
    let args = crate::cli::StatusArgs { json: false };
    execute_status(&args, &svc)
        .await
        .expect("status must succeed without cache (plain)");
}
/// Plain status output succeeds for a well-formed cache whose config hash
/// matches the current configuration.
#[tokio::test]
async fn execute_status_valid_cache_plain_succeeds() {
    let (_tmp, config_dir) = isolated_config_dir();
    let svc = TestConfigService::with_defaults();
    // Hash computed the same way production does, so the cache validates.
    let backup_enabled = svc.get_config().unwrap().general.backup_enabled;
    let hash = compute_config_hash("None", backup_enabled);
    let created = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let cache = serde_json::json!({
        "cache_version": "1.0",
        "directory": "/some/dir",
        "file_snapshot": [],
        "match_operations": [
            {
                "video_file": "/some/video.mkv",
                "subtitle_file": "/some/sub.srt",
                "new_subtitle_name": "video.srt",
                "confidence": 0.95,
                "reasoning": []
            }
        ],
        "created_at": created,
        "ai_model_used": "gpt-4",
        "config_hash": hash,
        "original_relocation_mode": "None",
        "original_backup_enabled": false,
    });
    std::fs::write(
        config_dir.join("match_cache.json"),
        serde_json::to_string(&cache).unwrap(),
    )
    .unwrap();
    let args = crate::cli::StatusArgs { json: false };
    execute_status(&args, &svc)
        .await
        .expect("status with matching hash must succeed");
}
/// JSON status succeeds when both cache and journal files are present and
/// the stored config hash matches the current configuration.
#[tokio::test]
async fn execute_status_valid_cache_json_mode_succeeds() {
    let (_tmp, config_dir) = isolated_config_dir();
    let svc = TestConfigService::with_defaults();
    let backup_enabled = svc.get_config().unwrap().general.backup_enabled;
    let hash = compute_config_hash("None", backup_enabled);
    let created = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let cache = serde_json::json!({
        "cache_version": "1.0",
        "directory": "/some/dir",
        "file_snapshot": [],
        "match_operations": [],
        "created_at": created,
        "ai_model_used": "gpt-4",
        "config_hash": hash,
        "original_relocation_mode": "None",
        "original_backup_enabled": false,
    });
    std::fs::write(
        config_dir.join("match_cache.json"),
        serde_json::to_string(&cache).unwrap(),
    )
    .unwrap();
    // An (empty) journal file alongside the cache should be tolerated.
    std::fs::write(config_dir.join("match_journal.json"), "{}").unwrap();
    let args = crate::cli::StatusArgs { json: true };
    execute_status(&args, &svc)
        .await
        .expect("JSON status must succeed with matching hash");
}
/// `cache status` still returns Ok when the cache's stored config hash does
/// not match the current configuration.
///
/// NOTE(review): the test name says the mismatch "shows in plain output", but
/// the body only asserts success — the rendered output is never captured or
/// checked. Consider capturing stdout or renaming the test.
#[tokio::test]
async fn execute_status_mismatched_hash_shows_in_plain_output() {
let (_tmp, subx_dir) = isolated_config_dir();
let cache_file = subx_dir.join("match_cache.json");
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs();
// A sentinel hash that `compute_config_hash` cannot produce for the
// default test configuration, forcing a mismatch.
let cache = serde_json::json!({
"cache_version": "1.0",
"directory": "/some/dir",
"file_snapshot": [],
"match_operations": [],
"created_at": now,
"ai_model_used": "gpt-4",
"config_hash": "00000000deadbeef",
"original_relocation_mode": "None",
"original_backup_enabled": false,
});
std::fs::write(&cache_file, serde_json::to_string(&cache).unwrap()).unwrap();
let svc = TestConfigService::with_defaults();
let args = crate::cli::StatusArgs { json: false };
execute_status(&args, &svc)
.await
.expect("status succeeds even with mismatched config hash");
}
/// A journal containing only `Pending` entries rolls back nothing: the
/// journal file is kept and the destination file is left untouched.
#[tokio::test]
async fn execute_rollback_journal_with_only_pending_entries_is_noop() {
    use crate::core::matcher::journal::JournalData;
    let (_tmp, config_dir) = isolated_config_dir();
    let journal_path = config_dir.join("match_journal.json");
    let work = TempDir::new().unwrap();
    let destination = work.path().join("file.srt");
    std::fs::write(&destination, "data").unwrap();
    // Pending entries were never applied, so rollback must skip them.
    let journal = JournalData {
        batch_id: "pending-only".into(),
        created_at: 0,
        entries: vec![JournalEntry {
            operation_type: JournalOperationType::Copied,
            source: work.path().join("src.srt"),
            destination: destination.clone(),
            backup_path: None,
            status: JournalEntryStatus::Pending,
            file_size: 4,
            file_mtime: 0,
        }],
    };
    journal.save(&journal_path).await.expect("save journal");
    execute_rollback(&RollbackArgs { force: false })
        .await
        .expect("should succeed with only pending entries");
    assert!(
        journal_path.exists(),
        "journal kept when nothing was rolled back"
    );
    assert!(
        destination.exists(),
        "pending entry destination must be untouched"
    );
}
/// `--force` bypasses the size/mtime integrity check: the rollback proceeds
/// despite deliberately wrong journal metadata and the journal is deleted.
#[tokio::test]
async fn execute_rollback_force_skips_integrity_check() {
    use crate::core::matcher::journal::JournalData;
    let (_tmp, config_dir) = isolated_config_dir();
    let journal_path = config_dir.join("match_journal.json");
    let work = TempDir::new().unwrap();
    let original = work.path().join("orig.srt");
    let copy = work.path().join("copy.srt");
    std::fs::write(&copy, "data").unwrap();
    // file_size/file_mtime are deliberately wrong — a non-forced rollback
    // would trip on this mismatch.
    let journal = JournalData {
        batch_id: "force-batch".into(),
        created_at: 0,
        entries: vec![JournalEntry {
            operation_type: JournalOperationType::Copied,
            source: original.clone(),
            destination: copy.clone(),
            backup_path: None,
            status: JournalEntryStatus::Completed,
            file_size: 9999,
            file_mtime: 9999,
        }],
    };
    journal.save(&journal_path).await.expect("save journal");
    execute_rollback(&RollbackArgs { force: true })
        .await
        .expect("force rollback should succeed despite integrity mismatch");
    assert!(!copy.exists(), "copy destination must be removed");
    assert!(!journal_path.exists(), "journal must be deleted");
}
/// `cache clear --type journal` through the top-level dispatcher removes the
/// journal file while leaving the cache file in place.
#[tokio::test]
async fn execute_with_config_clear_journal_type_works() {
    use std::sync::Arc;
    let (_tmp, config_dir) = isolated_config_dir();
    let journal_path = config_dir.join("match_journal.json");
    let cache_path = config_dir.join("match_cache.json");
    std::fs::write(&journal_path, "{}").unwrap();
    std::fs::write(&cache_path, "{}").unwrap();
    let args = CacheArgs {
        action: crate::cli::CacheAction::Clear(crate::cli::ClearArgs {
            r#type: crate::cli::ClearType::Journal,
        }),
    };
    execute_with_config(args, Arc::new(TestConfigService::with_defaults()))
        .await
        .expect("clear journal via execute_with_config");
    assert!(!journal_path.exists(), "journal should be removed");
    assert!(cache_path.exists(), "cache should remain");
}
/// A confidence threshold above every cached operation's score filters the
/// batch down to zero operations, and `execute_apply` still returns Ok.
#[tokio::test]
async fn execute_apply_confidence_filter_removes_low_confidence_ops() {
    use crate::cli::ApplyArgs;
    let (_tmp, config_dir) = isolated_config_dir();
    let svc = TestConfigService::with_defaults();
    let backup_enabled = svc.get_config().unwrap().general.backup_enabled;
    let hash = compute_config_hash("None", backup_enabled);
    let created = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    // Single operation at 50% confidence — below the 80% threshold below.
    let cache = serde_json::json!({
        "cache_version": "1.0",
        "directory": "/dir",
        "file_snapshot": [],
        "match_operations": [
            {
                "video_file": "/dir/v1.mkv",
                "subtitle_file": "/dir/s1.srt",
                "new_subtitle_name": "v1.srt",
                "confidence": 0.5,
                "reasoning": []
            }
        ],
        "created_at": created,
        "ai_model_used": "gpt-4",
        "config_hash": hash,
        "original_relocation_mode": "None",
        "original_backup_enabled": false,
    });
    std::fs::write(
        config_dir.join("match_cache.json"),
        serde_json::to_string(&cache).unwrap(),
    )
    .unwrap();
    let apply_args = ApplyArgs {
        yes: true,
        force: true,
        confidence: Some(80),
    };
    let result = execute_apply(&apply_args, &svc).await;
    assert!(
        result.is_ok(),
        "confidence filter to empty ops should be Ok: {result:?}"
    );
}
}