use crate::Result;
use crate::cli::{ApplyArgs, CacheArgs, ClearArgs, ClearType, RollbackArgs, StatusArgs};
use crate::config::ConfigService;
use crate::core::lock::acquire_subx_lock;
use crate::core::matcher::cache::CacheData;
use crate::core::matcher::engine::{FileRelocationMode, MatchConfig, apply_cached_operations};
use crate::core::matcher::journal::{
JournalData, JournalEntry, JournalEntryStatus, JournalOperationType,
};
use crate::error::SubXError;
use serde_json::json;
use std::io::IsTerminal;
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
/// Resolves the base configuration directory.
///
/// Honors `XDG_CONFIG_HOME` when it is set *and non-empty* (the XDG Base
/// Directory spec says an empty value must be treated as unset), otherwise
/// falls back to the platform default from `dirs::config_dir()`.
///
/// # Errors
/// Returns a config error when no directory can be determined.
fn get_config_dir() -> Result<PathBuf> {
    match std::env::var_os("XDG_CONFIG_HOME") {
        // An empty XDG_CONFIG_HOME would previously yield an empty PathBuf;
        // ignore it per the XDG spec and fall through to the default.
        Some(xdg_config) if !xdg_config.is_empty() => Ok(PathBuf::from(xdg_config)),
        _ => dirs::config_dir()
            .ok_or_else(|| SubXError::config("Unable to determine config directory")),
    }
}
/// Absolute path of the persisted match cache file
/// (`<config>/subx/match_cache.json`).
fn cache_path() -> Result<PathBuf> {
    get_config_dir().map(|dir| dir.join("subx").join("match_cache.json"))
}
/// Absolute path of the persisted operation journal file
/// (`<config>/subx/match_journal.json`).
fn journal_path() -> Result<PathBuf> {
    get_config_dir().map(|dir| dir.join("subx").join("match_journal.json"))
}
/// Deletes `path` if it exists, printing the outcome with `label` as prefix.
///
/// Returns `Ok(true)` when a file was actually removed, `Ok(false)` when
/// nothing existed at `path`. I/O failures during removal propagate as errors.
fn clear_file(path: &Path, label: &str) -> Result<bool> {
    if !path.exists() {
        println!("{} not found: {}", label, path.display());
        return Ok(false);
    }
    std::fs::remove_file(path)?;
    println!("{} cleared: {}", label, path.display());
    Ok(true)
}
async fn execute_clear(args: &ClearArgs) -> Result<()> {
let _lock = acquire_subx_lock().await?;
let config_dir = get_config_dir()?;
let cache_file = config_dir.join("subx").join("match_cache.json");
let journal_file = config_dir.join("subx").join("match_journal.json");
let mut cleared_any = false;
match args.r#type {
ClearType::Cache => {
cleared_any |= clear_file(&cache_file, "Cache")?;
}
ClearType::Journal => {
cleared_any |= clear_file(&journal_file, "Journal")?;
}
ClearType::All => {
cleared_any |= clear_file(&cache_file, "Cache")?;
cleared_any |= clear_file(&journal_file, "Journal")?;
}
}
if !cleared_any {
println!("No cache files found to clear.");
}
Ok(())
}
/// Fingerprints the settings that affect how cached match operations are
/// applied (relocation mode string and backup flag) as 16 lowercase hex
/// digits.
///
/// NOTE(review): `DefaultHasher` is deterministic within a single build but
/// its algorithm is not guaranteed stable across Rust releases, so hashes
/// persisted in the cache file may stop matching after a toolchain upgrade —
/// confirm that this invalidation is acceptable before relying on it
/// long-term.
fn compute_config_hash(relocation_mode_debug: &str, backup_enabled: bool) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut hasher = DefaultHasher::new();
    // Hash order matters: relocation mode first, then the backup flag.
    relocation_mode_debug.hash(&mut hasher);
    backup_enabled.hash(&mut hasher);
    format!("{:016x}", hasher.finish())
}
/// Computes the config hash for the *current* configuration, for comparison
/// against the hash stored in the cache by `execute_status`.
///
/// NOTE(review): the relocation mode is hard-coded to "None" here, while
/// `execute_apply` hashes `cache.original_relocation_mode`. For caches
/// created with Copy/Move relocation, the status `config_hash_match` result
/// can therefore disagree with the apply-time hash check — confirm whether
/// that asymmetry is intended.
fn current_config_hash(config_service: &dyn ConfigService) -> Result<String> {
    let config = config_service.get_config()?;
    Ok(compute_config_hash("None", config.general.backup_enabled))
}
/// Renders a byte count as a human-readable size: "N B" below 1 KiB,
/// otherwise one decimal place with a KB/MB/GB suffix (1024-based units).
fn format_size(bytes: u64) -> String {
    let kib = 1024.0_f64;
    let mib = kib * 1024.0;
    let gib = mib * 1024.0;
    match bytes as f64 {
        v if v >= gib => format!("{:.1} GB", v / gib),
        v if v >= mib => format!("{:.1} MB", v / mib),
        v if v >= kib => format!("{:.1} KB", v / kib),
        _ => format!("{} B", bytes),
    }
}
/// Renders an age in seconds as a coarse human-readable phrase, using the
/// largest whole unit (seconds, minutes, hours, or days), truncating down.
fn format_age(age_secs: u64) -> String {
    let minute = 60_u64;
    let hour = minute * 60;
    let day = hour * 24;
    if age_secs >= day {
        format!("{} days ago", age_secs / day)
    } else if age_secs >= hour {
        format!("{} hours ago", age_secs / hour)
    } else if age_secs >= minute {
        format!("{} minutes ago", age_secs / minute)
    } else {
        format!("{} seconds ago", age_secs)
    }
}
/// Classifies the cache's file snapshot for display: a legacy cache with no
/// snapshot data, a snapshot that still matches the files on disk, or a
/// stale snapshot with a count of changed files. Returns the human-readable
/// label plus a machine-readable status keyword ("empty"/"valid"/"stale").
fn describe_snapshot(cache: &CacheData) -> (String, &'static str) {
    if cache.has_empty_snapshot() {
        return ("Empty (legacy cache)".to_string(), "empty");
    }
    let changed = cache.validate_snapshot();
    match changed.len() {
        0 => ("Valid".to_string(), "valid"),
        n => (format!("Stale ({} files changed)", n), "stale"),
    }
}
/// Implements `cache status`: reports cache presence, size, age, config-hash
/// match, snapshot validity, and journal presence — as plain text or, with
/// `--json`, as pretty-printed JSON.
///
/// # Errors
/// Fails when the cache file exists but cannot be parsed, on filesystem
/// metadata errors, or when the current configuration cannot be read.
pub async fn execute_status(args: &StatusArgs, config_service: &dyn ConfigService) -> Result<()> {
    let cache_file = cache_path()?;
    let journal_file = journal_path()?;
    // A missing cache is not an error: report it and exit successfully.
    if !cache_file.exists() {
        if args.json {
            let payload = json!({
                "path": cache_file.to_string_lossy(),
                "exists": false,
                "journal_present": journal_file.exists(),
            });
            println!("{}", serde_json::to_string_pretty(&payload)?);
        } else {
            println!("No cache found at {}", cache_file.display());
        }
        return Ok(());
    }
    let cache = CacheData::load(&cache_file).map_err(|e| {
        SubXError::config(format!(
            "Failed to load cache at {}: {}",
            cache_file.display(),
            e
        ))
    })?;
    let metadata = std::fs::metadata(&cache_file)?;
    let size_bytes = metadata.len();
    // A clock before the Unix epoch degrades to "now == 0" instead of erroring.
    let now_secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);
    // saturating_sub guards against a cache timestamp in the future.
    let age_secs = now_secs.saturating_sub(cache.created_at);
    let current_hash = current_config_hash(config_service)?;
    let hash_match = current_hash == cache.config_hash;
    let (snapshot_label, snapshot_status) = describe_snapshot(&cache);
    // NOTE(review): describe_snapshot already ran validate_snapshot once;
    // this runs it a second time to obtain the stale list. Consider
    // computing the list once and deriving the label from it.
    let stale_entries = if snapshot_status == "stale" {
        cache.validate_snapshot()
    } else {
        Vec::new()
    };
    let journal_present = journal_file.exists();
    if args.json {
        let stale_files: Vec<serde_json::Value> = stale_entries
            .iter()
            .map(|s| json!({ "path": s.path, "reason": s.reason }))
            .collect();
        let payload = json!({
            "path": cache_file.to_string_lossy(),
            "exists": true,
            "size_bytes": size_bytes,
            "created_at": cache.created_at,
            "age_seconds": age_secs,
            "cache_version": cache.cache_version,
            "ai_model": cache.ai_model_used,
            "operation_count": cache.match_operations.len(),
            "config_hash": cache.config_hash,
            "config_hash_match": hash_match,
            "current_config_hash": current_hash,
            "snapshot_status": snapshot_status,
            "stale_files": stale_files,
            "journal_present": journal_present,
        });
        println!("{}", serde_json::to_string_pretty(&payload)?);
    } else {
        let config_line = if hash_match {
            "✓ (matches current)".to_string()
        } else {
            format!("✗ (differs from current: {})", current_hash)
        };
        let journal_line = if journal_present {
            "Present"
        } else {
            "Not found"
        };
        println!("Cache Status");
        println!("============");
        println!("Path: {}", cache_file.display());
        println!("Size: {}", format_size(size_bytes));
        println!("Age: {}", format_age(age_secs));
        println!("Cache version: {}", cache.cache_version);
        println!("AI model: {}", cache.ai_model_used);
        println!("Operations: {}", cache.match_operations.len());
        println!("Config hash: {}", cache.config_hash);
        println!("Config match: {}", config_line);
        println!("Snapshot: {}", snapshot_label);
        println!("Journal: {}", journal_line);
    }
    Ok(())
}
/// Implements `cache apply`: validates the cached dry-run result (config
/// hash, file snapshot, target-path conflicts), optionally filters by
/// confidence, asks for confirmation, then executes the cached operations.
///
/// All safety checks can be bypassed with `--force`; the interactive
/// confirmation with `--yes`.
///
/// # Errors
/// Fails when the cache cannot be loaded, a safety check trips without
/// `--force`, stdin is non-interactive without `--yes`, or applying the
/// operations fails.
pub async fn execute_apply(args: &ApplyArgs, config_service: &dyn ConfigService) -> Result<()> {
    // Serialize with other subx processes: apply mutates the filesystem.
    let _lock = acquire_subx_lock().await?;
    let cache_file = cache_path()?;
    if !cache_file.exists() {
        println!(
            "No cache found at {}. Run a dry-run match first.",
            cache_file.display()
        );
        return Ok(());
    }
    let mut cache = CacheData::load(&cache_file).map_err(|e| {
        SubXError::config(format!(
            "Failed to load cache at {}: {}",
            cache_file.display(),
            e
        ))
    })?;
    // Fetch the configuration once and reuse it for both the hash check and
    // the MatchConfig below (the original fetched it a second time just
    // before building MatchConfig).
    let config = config_service.get_config()?;
    // Recompute the hash with the cache's own relocation mode so that only a
    // real settings change (e.g. the backup flag) is flagged.
    let apply_hash = compute_config_hash(
        &cache.original_relocation_mode,
        config.general.backup_enabled,
    );
    if apply_hash != cache.config_hash && !args.force {
        return Err(SubXError::config(format!(
            "Configuration has changed since the cache was created.\n\
             Cache hash: {}\n\
             Current hash: {}\n\
             Use --force to bypass this check.",
            cache.config_hash, apply_hash
        )));
    }
    if cache.has_empty_snapshot() && !args.force {
        return Err(SubXError::config(
            "Cache was created without file snapshot data (legacy format).\n\
             Cannot verify file integrity. Use --force to apply anyway."
                .to_string(),
        ));
    }
    if !args.force && !cache.has_empty_snapshot() {
        let stale = cache.validate_snapshot();
        if !stale.is_empty() {
            let mut msg = format!(
                "{} source file(s) have changed since the cache was created:\n",
                stale.len()
            );
            for s in &stale {
                msg.push_str(&format!(" - {} ({})\n", s.path, s.reason));
            }
            msg.push_str("Use --force to apply anyway.");
            return Err(SubXError::config(msg));
        }
    }
    if !args.force {
        let conflicts = cache.validate_target_paths();
        if !conflicts.is_empty() {
            let mut msg = format!("{} target path(s) already exist:\n", conflicts.len());
            for p in &conflicts {
                msg.push_str(&format!(" - {}\n", p.display()));
            }
            msg.push_str("Use --force to apply anyway.");
            return Err(SubXError::config(msg));
        }
    }
    // Optional filter: --confidence is a whole percentage (0-100), while the
    // cached confidences are fractions in [0, 1].
    if let Some(min_conf) = args.confidence {
        let threshold = f32::from(min_conf) / 100.0;
        let before = cache.match_operations.len();
        cache
            .match_operations
            .retain(|op| op.confidence >= threshold);
        let after = cache.match_operations.len();
        if before != after {
            println!(
                "Filtered {} operation(s) below {}% confidence.",
                before - after,
                min_conf
            );
        }
    }
    if cache.match_operations.is_empty() {
        println!("No operations to apply.");
        return Ok(());
    }
    println!("Cache Apply Summary");
    println!("===================");
    println!("Operations: {}", cache.match_operations.len());
    println!("AI model: {}", cache.ai_model_used);
    println!("Relocation mode: {}", cache.original_relocation_mode);
    println!();
    for (i, op) in cache.match_operations.iter().enumerate() {
        println!(
            " {}. {} → {} (confidence: {:.0}%)",
            i + 1,
            op.subtitle_file,
            op.new_subtitle_name,
            op.confidence * 100.0
        );
    }
    println!();
    // Interactive confirmation unless --yes; refuse to guess an answer when
    // stdin is not a TTY (e.g. in scripts or CI).
    if !args.yes {
        if !std::io::stdin().is_terminal() {
            return Err(SubXError::config(
                "Non-interactive terminal detected. Use --yes to skip confirmation.".to_string(),
            ));
        }
        print!("Proceed with apply? [y/N] ");
        use std::io::Write;
        std::io::stdout().flush()?;
        let mut input = String::new();
        std::io::stdin().read_line(&mut input)?;
        if !input.trim().eq_ignore_ascii_case("y") {
            println!("Apply cancelled.");
            return Ok(());
        }
    }
    let relocation_mode = parse_relocation_mode(&cache.original_relocation_mode);
    // Threshold is 0.0 here: confidence filtering (if requested) already
    // happened above, so the engine must not filter again.
    let match_config = MatchConfig {
        confidence_threshold: 0.0,
        max_sample_length: 2000,
        enable_content_analysis: true,
        backup_enabled: cache.original_backup_enabled,
        relocation_mode,
        conflict_resolution: crate::core::matcher::engine::ConflictResolution::Skip,
        ai_model: cache.ai_model_used.clone(),
        max_subtitle_bytes: config.general.max_subtitle_bytes,
    };
    apply_cached_operations(&cache, &match_config).await?;
    println!("Apply complete.");
    Ok(())
}
/// Maps the relocation-mode string persisted in the cache back to a
/// `FileRelocationMode`. Unrecognized values fall back to `None`
/// (case-sensitive comparison, matching what the cache stores).
fn parse_relocation_mode(s: &str) -> FileRelocationMode {
    if s == "Copy" {
        FileRelocationMode::Copy
    } else if s == "Move" {
        FileRelocationMode::Move
    } else {
        FileRelocationMode::None
    }
}
/// Checks that a journal entry's destination file still matches the size and
/// mtime recorded when the operation ran, erroring out when the file was
/// removed or modified. Callers skip this check under `--force`.
///
/// An unreadable mtime is tolerated; only a readable, *differing* mtime
/// causes a failure.
fn verify_destination_integrity(entry: &JournalEntry) -> Result<()> {
    let metadata = std::fs::metadata(&entry.destination).map_err(|e| {
        if e.kind() == std::io::ErrorKind::NotFound {
            SubXError::config(format!(
                "Destination file {} no longer exists. Use --force to override.",
                entry.destination.display()
            ))
        } else {
            SubXError::Io(e)
        }
    })?;
    if metadata.len() != entry.file_size {
        return Err(SubXError::config(format!(
            "Destination file {} has been modified since the operation (size differs). Use --force to override.",
            entry.destination.display()
        )));
    }
    let actual_mtime = metadata
        .modified()
        .ok()
        .and_then(|m| m.duration_since(UNIX_EPOCH).ok())
        .map(|d| d.as_secs());
    if let Some(actual) = actual_mtime {
        if actual != entry.file_mtime {
            return Err(SubXError::config(format!(
                "Destination file {} has been modified since the operation (mtime differs). Use --force to override.",
                entry.destination.display()
            )));
        }
    }
    Ok(())
}
/// Undoes one journal entry: deletes the destination for a Copy, or moves
/// the file back to its original source path for a Move/Rename, then removes
/// any recorded backup file.
///
/// `force` allows the Move/Rename rollback to overwrite an existing file at
/// the original source path.
fn rollback_entry(entry: &JournalEntry, force: bool) -> Result<()> {
    match entry.operation_type {
        JournalOperationType::Copied => {
            // A copy is undone by deleting the duplicate; the source file
            // was never touched.
            std::fs::remove_file(&entry.destination)?;
            println!("Removed copy: {}", entry.destination.display());
        }
        JournalOperationType::Moved | JournalOperationType::Renamed => {
            // Refuse to clobber a file that reappeared at the source path
            // unless the caller explicitly forces it.
            if entry.source.exists() && !force {
                return Err(SubXError::config(format!(
                    "Original source path {} already exists. \
                     Rollback would overwrite it. Use --force to override.",
                    entry.source.display()
                )));
            }
            // Recreate the original parent directory in case the move left
            // it behind and it was since deleted. The empty-parent check
            // skips bare relative filenames (parent "" would fail).
            if let Some(parent) = entry.source.parent() {
                if !parent.as_os_str().is_empty() {
                    std::fs::create_dir_all(parent)?;
                }
            }
            std::fs::rename(&entry.destination, &entry.source)?;
            println!(
                "Rolled back: {} \u{2190} {}",
                entry.source.display(),
                entry.destination.display()
            );
        }
    }
    // Backups only make sense while the operation stands; remove them after
    // a successful rollback.
    if let Some(backup) = &entry.backup_path {
        if backup.exists() {
            std::fs::remove_file(backup)?;
            println!("Removed backup: {}", backup.display());
        }
    }
    Ok(())
}
/// Implements `cache rollback`: undoes all completed journal entries in
/// reverse order, then deletes the journal.
///
/// Unlike the original interleaved check-then-act loop, every destination is
/// verified *before* any file is touched. Previously, a stale entry detected
/// mid-sequence aborted the loop after some files had already been rolled
/// back, leaving a partially rolled-back state with the journal still
/// claiming all entries as applied.
///
/// # Errors
/// Fails when the journal cannot be loaded, an entry fails verification
/// (without `--force`), or a filesystem operation during rollback fails.
pub async fn execute_rollback(args: &RollbackArgs) -> Result<()> {
    // Serialize with other subx processes: rollback mutates the filesystem.
    let _lock = acquire_subx_lock().await?;
    let journal_file = journal_path()?;
    if !journal_file.exists() {
        println!("No operation journal found. Nothing to rollback.");
        return Ok(());
    }
    let journal = JournalData::load(&journal_file).await?;
    // Undo in reverse order so later operations are reverted before the
    // earlier ones they may depend on.
    let reversed: Vec<&JournalEntry> = journal
        .entries
        .iter()
        .filter(|e| e.status == JournalEntryStatus::Completed)
        .rev()
        .collect();
    if reversed.is_empty() {
        println!("Journal has no completed operations to rollback.");
        return Ok(());
    }
    // Fail fast: verify every destination up front so a stale entry aborts
    // the whole rollback before any file has been moved.
    if !args.force {
        for entry in &reversed {
            verify_destination_integrity(entry)?;
        }
    }
    println!(
        "Rolling back {} operations from batch {}...",
        reversed.len(),
        journal.batch_id
    );
    for entry in &reversed {
        rollback_entry(entry, args.force)?;
    }
    std::fs::remove_file(&journal_file)?;
    println!("Rollback complete. Journal deleted.");
    Ok(())
}
pub async fn execute(args: CacheArgs) -> Result<()> {
match args.action {
crate::cli::CacheAction::Clear(clear_args) => {
execute_clear(&clear_args).await?;
}
crate::cli::CacheAction::Status(status_args) => {
let config_service = crate::config::ProductionConfigService::new()?;
execute_status(&status_args, &config_service).await?;
}
crate::cli::CacheAction::Apply(ref apply_args) => {
let config_service = crate::config::ProductionConfigService::new()?;
execute_apply(apply_args, &config_service).await?;
}
crate::cli::CacheAction::Rollback(rollback_args) => {
execute_rollback(&rollback_args).await?;
}
}
Ok(())
}
pub async fn execute_with_config(
args: CacheArgs,
config_service: std::sync::Arc<dyn ConfigService>,
) -> Result<()> {
match args.action {
crate::cli::CacheAction::Status(status_args) => {
execute_status(&status_args, config_service.as_ref()).await
}
crate::cli::CacheAction::Apply(apply_args) => {
execute_apply(&apply_args, config_service.as_ref()).await
}
other => execute(CacheArgs { action: other }).await,
}
}