use std::collections::BTreeMap;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};
use chrono::{DateTime, Utc};
use serde::Serialize;
use sha2::{Digest, Sha256};
use zip::write::SimpleFileOptions;
use zip::CompressionMethod;
use crate::cli::output::{OutputConfig, OutputFormat};
use crate::cli::DoctorArgs;
use crate::config;
use crate::error::OlError;
use crate::privacy::{HitCounter, PrivacyFilter};
use crate::telemetry::{self, Event};
/// Replacement body written into the archive in place of the daemon token.
const TOKEN_REDACTED_MARKER: &str = "[REDACTED:TOKEN]";
/// Archive size (50 MiB) above which a warning is shown; not enforced.
const ARCHIVE_SOFT_CAP_BYTES: u64 = 50 * 1024 * 1024;
/// Per-file cap (5 MiB) for `events-*.jsonl` logs before tail-truncation.
const EVENT_LOG_FILE_CAP_BYTES: usize = 5 * 1024 * 1024;
/// Tail size (1 MiB) kept from `daemon.log` when it is oversized.
const DAEMON_LOG_TAIL_CAP_BYTES: usize = 1024 * 1024;
/// Execute `openlatch doctor --rescue`: build a redacted support archive.
///
/// Collects config files, size-capped logs, daemon `/health` and `/metrics`
/// probe snapshots, host/binary metadata, redactor hit counts, and a
/// `MANIFEST.json` into a zip at `args.output` (default: current directory).
/// Individual collector failures are recorded in the manifest and logged,
/// not fatal; only archive creation/finalization and manifest serialization
/// return `Err`. `fix_applied_after` is only echoed into the manifest and
/// telemetry, not acted upon here.
pub fn run(
    args: &DoctorArgs,
    output: &OutputConfig,
    fix_applied_after: bool,
) -> Result<(), OlError> {
    // Wall clock for labels/manifest; monotonic clock for the duration metric.
    let started_at = Utc::now();
    let started_instant = Instant::now();
    let ol_dir = config::openlatch_dir();
    crate::cli::header::print(output, &["doctor", "--rescue"]);
    if output.format == OutputFormat::Human && !output.quiet {
        eprintln!();
    }
    // Log collection window start; currently informational only
    // (build_collection_plan ignores it — see its `_logs_since` parameter).
    let since = parse_since(args.since.as_deref())?;
    let logs_since = started_at - since;
    // Best-effort port lookup; fall back to the start of the scan range.
    let port = config::Config::load(None, None, false)
        .map(|c| c.port)
        .unwrap_or(config::PORT_RANGE_START);
    let daemon_health = probe_endpoint(port, "/health");
    let daemon_metrics = probe_endpoint(port, "/metrics");
    let daemon_reachable = daemon_health.body.is_some() || daemon_metrics.body.is_some();
    let plan = build_collection_plan(&ol_dir, logs_since);
    // Consent gate unless --yes was passed. In JSON / quiet / non-TTY modes
    // confirm_inventory returns Ok(false), which cancels the run here.
    if !args.yes && !confirm_inventory(&plan, output)? {
        if output.format == OutputFormat::Json {
            output.print_json(&serde_json::json!({
                "command": "doctor_rescue",
                "status": "cancelled",
                "message": "user declined inventory prompt",
            }));
        } else if !output.quiet {
            eprintln!("Cancelled.");
        }
        return Ok(());
    }
    // Archive name: openlatch-rescue-<UTC timestamp>-<short agent id>.zip
    let agid = read_agent_id_short(&ol_dir);
    let ts_label = started_at.format("%Y%m%dT%H%M%SZ").to_string();
    let archive_name = format!("openlatch-rescue-{ts_label}-{agid}.zip");
    let archive_path = match args.output.as_ref() {
        Some(p) => p.clone(),
        None => std::env::current_dir()
            .unwrap_or_else(|_| PathBuf::from("."))
            .join(&archive_name),
    };
    let archive_file = std::fs::File::create(&archive_path).map_err(|e| {
        OlError::new(
            crate::error::ERR_DOCTOR_RESCUE_WRITE_FAILED,
            format!(
                "cannot create rescue archive '{}': {e}",
                archive_path.display()
            ),
        )
    })?;
    let mut zw = zip::ZipWriter::new(archive_file);
    let zip_opts = SimpleFileOptions::default().compression_method(CompressionMethod::Deflated);
    // Privacy filter including any user-configured extra patterns.
    // NOTE(review): config is loaded a second time here (the load above only
    // extracted the port) — presumably cheap; confirm it does no heavy I/O.
    let cfg = config::Config::load(None, None, false).ok();
    let extra = cfg.map(|c| c.extra_patterns).unwrap_or_default();
    let filter = PrivacyFilter::new(&extra);
    let mut total_hits = HitCounter::new();
    let mut file_records: Vec<FileRecord> = Vec::new();
    let mut had_partial_failure = false;
    // Collect each planned file; a failed collector still gets a manifest
    // record (redaction_failed = true) so the archive is self-describing.
    for entry in &plan.entries {
        match collect_file(entry, &filter, &mut total_hits, &mut zw, zip_opts) {
            Ok(record) => file_records.push(record),
            Err(e) => {
                had_partial_failure = true;
                tracing::warn!(
                    error = %e.message,
                    code = e.code,
                    path = %entry.live_path.display(),
                    "doctor --rescue: collector failed for entry"
                );
                file_records.push(FileRecord {
                    path: entry.archive_path.clone(),
                    size_bytes: 0,
                    sha256: String::new(),
                    redactors_applied: Vec::new(),
                    redaction_failed: true,
                    note: Some(format!("collector errored: {}", e.code)),
                    truncated: false,
                });
            }
        }
    }
    // Daemon probe snapshots. NOTE(review): to_json_bytes() re-serializes the
    // probe result on every call (three calls per probe below).
    if let Err(e) = write_zip_entry(
        &mut zw,
        zip_opts,
        "system/daemon-health.json",
        &daemon_health.to_json_bytes(),
    ) {
        had_partial_failure = true;
        tracing::warn!(error = %e.message, "doctor --rescue: cannot write daemon-health.json");
    } else {
        file_records.push(FileRecord {
            path: "system/daemon-health.json".to_string(),
            size_bytes: daemon_health.to_json_bytes().len() as u64,
            sha256: sha256_hex(&daemon_health.to_json_bytes()),
            redactors_applied: Vec::new(),
            redaction_failed: false,
            note: Some(format!("status={}", daemon_health.status_label())),
            truncated: false,
        });
    }
    if let Err(e) = write_zip_entry(
        &mut zw,
        zip_opts,
        "system/daemon-metrics.json",
        &daemon_metrics.to_json_bytes(),
    ) {
        had_partial_failure = true;
        tracing::warn!(error = %e.message, "doctor --rescue: cannot write daemon-metrics.json");
    } else {
        file_records.push(FileRecord {
            path: "system/daemon-metrics.json".to_string(),
            size_bytes: daemon_metrics.to_json_bytes().len() as u64,
            sha256: sha256_hex(&daemon_metrics.to_json_bytes()),
            redactors_applied: Vec::new(),
            redaction_failed: false,
            note: Some(format!("status={}", daemon_metrics.status_label())),
            truncated: false,
        });
    }
    // Host OS facts (compile-time constants only; nothing is probed).
    let os_info = serde_json::json!({
        "os": std::env::consts::OS,
        "arch": std::env::consts::ARCH,
        "family": std::env::consts::FAMILY,
    });
    let os_bytes = serde_json::to_vec_pretty(&os_info).unwrap_or_default();
    let _ = write_zip_entry(&mut zw, zip_opts, "system/os-info.json", &os_bytes);
    file_records.push(FileRecord {
        path: "system/os-info.json".to_string(),
        size_bytes: os_bytes.len() as u64,
        sha256: sha256_hex(&os_bytes),
        redactors_applied: Vec::new(),
        redaction_failed: false,
        note: None,
        truncated: false,
    });
    // Binary metadata (paths, sizes, hashes) — never the binaries themselves.
    let bin_info = collect_binary_info();
    let bin_bytes = serde_json::to_vec_pretty(&bin_info).unwrap_or_default();
    let _ = write_zip_entry(&mut zw, zip_opts, "system/binary-info.json", &bin_bytes);
    file_records.push(FileRecord {
        path: "system/binary-info.json".to_string(),
        size_bytes: bin_bytes.len() as u64,
        sha256: sha256_hex(&bin_bytes),
        redactors_applied: Vec::new(),
        redaction_failed: false,
        note: None,
        truncated: false,
    });
    // Redactor hit counts only — never the matched text itself.
    let hits_value = serde_json::json!({
        "total_hits": total_hits.total(),
        "by_pattern": total_hits.as_map(),
    });
    let hits_bytes = serde_json::to_vec_pretty(&hits_value).unwrap_or_default();
    let _ = write_zip_entry(&mut zw, zip_opts, "privacy/redactor-hits.json", &hits_bytes);
    // Agent detection summary. NOTE(review): detect_agent() runs twice here
    // (and again in build_collection_plan) — confirm it is cheap.
    let claude_present = match crate::hooks::detect_agent() {
        Ok(crate::hooks::DetectedAgent::ClaudeCode { settings_path, .. }) => settings_path.exists(),
        _ => false,
    };
    let agents_checked = serde_json::json!({
        "claude-code": {
            "found": claude_present,
            "config_path": match crate::hooks::detect_agent() {
                Ok(crate::hooks::DetectedAgent::ClaudeCode { settings_path, .. }) =>
                    redact_home(&settings_path.display().to_string()),
                _ => String::new(),
            },
            "openlatch_hooks_present": claude_present,
        },
        "cursor": {"found": false, "reason": "not_supported_in_v1"},
        "windsurf": {"found": false, "reason": "not_supported_in_v1"},
        "github-copilot": {"found": false, "reason": "not_supported_in_v1"},
        "codex-cli": {"found": false, "reason": "not_supported_in_v1"},
        "gemini-cli": {"found": false, "reason": "not_supported_in_v1"},
        "cline": {"found": false, "reason": "not_supported_in_v1"},
        "openclaw": {"found": false, "reason": "not_supported_in_v1"},
    });
    let completed_at = Utc::now();
    let duration_ms = started_instant.elapsed().as_millis() as u64;
    // MANIFEST.json is written last so it can describe every other entry.
    let manifest = serde_json::json!({
        "manifest_version": "1",
        "generated_at": completed_at,
        "tool": {
            "name": "openlatch-client",
            "version": env!("CARGO_PKG_VERSION"),
            "build_profile": if cfg!(debug_assertions) { "debug" } else { "release" },
        },
        "host": {
            "os": std::env::consts::OS,
            "arch": std::env::consts::ARCH,
        },
        "collection": {
            "started_at": started_at,
            "completed_at": completed_at,
            "duration_ms": duration_ms,
            "window": { "logs_since": logs_since },
            "daemon_reachable": daemon_reachable,
            "fix_applied_after": fix_applied_after,
            "partial_failure": had_partial_failure,
        },
        "files": file_records,
        "agents_checked": agents_checked,
        "redactors": {
            "total_hits": total_hits.total(),
            "by_pattern": total_hits.as_map(),
        },
    });
    let manifest_bytes = serde_json::to_vec_pretty(&manifest).map_err(|e| {
        OlError::new(
            crate::error::ERR_DOCTOR_RESCUE_WRITE_FAILED,
            format!("cannot serialize MANIFEST.json: {e}"),
        )
    })?;
    write_zip_entry(&mut zw, zip_opts, "MANIFEST.json", &manifest_bytes)?;
    // finish() returns the underlying File; drop it to close the handle
    // before we stat the archive for its final size below.
    let archive_handle = zw.finish().map_err(|e| {
        OlError::new(
            crate::error::ERR_DOCTOR_RESCUE_WRITE_FAILED,
            format!("cannot finalize archive: {e}"),
        )
    })?;
    drop(archive_handle);
    let archive_size = std::fs::metadata(&archive_path)
        .map(|m| m.len())
        .unwrap_or(0);
    print_rescue_results(
        &archive_path,
        archive_size,
        file_records.len(),
        daemon_reachable,
        claude_present,
        total_hits.total(),
        duration_ms,
        fix_applied_after,
        had_partial_failure,
        output,
    );
    if had_partial_failure {
        tracing::warn!(
            code = crate::error::ERR_DOCTOR_RESCUE_PARTIAL,
            "doctor --rescue: archive produced with one or more collector failures"
        );
    }
    let agents_found_for_telemetry: Vec<&str> = if claude_present {
        vec!["claude-code"]
    } else {
        vec![]
    };
    telemetry::capture_global(Event::doctor_rescue_run(
        archive_size,
        file_records.len(),
        daemon_reachable,
        agents_found_for_telemetry,
        total_hits.total(),
        duration_ms,
        fix_applied_after,
    ));
    Ok(())
}
/// Ordered set of files selected for inclusion in the rescue archive.
#[derive(Debug, Clone)]
struct CollectionPlan {
    // Collection order; also drives the inventory confirmation prompt.
    entries: Vec<CollectionEntry>,
}
/// One file to copy into the archive, plus how to sanitize it on the way in.
#[derive(Debug, Clone)]
struct CollectionEntry {
    // Path on disk to read from.
    live_path: PathBuf,
    // Relative path inside the zip (e.g. "config/config.toml").
    archive_path: String,
    // Redaction/truncation applied before the bytes are written.
    strategy: RedactionStrategy,
}
/// How a collected file's contents are sanitized before archiving.
#[derive(Debug, Clone, PartialEq, Eq)]
enum RedactionStrategy {
    /// Run every line through the privacy filter.
    PrivacyFilter,
    /// Privacy-filter after capping the file at `cap_bytes` (keeping the
    /// tail when `keep_tail` is true, otherwise the head).
    PrivacyFilterTruncating { cap_bytes: usize, keep_tail: bool },
    /// Replace the entire contents with `TOKEN_REDACTED_MARKER` (secrets).
    HardRedactToken,
    /// Privacy-filter, then rewrite the user's home directory to `~`.
    PrivacyFilterAndPathRedact,
    /// Copy verbatim (only for known-safe files like the port number).
    Passthrough,
}
fn build_collection_plan(ol_dir: &Path, _logs_since: DateTime<Utc>) -> CollectionPlan {
let mut entries = Vec::new();
let candidates = [
(
ol_dir.join("config.toml"),
"config/config.toml",
RedactionStrategy::PrivacyFilter,
),
(
ol_dir.join("telemetry.json"),
"config/telemetry.json",
RedactionStrategy::PrivacyFilter,
),
(
ol_dir.join("daemon.token"),
"config/daemon.token",
RedactionStrategy::HardRedactToken,
),
(
ol_dir.join("daemon.port"),
"config/daemon.port",
RedactionStrategy::Passthrough,
),
(
ol_dir.join(crate::cli::commands::doctor_fix::JOURNAL_FILENAME),
"config/fix-journal.json",
RedactionStrategy::PrivacyFilterAndPathRedact,
),
];
for (live, archive, strategy) in candidates {
if live.exists() && !is_excluded_extension(&live) {
entries.push(CollectionEntry {
live_path: live,
archive_path: archive.to_string(),
strategy,
});
}
}
let logs_dir = ol_dir.join("logs");
if logs_dir.exists() {
if let Ok(read) = std::fs::read_dir(&logs_dir) {
for ent in read.flatten() {
let p = ent.path();
if !p.is_file() || is_excluded_extension(&p) {
continue;
}
let name = match p.file_name().and_then(|n| n.to_str()) {
Some(s) => s.to_string(),
None => continue,
};
let archive_path = format!("logs/{name}");
let strategy = if name == "daemon.log" {
RedactionStrategy::PrivacyFilterTruncating {
cap_bytes: DAEMON_LOG_TAIL_CAP_BYTES,
keep_tail: true,
}
} else if name.starts_with("events-") && name.ends_with(".jsonl") {
RedactionStrategy::PrivacyFilterTruncating {
cap_bytes: EVENT_LOG_FILE_CAP_BYTES,
keep_tail: true,
}
} else if name == "fallback.jsonl" {
RedactionStrategy::PrivacyFilter
} else {
continue;
};
entries.push(CollectionEntry {
live_path: p,
archive_path,
strategy,
});
}
}
}
if let Ok(crate::hooks::DetectedAgent::ClaudeCode { settings_path, .. }) =
crate::hooks::detect_agent()
{
if settings_path.exists() && !is_excluded_extension(&settings_path) {
entries.push(CollectionEntry {
live_path: settings_path,
archive_path: "agents/claude-code/settings.json".to_string(),
strategy: RedactionStrategy::PrivacyFilter,
});
}
}
CollectionPlan { entries }
}
/// Manifest entry describing one file written into the archive.
#[derive(Debug, Clone, Serialize)]
struct FileRecord {
    // Path inside the zip.
    path: String,
    // Size of the (redacted, possibly truncated) bytes actually written.
    size_bytes: u64,
    // SHA-256 of the written bytes; empty when collection failed.
    sha256: String,
    // Labels of privacy patterns that matched at least once for this file.
    redactors_applied: Vec<&'static str>,
    // True when the collector errored and nothing usable was written.
    redaction_failed: bool,
    // Free-form annotation (probe status, collector error code, ...).
    #[serde(skip_serializing_if = "Option::is_none")]
    note: Option<String>,
    // True when the source exceeded its size cap and was trimmed.
    truncated: bool,
}
/// Read one planned file, apply its redaction strategy, write the result
/// into the zip, and return the manifest record describing what was written.
///
/// Per-file redactor hits are folded into `total_hits`. Errors are returned
/// rather than logged so the caller decides how to record partial failures.
fn collect_file(
    entry: &CollectionEntry,
    filter: &PrivacyFilter,
    total_hits: &mut HitCounter,
    zw: &mut zip::ZipWriter<std::fs::File>,
    zip_opts: SimpleFileOptions,
) -> Result<FileRecord, OlError> {
    let raw = std::fs::read_to_string(&entry.live_path).map_err(|e| {
        OlError::new(
            crate::error::ERR_DOCTOR_RESCUE_PARTIAL,
            format!(
                "cannot read '{}' for rescue bundle: {e}",
                entry.live_path.display()
            ),
        )
    })?;
    let mut local_hits = HitCounter::new();
    let mut truncated = false;
    let body = match &entry.strategy {
        // Secrets never make it into the archive, even filtered.
        RedactionStrategy::HardRedactToken => TOKEN_REDACTED_MARKER.to_string(),
        RedactionStrategy::Passthrough => raw,
        RedactionStrategy::PrivacyFilter => filter_text(&raw, filter, &mut local_hits),
        RedactionStrategy::PrivacyFilterAndPathRedact => {
            let filtered = filter_text(&raw, filter, &mut local_hits);
            redact_home(&filtered)
        }
        RedactionStrategy::PrivacyFilterTruncating {
            cap_bytes,
            keep_tail,
        } => {
            let mut text = raw;
            if text.len() > *cap_bytes {
                truncated = true;
                if *keep_tail {
                    // Keep the last cap_bytes, nudging forward to a char
                    // boundary so the slice below cannot panic on UTF-8.
                    // (Was a redundant immutable-then-mutable shadow pair.)
                    let mut start = text.len() - cap_bytes;
                    while start < text.len() && !text.is_char_boundary(start) {
                        start += 1;
                    }
                    text = text[start..].to_string();
                } else {
                    // Keep the first cap_bytes, nudging back to a boundary.
                    let mut end = *cap_bytes;
                    while end > 0 && !text.is_char_boundary(end) {
                        end -= 1;
                    }
                    text.truncate(end);
                }
            }
            filter_text(&text, filter, &mut local_hits)
        }
    };
    let bytes = body.as_bytes();
    write_zip_entry(zw, zip_opts, &entry.archive_path, bytes)?;
    // Record which redactors fired, sorted for stable manifests.
    let mut redactors_applied: Vec<&'static str> = local_hits
        .iter()
        .filter(|(_, n)| *n > 0)
        .map(|(label, _)| label)
        .collect();
    redactors_applied.sort();
    if matches!(entry.strategy, RedactionStrategy::HardRedactToken) {
        redactors_applied.push("HARD_TOKEN");
    }
    for (label, n) in local_hits.iter() {
        total_hits.add(label, n);
    }
    Ok(FileRecord {
        path: entry.archive_path.clone(),
        size_bytes: bytes.len() as u64,
        sha256: sha256_hex(bytes),
        redactors_applied,
        redaction_failed: false,
        note: None,
        truncated,
    })
}
/// Run `raw` line-by-line through the privacy filter, preserving whether the
/// input ended with a trailing newline.
///
/// Each line is wrapped in a JSON string because the privacy layer operates
/// on `serde_json::Value` events. NOTE(review): if the filter ever replaces a
/// line with a non-string value, `as_str()` yields None and the line
/// collapses to "" — presumably the filter always returns strings here;
/// confirm against crate::privacy.
fn filter_text(raw: &str, filter: &PrivacyFilter, hits: &mut HitCounter) -> String {
    let trailing_newline = raw.ends_with('\n');
    let mut out: Vec<String> = Vec::new();
    for line in raw.lines() {
        let mut value = serde_json::Value::String(line.to_string());
        crate::privacy::filter_event_with_hits(&mut value, filter, hits);
        out.push(value.as_str().map(|s| s.to_string()).unwrap_or_default());
    }
    let mut joined = out.join("\n");
    if trailing_newline {
        joined.push('\n');
    }
    joined
}
/// Start a named entry in the archive and write `bytes` into it, mapping
/// both zip-layer failures onto `ERR_DOCTOR_RESCUE_WRITE_FAILED`.
fn write_zip_entry(
    zw: &mut zip::ZipWriter<std::fs::File>,
    opts: SimpleFileOptions,
    name: &str,
    bytes: &[u8],
) -> Result<(), OlError> {
    if let Err(e) = zw.start_file(name, opts) {
        return Err(OlError::new(
            crate::error::ERR_DOCTOR_RESCUE_WRITE_FAILED,
            format!("cannot start zip entry '{name}': {e}"),
        ));
    }
    match zw.write_all(bytes) {
        Ok(()) => Ok(()),
        Err(e) => Err(OlError::new(
            crate::error::ERR_DOCTOR_RESCUE_WRITE_FAILED,
            format!("cannot write zip entry '{name}': {e}"),
        )),
    }
}
/// True for scratch files (`.bak` / `.tmp`) that must never be archived.
fn is_excluded_extension(p: &Path) -> bool {
    p.extension()
        .and_then(|ext| ext.to_str())
        .map_or(false, |ext| ext == "bak" || ext == "tmp")
}
/// Replace the user's home directory with `~` in `text`.
///
/// Both the native spelling and a forward-slash variant are replaced so that
/// Windows paths written either way are anonymised.
fn redact_home(text: &str) -> String {
    let home_str = match dirs::home_dir() {
        Some(home) => home.display().to_string(),
        None => return text.to_string(),
    };
    if home_str.is_empty() {
        return text.to_string();
    }
    let forward_slashes = home_str.replace('\\', "/");
    text.replace(&home_str, "~").replace(&forward_slashes, "~")
}
/// Lowercase hex SHA-256 digest of `bytes`.
fn sha256_hex(bytes: &[u8]) -> String {
    Sha256::digest(bytes)
        .iter()
        .fold(String::with_capacity(64), |mut acc, byte| {
            acc.push_str(&format!("{byte:02x}"));
            acc
        })
}
/// Best-effort short agent id for the archive filename.
///
/// Prefers the parsed config; if loading fails, falls back to a crude line
/// scan of config.toml for an `agent_id = "..."` entry. Returns "unknown"
/// when no id can be found either way.
fn read_agent_id_short(ol_dir: &Path) -> String {
    let cfg = match config::Config::load(None, None, false) {
        Ok(c) => c,
        Err(_) => {
            // Raw fallback scan tolerates a file the loader rejects.
            let raw = std::fs::read_to_string(ol_dir.join("config.toml")).unwrap_or_default();
            for line in raw.lines() {
                // NOTE(review): this prefix match would also catch keys like
                // "agent_identity"; acceptable for a best-effort label.
                if let Some(rest) = line.trim().strip_prefix("agent_id") {
                    if let Some(eq) = rest.find('=') {
                        let val = rest[eq + 1..].trim().trim_matches('"');
                        return short_agid(val);
                    }
                }
            }
            return "unknown".to_string();
        }
    };
    cfg.agent_id
        .as_deref()
        .map(short_agid)
        .unwrap_or_else(|| "unknown".to_string())
}
/// First four characters of the agent id, with any "agt_" prefix removed.
fn short_agid(full: &str) -> String {
    let trimmed = full.strip_prefix("agt_").unwrap_or(full);
    trimmed.chars().take(4).collect()
}
/// Outcome of probing one local daemon HTTP endpoint.
#[derive(Debug, Clone, Serialize)]
struct ProbeResult {
    // Endpoint path probed, e.g. "/health".
    endpoint: String,
    // Parsed JSON body, when the request and JSON parse both succeeded.
    body: Option<serde_json::Value>,
    // Human-readable failure description, when anything went wrong.
    error: Option<String>,
}
impl ProbeResult {
    /// "ok" when a JSON body was captured, otherwise "unreachable".
    fn status_label(&self) -> &'static str {
        match self.body {
            Some(_) => "ok",
            None => "unreachable",
        }
    }

    /// Pretty-printed JSON serialization; empty on serializer failure.
    fn to_json_bytes(&self) -> Vec<u8> {
        serde_json::to_vec_pretty(self).unwrap_or_default()
    }
}
/// Probe a local daemon endpoint with a 500 ms timeout and parse the JSON
/// response. Any failure (client build, request, parse) is captured in the
/// result's `error` field rather than returned as an `Err`.
fn probe_endpoint(port: u16, path: &str) -> ProbeResult {
    let endpoint = path.to_string();
    let failure = |endpoint: String, msg: String| ProbeResult {
        endpoint,
        body: None,
        error: Some(msg),
    };
    let url = format!("http://127.0.0.1:{port}{path}");
    let client = match reqwest::blocking::Client::builder()
        .timeout(Duration::from_millis(500))
        .build()
    {
        Ok(c) => c,
        Err(e) => return failure(endpoint, format!("client build failed: {e}")),
    };
    let resp = match client.get(&url).send() {
        Ok(r) => r,
        Err(e) => return failure(endpoint, format!("request failed: {e}")),
    };
    match resp.json::<serde_json::Value>() {
        Ok(v) => ProbeResult {
            endpoint,
            body: Some(v),
            error: None,
        },
        Err(e) => failure(endpoint, format!("body parse failed: {e}")),
    }
}
/// Metadata about the installed openlatch binaries, keyed by binary name.
#[derive(Debug, Clone, Serialize)]
struct BinaryInfo {
    // BTreeMap keeps the serialized output in stable alphabetical order.
    binaries: BTreeMap<String, BinaryRecord>,
}
/// Metadata for a single binary: path, size, hash, version — never contents.
#[derive(Debug, Clone, Serialize)]
struct BinaryRecord {
    // Home-redacted display path of the binary.
    path: String,
    // On-disk size, when the file could be stat'd.
    size_bytes: Option<u64>,
    // Hex SHA-256 of the file, when it could be read.
    sha256: Option<String>,
    // Version compiled into this CLI build.
    version: String,
    // Whether the binary could be located on disk.
    locatable: bool,
}
/// Gather path, size, SHA-256, and version metadata for the openlatch CLI
/// and hook binaries. Only metadata is collected — never binary contents.
fn collect_binary_info() -> BinaryInfo {
    let mut binaries = BTreeMap::new();

    // Resolve our own executable once and reuse it for both the display
    // path and hashing (previously resolved twice, which could disagree).
    let exe = std::env::current_exe().ok();
    let openlatch_path = exe
        .as_ref()
        .map(|p| redact_home(&p.display().to_string()))
        .unwrap_or_default();
    let (size, sha) = exe
        .as_ref()
        .map(|p| binary_size_and_sha(p))
        .unwrap_or((None, None));
    binaries.insert(
        "openlatch".to_string(),
        BinaryRecord {
            path: openlatch_path,
            size_bytes: size,
            sha256: sha,
            version: env!("CARGO_PKG_VERSION").to_string(),
            // Was hardcoded `true`; now reflects whether the executable
            // path could actually be resolved.
            locatable: exe.is_some(),
        },
    );
    let hook_path = crate::hooks::resolve_hook_binary_path();
    // Check existence once; reused for both hashing and `locatable`.
    let hook_exists = hook_path.exists();
    let (hook_size, hook_sha) = if hook_exists {
        binary_size_and_sha(&hook_path)
    } else {
        (None, None)
    };
    binaries.insert(
        "openlatch-hook".to_string(),
        BinaryRecord {
            path: redact_home(&hook_path.display().to_string()),
            size_bytes: hook_size,
            sha256: hook_sha,
            version: env!("CARGO_PKG_VERSION").to_string(),
            locatable: hook_exists,
        },
    );
    BinaryInfo { binaries }
}
/// Stat and stream-hash a binary: (size, lowercase hex SHA-256).
/// Either element is None when the corresponding filesystem step fails.
fn binary_size_and_sha(path: &Path) -> (Option<u64>, Option<String>) {
    let size = std::fs::metadata(path).ok().map(|meta| meta.len());
    let sha = match std::fs::File::open(path) {
        Err(_) => None,
        Ok(mut file) => {
            // Stream in 64 KiB chunks so large binaries are never
            // loaded into memory whole.
            let mut hasher = Sha256::new();
            let mut chunk = [0u8; 64 * 1024];
            let mut read_ok = true;
            loop {
                match file.read(&mut chunk) {
                    Ok(0) => break,
                    Ok(n) => hasher.update(&chunk[..n]),
                    Err(_) => {
                        read_ok = false;
                        break;
                    }
                }
            }
            if read_ok {
                Some(
                    hasher
                        .finalize()
                        .iter()
                        .map(|b| format!("{b:02x}"))
                        .collect::<String>(),
                )
            } else {
                None
            }
        }
    };
    (size, sha)
}
/// Parse a `--since` duration like "30m", "4h", or "7d".
///
/// `None` or an empty string defaults to 24 hours. Returns an
/// `ERR_DOCTOR_RESCUE_PARTIAL` error for a missing unit suffix, a
/// non-numeric count, an unknown unit, or a non-positive duration.
pub(crate) fn parse_since(input: Option<&str>) -> Result<chrono::Duration, OlError> {
    let raw = input.unwrap_or("24h").trim();
    if raw.is_empty() {
        return Ok(chrono::Duration::hours(24));
    }
    // Split "<number><unit>" on the final character.
    let (num_part, unit) = match raw.chars().last() {
        Some(c) if c.is_alphabetic() => (&raw[..raw.len() - c.len_utf8()], c),
        _ => {
            return Err(OlError::new(
                crate::error::ERR_DOCTOR_RESCUE_PARTIAL,
                format!("--since '{raw}' is missing a unit suffix (e.g. 24h, 7d, 30m)"),
            ));
        }
    };
    let n: i64 = num_part.parse().map_err(|_| {
        OlError::new(
            crate::error::ERR_DOCTOR_RESCUE_PARTIAL,
            format!("--since '{raw}' is not a valid number+unit (e.g. 24h, 7d, 30m)"),
        )
    })?;
    let dur = match unit {
        'm' | 'M' => chrono::Duration::minutes(n),
        'h' | 'H' => chrono::Duration::hours(n),
        'd' | 'D' => chrono::Duration::days(n),
        _ => {
            return Err(OlError::new(
                crate::error::ERR_DOCTOR_RESCUE_PARTIAL,
                format!("--since '{raw}' has unknown unit '{unit}' (use m, h, or d)"),
            ));
        }
    };
    // Reject zero as well as negatives: the message promises a *positive*
    // duration, and a zero-length log window is never useful. (Previously
    // `< zero`, which silently accepted "0h"/"0m"/"0d".)
    if dur <= chrono::Duration::zero() {
        return Err(OlError::new(
            crate::error::ERR_DOCTOR_RESCUE_PARTIAL,
            format!("--since '{raw}' must be a positive duration"),
        ));
    }
    Ok(dur)
}
/// Print the collection inventory and ask the user to confirm on stderr.
///
/// Returns Ok(false) without prompting in JSON mode, quiet mode, or when
/// stdin is not a TTY — non-interactive runs must pass `--yes` instead.
/// Only "y" / "yes" (case-insensitive) count as consent.
fn confirm_inventory(plan: &CollectionPlan, output: &OutputConfig) -> Result<bool, OlError> {
    if output.format == OutputFormat::Json {
        return Ok(false);
    }
    if !std::io::IsTerminal::is_terminal(&std::io::stdin()) {
        return Ok(false);
    }
    if output.quiet {
        return Ok(false);
    }
    eprintln!("openlatch doctor --rescue will bundle the following:");
    for entry in &plan.entries {
        // Show the on-disk size; archived size may differ after redaction.
        let live_size = std::fs::metadata(&entry.live_path)
            .map(|m| m.len())
            .unwrap_or(0);
        let strategy_label = match entry.strategy {
            RedactionStrategy::HardRedactToken => "fully redacted",
            RedactionStrategy::Passthrough => "passthrough",
            RedactionStrategy::PrivacyFilter => "redacted",
            RedactionStrategy::PrivacyFilterAndPathRedact => "redacted (paths anonymised)",
            RedactionStrategy::PrivacyFilterTruncating { .. } => {
                "redacted, head-truncated if large"
            }
        };
        eprintln!(
            " - {} ({}, {})",
            redact_home(&entry.live_path.display().to_string()),
            human_size(live_size),
            strategy_label,
        );
    }
    eprintln!(" - Daemon /health and /metrics snapshots");
    eprintln!(" - Binary metadata (SHA256 + size + version) — no binary bytes");
    eprintln!(" - Per-pattern privacy redactor hit counts (no matched text)");
    eprintln!();
    // eprint! (not eprintln!) keeps the cursor on the prompt line so the
    // answer is typed after "[y/N] "; stderr is unbuffered, so the prompt
    // is visible before we block on stdin.
    eprint!("Continue? [y/N] ");
    let mut buf = String::new();
    std::io::stdin().read_line(&mut buf).map_err(|e| {
        OlError::new(
            crate::error::ERR_DOCTOR_RESCUE_PARTIAL,
            format!("cannot read confirmation: {e}"),
        )
    })?;
    let answer = buf.trim().to_ascii_lowercase();
    Ok(answer == "y" || answer == "yes")
}
/// Format a byte count as "N B", "N.N KB", or "N.N MB" (1024-based units).
fn human_size(n: u64) -> String {
    const KIB: f64 = 1024.0;
    const MIB: f64 = 1024.0 * 1024.0;
    if n < 1024 {
        return format!("{n} B");
    }
    let value = n as f64;
    if value < MIB {
        format!("{:.1} KB", value / KIB)
    } else {
        format!("{:.1} MB", value / MIB)
    }
}
/// Report the finished archive to the user: machine-readable JSON in JSON
/// mode, human-oriented stderr text otherwise, nothing when quiet.
#[allow(clippy::too_many_arguments)]
fn print_rescue_results(
    archive_path: &Path,
    archive_size: u64,
    files_collected: usize,
    daemon_reachable: bool,
    claude_present: bool,
    redactor_hits_total: u64,
    duration_ms: u64,
    fix_applied_after: bool,
    partial_failure: bool,
    output: &OutputConfig,
) {
    let mut agents: Vec<&str> = Vec::new();
    if claude_present {
        agents.push("claude-code");
    }
    let over_soft_cap = archive_size > ARCHIVE_SOFT_CAP_BYTES;
    if output.format == OutputFormat::Json {
        output.print_json(&serde_json::json!({
            "command": "doctor_rescue",
            "archive_path": archive_path.display().to_string(),
            "archive_size_bytes": archive_size,
            "files_collected_count": files_collected,
            "daemon_reachable": daemon_reachable,
            "agents_found": agents,
            "redactor_hits_total": redactor_hits_total,
            "duration_ms": duration_ms,
            "fix_applied_after": fix_applied_after,
            "partial_failure": partial_failure,
            "soft_cap_exceeded": over_soft_cap,
        }));
        return;
    }
    if output.quiet {
        return;
    }
    eprintln!("Rescue archive created:");
    eprintln!(
        " {} ({})",
        archive_path.display(),
        human_size(archive_size)
    );
    if over_soft_cap {
        eprintln!(
            " Warning: archive exceeds {} soft cap.",
            human_size(ARCHIVE_SOFT_CAP_BYTES)
        );
    }
    eprintln!();
    eprintln!("Share this with OpenLatch support:");
    eprintln!(" support@openlatch.ai");
    eprintln!(
        " or attach to a GitHub issue: https://github.com/OpenLatch/openlatch-client/issues"
    );
    eprintln!();
    eprintln!("Review contents before sharing: unzip -l <archive>");
}
// Unit tests for the pure helpers, plus end-to-end collector behaviour
// (hard token redaction and tail-preserving truncation) against real
// temp files and zip archives.
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    #[test]
    fn test_parse_since_default_24h() {
        let d = parse_since(None).unwrap();
        assert_eq!(d, chrono::Duration::hours(24));
    }

    #[test]
    fn test_parse_since_accepts_minutes_hours_days() {
        assert_eq!(
            parse_since(Some("30m")).unwrap(),
            chrono::Duration::minutes(30)
        );
        assert_eq!(parse_since(Some("4h")).unwrap(), chrono::Duration::hours(4));
        assert_eq!(parse_since(Some("7d")).unwrap(), chrono::Duration::days(7));
    }

    #[test]
    fn test_parse_since_rejects_missing_unit() {
        let err = parse_since(Some("100")).expect_err("must reject");
        assert_eq!(err.code, crate::error::ERR_DOCTOR_RESCUE_PARTIAL);
    }

    #[test]
    fn test_parse_since_rejects_unknown_unit() {
        let err = parse_since(Some("3y")).expect_err("must reject");
        assert_eq!(err.code, crate::error::ERR_DOCTOR_RESCUE_PARTIAL);
    }

    #[test]
    fn test_parse_since_rejects_negative() {
        let err = parse_since(Some("-1h")).expect_err("must reject negative");
        assert_eq!(err.code, crate::error::ERR_DOCTOR_RESCUE_PARTIAL);
    }

    #[test]
    fn test_is_excluded_extension_skips_bak_and_tmp() {
        assert!(is_excluded_extension(Path::new("foo.bak")));
        assert!(is_excluded_extension(Path::new("foo.tmp")));
        assert!(!is_excluded_extension(Path::new("foo.toml")));
        assert!(!is_excluded_extension(Path::new("foo.json")));
    }

    #[test]
    fn test_short_agid_takes_first_4_hex_chars() {
        assert_eq!(short_agid("agt_4f2ab1c2d3e4f567"), "4f2a");
        assert_eq!(short_agid("nopfx"), "nopf");
    }

    #[test]
    fn test_human_size_formats_kb_mb_b() {
        assert_eq!(human_size(512), "512 B");
        assert_eq!(human_size(1536), "1.5 KB");
        assert_eq!(human_size(2_097_152), "2.0 MB");
    }

    #[test]
    fn test_sha256_hex_matches_known_value() {
        // Well-known SHA-256 digest of "abc".
        assert_eq!(
            sha256_hex(b"abc"),
            "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
        );
    }

    #[test]
    fn test_filter_text_redacts_aws_key_and_records_hits() {
        let f = PrivacyFilter::new(&[]);
        let mut hits = HitCounter::new();
        let out = filter_text("export KEY=AKIA1234567890ABCDEF", &f, &mut hits);
        assert!(out.contains("[AWS_KEY:AKIA***]"), "got: {out}");
        assert_eq!(hits.as_map().get("AWS_KEY"), Some(&1));
    }

    #[test]
    fn test_collect_file_hard_redacts_token() {
        let tmp = TempDir::new().unwrap();
        let live = tmp.path().join("daemon.token");
        std::fs::write(&live, "deadbeef".repeat(8)).unwrap();
        let entry = CollectionEntry {
            live_path: live,
            archive_path: "config/daemon.token".to_string(),
            strategy: RedactionStrategy::HardRedactToken,
        };
        let f = PrivacyFilter::new(&[]);
        let mut hits = HitCounter::new();
        let zip_path = tmp.path().join("test.zip");
        let zip_file = std::fs::File::create(&zip_path).unwrap();
        let mut zw = zip::ZipWriter::new(zip_file);
        let opts = SimpleFileOptions::default().compression_method(CompressionMethod::Stored);
        let record = collect_file(&entry, &f, &mut hits, &mut zw, opts).expect("collect ok");
        zw.finish().unwrap();
        // Round-trip: the archived token body must be only the marker.
        let zip_file = std::fs::File::open(&zip_path).unwrap();
        let mut zr = zip::ZipArchive::new(zip_file).unwrap();
        let mut entry = zr.by_name("config/daemon.token").unwrap();
        let mut body = String::new();
        entry.read_to_string(&mut body).unwrap();
        assert_eq!(body, TOKEN_REDACTED_MARKER);
        assert!(record.redactors_applied.contains(&"HARD_TOKEN"));
    }

    #[test]
    fn test_collect_file_truncates_oversized_event_log_keeping_tail() {
        let tmp = TempDir::new().unwrap();
        let live = tmp.path().join("events-2026-04-16.jsonl");
        // Build a ~6 MiB log (above the 5 MiB cap) ending in a unique marker.
        let line = "{\"type\":\"x\"}\n";
        let mut content = String::with_capacity(6 * 1024 * 1024);
        while content.len() < 6 * 1024 * 1024 {
            content.push_str(line);
        }
        let unique_tail = "{\"type\":\"final-tail-marker\"}\n";
        content.push_str(unique_tail);
        std::fs::write(&live, &content).unwrap();
        let entry = CollectionEntry {
            live_path: live,
            archive_path: "logs/events-2026-04-16.jsonl".to_string(),
            strategy: RedactionStrategy::PrivacyFilterTruncating {
                cap_bytes: EVENT_LOG_FILE_CAP_BYTES,
                keep_tail: true,
            },
        };
        let f = PrivacyFilter::new(&[]);
        let mut hits = HitCounter::new();
        let zip_path = tmp.path().join("trunc.zip");
        let zip_file = std::fs::File::create(&zip_path).unwrap();
        let mut zw = zip::ZipWriter::new(zip_file);
        let opts = SimpleFileOptions::default().compression_method(CompressionMethod::Stored);
        let record = collect_file(&entry, &f, &mut hits, &mut zw, opts).unwrap();
        zw.finish().unwrap();
        assert!(record.truncated);
        assert!(record.size_bytes <= EVENT_LOG_FILE_CAP_BYTES as u64);
        // Tail-keeping truncation must preserve the final marker line.
        let zip_file = std::fs::File::open(&zip_path).unwrap();
        let mut zr = zip::ZipArchive::new(zip_file).unwrap();
        let mut e = zr.by_name("logs/events-2026-04-16.jsonl").unwrap();
        let mut body = String::new();
        e.read_to_string(&mut body).unwrap();
        assert!(
            body.contains("final-tail-marker"),
            "truncation dropped the tail"
        );
    }
}