// Replace the system allocator for the whole binary: jemalloc on Unix,
// mimalloc on Windows. Exactly one #[global_allocator] is active per
// target thanks to the cfg gates.
#[cfg(unix)]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[cfg(windows)]
#[global_allocator]
static GLOBAL_WIN: mimalloc::MiMalloc = mimalloc::MiMalloc;
use clap::{Parser, Subcommand};
use std::path::Path;
use std::process::ExitCode;
use zccache_cli::{
client_download, run_ino_convert_cached, ArchiveFormat, DownloadParams, DownloadSource,
InoConvertOptions, WaitMode,
};
use zccache_core::NormalizedPath;
use zccache_gha::{GhaCache, GhaError};
#[cfg(test)]
use std::path::PathBuf;
// Top-level CLI definition.
// NOTE: plain `//` comments are used (not `///`) because clap-derive
// turns doc comments into user-visible help text, which would change
// the CLI's output.
#[derive(Debug, Parser)]
#[command(name = "zccache", version, about)]
struct Cli {
    // Flag alternative to the `clear` subcommand (handled early in `main`).
    #[arg(long)]
    clear: bool,
    // Flag alternative to the `status` subcommand (handled early in `main`).
    #[arg(long)]
    show_stats: bool,
    #[command(subcommand)]
    command: Option<Commands>,
}
// All zccache subcommands. Any name added or aliased here must also be
// listed in KNOWN_SUBCOMMANDS, or `main` will treat it as an implicit
// compiler invocation and forward it to `run_wrap`.
// NOTE: `//` comments only — clap-derive turns `///` into help text.
#[derive(Debug, Subcommand)]
enum Commands {
    // Start the daemon (idempotent; see cmd_start/ensure_daemon).
    Start,
    // Stop the daemon; falls back to force-kill when IPC is down.
    #[command(visible_alias = "kill")]
    Stop,
    // Print daemon status and cache statistics.
    Status,
    // Clear all cached artifacts/metadata.
    Clear,
    // Begin a tracked compilation session; prints a JSON line with the id.
    #[command(name = "session-start")]
    SessionStart {
        #[arg(long)]
        cwd: Option<String>,
        #[arg(long)]
        log: Option<String>,
        #[arg(long)]
        endpoint: Option<String>,
        #[arg(long)]
        stats: bool,
        // Must end in `.jsonl` (validated in `main`).
        #[arg(long)]
        journal: Option<String>,
    },
    // End a session, printing its stats when tracking was enabled.
    #[command(name = "session-end")]
    SessionEnd {
        session_id: String,
        #[arg(long)]
        endpoint: Option<String>,
    },
    // Query stats of a still-active session.
    #[command(name = "session-stats")]
    SessionStatsCmd {
        session_id: String,
        #[arg(long)]
        endpoint: Option<String>,
    },
    // Explicit form of the implicit compiler-wrap fast path.
    Wrap {
        #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
        args: Vec<String>,
    },
    // Placeholder — prints "not yet implemented".
    Inspect {
        key: String,
    },
    // List (or with --clear, delete) daemon crash dumps.
    Crashes {
        #[arg(long)]
        clear: bool,
    },
    // Fingerprint-based skip/run decisions for build steps.
    #[command(name = "fp")]
    Fp {
        #[arg(long)]
        cache_file: String,
        #[arg(long, default_value = "two-layer")]
        cache_type: String,
        #[arg(long)]
        endpoint: Option<String>,
        #[command(subcommand)]
        fp_command: FpCommands,
    },
    // Convert an Arduino .ino sketch via run_ino_convert_cached.
    #[command(name = "ino")]
    Ino {
        #[arg(long)]
        input: String,
        #[arg(long)]
        output: String,
        #[arg(long = "clang-arg")]
        clang_args: Vec<String>,
        #[arg(long)]
        no_arduino_include: bool,
    },
    // GitHub Actions cache save/restore/status.
    #[command(name = "gha-cache")]
    GhaCache {
        #[command(subcommand)]
        action: GhaCacheCommands,
    },
    // Segmented/multipart download with optional unpack + sha256 check.
    Download {
        #[arg(long)]
        url: Option<String>,
        // Mutually exclusive with --url (enforced in resolve_download_source).
        #[arg(long = "part-url")]
        part_urls: Vec<String>,
        archive_path: Option<String>,
        #[arg(long = "unarchive")]
        unarchive_path: Option<String>,
        #[arg(long = "sha256")]
        expected_sha256: Option<String>,
        #[arg(long)]
        max_connections: Option<usize>,
        #[arg(long)]
        min_segment_size: Option<u64>,
        #[arg(long)]
        no_wait: bool,
        #[arg(long)]
        dry_run: bool,
        #[arg(long)]
        force: bool,
    },
    // Save/restore the cargo registry to a local archive cache.
    #[command(name = "cargo-registry")]
    CargoRegistry {
        #[command(subcommand)]
        action: CargoRegistryCommands,
    },
    // Pre-populate target/<profile>/deps from the artifact cache.
    Warm {
        #[arg(long, default_value = "target")]
        target_dir: String,
        #[arg(long, default_value = "debug")]
        profile: String,
        // NOTE(review): accepted but currently ignored by `main` (matched
        // with `..`) — confirm whether this is intentional.
        #[arg(long)]
        endpoint: Option<String>,
    },
}
// `zccache cargo-registry …` actions. `//` comments only — clap-derive
// turns `///` into help text.
#[derive(Debug, Subcommand)]
enum CargoRegistryCommands {
    // Archive registry/index, registry/cache, and git/db under the key.
    Save {
        #[arg(long)]
        key: String,
        #[arg(long)]
        cargo_home: Option<String>,
    },
    // Unpack a previously saved archive back into cargo home.
    Restore {
        #[arg(long)]
        key: String,
        #[arg(long)]
        cargo_home: Option<String>,
    },
    // Print a 16-hex-char key derived from hashing the lockfile.
    Hash {
        #[arg(long, default_value = "Cargo.lock")]
        lockfile: String,
    },
    // Remove all locally cached registry archives.
    Clean,
}
// `zccache fp …` actions. `//` comments only — clap-derive turns `///`
// into help text.
#[derive(Debug, Subcommand)]
enum FpCommands {
    // Ask the daemon whether inputs changed; exit 0 = run, 1 = skip, 2 = error.
    Check {
        #[arg(long, default_value = ".")]
        root: String,
        // --ext and --include are mutually exclusive filter styles.
        #[arg(long, conflicts_with = "include")]
        ext: Vec<String>,
        #[arg(long, conflicts_with = "ext")]
        include: Vec<String>,
        #[arg(long)]
        exclude: Vec<String>,
    },
    // Record that the guarded step succeeded.
    #[command(name = "mark-success")]
    MarkSuccess,
    // Record that the guarded step failed.
    #[command(name = "mark-failure")]
    MarkFailure,
    // Drop the fingerprint so the next check always runs.
    Invalidate,
}
// `zccache gha-cache …` actions (GitHub Actions cache service).
// `//` comments only — clap-derive turns `///` into help text.
#[derive(Debug, Subcommand)]
enum GhaCacheCommands {
    // Report whether the GHA cache env vars are present.
    Status,
    // Tar+gzip `path` and upload under `key`.
    Save {
        #[arg(long)]
        key: String,
        #[arg(long)]
        path: String,
    },
    // Download `key` and unpack it under `path`.
    Restore {
        #[arg(long)]
        key: String,
        #[arg(long)]
        path: String,
    },
}
/// First-argument tokens that `main` must hand to clap instead of
/// treating as an implicit compiler invocation (`run_wrap`).
///
/// Must contain every subcommand name AND every visible alias declared
/// on `Commands`. BUG FIX: `"kill"` (the visible alias of `stop`) was
/// missing, so `zccache kill` was forwarded to `run_wrap` as if it were
/// a compiler named `kill` instead of stopping the daemon.
const KNOWN_SUBCOMMANDS: &[&str] = &[
    "start",
    "stop",
    "kill",
    "status",
    "clear",
    "wrap",
    "inspect",
    "session-start",
    "session-end",
    "session-stats",
    "crashes",
    "fp",
    "ino",
    "download",
    "cargo-registry",
    "gha-cache",
    "warm",
    "help",
    "--help",
    "-h",
    "--version",
    "-V",
];
/// Resolve `path` to an absolute `NormalizedPath`.
///
/// Absolute inputs pass through unchanged; relative inputs are joined
/// onto the current working directory (or onto the empty path if the
/// cwd cannot be determined).
fn absolute_path(path: &str) -> NormalizedPath {
    let p = Path::new(path);
    if p.is_absolute() {
        return p.into();
    }
    let base = std::env::current_dir().unwrap_or_default();
    base.join(p).into()
}
/// Convert a raw child-process status to an `ExitCode` without losing
/// failure: exit codes are truncated to their low byte, and a non-zero
/// status whose low byte is zero (e.g. 256) is reported as 1.
fn exit_code_from_i32(code: i32) -> ExitCode {
    let low_byte = code as u8; // keeps only the low 8 bits, same as `& 0xFF`
    if low_byte == 0 && code != 0 {
        ExitCode::from(1)
    } else {
        ExitCode::from(low_byte)
    }
}
/// CLI entry point.
///
/// Dispatch order:
/// 1. If the first argument is not a known subcommand/flag, the whole
///    argument list is treated as a compiler invocation and forwarded
///    to `run_wrap` (so `zccache cc foo.c` works without `wrap`).
/// 2. Legacy `--clear` / `--show-stats` flags map to the clear/status
///    commands.
/// 3. Otherwise clap dispatch; no subcommand prints help and fails.
fn main() -> ExitCode {
    let args: Vec<String> = std::env::args().collect();
    // Implicit-wrap fast path: bypass clap entirely so arbitrary
    // compiler flags are never parsed as zccache's own.
    if args.len() > 1
        && !KNOWN_SUBCOMMANDS.contains(&args[1].as_str())
        && !args[1].starts_with("--")
    {
        return run_wrap(&args[1..]);
    }
    let cli = Cli::parse();
    init_tracing();
    if cli.clear {
        let endpoint = resolve_endpoint(None);
        return run_async(cmd_clear(&endpoint));
    }
    if cli.show_stats {
        let endpoint = resolve_endpoint(None);
        return run_async(cmd_status(&endpoint));
    }
    let command = match cli.command {
        Some(cmd) => cmd,
        None => {
            // No subcommand and no legacy flag: show help, exit non-zero.
            use clap::CommandFactory;
            Cli::command().print_help().ok();
            return ExitCode::FAILURE;
        }
    };
    match command {
        Commands::Start => {
            let endpoint = resolve_endpoint(None);
            run_async(cmd_start(&endpoint))
        }
        Commands::Stop => {
            let endpoint = resolve_endpoint(None);
            run_async(cmd_stop(&endpoint))
        }
        Commands::Status => {
            let endpoint = resolve_endpoint(None);
            run_async(cmd_status(&endpoint))
        }
        Commands::Clear => {
            let endpoint = resolve_endpoint(None);
            run_async(cmd_clear(&endpoint))
        }
        // Ino conversion runs fully client-side (no daemon round-trip).
        Commands::Ino {
            input,
            output,
            clang_args,
            no_arduino_include,
        } => match run_ino_convert_cached(
            Path::new(&input),
            Path::new(&output),
            &InoConvertOptions {
                clang_args,
                inject_arduino_include: !no_arduino_include,
            },
        ) {
            Ok(_) => ExitCode::SUCCESS,
            Err(err) => {
                eprintln!("zccache: {err}");
                ExitCode::FAILURE
            }
        },
        Commands::GhaCache { action } => match action {
            GhaCacheCommands::Status => cmd_gha_status(),
            GhaCacheCommands::Save { key, path } => run_async(cmd_gha_save(&key, &path)),
            GhaCacheCommands::Restore { key, path } => run_async(cmd_gha_restore(&key, &path)),
        },
        Commands::Download {
            url,
            part_urls,
            archive_path,
            unarchive_path,
            expected_sha256,
            max_connections,
            min_segment_size,
            no_wait,
            dry_run,
            force,
        } => cmd_download(DownloadParams {
            // --url and --part-url are mutually exclusive; reject bad
            // combinations before building the params struct.
            source: match resolve_download_source(url, part_urls) {
                Ok(source) => source,
                Err(err) => {
                    eprintln!("zccache download: {err}");
                    return ExitCode::FAILURE;
                }
            },
            archive_path: archive_path.map(Into::into),
            unarchive_path: unarchive_path.map(Into::into),
            expected_sha256,
            archive_format: ArchiveFormat::Auto,
            max_connections,
            min_segment_size,
            wait_mode: if no_wait {
                WaitMode::NoWait
            } else {
                WaitMode::Block
            },
            dry_run,
            force,
        }),
        Commands::SessionStart {
            cwd,
            log,
            endpoint,
            stats,
            journal,
        } => {
            let endpoint = resolve_endpoint(endpoint.as_deref());
            // Default the session root to the current directory.
            let cwd = cwd
                .map(NormalizedPath::from)
                .unwrap_or_else(|| std::env::current_dir().unwrap_or_default().into());
            let log = log.map(|p| absolute_path(&p));
            // Journal paths must be .jsonl; exit(1) directly since this
            // is argument validation, not a runtime failure.
            let journal = journal.map(|p| {
                if !p.ends_with(".jsonl") {
                    eprintln!("error: --journal path must end in .jsonl");
                    std::process::exit(1);
                }
                absolute_path(&p)
            });
            run_async(cmd_session_start(
                &endpoint,
                cwd.as_path(),
                log.as_deref(),
                stats,
                journal,
            ))
        }
        Commands::SessionEnd {
            session_id,
            endpoint,
        } => {
            let endpoint = resolve_endpoint(endpoint.as_deref());
            run_async(cmd_session_end(&endpoint, session_id))
        }
        Commands::SessionStatsCmd {
            session_id,
            endpoint,
        } => {
            let endpoint = resolve_endpoint(endpoint.as_deref());
            run_async(cmd_session_stats(&endpoint, session_id))
        }
        Commands::Wrap { args } => run_wrap(&args),
        Commands::Inspect { key } => {
            eprintln!("zccache inspect {key}: not yet implemented");
            ExitCode::FAILURE
        }
        Commands::Crashes { clear } => cmd_crashes(clear),
        Commands::Fp {
            cache_file,
            cache_type,
            endpoint,
            fp_command,
        } => {
            let endpoint = resolve_endpoint(endpoint.as_deref());
            let cache_file = absolute_path(&cache_file);
            match fp_command {
                FpCommands::Check {
                    root,
                    ext,
                    include,
                    exclude,
                } => {
                    let root = absolute_path(&root);
                    run_async(cmd_fp_check(
                        &endpoint,
                        cache_file.as_path(),
                        &cache_type,
                        root.as_path(),
                        &ext,
                        &include,
                        &exclude,
                    ))
                }
                FpCommands::MarkSuccess => {
                    run_async(cmd_fp_mark(&endpoint, cache_file.as_path(), true))
                }
                FpCommands::MarkFailure => {
                    run_async(cmd_fp_mark(&endpoint, cache_file.as_path(), false))
                }
                FpCommands::Invalidate => {
                    run_async(cmd_fp_invalidate(&endpoint, cache_file.as_path()))
                }
            }
        }
        Commands::CargoRegistry { action } => match action {
            CargoRegistryCommands::Save { key, cargo_home } => {
                cmd_cargo_registry_save(&key, cargo_home.as_deref())
            }
            CargoRegistryCommands::Restore { key, cargo_home } => {
                cmd_cargo_registry_restore(&key, cargo_home.as_deref())
            }
            CargoRegistryCommands::Hash { lockfile } => cmd_cargo_registry_hash(&lockfile),
            CargoRegistryCommands::Clean => cmd_cargo_registry_clean(),
        },
        // NOTE: Warm's --endpoint is currently discarded here (`..`).
        Commands::Warm {
            target_dir,
            profile,
            ..
        } => {
            let target_dir = absolute_path(&target_dir);
            cmd_warm(&target_dir, &profile)
        }
    }
}
/// Run a download via `client_download` and print its outcome as
/// `key=value` lines on stdout. Errors go to stderr with FAILURE.
fn cmd_download(params: DownloadParams) -> ExitCode {
    let result = match client_download(None, params) {
        Ok(r) => r,
        Err(err) => {
            eprintln!("zccache download: {err}");
            return ExitCode::FAILURE;
        }
    };
    println!("status={:?}", result.status);
    println!("archive_path={}", result.cache_path.display());
    println!("sha256={}", result.sha256);
    if let Some(unarchive_path) = &result.expanded_path {
        println!("unarchive_path={}", unarchive_path.display());
    }
    if let Some(bytes) = result.bytes {
        println!("bytes={bytes}");
    }
    ExitCode::SUCCESS
}
/// Validate the --url / --part-url combination: exactly one of a single
/// URL or a non-empty list of part URLs must be provided.
fn resolve_download_source(
    url: Option<String>,
    part_urls: Vec<String>,
) -> Result<DownloadSource, String> {
    let has_parts = !part_urls.is_empty();
    if let Some(url) = url {
        if has_parts {
            return Err("use either --url or --part-url, not both".to_string());
        }
        return Ok(DownloadSource::Url(url));
    }
    if has_parts {
        Ok(DownloadSource::MultipartUrls(part_urls))
    } else {
        Err("provide either --url or at least one --part-url".to_string())
    }
}
/// Ensure the daemon is running at `endpoint`, reporting the outcome on
/// stderr.
async fn cmd_start(endpoint: &str) -> ExitCode {
    if let Err(e) = ensure_daemon(endpoint).await {
        eprintln!("failed to start daemon: {e}");
        return ExitCode::FAILURE;
    }
    eprintln!("daemon running at {endpoint}");
    ExitCode::SUCCESS
}
/// Stop the daemon via an IPC `Shutdown` request, falling back to
/// force-killing the process recorded in the lock file when the IPC
/// connection cannot be established.
async fn cmd_stop(endpoint: &str) -> ExitCode {
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(_) => {
            // IPC is unreachable. If the lock file names no daemon,
            // there is nothing to stop — that's a success.
            let Some(pid) = zccache_ipc::check_running_daemon() else {
                eprintln!("daemon not running at {endpoint}");
                return ExitCode::SUCCESS;
            };
            match zccache_ipc::force_kill_process(pid) {
                Ok(()) => {
                    // Poll up to ~5s (50 × 100ms) for the process to die,
                    // then clean up the stale lock file.
                    for _ in 0..50 {
                        if !zccache_ipc::is_process_alive(pid) {
                            zccache_ipc::remove_lock_file();
                            eprintln!(
                                "daemon process {pid} terminated after IPC connection failed"
                            );
                            return ExitCode::SUCCESS;
                        }
                        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
                    }
                    eprintln!(
                        "zccache: sent termination to daemon process {pid}, but it did not exit"
                    );
                    return ExitCode::FAILURE;
                }
                Err(e) => {
                    eprintln!(
                        "zccache: cannot connect to daemon at {endpoint}, and failed to kill \
                        locked process {pid}: {e}"
                    );
                    return ExitCode::FAILURE;
                }
            }
        }
    };
    // Normal path: polite shutdown over IPC.
    if let Err(e) = conn.send(&zccache_protocol::Request::Shutdown).await {
        eprintln!("zccache: failed to send to daemon: {e}");
        return ExitCode::FAILURE;
    }
    let recv_result = match conn.recv().await {
        Ok(r) => r,
        Err(e) => {
            eprintln!("zccache: broken connection to daemon: {e}");
            return ExitCode::FAILURE;
        }
    };
    match recv_result {
        Some(zccache_protocol::Response::ShuttingDown) => {
            eprintln!("daemon stopped");
            ExitCode::SUCCESS
        }
        None => {
            eprintln!("zccache: lost connection to daemon (no response received)");
            ExitCode::FAILURE
        }
        Some(other) => {
            eprintln!("zccache: unexpected response from daemon: {other:?}");
            ExitCode::FAILURE
        }
    }
}
/// Request `Status` from the daemon and render a human-readable report
/// (version, uptime, hit rates, artifact/dep-graph/session counters).
/// Fails when the daemon is unreachable or answers unexpectedly.
async fn cmd_status(endpoint: &str) -> ExitCode {
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(e) => {
            eprintln!("daemon not running at {endpoint}: {e}");
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = conn.send(&zccache_protocol::Request::Status).await {
        eprintln!("zccache: failed to send to daemon: {e}");
        return ExitCode::FAILURE;
    }
    let recv_result = match conn.recv().await {
        Ok(r) => r,
        Err(e) => {
            eprintln!("zccache: broken connection to daemon: {e}");
            return ExitCode::FAILURE;
        }
    };
    match recv_result {
        Some(zccache_protocol::Response::Status(s)) => {
            // Hit rate over attempted (cacheable) compilations only.
            let total = s.cache_hits + s.cache_misses;
            let hit_rate = if total > 0 {
                format!("{:.1}%", s.cache_hits as f64 / total as f64 * 100.0)
            } else {
                "n/a".to_string()
            };
            println!(
                "zccache daemon v{} (protocol v{}) ({}) — uptime {}",
                if s.version.is_empty() {
                    "unknown"
                } else {
                    &s.version
                },
                zccache_protocol::PROTOCOL_VERSION,
                endpoint,
                format_uptime(s.uptime_secs)
            );
            if !s.cache_dir.as_os_str().is_empty() {
                println!("cache dir: {}", s.cache_dir.display());
            }
            println!();
            println!(
                " Compilations: {} total ({} cached, {} cold, {} non-cacheable)",
                s.total_compilations, s.cache_hits, s.cache_misses, s.non_cacheable
            );
            println!(" Hit rate: {hit_rate}");
            if s.time_saved_ms > 0 {
                println!(" Time saved: ~{}", format_duration_ms(s.time_saved_ms));
            }
            if s.compile_errors > 0 {
                println!(" Errors: {}", s.compile_errors);
            }
            println!();
            println!(
                " Artifacts: {} ({})",
                s.artifact_count,
                format_bytes(s.cache_size_bytes)
            );
            {
                // Dep-graph persistence info: size on disk when saved,
                // otherwise just the in-memory version.
                let disk_info = if s.dep_graph_disk_size > 0 {
                    format!(
                        "v{}, {} on disk",
                        s.dep_graph_version,
                        format_bytes(s.dep_graph_disk_size)
                    )
                } else {
                    format!("v{}, not persisted", s.dep_graph_version)
                };
                println!(
                    " Dep graph: {} contexts, {} files ({})",
                    s.dep_graph_contexts, s.dep_graph_files, disk_info
                );
            }
            println!(" Metadata: {} entries", s.metadata_entries);
            println!();
            // The links section is only shown when links were attempted;
            // NOTE(review): this prints a second blank line before it —
            // confirm the double spacing is intentional.
            if s.total_links > 0 {
                println!();
                let link_total = s.link_hits + s.link_misses;
                let link_hit_rate = if link_total > 0 {
                    format!("{:.1}%", s.link_hits as f64 / link_total as f64 * 100.0)
                } else {
                    "n/a".to_string()
                };
                println!(
                    " Links: {} total ({} cached, {} cold, {} non-cacheable)",
                    s.total_links, s.link_hits, s.link_misses, s.link_non_cacheable
                );
                println!(" Link hit rate: {link_hit_rate}");
            }
            println!();
            println!(
                " Sessions: {} active / {} total",
                s.sessions_active, s.sessions_total
            );
            ExitCode::SUCCESS
        }
        None => {
            eprintln!("zccache: lost connection to daemon (no response received)");
            ExitCode::FAILURE
        }
        Some(other) => {
            eprintln!("zccache: unexpected response from daemon: {other:?}");
            ExitCode::FAILURE
        }
    }
}
/// Ask the daemon to clear its caches and print what was removed.
/// A daemon that isn't running counts as success (nothing to clear).
async fn cmd_clear(endpoint: &str) -> ExitCode {
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(_) => {
            // Not an error: no daemon means no cache state to drop.
            eprintln!("daemon not running at {endpoint} — nothing to clear");
            return ExitCode::SUCCESS;
        }
    };
    if let Err(e) = conn.send(&zccache_protocol::Request::Clear).await {
        eprintln!("zccache: failed to send to daemon: {e}");
        return ExitCode::FAILURE;
    }
    let recv_result = match conn.recv().await {
        Ok(r) => r,
        Err(e) => {
            eprintln!("zccache: broken connection to daemon: {e}");
            return ExitCode::FAILURE;
        }
    };
    match recv_result {
        Some(zccache_protocol::Response::Cleared {
            artifacts_removed,
            metadata_cleared,
            dep_graph_contexts_cleared,
            on_disk_bytes_freed,
        }) => {
            println!("Cache cleared:");
            println!(" Artifacts removed: {artifacts_removed}");
            println!(" Metadata cleared: {metadata_cleared}");
            println!(" Dep graph contexts: {dep_graph_contexts_cleared}");
            if on_disk_bytes_freed > 0 {
                println!(
                    " Disk freed: {}",
                    format_bytes(on_disk_bytes_freed)
                );
            }
            ExitCode::SUCCESS
        }
        None => {
            eprintln!("zccache: lost connection to daemon (no response received)");
            ExitCode::FAILURE
        }
        Some(other) => {
            eprintln!("zccache: unexpected response from daemon: {other:?}");
            ExitCode::FAILURE
        }
    }
}
/// Pre-populate `target_dir/<profile>/deps` with cached build artifacts.
///
/// When a `Cargo.lock` can be located — the current directory first,
/// then the parent of `target_dir` — restoration is filtered to crates
/// listed in it; otherwise all cached artifacts are restored.
fn cmd_warm(target_dir: &Path, profile: &str) -> ExitCode {
    let cache_dir = zccache_core::config::default_cache_dir();
    let index_path = cache_dir.join("index.redb");
    let artifact_dir = cache_dir.join("artifacts");
    // Lockfile discovery: cwd wins over the target directory's parent.
    let cwd_lock = Path::new("Cargo.lock");
    let lockfile = if cwd_lock.exists() {
        Some(cwd_lock.to_path_buf())
    } else {
        target_dir
            .parent()
            .map(|p| p.join("Cargo.lock"))
            .filter(|p| p.exists())
    };
    match warm_target(
        index_path.as_ref(),
        artifact_dir.as_ref(),
        target_dir,
        profile,
        lockfile.as_deref(),
    ) {
        Ok((restored, skipped, errors)) => {
            println!("zccache warm: restored {restored} files, skipped {skipped}, errors {errors}");
            if errors > 0 {
                ExitCode::FAILURE
            } else {
                ExitCode::SUCCESS
            }
        }
        Err(e) => {
            eprintln!("zccache warm: {e}");
            ExitCode::FAILURE
        }
    }
}
/// Extract the set of crate names from a Cargo.lock, with hyphens
/// normalized to underscores (matching rustc's artifact naming).
///
/// Parses the TOML line-wise: every `name = "<crate>"` line at column 0
/// contributes one entry. Returns an error if the file cannot be read.
fn parse_lockfile_crates(lockfile: &Path) -> Result<std::collections::HashSet<String>, String> {
    let content = std::fs::read_to_string(lockfile)
        .map_err(|e| format!("failed to read {}: {e}", lockfile.display()))?;
    let crates = content
        .lines()
        .filter_map(|line| line.strip_prefix("name = \""))
        .filter_map(|rest| rest.strip_suffix('"'))
        .map(|name| name.replace('-', "_"))
        .collect();
    Ok(crates)
}
/// Decide whether a cached artifact filename belongs to a crate in the
/// lockfile.
///
/// Filenames look like `lib<crate>-<hash>.<ext>`; the optional `lib`
/// prefix is stripped and everything before the last `-` is taken as
/// the crate name. Filenames with no `-` cannot be attributed to a
/// crate, so they are conservatively kept.
fn artifact_matches_lockfile(
    filename: &str,
    allowed_crates: &std::collections::HashSet<String>,
) -> bool {
    let stem = filename.strip_prefix("lib").unwrap_or(filename);
    match stem.rfind('-') {
        Some(dash) => allowed_crates.contains(&stem[..dash]),
        None => true,
    }
}
/// Restore cached artifacts from `artifact_dir` (indexed by
/// `index_path`) into `target_dir/<profile>/deps`.
///
/// When `lockfile` is given, only artifacts whose filename maps to a
/// crate in that lockfile are restored. Returns
/// `(restored, skipped, errors)` counts; per-file failures are logged
/// and counted rather than aborting the whole run.
fn warm_target(
    index_path: &Path,
    artifact_dir: &Path,
    target_dir: &Path,
    profile: &str,
    lockfile: Option<&Path>,
) -> Result<(u64, u64, u64), String> {
    if !index_path.exists() {
        return Err(format!("no artifact index at {}", index_path.display()));
    }
    let store = zccache_artifact::ArtifactStore::open(index_path)
        .map_err(|e| format!("failed to open artifact index: {e}"))?;
    let all_entries = store
        .load_all()
        .map_err(|e| format!("failed to read artifact index: {e}"))?;
    if all_entries.is_empty() {
        return Err("no cached artifacts found in index".to_string());
    }
    // Crate filter: None means "restore everything".
    let allowed_crates = match lockfile {
        Some(lf) => Some(parse_lockfile_crates(lf)?),
        None => None,
    };
    let artifacts = all_entries;
    let deps_dir = target_dir.join(profile).join("deps");
    std::fs::create_dir_all(&deps_dir)
        .map_err(|e| format!("failed to create {}: {e}", deps_dir.display()))?;
    // Freshen timestamps so cargo treats the restored outputs as
    // newer than their sources.
    let now = std::time::SystemTime::now();
    let file_times = std::fs::FileTimes::new()
        .set_accessed(now)
        .set_modified(now);
    let mut restored = 0u64;
    let mut skipped = 0u64;
    let mut errors = 0u64;
    for (key_hex, idx) in &artifacts {
        for (i, name) in idx.output_names.iter().enumerate() {
            // Artifacts are stored as "<key>_<output-index>" files.
            let src = artifact_dir.join(format!("{key_hex}_{i}"));
            let dst = deps_dir.join(name.as_str());
            if let Some(ref allowed) = allowed_crates {
                if !artifact_matches_lockfile(name, allowed) {
                    skipped += 1;
                    continue;
                }
            }
            if !src.exists() {
                skipped += 1;
                continue;
            }
            // Replace any existing file; a stale one must not survive.
            if dst.exists() {
                if let Err(e) = std::fs::remove_file(&dst) {
                    eprintln!(
                        "zccache warm: failed to remove existing {}: {e}",
                        dst.display()
                    );
                    errors += 1;
                    continue;
                }
            }
            // Prefer a hard link (cheap, shares storage with the cache);
            // fall back to a full copy (e.g. across filesystems).
            let linked = std::fs::hard_link(&src, &dst).is_ok();
            if !linked {
                if let Err(e) = std::fs::copy(&src, &dst) {
                    eprintln!(
                        "zccache warm: failed to copy {} -> {}: {e}",
                        src.display(),
                        dst.display()
                    );
                    errors += 1;
                    continue;
                }
            }
            // Best-effort mtime refresh; failure is not counted as an error.
            if let Ok(f) = std::fs::File::open(&dst) {
                let _ = f.set_times(file_times);
            }
            restored += 1;
        }
    }
    Ok((restored, skipped, errors))
}
/// Start a new daemon session rooted at `cwd` and print a single JSON
/// line (`session_id`, `started_at`, optional `journal_path`) on
/// stdout for machine consumption.
///
/// Starts the daemon on demand; returns FAILURE on any IPC error or a
/// daemon-side `Error` response.
async fn cmd_session_start(
    endpoint: &str,
    cwd: &Path,
    log: Option<&Path>,
    track_stats: bool,
    journal: Option<NormalizedPath>,
) -> ExitCode {
    if let Err(e) = ensure_daemon(endpoint).await {
        eprintln!("cannot start daemon at {endpoint}: {e}");
        return ExitCode::FAILURE;
    }
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(e) => {
            eprintln!("cannot connect to daemon at {endpoint}: {e}");
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = conn
        .send(&zccache_protocol::Request::SessionStart {
            client_pid: std::process::id(),
            working_dir: cwd.into(),
            log_file: log.map(NormalizedPath::from),
            track_stats,
            journal_path: journal,
        })
        .await
    {
        eprintln!("zccache: failed to send to daemon: {e}");
        return ExitCode::FAILURE;
    }
    let recv_result = match conn.recv().await {
        Ok(r) => r,
        Err(e) => {
            eprintln!("zccache: broken connection to daemon: {e}");
            return ExitCode::FAILURE;
        }
    };
    match recv_result {
        Some(zccache_protocol::Response::SessionStarted {
            session_id,
            journal_path,
        }) => {
            // Seconds since the Unix epoch; 0 if the clock is before it.
            let started_at = std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs();
            if let Some(ref jp) = journal_path {
                // BUG FIX: escape double quotes as well as backslashes.
                // Previously only backslashes were escaped, so a path
                // containing `"` produced invalid JSON. Backslashes must
                // be escaped first so added escapes aren't re-escaped.
                let jp_escaped = jp
                    .display()
                    .to_string()
                    .replace('\\', "\\\\")
                    .replace('"', "\\\"");
                println!(
                    r#"{{"session_id":"{}","started_at":{},"journal_path":"{}"}}"#,
                    session_id, started_at, jp_escaped
                );
            } else {
                println!(
                    r#"{{"session_id":"{}","started_at":{}}}"#,
                    session_id, started_at
                );
            }
            ExitCode::SUCCESS
        }
        Some(zccache_protocol::Response::Error { message }) => {
            eprintln!("session-start failed: {message}");
            ExitCode::FAILURE
        }
        None => {
            eprintln!("zccache: lost connection to daemon (no response received)");
            ExitCode::FAILURE
        }
        Some(other) => {
            eprintln!("zccache: unexpected response from daemon: {other:?}");
            ExitCode::FAILURE
        }
    }
}
/// End the given session and, when stats tracking was enabled, print a
/// summary (hit counts, hit rate, time saved) on stderr.
async fn cmd_session_end(endpoint: &str, session_id: String) -> ExitCode {
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(e) => {
            eprintln!("cannot connect to daemon at {endpoint}: {e}");
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = conn
        .send(&zccache_protocol::Request::SessionEnd {
            // Cloned so session_id stays available for the summary below.
            session_id: session_id.clone(),
        })
        .await
    {
        eprintln!("zccache: failed to send to daemon: {e}");
        return ExitCode::FAILURE;
    }
    let recv_result = match conn.recv().await {
        Ok(r) => r,
        Err(e) => {
            eprintln!("zccache: broken connection to daemon: {e}");
            return ExitCode::FAILURE;
        }
    };
    match recv_result {
        Some(zccache_protocol::Response::SessionEnded { stats }) => {
            // stats is None when the session was started without --stats;
            // ending it is still a success.
            if let Some(s) = stats {
                let total = s.hits + s.misses;
                let hit_rate = if total > 0 {
                    format!("{:.1}%", s.hits as f64 / total as f64 * 100.0)
                } else {
                    "n/a".to_string()
                };
                eprintln!(
                    "Session {session_id} complete ({})",
                    format_duration_ms(s.duration_ms)
                );
                eprintln!(
                    " {} compilations: {} hits, {} misses, {} non-cacheable",
                    s.compilations, s.hits, s.misses, s.non_cacheable
                );
                eprintln!(" Hit rate: {hit_rate}");
                if s.time_saved_ms > 0 {
                    eprintln!(" Time saved: ~{}", format_duration_ms(s.time_saved_ms));
                }
            }
            ExitCode::SUCCESS
        }
        Some(zccache_protocol::Response::Error { message }) => {
            eprintln!("session-end failed: {message}");
            ExitCode::FAILURE
        }
        None => {
            eprintln!("zccache: lost connection to daemon (no response received)");
            ExitCode::FAILURE
        }
        Some(other) => {
            eprintln!("zccache: unexpected response from daemon: {other:?}");
            ExitCode::FAILURE
        }
    }
}
/// Print current stats for an active session without ending it.
/// Sessions started without --stats report that tracking is disabled.
async fn cmd_session_stats(endpoint: &str, session_id: String) -> ExitCode {
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(e) => {
            eprintln!("cannot connect to daemon at {endpoint}: {e}");
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = conn
        .send(&zccache_protocol::Request::SessionStats {
            // Cloned so session_id stays available for the report below.
            session_id: session_id.clone(),
        })
        .await
    {
        eprintln!("zccache: failed to send to daemon: {e}");
        return ExitCode::FAILURE;
    }
    let recv_result = match conn.recv().await {
        Ok(r) => r,
        Err(e) => {
            eprintln!("zccache: broken connection to daemon: {e}");
            return ExitCode::FAILURE;
        }
    };
    match recv_result {
        Some(zccache_protocol::Response::SessionStatsResult { stats }) => {
            if let Some(s) = stats {
                let total = s.hits + s.misses;
                let hit_rate = if total > 0 {
                    format!("{:.1}%", s.hits as f64 / total as f64 * 100.0)
                } else {
                    "n/a".to_string()
                };
                eprintln!(
                    "Session {session_id} (active, {})",
                    format_duration_ms(s.duration_ms)
                );
                eprintln!(
                    " {} compilations: {} hits, {} misses, {} non-cacheable",
                    s.compilations, s.hits, s.misses, s.non_cacheable
                );
                eprintln!(" Hit rate: {hit_rate}");
                if s.time_saved_ms > 0 {
                    eprintln!(" Time saved: ~{}", format_duration_ms(s.time_saved_ms));
                }
            } else {
                eprintln!("Session {session_id}: stats tracking not enabled");
            }
            ExitCode::SUCCESS
        }
        Some(zccache_protocol::Response::Error { message }) => {
            eprintln!("session-stats failed: {message}");
            ExitCode::FAILURE
        }
        None => {
            eprintln!("zccache: lost connection to daemon (no response received)");
            ExitCode::FAILURE
        }
        Some(other) => {
            eprintln!("zccache: unexpected response from daemon: {other:?}");
            ExitCode::FAILURE
        }
    }
}
/// List crash dumps (`*.txt`, first five lines each) from the crash-dump
/// directory, or with `clear` delete every file in it.
/// Always returns SUCCESS; a missing/unreadable directory means "none".
fn cmd_crashes(clear: bool) -> ExitCode {
    let crash_dir = zccache_core::config::crash_dump_dir();
    if clear {
        // Best-effort delete of everything in the directory.
        let mut deleted = 0u64;
        if let Ok(entries) = std::fs::read_dir(&crash_dir) {
            for entry in entries.flatten() {
                if std::fs::remove_file(entry.path()).is_ok() {
                    deleted += 1;
                }
            }
        }
        println!("Deleted {deleted} crash dump(s).");
        return ExitCode::SUCCESS;
    }
    let Ok(entries) = std::fs::read_dir(&crash_dir) else {
        println!("No crash dumps found.");
        return ExitCode::SUCCESS;
    };
    let mut dumps: Vec<_> = entries
        .flatten()
        .filter(|e| e.path().extension().is_some_and(|ext| ext == "txt"))
        .collect();
    if dumps.is_empty() {
        println!("No crash dumps found.");
        return ExitCode::SUCCESS;
    }
    // Filenames sort chronologically when timestamp-prefixed — TODO confirm.
    dumps.sort_by_key(|e| e.file_name());
    println!("Crash dumps ({}):", dumps.len());
    println!();
    for entry in &dumps {
        let path = entry.path();
        println!(" {}", path.display());
        if let Ok(content) = std::fs::read_to_string(&path) {
            for (i, line) in content.lines().enumerate() {
                if i >= 5 {
                    println!(" ...");
                    break;
                }
                println!(" {line}");
            }
            println!();
        }
    }
    ExitCode::SUCCESS
}
/// Determine the cargo home directory, in priority order: the explicit
/// CLI value, a non-empty `$CARGO_HOME`, then `<home>/.cargo` where
/// home is `$HOME` or `%USERPROFILE%`.
fn resolve_cargo_home(explicit: Option<&str>) -> Result<NormalizedPath, String> {
    if let Some(path) = explicit {
        return Ok(NormalizedPath::from(path));
    }
    match std::env::var("CARGO_HOME") {
        Ok(ch) if !ch.is_empty() => Ok(NormalizedPath::from(ch)),
        _ => {
            let home = std::env::var("HOME")
                .or_else(|_| std::env::var("USERPROFILE"))
                .map_err(|_| {
                    "cannot determine home directory (set HOME or CARGO_HOME)".to_string()
                })?;
            Ok(NormalizedPath::from(home).join(".cargo"))
        }
    }
}
/// Location of locally cached registry archives:
/// `<home>/.zccache/cargo-registry`.
fn cargo_registry_cache_dir() -> Result<NormalizedPath, String> {
    std::env::var("HOME")
        .or_else(|_| std::env::var("USERPROFILE"))
        .map(|home| {
            NormalizedPath::from(home)
                .join(".zccache")
                .join("cargo-registry")
        })
        .map_err(|_| "cannot determine home directory (set HOME)".to_string())
}
/// Archive `registry/index`, `registry/cache`, and `git/db` from cargo
/// home into `<cache>/<key>.tar.gz`.
///
/// Succeeds without writing an archive when none of those directories
/// exist; returns FAILURE on any I/O or archiving error.
fn cmd_cargo_registry_save(key: &str, cargo_home: Option<&str>) -> ExitCode {
    let cargo_home = match resolve_cargo_home(cargo_home) {
        Ok(p) => p,
        Err(e) => {
            eprintln!("zccache cargo-registry save: {e}");
            return ExitCode::FAILURE;
        }
    };
    let cache_dir = match cargo_registry_cache_dir() {
        Ok(d) => d,
        Err(e) => {
            eprintln!("zccache cargo-registry save: {e}");
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = std::fs::create_dir_all(&cache_dir) {
        eprintln!(
            "zccache cargo-registry save: failed to create {}: {e}",
            cache_dir.display()
        );
        return ExitCode::FAILURE;
    }
    let archive_path = cache_dir.join(format!("{key}.tar.gz"));
    // Only the shared, reusable parts of cargo home are archived.
    let subdirs: &[&str] = &["registry/index", "registry/cache", "git/db"];
    let mut paths: Vec<(NormalizedPath, String)> = Vec::new();
    for subdir in subdirs {
        let p = cargo_home.join(subdir);
        if p.exists() {
            paths.push((p, subdir.to_string()));
        }
    }
    if paths.is_empty() {
        eprintln!(
            "no cargo registry directories found in {}",
            cargo_home.display()
        );
        return ExitCode::SUCCESS;
    }
    let file = match std::fs::File::create(&archive_path) {
        Ok(f) => f,
        Err(e) => {
            eprintln!(
                "zccache cargo-registry save: failed to create {}: {e}",
                archive_path.display()
            );
            return ExitCode::FAILURE;
        }
    };
    let gz = flate2::write::GzEncoder::new(file, flate2::Compression::fast());
    let mut tar = tar::Builder::new(gz);
    for (path, name) in &paths {
        if let Err(e) = tar.append_dir_all(name, path) {
            eprintln!("zccache cargo-registry save: failed to add {name}: {e}");
            return ExitCode::FAILURE;
        }
    }
    // BUG FIX: finalize BOTH layers before measuring the file. The
    // original only called `tar.finish()`, so the gzip encoder's
    // buffered data and trailer were flushed on drop *after* the size
    // was read (reporting a short size) and any finalize error was
    // silently swallowed. `into_inner` finishes the tar stream and
    // hands back the encoder; `finish` flushes the gzip trailer.
    let gz = match tar.into_inner() {
        Ok(gz) => gz,
        Err(e) => {
            eprintln!("zccache cargo-registry save: failed to finalize archive: {e}");
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = gz.finish() {
        eprintln!("zccache cargo-registry save: failed to finalize archive: {e}");
        return ExitCode::FAILURE;
    }
    let size = std::fs::metadata(&archive_path)
        .map(|m| m.len())
        .unwrap_or(0);
    println!(
        "saved cargo registry to {} ({})",
        archive_path.display(),
        format_bytes(size)
    );
    ExitCode::SUCCESS
}
/// Unpack the archive saved under `key` back into cargo home.
/// Fails when no archive exists for the key or unpacking fails.
fn cmd_cargo_registry_restore(key: &str, cargo_home: Option<&str>) -> ExitCode {
    let cargo_home = match resolve_cargo_home(cargo_home) {
        Ok(p) => p,
        Err(e) => {
            eprintln!("zccache cargo-registry restore: {e}");
            return ExitCode::FAILURE;
        }
    };
    let cache_dir = match cargo_registry_cache_dir() {
        Ok(d) => d,
        Err(e) => {
            eprintln!("zccache cargo-registry restore: {e}");
            return ExitCode::FAILURE;
        }
    };
    let archive_path = cache_dir.join(format!("{key}.tar.gz"));
    if !archive_path.exists() {
        eprintln!("no cached registry found for key: {key}");
        return ExitCode::FAILURE;
    }
    let file = match std::fs::File::open(&archive_path) {
        Ok(f) => f,
        Err(e) => {
            eprintln!(
                "zccache cargo-registry restore: failed to open {}: {e}",
                archive_path.display()
            );
            return ExitCode::FAILURE;
        }
    };
    // Stream: file -> gzip decode -> tar unpack, rooted at cargo home.
    let mut archive = tar::Archive::new(flate2::read::GzDecoder::new(file));
    if let Err(e) = archive.unpack(&cargo_home) {
        eprintln!("zccache cargo-registry restore: failed to unpack archive: {e}");
        return ExitCode::FAILURE;
    }
    println!("restored cargo registry from {}", archive_path.display());
    ExitCode::SUCCESS
}
/// Hash the lockfile and print the first 16 hex characters of the
/// digest — the key used by save/restore.
fn cmd_cargo_registry_hash(lockfile: &str) -> ExitCode {
    let path = Path::new(lockfile);
    if !path.exists() {
        eprintln!("lockfile not found: {lockfile}");
        return ExitCode::FAILURE;
    }
    match zccache_hash::hash_file(path) {
        Ok(hash) => {
            let hex = hash.to_hex();
            // Short 64-bit prefix keeps keys/filenames compact.
            println!("{}", &hex[..16]);
            ExitCode::SUCCESS
        }
        Err(e) => {
            eprintln!("zccache cargo-registry hash: failed to hash {lockfile}: {e}");
            ExitCode::FAILURE
        }
    }
}
/// Delete the local registry-archive cache directory, reporting how
/// many entries were removed. A missing directory is a no-op success.
fn cmd_cargo_registry_clean() -> ExitCode {
    let cache_dir = match cargo_registry_cache_dir() {
        Ok(d) => d,
        Err(e) => {
            eprintln!("zccache cargo-registry clean: {e}");
            return ExitCode::FAILURE;
        }
    };
    if !cache_dir.exists() {
        println!("no cached archives to clean");
        return ExitCode::SUCCESS;
    }
    // Count entries first so the report survives the removal.
    let count = match std::fs::read_dir(&cache_dir) {
        Ok(entries) => entries.count(),
        Err(e) => {
            eprintln!(
                "zccache cargo-registry clean: failed to read {}: {e}",
                cache_dir.display()
            );
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = std::fs::remove_dir_all(&cache_dir) {
        eprintln!(
            "zccache cargo-registry clean: failed to remove {}: {e}",
            cache_dir.display()
        );
        return ExitCode::FAILURE;
    }
    println!("removed {count} cached registry archive(s)");
    ExitCode::SUCCESS
}
/// Report whether the GitHub Actions cache service is reachable from
/// this environment. Always succeeds — this is informational only.
fn cmd_gha_status() -> ExitCode {
    if !GhaCache::is_available() {
        println!("GHA cache: not available (ACTIONS_CACHE_URL or ACTIONS_RUNTIME_TOKEN not set)");
        return ExitCode::SUCCESS;
    }
    println!("GHA cache: available");
    println!(
        " ACTIONS_CACHE_URL = {}",
        std::env::var("ACTIONS_CACHE_URL").unwrap_or_default()
    );
    ExitCode::SUCCESS
}
/// Tar+gzip `path` and upload it to the GitHub Actions cache under
/// `key`. The upload is versioned by a hash of the path so the same
/// key can hold different paths.
async fn cmd_gha_save(key: &str, path: &str) -> ExitCode {
    let cache = match GhaCache::from_env() {
        Ok(c) => c,
        Err(GhaError::NotAvailable) => {
            eprintln!("zccache gha-cache: not running in GitHub Actions");
            return ExitCode::FAILURE;
        }
        Err(e) => {
            eprintln!("zccache gha-cache: {e}");
            return ExitCode::FAILURE;
        }
    };
    let src = Path::new(path);
    if !src.exists() {
        eprintln!("zccache gha-cache save: path does not exist: {path}");
        return ExitCode::FAILURE;
    }
    // The whole archive is built in memory before upload.
    let data = match tar_gz_encode(src) {
        Ok(d) => d,
        Err(e) => {
            eprintln!("zccache gha-cache save: failed to create archive: {e}");
            return ExitCode::FAILURE;
        }
    };
    // Version discriminates by path; restore must use the same paths.
    let version = GhaCache::version_hash(&[path]);
    match cache.save(key, &version, &data).await {
        Ok(()) => {
            eprintln!(
                "zccache gha-cache save: uploaded {} bytes for key '{key}'",
                data.len()
            );
            ExitCode::SUCCESS
        }
        Err(e) => {
            eprintln!("zccache gha-cache save: {e}");
            ExitCode::FAILURE
        }
    }
}
/// Download the GitHub Actions cache entry for `key` and unpack it
/// under `path`. A cache miss is reported as FAILURE so CI steps can
/// branch on the exit code.
async fn cmd_gha_restore(key: &str, path: &str) -> ExitCode {
    let cache = match GhaCache::from_env() {
        Ok(c) => c,
        Err(GhaError::NotAvailable) => {
            eprintln!("zccache gha-cache: not running in GitHub Actions");
            return ExitCode::FAILURE;
        }
        Err(e) => {
            eprintln!("zccache gha-cache: {e}");
            return ExitCode::FAILURE;
        }
    };
    // Must match the version computed at save time (same path list).
    let version = GhaCache::version_hash(&[path]);
    let data = match cache.restore(key, &version).await {
        Ok(Some(d)) => d,
        Ok(None) => {
            eprintln!("zccache gha-cache restore: cache miss for key '{key}'");
            return ExitCode::FAILURE;
        }
        Err(e) => {
            eprintln!("zccache gha-cache restore: {e}");
            return ExitCode::FAILURE;
        }
    };
    let dest = Path::new(path);
    if let Err(e) = std::fs::create_dir_all(dest) {
        eprintln!("zccache gha-cache restore: failed to create directory: {e}");
        return ExitCode::FAILURE;
    }
    match tar_gz_decode(&data, dest) {
        Ok(()) => {
            eprintln!(
                "zccache gha-cache restore: restored {} bytes for key '{key}' to {path}",
                data.len()
            );
            ExitCode::SUCCESS
        }
        Err(e) => {
            eprintln!("zccache gha-cache restore: failed to extract archive: {e}");
            ExitCode::FAILURE
        }
    }
}
/// Pack the directory tree at `src` into an in-memory gzip'd tar
/// archive. Entries are rooted at `src`'s final path component, or "."
/// when `src` has none.
fn tar_gz_encode(src: &Path) -> Result<Vec<u8>, std::io::Error> {
    use flate2::write::GzEncoder;
    use flate2::Compression;
    let root_name = match src.file_name() {
        Some(n) => n.to_string_lossy().into_owned(),
        None => ".".to_string(),
    };
    let mut builder = tar::Builder::new(GzEncoder::new(Vec::new(), Compression::fast()));
    builder.append_dir_all(&root_name, src)?;
    // into_inner finishes the tar stream; finish flushes the gzip trailer.
    builder.into_inner()?.finish()
}
/// Extract a gzip-compressed tar archive held in `data` into the directory
/// `dest` (per the tar crate docs, `unpack` refuses entries that would escape
/// the destination directory).
fn tar_gz_decode(data: &[u8], dest: &Path) -> Result<(), std::io::Error> {
    use flate2::read::GzDecoder;
    let gunzip = GzDecoder::new(data);
    tar::Archive::new(gunzip).unpack(dest)
}
/// `zccache fp check`: ask the daemon whether the fingerprinted file set under
/// `root` has changed relative to the record in `cache_file`.
///
/// Exit codes: 0 = run (changes detected, or a stated reason), 1 = skip (no
/// changes), 2 = infrastructure error (daemon unreachable / protocol failure).
async fn cmd_fp_check(
    endpoint: &str,
    cache_file: &Path,
    cache_type: &str,
    root: &Path,
    ext: &[String],
    include: &[String],
    exclude: &[String],
) -> ExitCode {
    if let Err(e) = ensure_daemon(endpoint).await {
        eprintln!("zccache fp: failed to start daemon: {e}");
        return ExitCode::from(2);
    }
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(e) => {
            eprintln!("zccache fp: cannot connect to daemon: {e}");
            return ExitCode::from(2);
        }
    };
    let request = zccache_protocol::Request::FingerprintCheck {
        cache_file: cache_file.into(),
        cache_type: cache_type.to_string(),
        root: root.into(),
        extensions: ext.to_vec(),
        include_globs: include.to_vec(),
        exclude: exclude.to_vec(),
    };
    if let Err(e) = conn.send(&request).await {
        eprintln!("zccache fp: send error: {e}");
        return ExitCode::from(2);
    }
    match conn.recv::<zccache_protocol::Response>().await {
        Ok(Some(zccache_protocol::Response::FingerprintCheckResult {
            decision,
            reason,
            changed_files,
        })) => {
            // "skip" → exit 1 so shell callers can gate on the exit code;
            // anything else means the wrapped command should run (exit 0).
            if decision == "skip" {
                eprintln!("zccache fp: skip (no changes)");
                ExitCode::from(1)
            } else {
                let reason_str = reason.as_deref().unwrap_or("unknown");
                if changed_files.is_empty() {
                    eprintln!("zccache fp: run ({reason_str})");
                } else {
                    eprintln!(
                        "zccache fp: run ({reason_str}, {} file(s) changed)",
                        changed_files.len()
                    );
                }
                ExitCode::SUCCESS
            }
        }
        Ok(Some(zccache_protocol::Response::Error { message })) => {
            eprintln!("zccache fp: daemon error: {message}");
            ExitCode::from(2)
        }
        Ok(other) => {
            eprintln!("zccache fp: unexpected response: {other:?}");
            ExitCode::from(2)
        }
        Err(e) => {
            eprintln!("zccache fp: recv error: {e}");
            ExitCode::from(2)
        }
    }
}
/// `zccache fp mark-success` / `mark-failure`: report the outcome of the wrapped
/// command so the daemon can keep or discard the stored fingerprint.
async fn cmd_fp_mark(endpoint: &str, cache_file: &Path, success: bool) -> ExitCode {
    // Exit code 2 marks infrastructure failure, distinct from tool exit codes.
    let infra_failure = ExitCode::from(2);
    if let Err(e) = ensure_daemon(endpoint).await {
        eprintln!("zccache fp: failed to start daemon: {e}");
        return infra_failure;
    }
    let mut daemon = match connect(endpoint).await {
        Ok(c) => c,
        Err(e) => {
            eprintln!("zccache fp: cannot connect to daemon: {e}");
            return infra_failure;
        }
    };
    // Pick request variant and display label together.
    let (request, label) = if success {
        (
            zccache_protocol::Request::FingerprintMarkSuccess {
                cache_file: cache_file.into(),
            },
            "mark-success",
        )
    } else {
        (
            zccache_protocol::Request::FingerprintMarkFailure {
                cache_file: cache_file.into(),
            },
            "mark-failure",
        )
    };
    if let Err(e) = daemon.send(&request).await {
        eprintln!("zccache fp: send error: {e}");
        return infra_failure;
    }
    match daemon.recv::<zccache_protocol::Response>().await {
        Ok(Some(zccache_protocol::Response::FingerprintAck)) => {
            eprintln!("zccache fp: {label}");
            ExitCode::SUCCESS
        }
        Ok(Some(zccache_protocol::Response::Error { message })) => {
            eprintln!("zccache fp: daemon error: {message}");
            infra_failure
        }
        Ok(other) => {
            eprintln!("zccache fp: unexpected response: {other:?}");
            infra_failure
        }
        Err(e) => {
            eprintln!("zccache fp: recv error: {e}");
            infra_failure
        }
    }
}
/// `zccache fp invalidate`: drop the stored fingerprint for `cache_file` so the
/// next `fp check` reports "run".
async fn cmd_fp_invalidate(endpoint: &str, cache_file: &Path) -> ExitCode {
    let infra_failure = ExitCode::from(2);
    if let Err(e) = ensure_daemon(endpoint).await {
        eprintln!("zccache fp: failed to start daemon: {e}");
        return infra_failure;
    }
    let connection = connect(endpoint).await;
    let mut conn = match connection {
        Ok(c) => c,
        Err(e) => {
            eprintln!("zccache fp: cannot connect to daemon: {e}");
            return infra_failure;
        }
    };
    let request = zccache_protocol::Request::FingerprintInvalidate {
        cache_file: cache_file.into(),
    };
    if let Err(e) = conn.send(&request).await {
        eprintln!("zccache fp: send error: {e}");
        return infra_failure;
    }
    let response = conn.recv::<zccache_protocol::Response>().await;
    match response {
        Ok(Some(zccache_protocol::Response::FingerprintAck)) => {
            eprintln!("zccache fp: invalidated");
            ExitCode::SUCCESS
        }
        Ok(Some(zccache_protocol::Response::Error { message })) => {
            eprintln!("zccache fp: daemon error: {message}");
            infra_failure
        }
        Ok(other) => {
            eprintln!("zccache fp: unexpected response: {other:?}");
            infra_failure
        }
        Err(e) => {
            eprintln!("zccache fp: recv error: {e}");
            infra_failure
        }
    }
}
/// Run the wrapped tool directly with no caching (ZCCACHE_DISABLE path),
/// forwarding its exit status.
fn run_passthrough(args: &[String]) -> ExitCode {
    // Bug fix: the original indexed args[0] unconditionally, which panics on an
    // empty slice. The current caller checks first, but fail gracefully anyway.
    let Some(tool) = args.first() else {
        eprintln!("usage: zccache <compiler|tool> <args...>");
        return ExitCode::FAILURE;
    };
    let tool_args = args.get(1..).unwrap_or(&[]);
    let resolved = resolve_compiler_path(tool);
    match std::process::Command::new(&resolved)
        .args(tool_args)
        .status()
    {
        // A missing exit code (killed by signal on unix) maps to failure (1).
        Ok(status) => exit_code_from_i32(status.code().unwrap_or(1)),
        Err(e) => {
            eprintln!("zccache: failed to run {}: {e}", resolved.display());
            ExitCode::FAILURE
        }
    }
}
/// Content-hash cache around rustfmt: files whose content hash already has a
/// marker under the current context are skipped; only misses are formatted.
fn run_rustfmt_cached(rustfmt_path: &Path, args: &[String], cwd: &Path) -> ExitCode {
    use zccache_compiler::parse_rustfmt::{find_rustfmt_config, parse_rustfmt_invocation};
    // Unrecognized invocations bypass the cache entirely.
    let parsed = match parse_rustfmt_invocation(args) {
        Some(p) => p,
        None => return run_tool_direct(rustfmt_path, args),
    };
    // Context hash covers the rustfmt binary, the effective config file, and
    // the flags — a change to any of these invalidates every marker.
    let context_hash = {
        let mut hasher = zccache_hash::StreamHasher::new();
        hasher.update(b"zccache-fmt-v1");
        if let Ok(bin_hash) = zccache_hash::hash_file(rustfmt_path) {
            hasher.update(bin_hash.as_bytes());
        } else {
            hasher.update(b"unknown-binary");
        }
        let config_path = parsed
            .config_path
            .clone()
            .or_else(|| find_rustfmt_config(cwd));
        if let Some(ref cfg) = config_path {
            if let Ok(cfg_hash) = zccache_hash::hash_file(cfg) {
                hasher.update(cfg_hash.as_bytes());
            }
        }
        for flag in &parsed.flags {
            hasher.update(flag.as_bytes());
            // NUL delimiter so ["ab","c"] and ["a","bc"] hash differently.
            hasher.update(b"\0");
        }
        hasher.finalize().to_hex()
    };
    let cache_dir = zccache_core::config::default_cache_dir()
        .join("fmt")
        .join(&context_hash);
    let _ = std::fs::create_dir_all(&cache_dir);
    // Hash every source file in parallel; a hit means a marker file named after
    // the content hash already exists under this context.
    use rayon::prelude::*;
    let all_files: Vec<(NormalizedPath, bool, Option<zccache_hash::ContentHash>)> = parsed
        .source_files
        .par_iter()
        .map(|src| {
            let abs = if src.is_absolute() {
                src.clone()
            } else {
                cwd.join(src).into()
            };
            let (is_hit, hash) = match zccache_hash::hash_file(&abs) {
                Ok(content_hash) => {
                    let marker = cache_dir.join(content_hash.to_hex());
                    (marker.exists(), Some(content_hash))
                }
                // Unreadable file: treat as a miss; rustfmt will report it.
                Err(_) => (false, None),
            };
            (abs, is_hit, hash)
        })
        .collect();
    let miss_files: Vec<NormalizedPath> = all_files
        .iter()
        .filter(|(_, is_hit, _)| !is_hit)
        .map(|(abs, _, _)| abs.clone())
        .collect();
    // Fully cached: nothing to format (and in --check mode, nothing to report).
    // Bug fix: the original had `if parsed.check_mode { return SUCCESS; }
    // return SUCCESS;` — two branches returning the same value, collapsed.
    if miss_files.is_empty() {
        return ExitCode::SUCCESS;
    }
    // Bug fix: the original branched on check_mode but called the identical
    // function in both arms; collapsed to a single call.
    let exit_i32 = match run_rustfmt_on_files(rustfmt_path, args, &miss_files, &parsed) {
        Ok(code) => code,
        Err(e) => {
            eprintln!("zccache: failed to run rustfmt: {e}");
            return ExitCode::FAILURE;
        }
    };
    if exit_i32 == 0 {
        // Record each freshly processed file's content hash so the next run is
        // a hit. Markers are empty files named by hash.
        for (abs, was_hit, cached_hash) in &all_files {
            if *was_hit {
                continue;
            }
            let new_hash = if parsed.check_mode {
                // --check does not rewrite files, so the pre-run hash is current.
                *cached_hash
            } else {
                zccache_hash::hash_file(abs).ok()
            };
            if let Some(h) = new_hash {
                let marker = cache_dir.join(h.to_hex());
                let _ = std::fs::write(&marker, b"");
            }
        }
    }
    exit_code_from_i32(exit_i32)
}
/// Invoke rustfmt on exactly the given cache-miss files, returning its raw exit
/// code (1 when the process reports no code, e.g. killed by a signal).
fn run_rustfmt_on_files(
    rustfmt_path: &Path,
    original_args: &[String],
    files: &[NormalizedPath],
    parsed: &zccache_compiler::parse_rustfmt::ParsedRustfmt,
) -> Result<i32, std::io::Error> {
    // The full original argv is accepted for signature stability but the
    // command is rebuilt from the parsed flags plus only the miss files.
    let _ = original_args;
    let mut invocation = std::process::Command::new(rustfmt_path);
    invocation.args(&parsed.flags);
    invocation.args(files.iter());
    let status = invocation.status()?;
    Ok(status.code().unwrap_or(1))
}
/// Plain passthrough: execute `tool` with `args` and translate its exit status.
fn run_tool_direct(tool: &Path, args: &[String]) -> ExitCode {
    let outcome = std::process::Command::new(tool).args(args).status();
    match outcome {
        Err(e) => {
            eprintln!("zccache: failed to run {}: {e}", tool.display());
            ExitCode::FAILURE
        }
        // No exit code (e.g. terminated by signal) is reported as failure (1).
        Ok(st) => exit_code_from_i32(st.code().unwrap_or(1)),
    }
}
/// Entry point for `zccache <tool> ...`: dispatch the wrapped invocation to the
/// formatter cache, the link path, or the (session/ephemeral) compile path.
fn run_wrap(args: &[String]) -> ExitCode {
    if args.is_empty() {
        eprintln!("usage: zccache <compiler|tool> <args...>");
        return ExitCode::FAILURE;
    }
    // Escape hatch: ZCCACHE_DISABLE=1/true bypasses all caching.
    if std::env::var("ZCCACHE_DISABLE").is_ok_and(|v| v == "1" || v.eq_ignore_ascii_case("true")) {
        return run_passthrough(args);
    }
    let wrapped_tool = resolve_compiler_path(&args[0]);
    let tool_args: Vec<String> = if args.len() > 1 {
        args[1..].to_vec()
    } else {
        Vec::new()
    };
    // Capture cwd and env BEFORE the chdir below; they are forwarded to the
    // daemon so it can reproduce the invocation.
    let cwd = std::env::current_dir().unwrap_or_default();
    let client_env: Vec<(String, String)> = std::env::vars().collect();
    let endpoint = resolve_endpoint(None);
    // Move this process out of the build directory so it does not hold it open.
    // NOTE(review): presumably so the directory stays deletable (Windows) —
    // confirm the original motivation.
    let _ = std::env::set_current_dir(std::env::temp_dir());
    // Formatters (rustfmt) use the local content-hash cache, no daemon needed.
    if zccache_compiler::detect_family(&args[0]).is_formatter() {
        return run_rustfmt_cached(&wrapped_tool, &tool_args, &cwd);
    }
    // Archiver/linker invocations always take the ephemeral link path.
    if zccache_compiler::parse_archiver::is_archiver(&args[0])
        || zccache_compiler::parse_linker::is_link_invocation(&args[0], &tool_args)
    {
        return run_async(cmd_link_ephemeral(
            &endpoint,
            &wrapped_tool,
            tool_args,
            cwd.into(),
            client_env,
        ));
    }
    // Compiles join an explicit session when ZCCACHE_SESSION_ID is set,
    // otherwise use the per-invocation ephemeral path.
    match std::env::var("ZCCACHE_SESSION_ID") {
        Ok(session_id) => {
            if session_id.is_empty() {
                eprintln!("ZCCACHE_SESSION_ID is empty");
                return ExitCode::FAILURE;
            }
            run_async(cmd_compile(
                &endpoint,
                &session_id,
                tool_args,
                cwd.into(),
                wrapped_tool,
                client_env,
            ))
        }
        Err(_) => {
            run_async(cmd_compile_ephemeral(
                &endpoint,
                &wrapped_tool,
                tool_args,
                cwd.into(),
                client_env,
            ))
        }
    }
}
/// Resolve a compiler/tool name to a path: normalize MSYS-style paths, keep
/// absolute paths as-is, otherwise search PATH (falling back to the bare name
/// so the later spawn reports the lookup error).
fn resolve_compiler_path(compiler: &str) -> NormalizedPath {
    let normalized = zccache_core::path::normalize_msys_path(compiler);
    if Path::new(&normalized).is_absolute() {
        normalized.into()
    } else {
        which_on_path(&normalized).unwrap_or_else(|| normalized.into())
    }
}
/// Session-scoped compile: forward the invocation to the daemon under an
/// existing session id and replay the captured stdout/stderr/exit code.
async fn cmd_compile(
    endpoint: &str,
    session_id: &str,
    args: Vec<String>,
    cwd: NormalizedPath,
    compiler: NormalizedPath,
    client_env: Vec<(String, String)>,
) -> ExitCode {
    // No ensure_daemon() here, unlike the ephemeral variants — presumably the
    // daemon was started by `session-start`; confirm that assumption.
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(e) => {
            eprintln!("cannot connect to daemon at {endpoint}: {e}");
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = conn
        .send(&zccache_protocol::Request::Compile {
            session_id: session_id.to_string(),
            args,
            cwd,
            compiler,
            // Full client environment so the daemon can reproduce the compile.
            env: Some(client_env),
        })
        .await
    {
        eprintln!("zccache: failed to send to daemon: {e}");
        return ExitCode::FAILURE;
    }
    let recv_result = match conn.recv().await {
        Ok(r) => r,
        Err(e) => {
            eprintln!("zccache: broken connection to daemon: {e}");
            return ExitCode::FAILURE;
        }
    };
    match recv_result {
        Some(zccache_protocol::Response::CompileResult {
            exit_code,
            stdout,
            stderr,
            ..
        }) => {
            use std::io::Write;
            // Replay captured output verbatim; write errors are deliberately
            // ignored so a closed pipe cannot change the compile's exit code.
            let _ = std::io::stdout().write_all(&stdout);
            let _ = std::io::stderr().write_all(&stderr);
            exit_code_from_i32(exit_code)
        }
        Some(zccache_protocol::Response::Error { message }) => {
            eprintln!("zccache error: {message}");
            ExitCode::FAILURE
        }
        None => {
            eprintln!("zccache: lost connection to daemon (no response received)");
            ExitCode::FAILURE
        }
        Some(other) => {
            eprintln!("zccache: unexpected response from daemon: {other:?}");
            ExitCode::FAILURE
        }
    }
}
/// Sessionless compile: auto-start the daemon if needed, send a one-shot
/// CompileEphemeral request, and replay the captured output and exit code.
async fn cmd_compile_ephemeral(
    endpoint: &str,
    compiler: &Path,
    args: Vec<String>,
    cwd: NormalizedPath,
    client_env: Vec<(String, String)>,
) -> ExitCode {
    if let Err(e) = ensure_daemon(endpoint).await {
        eprintln!("cannot start daemon at {endpoint}: {e}");
        return ExitCode::FAILURE;
    }
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(e) => {
            eprintln!("cannot connect to daemon at {endpoint}: {e}");
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = conn
        .send(&zccache_protocol::Request::CompileEphemeral {
            // client_pid lets the daemon tie the request to this process.
            client_pid: std::process::id(),
            // NOTE(review): the path is sent twice (working_dir and cwd) —
            // mirrors the protocol struct; confirm both fields are required.
            working_dir: cwd.clone(),
            compiler: compiler.into(),
            args,
            cwd,
            env: Some(client_env),
        })
        .await
    {
        eprintln!("zccache: failed to send to daemon: {e}");
        return ExitCode::FAILURE;
    }
    let recv_result = match conn.recv().await {
        Ok(r) => r,
        Err(e) => {
            eprintln!("zccache: broken connection to daemon: {e}");
            return ExitCode::FAILURE;
        }
    };
    match recv_result {
        Some(zccache_protocol::Response::CompileResult {
            exit_code,
            stdout,
            stderr,
            ..
        }) => {
            use std::io::Write;
            // Replay captured output verbatim; write errors ignored on purpose.
            let _ = std::io::stdout().write_all(&stdout);
            let _ = std::io::stderr().write_all(&stderr);
            exit_code_from_i32(exit_code)
        }
        Some(zccache_protocol::Response::Error { message }) => {
            eprintln!("zccache error: {message}");
            ExitCode::FAILURE
        }
        None => {
            eprintln!("zccache: lost connection to daemon (no response received)");
            ExitCode::FAILURE
        }
        Some(other) => {
            eprintln!("zccache: unexpected response from daemon: {other:?}");
            ExitCode::FAILURE
        }
    }
}
/// Link/archive path: auto-start the daemon if needed, send a one-shot
/// LinkEphemeral request, replay output, and surface any daemon warning.
async fn cmd_link_ephemeral(
    endpoint: &str,
    tool: &Path,
    args: Vec<String>,
    cwd: NormalizedPath,
    client_env: Vec<(String, String)>,
) -> ExitCode {
    if let Err(e) = ensure_daemon(endpoint).await {
        eprintln!("cannot start daemon at {endpoint}: {e}");
        return ExitCode::FAILURE;
    }
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(e) => {
            eprintln!("cannot connect to daemon at {endpoint}: {e}");
            return ExitCode::FAILURE;
        }
    };
    if let Err(e) = conn
        .send(&zccache_protocol::Request::LinkEphemeral {
            client_pid: std::process::id(),
            tool: tool.into(),
            args,
            cwd,
            env: Some(client_env),
        })
        .await
    {
        eprintln!("zccache: failed to send to daemon: {e}");
        return ExitCode::FAILURE;
    }
    let recv_result = match conn.recv().await {
        Ok(r) => r,
        Err(e) => {
            eprintln!("zccache: broken connection to daemon: {e}");
            return ExitCode::FAILURE;
        }
    };
    match recv_result {
        Some(zccache_protocol::Response::LinkResult {
            exit_code,
            stdout,
            stderr,
            warning,
            ..
        }) => {
            use std::io::Write;
            // Replay captured output first, then the optional zccache warning,
            // so the tool's own output order is preserved.
            let _ = std::io::stdout().write_all(&stdout);
            let _ = std::io::stderr().write_all(&stderr);
            if let Some(w) = warning {
                eprintln!("zccache warning: {w}");
            }
            exit_code_from_i32(exit_code)
        }
        Some(zccache_protocol::Response::Error { message }) => {
            eprintln!("zccache error: {message}");
            ExitCode::FAILURE
        }
        None => {
            eprintln!("zccache: lost connection to daemon (no response received)");
            ExitCode::FAILURE
        }
        Some(other) => {
            eprintln!("zccache: unexpected response from daemon: {other:?}");
            ExitCode::FAILURE
        }
    }
}
enum VersionCheck {
Ok,
DaemonNewer {
daemon_ver: String,
},
DaemonOlder {
daemon_ver: String,
},
Unreachable,
CommError,
}
/// Query the daemon's Status and compare its version against this client's.
async fn check_daemon_version(endpoint: &str) -> VersionCheck {
    let mut conn = match connect(endpoint).await {
        Ok(c) => c,
        Err(_) => return VersionCheck::Unreachable,
    };
    if conn.send(&zccache_protocol::Request::Status).await.is_err() {
        return VersionCheck::CommError;
    }
    match conn.recv::<zccache_protocol::Response>().await {
        Ok(Some(zccache_protocol::Response::Status(s))) => {
            // Fast path: exact string equality avoids version parsing entirely.
            if s.version == zccache_core::VERSION {
                return VersionCheck::Ok;
            }
            let client_ver = zccache_core::version::current();
            match zccache_core::version::Version::parse(&s.version) {
                Some(daemon_ver) => match daemon_ver.cmp(&client_ver) {
                    std::cmp::Ordering::Equal => VersionCheck::Ok,
                    std::cmp::Ordering::Greater => VersionCheck::DaemonNewer {
                        daemon_ver: s.version,
                    },
                    std::cmp::Ordering::Less => VersionCheck::DaemonOlder {
                        daemon_ver: s.version,
                    },
                },
                // An unparseable version is treated as "older" so callers
                // trigger auto-recovery rather than trusting an unknown build.
                None => VersionCheck::DaemonOlder {
                    daemon_ver: s.version,
                },
            }
        }
        _ => VersionCheck::CommError,
    }
}
/// Locate and launch the daemon binary, then poll the endpoint until it accepts
/// a connection (100 attempts x 100 ms = up to 10 s).
async fn spawn_and_wait(endpoint: &str) -> Result<(), String> {
    let daemon_bin = find_daemon_binary().ok_or("cannot find zccache-daemon binary")?;
    tracing::debug!(?daemon_bin, %endpoint, "spawning daemon");
    spawn_daemon(&daemon_bin, endpoint)?;
    let mut attempts = 0;
    while attempts < 100 {
        // Sleep first: the daemon cannot possibly be up immediately.
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        if connect(endpoint).await.is_ok() {
            return Ok(());
        }
        attempts += 1;
    }
    Err("daemon started but not accepting connections after 10s".to_string())
}
/// Best-effort teardown of an existing daemon: graceful Shutdown request first,
/// then force-kill by pid, then remove the lock file. Never fails — callers
/// immediately respawn afterwards.
async fn stop_stale_daemon(endpoint: &str) {
    // Graceful path: ask the daemon to shut itself down, give it a moment.
    if let Ok(mut conn) = connect(endpoint).await {
        let _ = conn.send(&zccache_protocol::Request::Shutdown).await;
        tokio::time::sleep(std::time::Duration::from_millis(200)).await;
    }
    // Forceful path: kill whatever pid the lock file still records.
    if let Some(pid) = zccache_ipc::check_running_daemon() {
        tracing::debug!(pid, "force-killing stale daemon process");
        if zccache_ipc::force_kill_process(pid).is_ok() {
            // Wait up to 50 x 100 ms for the process to actually exit.
            for _ in 0..50 {
                if !zccache_ipc::is_process_alive(pid) {
                    break;
                }
                tokio::time::sleep(std::time::Duration::from_millis(100)).await;
            }
        }
        zccache_ipc::remove_lock_file();
    }
    // Final grace period — NOTE(review): presumably lets the OS release the
    // endpoint (socket/pipe) before respawn; confirm.
    tokio::time::sleep(std::time::Duration::from_millis(200)).await;
}
/// Make sure a compatible daemon is listening on `endpoint`, auto-recovering
/// from stale/older daemons and starting one if none is reachable.
async fn ensure_daemon(endpoint: &str) -> Result<(), String> {
    match check_daemon_version(endpoint).await {
        VersionCheck::Ok => return Ok(()),
        // A newer daemon is trusted to speak our protocol; proceed.
        VersionCheck::DaemonNewer { daemon_ver } => {
            tracing::debug!(
                daemon_ver,
                client_ver = zccache_core::VERSION,
                "daemon is newer than client, proceeding"
            );
            return Ok(());
        }
        // Older (or unparseable-version) daemon: restart it with our binary.
        VersionCheck::DaemonOlder { daemon_ver } => {
            tracing::info!(
                daemon_ver,
                client_ver = zccache_core::VERSION,
                "daemon is older than client, auto-recovering"
            );
            stop_stale_daemon(endpoint).await;
            return spawn_and_wait(endpoint).await;
        }
        VersionCheck::CommError => {
            tracing::info!("cannot communicate with daemon, auto-recovering");
            stop_stale_daemon(endpoint).await;
            return spawn_and_wait(endpoint).await;
        }
        // Unreachable: fall through — a daemon may be mid-startup (below).
        VersionCheck::Unreachable => {
        }
    }
    // A lock file claims a daemon process exists: give it up to 20 x 100 ms to
    // finish starting before declaring it wedged.
    if let Some(pid) = zccache_ipc::check_running_daemon() {
        for _ in 0..20 {
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
            match check_daemon_version(endpoint).await {
                VersionCheck::Ok => return Ok(()),
                VersionCheck::DaemonNewer { daemon_ver } => {
                    tracing::debug!(
                        daemon_ver,
                        client_ver = zccache_core::VERSION,
                        "daemon is newer than client, proceeding"
                    );
                    return Ok(());
                }
                VersionCheck::DaemonOlder { daemon_ver } => {
                    tracing::info!(
                        daemon_ver,
                        client_ver = zccache_core::VERSION,
                        "daemon is older than client during startup, auto-recovering"
                    );
                    stop_stale_daemon(endpoint).await;
                    return spawn_and_wait(endpoint).await;
                }
                VersionCheck::CommError => {
                    tracing::info!(
                        "cannot communicate with daemon during startup, auto-recovering"
                    );
                    stop_stale_daemon(endpoint).await;
                    return spawn_and_wait(endpoint).await;
                }
                VersionCheck::Unreachable => continue,
            }
        }
        return Err(format!(
            "daemon process {pid} exists but not accepting connections"
        ));
    }
    // No daemon at all: start a fresh one.
    spawn_and_wait(endpoint).await
}
/// Locate the daemon binary: prefer one sitting next to the current executable,
/// then fall back to a PATH search.
fn find_daemon_binary() -> Option<NormalizedPath> {
    let name = if cfg!(windows) {
        "zccache-daemon.exe"
    } else {
        "zccache-daemon"
    };
    let sibling = std::env::current_exe()
        .ok()
        .and_then(|exe| exe.parent().map(|dir| dir.join(name)))
        .filter(|candidate| candidate.exists());
    match sibling {
        Some(candidate) => Some(candidate.into()),
        None => which_on_path(name),
    }
}
/// Minimal `which`: walk each PATH entry and return the first matching file.
/// On Windows also tries `<name>.exe` when no extension was given.
fn which_on_path(name: &str) -> Option<NormalizedPath> {
    let path_var = std::env::var_os("PATH")?;
    for dir in std::env::split_paths(&path_var) {
        let exact = dir.join(name);
        if exact.is_file() {
            return Some(exact.into());
        }
        #[cfg(windows)]
        {
            if std::path::Path::new(name).extension().is_none() {
                let with_exe = dir.join(format!("{name}.exe"));
                if with_exe.is_file() {
                    return Some(with_exe.into());
                }
            }
        }
    }
    None
}
/// Launch the daemon detached (`--foreground` on the given endpoint) with all
/// standard streams nulled so it outlives this short-lived CLI process.
fn spawn_daemon(bin: &std::path::Path, endpoint: &str) -> Result<(), String> {
    let mut cmd = std::process::Command::new(bin);
    cmd.args(["--foreground", "--endpoint", endpoint]);
    cmd.stdin(std::process::Stdio::null());
    cmd.stdout(std::process::Stdio::null());
    cmd.stderr(std::process::Stdio::null());
    #[cfg(windows)]
    {
        use std::os::windows::process::CommandExt;
        const CREATE_NO_WINDOW: u32 = 0x0800_0000;
        cmd.creation_flags(CREATE_NO_WINDOW);
        // Temporarily mark our stdio handles non-inheritable so the detached
        // child does not keep the console pipes alive.
        disable_handle_inheritance();
    }
    let spawn_result = cmd.spawn();
    // Bug fix: previously `cmd.spawn().map_err(..)?` returned early on failure
    // and skipped restore_handle_inheritance(), leaving this process's stdio
    // handles non-inheritable for any later children. Restore unconditionally
    // before propagating the spawn error.
    #[cfg(windows)]
    restore_handle_inheritance();
    spawn_result.map_err(|e| format!("failed to spawn daemon: {e}"))?;
    Ok(())
}
/// Mark this process's stdout/stderr handles as non-inheritable so a spawned
/// child does not hold them open. Paired with `restore_handle_inheritance`.
#[cfg(windows)]
fn disable_handle_inheritance() {
    use std::os::windows::io::AsRawHandle;
    // Minimal direct Win32 declaration instead of pulling in a bindings crate.
    extern "system" {
        fn SetHandleInformation(handle: *mut std::ffi::c_void, mask: u32, flags: u32) -> i32;
    }
    const HANDLE_FLAG_INHERIT: u32 = 1;
    // SAFETY: the raw handles come from live stdout/stderr guards that outlive
    // these calls; SetHandleInformation only toggles a per-handle flag. Return
    // values are deliberately ignored — failure merely means the child may
    // inherit the handles (best effort).
    unsafe {
        let stdout = std::io::stdout();
        let stderr = std::io::stderr();
        SetHandleInformation(stdout.as_raw_handle() as *mut _, HANDLE_FLAG_INHERIT, 0);
        SetHandleInformation(stderr.as_raw_handle() as *mut _, HANDLE_FLAG_INHERIT, 0);
    }
}
/// Re-enable inheritance on stdout/stderr after `disable_handle_inheritance`,
/// so subsequently spawned children behave normally.
#[cfg(windows)]
fn restore_handle_inheritance() {
    use std::os::windows::io::AsRawHandle;
    extern "system" {
        fn SetHandleInformation(handle: *mut std::ffi::c_void, mask: u32, flags: u32) -> i32;
    }
    const HANDLE_FLAG_INHERIT: u32 = 1;
    // SAFETY: same contract as disable_handle_inheritance — live handles, flag
    // toggle only, errors ignored (best effort).
    unsafe {
        let stdout = std::io::stdout();
        let stderr = std::io::stderr();
        SetHandleInformation(
            stdout.as_raw_handle() as *mut _,
            HANDLE_FLAG_INHERIT,
            HANDLE_FLAG_INHERIT,
        );
        SetHandleInformation(
            stderr.as_raw_handle() as *mut _,
            HANDLE_FLAG_INHERIT,
            HANDLE_FLAG_INHERIT,
        );
    }
}
/// Connect to the daemon (unix: the IPC crate's unix connection type).
#[cfg(unix)]
async fn connect(endpoint: &str) -> Result<zccache_ipc::IpcConnection, zccache_ipc::IpcError> {
    zccache_ipc::connect(endpoint).await
}
/// Connect to the daemon (windows: the IPC crate uses a distinct client
/// connection type, hence the separate cfg'd wrapper).
#[cfg(windows)]
async fn connect(
    endpoint: &str,
) -> Result<zccache_ipc::IpcClientConnection, zccache_ipc::IpcError> {
    zccache_ipc::connect(endpoint).await
}
/// Pick the daemon endpoint. Precedence: explicit CLI value, then the
/// ZCCACHE_ENDPOINT environment variable, then the platform default.
fn resolve_endpoint(explicit: Option<&str>) -> String {
    explicit
        .map(str::to_string)
        .or_else(|| std::env::var("ZCCACHE_ENDPOINT").ok())
        .unwrap_or_else(zccache_ipc::default_endpoint)
}
/// Drive a single future to completion on a one-shot current-thread runtime.
/// The CLI performs exactly one request/response exchange, so a multi-threaded
/// scheduler would be wasted.
fn run_async(future: impl std::future::Future<Output = ExitCode>) -> ExitCode {
    let runtime = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .expect("failed to create tokio runtime");
    runtime.block_on(future)
}
/// Human-friendly uptime: seconds below a minute, minutes+seconds below an
/// hour, hours+minutes beyond (seconds dropped at hour granularity).
fn format_uptime(secs: u64) -> String {
    match secs {
        s if s < 60 => format!("{s}s"),
        s if s < 3600 => format!("{}m {}s", s / 60, s % 60),
        s => format!("{}h {}m", s / 3600, (s % 3600) / 60),
    }
}
/// Human-friendly duration: raw milliseconds under a second, fractional
/// seconds under a minute, then minutes+seconds (sub-second part dropped).
fn format_duration_ms(ms: u64) -> String {
    if ms >= 60_000 {
        let total_secs = ms / 1000;
        format!("{}m {}s", total_secs / 60, total_secs % 60)
    } else if ms >= 1000 {
        format!("{:.1}s", ms as f64 / 1000.0)
    } else {
        format!("{ms}ms")
    }
}
/// Human-friendly byte count using binary (1024-based) units but KB/MB/GB
/// labels, one decimal place above the byte range.
fn format_bytes(bytes: u64) -> String {
    const KIB: u64 = 1024;
    const MIB: u64 = KIB * 1024;
    const GIB: u64 = MIB * 1024;
    if bytes == 0 {
        return "0 B".to_string();
    }
    if bytes < KIB {
        format!("{bytes} B")
    } else if bytes < MIB {
        format!("{:.1} KB", bytes as f64 / KIB as f64)
    } else if bytes < GIB {
        format!("{:.1} MB", bytes as f64 / MIB as f64)
    } else {
        format!("{:.1} GB", bytes as f64 / GIB as f64)
    }
}
/// Install the global tracing subscriber. The filter is taken from the default
/// environment variable (RUST_LOG via `EnvFilter::try_from_default_env`),
/// defaulting to "warn" when it is unset or invalid.
fn init_tracing() {
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
        )
        .init();
}
#[cfg(test)]
mod tests {
use super::*;
// Exit status 0 must round-trip to ExitCode 0 (success preserved).
#[test]
fn exit_code_zero_stays_zero() {
    assert_eq!(exit_code_from_i32(0), ExitCode::from(0));
}
// Exit status 1 must round-trip unchanged.
#[test]
fn exit_code_one_stays_one() {
    assert_eq!(exit_code_from_i32(1), ExitCode::from(1));
}
// 255 is the largest value representable in ExitCode; must pass unchanged.
#[test]
fn exit_code_255_stays_255() {
    assert_eq!(exit_code_from_i32(255), ExitCode::from(255));
}
// A naive `as u8` truncation would turn 256 into 0 (success); the mapping
// must instead preserve failure by clamping to 1.
#[test]
fn exit_code_256_becomes_one_not_zero() {
    assert_ne!(exit_code_from_i32(256), ExitCode::from(0));
    assert_eq!(exit_code_from_i32(256), ExitCode::from(1));
}
// 512 also has a zero low byte; must map to failure (1), not success.
#[test]
fn exit_code_512_becomes_one_not_zero() {
    assert_eq!(exit_code_from_i32(512), ExitCode::from(1));
}
// Negative statuses must stay a failure; -1 maps to 255 (low byte).
#[test]
fn exit_code_negative_preserves_failure() {
    assert_ne!(exit_code_from_i32(-1), ExitCode::from(0));
    assert_eq!(exit_code_from_i32(-1), ExitCode::from(255));
}
// 257 has nonzero low byte 1; the mapping keeps it.
#[test]
fn exit_code_257_keeps_low_byte() {
    assert_eq!(exit_code_from_i32(257), ExitCode::from(1));
}
// End-to-end warm: rlib/rmeta/.d and plain .o payloads in the artifact store
// must all land under target/<profile>/deps with their original names,
// contents intact, and a fresh mtime.
#[test]
fn warm_restores_rust_artifacts_to_correct_paths() {
    let dir = tempfile::tempdir().unwrap();
    let cache_dir = dir.path().join("cache");
    let artifact_dir = cache_dir.join("artifacts");
    let index_path = cache_dir.join("index.redb");
    let target_dir = dir.path().join("target");
    std::fs::create_dir_all(&artifact_dir).unwrap();
    let store = zccache_artifact::ArtifactStore::open(&index_path).unwrap();
    // Entry 1: a typical Rust crate triple (rlib + rmeta + dep-info).
    let key1 = "aaaaaaaabbbbbbbb";
    let idx1 = zccache_artifact::ArtifactIndex::new(
        vec![
            "libserde-abc123.rlib".to_string(),
            "libserde-abc123.rmeta".to_string(),
            "serde-abc123.d".to_string(),
        ],
        vec![100, 50, 10],
        vec![],
        vec![],
        0,
    );
    store.insert(key1, &idx1).unwrap();
    // Payload files are named "<key>_<position>" in the artifact dir.
    std::fs::write(artifact_dir.join(format!("{key1}_0")), b"rlib-content").unwrap();
    std::fs::write(artifact_dir.join(format!("{key1}_1")), b"rmeta-content").unwrap();
    std::fs::write(artifact_dir.join(format!("{key1}_2")), b"dep-info").unwrap();
    // Entry 2: single-rlib crate.
    let key2 = "ccccccccdddddddd";
    let idx2 = zccache_artifact::ArtifactIndex::new(
        vec!["libproc_macro2-def456.rlib".to_string()],
        vec![200],
        vec![],
        vec![],
        0,
    );
    store.insert(key2, &idx2).unwrap();
    std::fs::write(artifact_dir.join(format!("{key2}_0")), b"proc-macro2-rlib").unwrap();
    // Entry 3: a non-Rust object file, which should be restored too.
    let key3 = "eeeeeeeeffffffff";
    let idx3 = zccache_artifact::ArtifactIndex::new(
        vec!["foo.o".to_string()],
        vec![300],
        vec![],
        vec![],
        0,
    );
    store.insert(key3, &idx3).unwrap();
    std::fs::write(artifact_dir.join(format!("{key3}_0")), b"object-file").unwrap();
    // Close the store before warm_target reopens the index.
    drop(store);
    let (restored, skipped, errors) =
        warm_target(&index_path, &artifact_dir, &target_dir, "debug", None).unwrap();
    assert_eq!(errors, 0, "should have 0 errors");
    assert_eq!(
        restored, 5,
        "should restore all 5 files (3 serde + 1 proc_macro2 + 1 C++ .o)"
    );
    assert_eq!(skipped, 0, "all payloads exist on disk");
    let deps = target_dir.join("debug").join("deps");
    assert!(
        deps.join("libserde-abc123.rlib").exists(),
        "serde rlib missing"
    );
    assert!(
        deps.join("libserde-abc123.rmeta").exists(),
        "serde rmeta missing"
    );
    assert!(
        deps.join("serde-abc123.d").exists(),
        "serde dep-info missing"
    );
    assert!(
        deps.join("libproc_macro2-def456.rlib").exists(),
        "proc_macro2 rlib missing"
    );
    assert_eq!(
        std::fs::read(deps.join("libserde-abc123.rlib")).unwrap(),
        b"rlib-content"
    );
    assert_eq!(
        std::fs::read(deps.join("libproc_macro2-def456.rlib")).unwrap(),
        b"proc-macro2-rlib"
    );
    assert!(
        deps.join("foo.o").exists(),
        "C++ .o file should also be in deps/"
    );
    assert_eq!(std::fs::read(deps.join("foo.o")).unwrap(), b"object-file");
    // Restored files must carry a fresh mtime (checked within a 5 s window).
    let meta = std::fs::metadata(deps.join("libserde-abc123.rlib")).unwrap();
    let age = meta.modified().unwrap().elapsed().unwrap();
    assert!(age.as_secs() < 5, "mtime should be fresh, got {age:?}");
}
// An index entry whose payload file is absent must be counted as skipped,
// not as an error, and nothing is restored.
#[test]
fn warm_skips_missing_payloads() {
    let dir = tempfile::tempdir().unwrap();
    let cache_dir = dir.path().join("cache");
    let artifact_dir = cache_dir.join("artifacts");
    let index_path = cache_dir.join("index.redb");
    let target_dir = dir.path().join("target");
    std::fs::create_dir_all(&artifact_dir).unwrap();
    let store = zccache_artifact::ArtifactStore::open(&index_path).unwrap();
    let key = "1111111122222222";
    let idx = zccache_artifact::ArtifactIndex::new(
        vec!["libfoo-xyz.rlib".to_string()],
        vec![100],
        vec![],
        vec![],
        0,
    );
    // Insert the index record but deliberately never write "<key>_0".
    store.insert(key, &idx).unwrap();
    drop(store);
    let (restored, skipped, errors) =
        warm_target(&index_path, &artifact_dir, &target_dir, "debug", None).unwrap();
    assert_eq!(restored, 0);
    assert_eq!(skipped, 1, "should skip 1 missing payload");
    assert_eq!(errors, 0);
}
// A nonexistent index database is a hard error, not an empty warm.
#[test]
fn warm_returns_error_on_missing_index() {
    let dir = tempfile::tempdir().unwrap();
    let result = warm_target(
        &dir.path().join("nonexistent.redb"),
        &dir.path().join("artifacts"),
        &dir.path().join("target"),
        "debug",
        None,
    );
    assert!(result.is_err());
}
// Test fixture: build an artifact store under `dir` containing four entries —
// a serde rlib/rmeta/.d triple, a proc_macro2 rlib, a tokio rlib, and a plain
// C++ object file — with payload files named "<key>_<position>".
// Returns (index_path, artifact_dir).
fn make_test_store(dir: &Path) -> (PathBuf, PathBuf) {
    let cache_dir = dir.join("cache");
    let artifact_dir = cache_dir.join("artifacts");
    let index_path = cache_dir.join("index.redb");
    std::fs::create_dir_all(&artifact_dir).unwrap();
    let store = zccache_artifact::ArtifactStore::open(&index_path).unwrap();
    let k1 = "aaaa0001";
    store
        .insert(
            k1,
            &zccache_artifact::ArtifactIndex::new(
                vec![
                    "libserde-abc123.rlib".into(),
                    "libserde-abc123.rmeta".into(),
                    "serde-abc123.d".into(),
                ],
                vec![100, 50, 10],
                vec![],
                vec![],
                0,
            ),
        )
        .unwrap();
    std::fs::write(artifact_dir.join(format!("{k1}_0")), b"serde-rlib").unwrap();
    std::fs::write(artifact_dir.join(format!("{k1}_1")), b"serde-rmeta").unwrap();
    std::fs::write(artifact_dir.join(format!("{k1}_2")), b"serde-d").unwrap();
    let k2 = "aaaa0002";
    store
        .insert(
            k2,
            &zccache_artifact::ArtifactIndex::new(
                vec!["libproc_macro2-def456.rlib".into()],
                vec![200],
                vec![],
                vec![],
                0,
            ),
        )
        .unwrap();
    std::fs::write(artifact_dir.join(format!("{k2}_0")), b"proc-macro2-rlib").unwrap();
    let k3 = "aaaa0003";
    store
        .insert(
            k3,
            &zccache_artifact::ArtifactIndex::new(
                vec!["libtokio-ghi789.rlib".into()],
                vec![300],
                vec![],
                vec![],
                0,
            ),
        )
        .unwrap();
    std::fs::write(artifact_dir.join(format!("{k3}_0")), b"tokio-rlib").unwrap();
    // "foo.o" has no crate-hash separator — exercises the lockfile-filter
    // pass-through path in the warm tests below.
    let k4 = "aaaa0004";
    store
        .insert(
            k4,
            &zccache_artifact::ArtifactIndex::new(
                vec!["foo.o".into()],
                vec![50],
                vec![],
                vec![],
                0,
            ),
        )
        .unwrap();
    std::fs::write(artifact_dir.join(format!("{k4}_0")), b"cpp-object").unwrap();
    drop(store);
    (index_path, artifact_dir)
}
/// Test fixture: write a minimal Cargo.lock in `dir` listing each crate at
/// version 1.0.0, returning the lockfile path.
fn write_lockfile(dir: &Path, crates: &[&str]) -> PathBuf {
    use std::fmt::Write as _;
    let lockfile = dir.join("Cargo.lock");
    let mut body = String::from("# This file is automatically @generated\nversion = 3\n\n");
    for name in crates {
        // Writing to a String cannot fail; ignore the Result.
        let _ = write!(
            body,
            "[[package]]\nname = \"{name}\"\nversion = \"1.0.0\"\n\n"
        );
    }
    std::fs::write(&lockfile, &body).unwrap();
    lockfile
}
// Lockfile parsing must extract package names and normalize hyphens to
// underscores (matching artifact file naming).
#[test]
fn parse_lockfile_extracts_crate_names() {
    let dir = tempfile::tempdir().unwrap();
    let lf = write_lockfile(dir.path(), &["serde", "proc-macro2", "unicode-ident"]);
    let crates = parse_lockfile_crates(&lf).unwrap();
    assert!(crates.contains("serde"));
    assert!(
        crates.contains("proc_macro2"),
        "hyphens should be underscores"
    );
    assert!(crates.contains("unicode_ident"));
    assert!(!crates.contains("tokio"), "tokio not in lockfile");
}
// Filename → crate matching: lib-prefixed rlib/rmeta, bare .d dep-info, and
// names without a hash separator (e.g. build_script_build) all match; crates
// absent from the allow-set do not.
#[test]
fn artifact_matches_lockfile_basic() {
    let mut allowed = std::collections::HashSet::new();
    allowed.insert("serde".to_string());
    allowed.insert("proc_macro2".to_string());
    assert!(artifact_matches_lockfile("libserde-abc123.rlib", &allowed));
    assert!(artifact_matches_lockfile("libserde-abc123.rmeta", &allowed));
    assert!(artifact_matches_lockfile("serde-abc123.d", &allowed));
    assert!(artifact_matches_lockfile(
        "libproc_macro2-def456.rlib",
        &allowed
    ));
    assert!(!artifact_matches_lockfile("libtokio-ghi789.rlib", &allowed));
    assert!(artifact_matches_lockfile("build_script_build", &allowed));
}
// With no lockfile filter (None), every entry in the store is restored,
// including crates that a filter would exclude and non-Rust objects.
#[test]
fn warm_without_lockfile_restores_everything() {
    let dir = tempfile::tempdir().unwrap();
    let (index_path, artifact_dir) = make_test_store(dir.path());
    let target_dir = dir.path().join("target");
    let (restored, _, _) =
        warm_target(&index_path, &artifact_dir, &target_dir, "debug", None).unwrap();
    let deps = target_dir.join("debug").join("deps");
    assert_eq!(restored, 6, "without lockfile: restore all 6 files");
    assert!(deps.join("libserde-abc123.rlib").exists());
    assert!(
        deps.join("libtokio-ghi789.rlib").exists(),
        "tokio restored without filter"
    );
    assert!(
        deps.join("foo.o").exists(),
        "C++ file restored without filter"
    );
}
// With a lockfile filter, only artifacts whose crate name appears in the
// lockfile are restored; names without a hash separator pass through.
#[test]
fn warm_with_lockfile_filters_to_matching_crates() {
    let dir = tempfile::tempdir().unwrap();
    let (index_path, artifact_dir) = make_test_store(dir.path());
    let target_dir = dir.path().join("target");
    let lockfile = write_lockfile(dir.path(), &["serde", "proc-macro2"]);
    let (restored, skipped, _) = warm_target(
        &index_path,
        &artifact_dir,
        &target_dir,
        "debug",
        Some(&lockfile),
    )
    .unwrap();
    let deps = target_dir.join("debug").join("deps");
    // 3 serde files + 1 proc_macro2 rlib + foo.o (pass-through) = 5.
    assert_eq!(restored, 5);
    assert!(deps.join("libserde-abc123.rlib").exists());
    assert!(deps.join("libproc_macro2-def456.rlib").exists());
    assert!(
        !deps.join("libtokio-ghi789.rlib").exists(),
        "tokio NOT in lockfile"
    );
    assert!(
        deps.join("foo.o").exists(),
        "no hash separator = allowed through"
    );
    assert!(skipped > 0, "tokio should be skipped");
}
// A crate that was removed from the lockfile must not be restored from a
// still-populated cache; only serde (3 files) plus pass-through foo.o remain.
#[test]
fn adversarial_crate_removed_from_lockfile() {
    let dir = tempfile::tempdir().unwrap();
    let (index_path, artifact_dir) = make_test_store(dir.path());
    let target_dir = dir.path().join("target");
    let lockfile = write_lockfile(dir.path(), &["serde"]);
    let (restored, _, _) = warm_target(
        &index_path,
        &artifact_dir,
        &target_dir,
        "debug",
        Some(&lockfile),
    )
    .unwrap();
    let deps = target_dir.join("debug").join("deps");
    assert!(deps.join("libserde-abc123.rlib").exists());
    assert!(
        !deps.join("libtokio-ghi789.rlib").exists(),
        "removed crate must NOT be restored"
    );
    assert_eq!(restored, 4);
}
// warm_target must not delete or overwrite pre-existing files in deps/ that it
// does not restore — stale files are left for cargo to ignore.
#[test]
fn adversarial_stale_file_in_target_from_previous_warm() {
    let dir = tempfile::tempdir().unwrap();
    let (index_path, artifact_dir) = make_test_store(dir.path());
    let target_dir = dir.path().join("target");
    let deps = target_dir.join("debug").join("deps");
    std::fs::create_dir_all(&deps).unwrap();
    // Simulate a leftover from a previous warm with a broader lockfile.
    std::fs::write(deps.join("libtokio-ghi789.rlib"), b"stale").unwrap();
    let lockfile = write_lockfile(dir.path(), &["serde"]);
    warm_target(
        &index_path,
        &artifact_dir,
        &target_dir,
        "debug",
        Some(&lockfile),
    )
    .unwrap();
    assert!(
        deps.join("libtokio-ghi789.rlib").exists(),
        "warm doesn't clean up stale files — cargo ignores them"
    );
    assert_eq!(
        std::fs::read(deps.join("libtokio-ghi789.rlib")).unwrap(),
        b"stale",
        "stale file content unchanged"
    );
}
// Two artifacts of the same crate with different build hashes (old and new
// version) both match the lockfile by crate name, so both are restored —
// cargo's own hashing disambiguates which one it actually uses.
#[test]
fn adversarial_version_bump_old_artifact_in_cache() {
    let dir = tempfile::tempdir().unwrap();
    let cache_dir = dir.path().join("cache");
    let artifact_dir = cache_dir.join("artifacts");
    let index_path = cache_dir.join("index.redb");
    std::fs::create_dir_all(&artifact_dir).unwrap();
    let store = zccache_artifact::ArtifactStore::open(&index_path).unwrap();
    let k_old = "bbbb0001";
    store
        .insert(
            k_old,
            &zccache_artifact::ArtifactIndex::new(
                vec!["libserde-old111.rlib".into()],
                vec![100],
                vec![],
                vec![],
                0,
            ),
        )
        .unwrap();
    std::fs::write(artifact_dir.join(format!("{k_old}_0")), b"old-serde").unwrap();
    let k_new = "bbbb0002";
    store
        .insert(
            k_new,
            &zccache_artifact::ArtifactIndex::new(
                vec!["libserde-new222.rlib".into()],
                vec![100],
                vec![],
                vec![],
                0,
            ),
        )
        .unwrap();
    std::fs::write(artifact_dir.join(format!("{k_new}_0")), b"new-serde").unwrap();
    drop(store);
    let target_dir = dir.path().join("target");
    let lockfile = write_lockfile(dir.path(), &["serde"]);
    let (restored, _, _) = warm_target(
        &index_path,
        &artifact_dir,
        &target_dir,
        "debug",
        Some(&lockfile),
    )
    .unwrap();
    let deps = target_dir.join("debug").join("deps");
    assert_eq!(restored, 2);
    assert!(deps.join("libserde-old111.rlib").exists());
    assert!(deps.join("libserde-new222.rlib").exists());
}
#[test]
fn adversarial_corrupted_cache_file() {
    // The index claims the artifact is 1000 bytes, but the blob on disk is
    // only 5. warm does not validate sizes: it restores whatever bytes exist
    // and reports no error.
    let tmp = tempfile::tempdir().unwrap();
    let cache = tmp.path().join("cache");
    let artifacts = cache.join("artifacts");
    let index = cache.join("index.redb");
    std::fs::create_dir_all(&artifacts).unwrap();
    let key = "cccc0001";
    {
        let store = zccache_artifact::ArtifactStore::open(&index).unwrap();
        store
            .insert(
                key,
                &zccache_artifact::ArtifactIndex::new(
                    vec!["libserde-abc123.rlib".into()],
                    vec![1000],
                    vec![],
                    vec![],
                    0,
                ),
            )
            .unwrap();
        // store dropped here, before warm opens the index
    }
    // Truncated payload: far shorter than the recorded 1000 bytes.
    std::fs::write(artifacts.join(format!("{key}_0")), b"short").unwrap();
    let target = tmp.path().join("target");
    let (restored, _, errors) =
        warm_target(&index, &artifacts, &target, "debug", None).unwrap();
    assert_eq!(restored, 1);
    assert_eq!(errors, 0);
    let deps_dir = target.join("debug").join("deps");
    assert_eq!(
        std::fs::read(deps_dir.join("libserde-abc123.rlib")).unwrap(),
        b"short"
    );
}
#[test]
fn adversarial_empty_lockfile() {
    // With an empty lockfile every hash-suffixed crate artifact is filtered
    // out; only files without a hash separator (foo.o) slip through.
    let tmp = tempfile::tempdir().unwrap();
    let (index, artifacts) = make_test_store(tmp.path());
    let target = tmp.path().join("target");
    let lock = write_lockfile(tmp.path(), &[]);
    let (restored, skipped, _) =
        warm_target(&index, &artifacts, &target, "debug", Some(&lock)).unwrap();
    assert_eq!(restored, 1, "only foo.o (no hash separator) passes");
    assert!(skipped > 0);
}
#[tokio::test]
// Formatting fix: `#[ignore]` was mashed onto the `async fn` line; it also
// carried no reason, so `cargo test` output gave no hint why it's skipped.
#[ignore = "integration-style: binds a real IPC endpoint and sleeps"]
async fn ensure_daemon_auto_recovers_on_comm_error() {
    // Stand up a bare IPC listener that accepts a connection but never
    // speaks the daemon protocol, simulating a protocol-mismatched daemon.
    let endpoint = zccache_ipc::unique_test_endpoint();
    let ep = endpoint.clone();
    let mut listener = zccache_ipc::IpcListener::bind(&ep).unwrap();
    let server = tokio::spawn(async move {
        let _ = listener.accept().await;
    });
    // Give the listener task a moment to be ready before probing it.
    tokio::time::sleep(std::time::Duration::from_millis(200)).await;
    let result = ensure_daemon(&endpoint).await;
    let _ = server.await;
    // ensure_daemon may legitimately fail here, but it must not punt to the
    // user with a "run `zccache stop`" instruction — it should auto-recover.
    if let Err(msg) = &result {
        assert!(
            !msg.contains("zccache stop"),
            "Bug #27: ensure_daemon requires manual `zccache stop` instead of \
            auto-recovering on protocol mismatch: {msg}"
        );
    }
}
}