use std::io::{BufRead, Write};
use std::path::{Path, PathBuf};
use std::time::Duration;
use anyhow::{Context, Result, anyhow, bail};
use clap::Subcommand;
use crate::cache::{CacheDir, CacheEntry, KconfigStatus};
use crate::runner::RunConfig;
use crate::scenario::{Scenario, flags};
use crate::workload::WorkType;
pub use crate::vmm::host_topology::CpuCap;
pub use crate::stats::{Dimension, derive_slicing_dims};
#[derive(Subcommand, Debug)]
pub enum KernelCommand {
#[command(long_about = KERNEL_LIST_LONG_ABOUT)]
List {
#[arg(long)]
json: bool,
#[arg(long)]
range: Option<String>,
},
Build {
#[arg(conflicts_with_all = ["source", "git"])]
version: Option<String>,
#[arg(long, conflicts_with_all = ["version", "git"])]
source: Option<PathBuf>,
#[arg(long, requires = "git_ref", conflicts_with_all = ["version", "source"])]
git: Option<String>,
#[arg(long = "ref", requires = "git")]
git_ref: Option<String>,
#[arg(long)]
force: bool,
#[arg(long)]
clean: bool,
#[arg(long, help = CPU_CAP_HELP)]
cpu_cap: Option<usize>,
},
Clean {
#[arg(long)]
keep: Option<usize>,
#[arg(long)]
force: bool,
#[arg(long, conflicts_with = "keep")]
corrupt_only: bool,
},
}
pub const KERNEL_HELP_NO_RAW: &str = "Kernel identifier: a source directory \
path (e.g. `../linux`), a version (`6.14.2`, or major.minor prefix \
`6.14` for latest patch), a cache key (see `kernel list`), a \
version range (`6.12..6.14`), or a git source (`git+URL#REF`). Raw \
image files are rejected. Source directories auto-build (can be slow \
on a fresh tree); versions auto-download from kernel.org on cache \
miss. The flag is REPEATABLE on `test`, `coverage`, and `llvm-cov` \
— passing multiple `--kernel` flags fans the gauntlet across every \
resolved kernel; each (test × scenario × topology × flags × kernel) \
tuple becomes a distinct nextest test case so nextest's parallelism, \
retries, and `-E` filtering work natively. Ranges expand to every \
`stable` and `longterm` release inside `[START, END]` inclusive \
(mainline / linux-next dropped). Git sources clone shallow at the \
ref and build once. In contrast, `ktstr shell` accepts a single \
kernel only — pass exactly one `--kernel`.";
pub const KERNEL_HELP_RAW_OK: &str = "Kernel identifier: a source directory \
path (e.g. `../linux`), a raw image file (`bzImage` / `Image`), a \
version (`6.14.2`, or major.minor prefix `6.14` for latest patch), \
or a cache key (see `kernel list`). Source directories auto-build \
(can be slow on a fresh tree); versions auto-download from kernel.org \
on cache miss. When absent, resolves via cache then filesystem, \
falling back to downloading the latest stable kernel. Ranges \
(`START..END`) and git sources (`git+URL#REF`) are not supported \
in this context; pass a single kernel.";
pub const CPU_CAP_HELP: &str = "Reserve exactly N host CPUs for the build or \
no-perf-mode shell. Integer ≥ 1; must be ≤ the calling process's \
sched_getaffinity cpuset size (the allowed CPU count, NOT the \
host's total online CPUs — under a cgroup-restricted runner the \
allowed set is typically smaller). When absent, 30% of the \
allowed CPUs are reserved (minimum 1). The planner walks whole \
LLCs in consolidation- and NUMA-aware order, filtered to the \
allowed cpuset, partial-taking the last LLC so `plan.cpus.len() \
== N` exactly. The flock set may cover more LLCs than strictly \
required (flock coordination is per-LLC even when the last LLC \
is only partially used for the CPU budget). Run `ktstr locks \
--watch 1s` to observe NUMA placement live. Under --cpu-cap, \
make's `-jN` parallelism matches the reserved CPU count and the \
kernel build runs inside a cgroup v2 sandbox that pins gcc/ld \
to the reserved CPUs + NUMA nodes; if the sandbox cannot be \
installed (missing cgroup v2, missing cpuset controller, \
permission denied), the build aborts rather than running \
without enforcement. Mutually exclusive with \
KTSTR_BYPASS_LLC_LOCKS=1. On `ktstr shell`, requires \
--no-perf-mode (perf-mode already holds every LLC exclusively). \
Also settable via KTSTR_CPU_CAP env var (CLI flag wins when both \
are present).";
macro_rules! eol_explanation_literal {
() => {
"(EOL) marks entries whose major.minor series is absent from \
kernel.org's current active releases. Suppressed when the \
active-release list cannot be fetched."
};
}
pub const EOL_EXPLANATION: &str = eol_explanation_literal!();
pub const KERNEL_LIST_LONG_ABOUT: &str = concat!(
eol_explanation_literal!(),
"\n\n",
"--json emits one JSON object with three top-level fields:\n",
"\n",
" current_ktstr_kconfig_hash hex digest of the kconfig fragment the\n",
" running binary was built with, for\n",
" stale-entry detection.\n",
" active_prefixes_fetch_error null on success; error string on\n",
" active-series fetch failure. When\n",
" non-null, every entry's `eol` is false\n",
" regardless of actual support status —\n",
" check this field before trusting `eol`.\n",
" entries array of per-entry objects. Each\n",
" element is either a VALID entry (full\n",
" field set) or a CORRUPT entry (only\n",
" `key`, `path`, `error`). Detect\n",
" corruption by the presence of `error`.\n",
"\n",
"Valid entry fields: key, path, version (nullable), source, arch,\n",
"built_at, ktstr_kconfig_hash (nullable), kconfig_status, eol,\n",
"config_hash (nullable), image_name, image_path, has_vmlinux,\n",
"vmlinux_stripped.\n",
"\n",
" path absolute path to the cache entry DIRECTORY.\n",
" image_path absolute path to the boot image file INSIDE\n",
" that directory. `path` points at the dir, not\n",
" the image — scripts that want the kernel\n",
" artifact to pass to qemu/vm-loaders should\n",
" read `image_path`, not join `path` with a\n",
" hardcoded filename.\n",
" kconfig_status one of \"matches\", \"stale\", \"untracked\"\n",
" (Display form of cache::KconfigStatus).\n",
" source internally-tagged on \"type\":\n",
" {\"type\": \"tarball\"}\n",
" {\"type\": \"git\", \"git_hash\": ?, \"ref\": ?}\n",
" {\"type\": \"local\", \"source_tree_path\": ?,\n",
" \"git_hash\": ?}\n",
" Dispatch on \"type\" before reading variant\n",
" fields.\n",
" eol true iff the entry's major.minor series is absent\n",
" from the active-prefix list. Meaningful only when\n",
" active_prefixes_fetch_error is null. Also false\n",
" whenever version is null (the missing-version\n",
" short-circuit in `entry_is_eol`).\n",
" has_vmlinux true iff the uncompressed vmlinux is cached\n",
" alongside the compressed image (required for\n",
" DWARF-driven probes).\n",
" vmlinux_stripped true iff the cached vmlinux came from a\n",
" successful strip pass. false marks the\n",
" raw-fallback path — a larger on-disk payload\n",
" indicating the strip pipeline errored on this\n",
" kernel; the entry is still usable but the\n",
" fallback is a signal to investigate. Meaningful\n",
" only when has_vmlinux is true (false otherwise).\n",
" config_hash CRC32 of the final merged .config; distinct\n",
" from ktstr_kconfig_hash which covers only the\n",
" ktstr fragment.\n",
"\n",
"When --range is set, the subcommand SWITCHES to range-preview\n",
"mode and emits a structurally different JSON shape — the cache\n",
"is not walked at all, only kernel.org's releases.json is fetched\n",
"to expand the inclusive range. The --json output is one object\n",
"with four top-level fields:\n",
"\n",
" range literal range string supplied to --range\n",
" (e.g. \"6.12..6.14\").\n",
" start parsed start endpoint\n",
" (MAJOR.MINOR[.PATCH][-rcN]).\n",
" end parsed end endpoint, same shape as start.\n",
" versions array of resolved version strings inside\n",
" [start, end] inclusive, ascending by\n",
" (major, minor, patch, rc) tuple. Stable and\n",
" longterm releases only — mainline / linux-next\n",
" are excluded by the moniker filter.\n",
"\n",
"Range-mode output never carries cache metadata\n",
"(no current_ktstr_kconfig_hash, no entries) — to inspect cached\n",
"kernels for one of the resolved versions, run `kernel list`\n",
"without --range. Consumers should dispatch on the presence of\n",
"the `range` key (range mode) versus `entries` key (list mode)\n",
"to branch the parse."
);
pub const DIRTY_TREE_CACHE_SKIP_HINT: &str = "skipping cache — working tree has uncommitted changes; \
commit or stash to enable caching";
pub const NON_GIT_TREE_CACHE_SKIP_HINT: &str = "skipping cache — source tree is not a git repository so dirty \
state cannot be detected; put the source under git, or replace \
`--source` with one of the content-keyed fetch modes that does \
not need dirty-state detection — `kernel build VERSION` \
(downloads the tarball from kernel.org) or \
`kernel build --git URL --ref REF` (shallow-clones the given \
ref) — to enable caching";
pub(crate) fn eol_legend_if_any(any_eol: bool) -> Option<&'static str> {
if any_eol { Some(EOL_EXPLANATION) } else { None }
}
pub const UNTRACKED_KCONFIG_EXPLANATION: &str = "(untracked kconfig) marks entries with no recorded ktstr.kconfig hash \
(pre-dates kconfig hash tracking). Rebuild with: kernel build --force VERSION";
pub(crate) fn untracked_legend_if_any(any_untracked: bool) -> Option<&'static str> {
if any_untracked {
Some(UNTRACKED_KCONFIG_EXPLANATION)
} else {
None
}
}
pub const STALE_KCONFIG_EXPLANATION: &str = "warning: entries marked (stale kconfig) were built against a different ktstr.kconfig. \
Rebuild with: kernel build --force <entry version>";
pub(crate) fn stale_legend_if_any(any_stale: bool) -> Option<&'static str> {
if any_stale {
Some(STALE_KCONFIG_EXPLANATION)
} else {
None
}
}
pub(crate) fn format_corrupt_footer(cache_root: &Path) -> String {
format!(
"warning: entries marked (corrupt) cannot be used — cached metadata is \
missing, malformed, or references a missing image. Inspect the entry \
directory under {} to remove it manually, or run \
`kernel clean --corrupt-only --force` which removes ONLY corrupt \
entries and leaves valid ones intact. For broader cleanup, \
`kernel clean --force` removes ALL cached entries (valid and corrupt \
alike); `kernel clean --keep N --force` preserves the N newest \
cached entries while removing the rest.",
cache_root.display(),
)
}
pub(crate) fn corrupt_footer_if_any(corrupt_count: usize, cache_root: &Path) -> Option<String> {
if corrupt_count == 0 {
return None;
}
let noun = if corrupt_count == 1 {
"entry"
} else {
"entries"
};
let summary = format!(
"{corrupt_count} corrupt {noun}. \
Run `cargo ktstr kernel clean --corrupt-only` to remove.",
);
let detail = format_corrupt_footer(cache_root);
Some(format!("{summary}\n{detail}"))
}
pub const EMBEDDED_KCONFIG: &str = crate::EMBEDDED_KCONFIG;
pub fn embedded_kconfig_hash() -> String {
crate::kconfig_hash()
}
fn version_prefix(version: &str) -> Option<String> {
let (major, rest) = version.split_once('.')?;
let minor_digits: String = rest.chars().take_while(|c| c.is_ascii_digit()).collect();
if minor_digits.is_empty() {
return None;
}
Some(format!("{major}.{minor_digits}"))
}
fn is_eol(version: &str, active_prefixes: &[String]) -> bool {
if active_prefixes.is_empty() {
return false;
}
let Some(prefix) = version_prefix(version) else {
return false;
};
!active_prefixes.iter().any(|p| p == &prefix)
}
pub(crate) fn entry_is_eol(entry: &CacheEntry, active_prefixes: &[String]) -> bool {
let v = entry.metadata.version.as_deref().unwrap_or("-");
v != "-" && is_eol(v, active_prefixes)
}
pub(crate) fn fetch_active_prefixes() -> anyhow::Result<Vec<String>> {
let releases = crate::fetch::cached_releases()?;
Ok(active_prefixes_from_releases(&releases))
}
fn active_prefixes_from_releases(releases: &[crate::fetch::Release]) -> Vec<String> {
let mut prefixes = Vec::new();
for r in releases {
if crate::fetch::is_skippable_release_moniker(&r.moniker) {
continue;
}
if let Some(prefix) = version_prefix(&r.version)
&& !prefixes.contains(&prefix)
{
prefixes.push(prefix);
}
}
prefixes
}
pub fn format_entry_row(
entry: &CacheEntry,
kconfig_hash: &str,
active_prefixes: &[String],
) -> String {
let meta = &entry.metadata;
let version = meta.version.as_deref().unwrap_or("-");
let source = meta.source.to_string();
let mut tags = String::new();
let status = entry.kconfig_status(kconfig_hash);
if !matches!(status, KconfigStatus::Matches) {
tags.push_str(&format!(" ({status} kconfig)"));
}
if entry_is_eol(entry, active_prefixes) {
tags.push_str(" (EOL)");
}
format!(
" {:<48} {:<12} {:<8} {:<7} {}{}",
entry.key, version, source, meta.arch, meta.built_at, tags,
)
}
pub fn kernel_list(json: bool) -> Result<()> {
kernel_list_inner(json, None)
}
pub fn kernel_list_range_preview(json: bool, range: &str) -> Result<()> {
kernel_list_inner(json, Some(range))
}
fn kernel_list_inner(json: bool, range: Option<&str>) -> Result<()> {
if let Some(spec) = range {
return run_kernel_list_range(json, spec);
}
let cache = CacheDir::new()?;
let entries = cache.list()?;
let kconfig_hash = embedded_kconfig_hash();
let (active_prefixes, active_prefixes_fetch_error): (Vec<String>, Option<String>) =
match fetch_active_prefixes() {
Ok(p) => (p, None),
Err(e) => {
let msg = format!("{e:#}");
eprintln!(
"kernel list: failed to fetch active kernel series ({msg}); \
EOL annotation disabled for this run. \
Check that kernel.org is reachable from this host.",
);
(Vec::new(), Some(msg))
}
};
if json {
let json_entries: Vec<serde_json::Value> = entries
.iter()
.map(|e| match e {
crate::cache::ListedEntry::Valid(entry) => {
let meta = &entry.metadata;
let eol = entry_is_eol(entry, &active_prefixes);
let kconfig_status = entry.kconfig_status(&kconfig_hash).to_string();
serde_json::json!({
"key": entry.key,
"path": entry.path.display().to_string(),
"version": meta.version,
"source": meta.source,
"arch": meta.arch,
"built_at": meta.built_at,
"ktstr_kconfig_hash": meta.ktstr_kconfig_hash,
"kconfig_status": kconfig_status,
"eol": eol,
"config_hash": meta.config_hash,
"image_name": meta.image_name,
"image_path": entry.image_path().display().to_string(),
"has_vmlinux": meta.has_vmlinux(),
"vmlinux_stripped": meta.vmlinux_stripped(),
})
}
crate::cache::ListedEntry::Corrupt { key, path, reason } => {
let error_kind = e.error_kind().unwrap_or("unknown");
serde_json::json!({
"key": key,
"path": path.display().to_string(),
"error": reason,
"error_kind": error_kind,
})
}
})
.collect();
let wrapper = serde_json::json!({
"current_ktstr_kconfig_hash": kconfig_hash,
"active_prefixes_fetch_error": active_prefixes_fetch_error,
"entries": json_entries,
});
println!("{}", serde_json::to_string_pretty(&wrapper)?);
return Ok(());
}
eprintln!("cache: {}", cache.root().display());
if entries.is_empty() {
println!("no cached kernels. Run `kernel build` to download and build a kernel.");
return Ok(());
}
println!(
" {:<48} {:<12} {:<8} {:<7} BUILT",
"KEY", "VERSION", "SOURCE", "ARCH"
);
let mut any_stale = false;
let mut any_untracked = false;
let mut any_eol = false;
let mut corrupt_count: usize = 0;
for listed in &entries {
match listed {
crate::cache::ListedEntry::Valid(entry) => {
let status = entry.kconfig_status(&kconfig_hash);
if status.is_stale() {
any_stale = true;
}
if status.is_untracked() {
any_untracked = true;
}
if entry_is_eol(entry, &active_prefixes) {
any_eol = true;
}
println!(
"{}",
format_entry_row(entry, &kconfig_hash, &active_prefixes)
);
}
crate::cache::ListedEntry::Corrupt { key, reason, .. } => {
corrupt_count += 1;
println!(" {key:<48} (corrupt: {reason})");
}
}
}
if let Some(legend) = eol_legend_if_any(any_eol) {
eprintln!("{legend}");
}
if let Some(legend) = untracked_legend_if_any(any_untracked) {
eprintln!("{legend}");
}
if let Some(legend) = stale_legend_if_any(any_stale) {
eprintln!("{legend}");
}
if let Some(footer) = corrupt_footer_if_any(corrupt_count, cache.root()) {
eprintln!("{footer}");
}
Ok(())
}
fn run_kernel_list_range(json: bool, spec: &str) -> Result<()> {
use crate::kernel_path::KernelId;
let id = KernelId::parse(spec);
let (start, end) = match &id {
KernelId::Range { start, end } => (start.clone(), end.clone()),
_ => {
bail!(
"kernel list --range: `{spec}` does not parse as a \
`START..END` range. Expected `MAJOR.MINOR[.PATCH][-rcN]..\
MAJOR.MINOR[.PATCH][-rcN]` (e.g. `6.12..6.14`)."
);
}
};
id.validate()
.map_err(|e| anyhow::anyhow!("kernel list --range {spec}: {e}"))?;
let versions = expand_kernel_range(&start, &end, "kernel list")?;
if json {
let payload = serde_json::json!({
"range": spec,
"start": start,
"end": end,
"versions": versions,
});
println!("{}", serde_json::to_string_pretty(&payload)?);
return Ok(());
}
for v in &versions {
println!("{v}");
}
Ok(())
}
fn partition_clean_candidates(
entries: &[crate::cache::ListedEntry],
keep: Option<usize>,
corrupt_only: bool,
) -> Vec<&crate::cache::ListedEntry> {
let skip = keep.unwrap_or(0);
let mut valid_kept = 0usize;
let mut to_remove: Vec<&crate::cache::ListedEntry> = Vec::new();
for listed in entries {
match listed {
crate::cache::ListedEntry::Valid(_) => {
if corrupt_only {
continue;
}
if valid_kept < skip {
valid_kept += 1;
continue;
}
to_remove.push(listed);
}
crate::cache::ListedEntry::Corrupt { .. } => {
to_remove.push(listed);
}
}
}
to_remove
}
pub fn kernel_clean(keep: Option<usize>, force: bool, corrupt_only: bool) -> Result<()> {
let cache = CacheDir::new()?;
let entries = cache.list()?;
if entries.is_empty() {
println!("nothing to clean");
return Ok(());
}
let kconfig_hash = embedded_kconfig_hash();
let to_remove = partition_clean_candidates(&entries, keep, corrupt_only);
if to_remove.is_empty() {
println!("nothing to clean");
return Ok(());
}
if !force {
use std::io::IsTerminal;
if !std::io::stdin().is_terminal() {
bail!("confirmation requires a terminal. Use --force to skip.");
}
let active_prefixes = match fetch_active_prefixes() {
Ok(p) => p,
Err(e) => {
eprintln!(
"kernel clean: failed to fetch active kernel series ({e:#}); \
EOL annotation disabled for this run. \
Check that kernel.org is reachable from this host."
);
Vec::new()
}
};
println!("the following entries will be removed:");
for listed in &to_remove {
match listed {
crate::cache::ListedEntry::Valid(entry) => {
println!(
"{}",
format_entry_row(entry, &kconfig_hash, &active_prefixes)
);
}
crate::cache::ListedEntry::Corrupt { key, reason, .. } => {
println!(" {key:<48} (corrupt: {reason})");
}
}
}
eprint!("remove {} entries? [y/N] ", to_remove.len());
std::io::stderr().flush()?;
let mut answer = String::new();
std::io::stdin().lock().read_line(&mut answer)?;
if !matches!(answer.trim(), "y" | "Y") {
println!("aborted");
return Ok(());
}
}
let total = to_remove.len();
let mut removed = 0usize;
let mut last_err: Option<String> = None;
for listed in &to_remove {
match std::fs::remove_dir_all(listed.path()) {
Ok(()) => removed += 1,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
removed += 1;
}
Err(e) => {
last_err = Some(format!("remove {}: {e}", listed.key()));
}
}
}
println!("removed {removed} cached kernel(s).");
if let Some(err) = last_err {
bail!("removed {removed} of {total} entries; {err}");
}
Ok(())
}
pub fn run_make(kernel_dir: &Path, args: &[&str]) -> Result<()> {
const RUN_MAKE_TIMEOUT: Duration = Duration::from_secs(30 * 60);
const POLL_INTERVAL: Duration = Duration::from_millis(100);
let child = std::process::Command::new("make")
.args(args)
.current_dir(kernel_dir)
.spawn()
.with_context(|| format!("spawn make {}", args.join(" ")))?;
poll_child_with_timeout(
child,
RUN_MAKE_TIMEOUT,
POLL_INTERVAL,
&format!("make {}", args.join(" ")),
)
}
fn poll_child_with_timeout(
mut child: std::process::Child,
timeout: Duration,
poll_interval: Duration,
label: &str,
) -> Result<()> {
let deadline = std::time::Instant::now() + timeout;
loop {
match child.try_wait() {
Ok(Some(status)) => {
anyhow::ensure!(status.success(), "{label} failed");
return Ok(());
}
Ok(None) => {
if std::time::Instant::now() >= deadline {
let _ = child.kill();
let _ = child.wait();
bail!("{label} timed out after {timeout:?}; child killed");
}
std::thread::sleep(poll_interval);
}
Err(e) => {
let _ = child.kill();
let _ = child.wait();
return Err(e).with_context(|| format!("wait on {label}"));
}
}
}
}
fn all_fragment_lines_present(fragment: &str, config: &str) -> bool {
let existing: std::collections::HashSet<&str> = config.lines().map(str::trim).collect();
fragment
.lines()
.map(str::trim)
.filter(|t| !t.is_empty())
.all(|t| existing.contains(t))
}
pub fn configure_kernel(kernel_dir: &Path, fragment: &str) -> Result<()> {
let config_path = kernel_dir.join(".config");
if !config_path.exists() {
run_make(kernel_dir, &["defconfig"])?;
}
let config_content = std::fs::read_to_string(&config_path)?;
if all_fragment_lines_present(fragment, &config_content) {
return Ok(());
}
let mut config = std::fs::OpenOptions::new()
.append(true)
.open(&config_path)?;
std::io::Write::write_all(&mut config, fragment.as_bytes())?;
run_make(kernel_dir, &["olddefconfig"])?;
Ok(())
}
fn drain_lines_lossy(
mut reader: impl BufRead,
mut on_line: impl FnMut(&str),
) -> std::io::Result<Vec<String>> {
let mut captured = Vec::new();
let mut buf = Vec::new();
loop {
buf.clear();
let n = reader.read_until(b'\n', &mut buf)?;
if n == 0 {
break;
}
let mut slice: &[u8] = &buf;
if let Some(rest) = slice.strip_suffix(b"\n") {
slice = rest;
if let Some(rest) = slice.strip_suffix(b"\r") {
slice = rest;
}
}
let line = String::from_utf8_lossy(slice).into_owned();
on_line(&line);
captured.push(line);
}
Ok(captured)
}
pub fn run_make_with_output(
kernel_dir: &Path,
args: &[&str],
spinner: Option<&Spinner>,
) -> Result<()> {
let (read_fd, write_fd) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)
.context("create pipe for merged make stdout+stderr")?;
let write_fd_err = write_fd
.try_clone()
.context("clone pipe write end for stderr")?;
let mut child = std::process::Command::new("make")
.args(args)
.current_dir(kernel_dir)
.stdout(std::process::Stdio::from(write_fd))
.stderr(std::process::Stdio::from(write_fd_err))
.spawn()
.with_context(|| format!("spawn make {}", args.join(" ")))?;
let reader = std::io::BufReader::new(std::fs::File::from(read_fd));
let captured = match drain_lines_lossy(reader, |line| {
if let Some(sp) = spinner {
sp.println(line);
}
}) {
Ok(v) => v,
Err(e) => {
child.kill().ok();
child.wait().ok();
return Err(e).context("read merged make stdout+stderr");
}
};
let status = child.wait()?;
if !status.success() {
for line in &captured {
eprintln!("{line}");
}
bail!("make {} failed", args.join(" "));
}
Ok(())
}
pub fn make_kernel_with_output(
kernel_dir: &Path,
spinner: Option<&Spinner>,
jobs_override: Option<usize>,
) -> Result<()> {
let nproc = jobs_override.unwrap_or_else(|| {
std::thread::available_parallelism()
.map(|n| n.get())
.unwrap_or(1)
});
let args = build_make_args(nproc);
let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect();
run_make_with_output(kernel_dir, &arg_refs, spinner)
}
pub fn resolve_flags(flag_arg: Option<Vec<String>>) -> Result<Option<Vec<&'static str>>> {
match flag_arg {
Some(fs) => {
let mut resolved = Vec::new();
for f in &fs {
match flags::from_short_name(f) {
Some(name) => resolved.push(name),
None => bail!(
"unknown flag: '{f}'. valid flags: {}",
flags::ALL.join(", "),
),
}
}
Ok(Some(resolved))
}
None => Ok(None),
}
}
pub fn parse_work_type(name: Option<&str>) -> Result<Option<WorkType>> {
match name {
Some(name) => match WorkType::from_name(name) {
Some(wt) => Ok(Some(wt)),
None => bail!(
"unknown work type: '{name}'. valid types: {}",
WorkType::ALL_NAMES.join(", "),
),
},
None => Ok(None),
}
}
pub fn parse_topology_string(topology: &str) -> Result<(u32, u32, u32, u32)> {
let parts: Vec<&str> = topology.split(',').collect();
if parts.len() != 4 {
bail!(
"invalid topology '{topology}': expected 'numa_nodes,llcs,cores,threads' \
(e.g. '1,2,4,1')"
);
}
let fields: [(&str, &str); 4] = [
("numa_nodes", parts[0]),
("llcs", parts[1]),
("cores", parts[2]),
("threads", parts[3]),
];
let mut vals: [u32; 4] = [0; 4];
for (i, (name, raw)) in fields.iter().enumerate() {
vals[i] = raw
.parse::<u32>()
.map_err(|_| anyhow::anyhow!("invalid {name} value: '{raw}'"))?;
}
let [numa_nodes, llcs, cores, threads] = vals;
if numa_nodes == 0 || llcs == 0 || cores == 0 || threads == 0 {
bail!("invalid topology '{topology}': all values must be >= 1");
}
Ok((numa_nodes, llcs, cores, threads))
}
pub fn filter_scenarios<'a>(
scenarios: &'a [Scenario],
filter: Option<&str>,
) -> Result<Vec<&'a Scenario>> {
let refs: Vec<&Scenario> = scenarios
.iter()
.filter(|s| filter.is_none_or(|f| s.name.contains(f)))
.collect();
if refs.is_empty() {
let hint = filter
.and_then(suggest_closest_scenario_name)
.map(|s| format!(" Did you mean `{s}`?"))
.unwrap_or_default();
bail!("no scenarios matched filter.{hint} Run 'ktstr list' to see available scenarios.",);
}
Ok(refs)
}
#[allow(clippy::too_many_arguments)]
pub fn build_run_config(
parent_cgroup: String,
duration: u64,
workers: usize,
active_flags: Option<Vec<&'static str>>,
repro: bool,
probe_stack: Option<String>,
auto_repro: bool,
kernel_dir: Option<String>,
work_type_override: Option<WorkType>,
) -> RunConfig {
RunConfig {
parent_cgroup,
duration: Duration::from_secs(duration),
workers_per_cgroup: workers,
active_flags,
repro,
probe_stack,
auto_repro,
kernel_dir,
work_type_override,
..Default::default()
}
}
pub fn has_sched_ext(kernel_dir: &std::path::Path) -> bool {
let config = kernel_dir.join(".config");
std::fs::read_to_string(config)
.map(|s| s.lines().any(|l| l == "CONFIG_SCHED_CLASS_EXT=y"))
.unwrap_or(false)
}
const VALIDATE_CONFIG_CRITICAL: &[(&str, &str)] = &[
(
"CONFIG_SCHED_CLASS_EXT",
"depends on CONFIG_DEBUG_INFO_BTF — ensure pahole >= 1.16 is installed (dwarves package)",
),
(
"CONFIG_DEBUG_INFO_BTF",
"requires pahole >= 1.16 (dwarves package)",
),
("CONFIG_BPF_SYSCALL", "required for BPF program loading"),
(
"CONFIG_FTRACE",
"gate for all tracing infrastructure — arm64 defconfig disables it, \
silently dropping KPROBE_EVENTS and BPF_EVENTS",
),
(
"CONFIG_KPROBE_EVENTS",
"required for ktstr probe pipeline (depends on FTRACE + KPROBES)",
),
(
"CONFIG_BPF_EVENTS",
"required for BPF kprobe/tracepoint attachment (depends on KPROBE_EVENTS + PERF_EVENTS)",
),
];
pub fn validate_kernel_config(kernel_dir: &std::path::Path) -> Result<()> {
let config_path = kernel_dir.join(".config");
let config = std::fs::read_to_string(&config_path)
.with_context(|| format!("read {}", config_path.display()))?;
let existing: std::collections::HashSet<&str> = config.lines().map(str::trim).collect();
let mut missing = Vec::new();
for &(option, hint) in VALIDATE_CONFIG_CRITICAL {
let enabled = format!("{option}=y");
if !existing.contains(enabled.as_str()) {
missing.push((option, hint));
}
}
if !missing.is_empty() {
let mut msg =
String::from("kernel build completed but critical config options are missing:\n");
for (option, hint) in &missing {
msg.push_str(&format!(" {option} not set — {hint}\n"));
}
msg.push_str(
"\nThe kernel build system silently disables options whose dependencies \
are not met. Install missing tools and rebuild with --force.",
);
bail!("{msg}");
}
Ok(())
}
#[non_exhaustive]
pub struct KernelBuildResult {
pub entry: Option<crate::cache::CacheEntry>,
pub image_path: std::path::PathBuf,
pub post_build_is_dirty: bool,
}
#[derive(Debug)]
pub(crate) struct BuildReservation {
pub(crate) _sandbox: Option<crate::vmm::cgroup_sandbox::BuildSandbox>,
pub(crate) plan: Option<crate::vmm::host_topology::LlcPlan>,
pub(crate) make_jobs: Option<usize>,
}
pub(crate) fn acquire_build_reservation(
cli_label: &str,
cpu_cap: Option<crate::vmm::host_topology::CpuCap>,
) -> Result<BuildReservation> {
let bypass = std::env::var("KTSTR_BYPASS_LLC_LOCKS")
.ok()
.is_some_and(|v| !v.is_empty());
let plan: Option<crate::vmm::host_topology::LlcPlan> = if bypass {
if cpu_cap.is_some() {
anyhow::bail!(
"{cli_label}: --cpu-cap conflicts with KTSTR_BYPASS_LLC_LOCKS=1; \
unset one of them. --cpu-cap is a resource contract; bypass \
disables the contract entirely."
);
}
None
} else if let Ok(host_topo) = crate::vmm::host_topology::HostTopology::from_sysfs() {
let test_topo = crate::topology::TestTopology::from_system()?;
let acquired_plan =
crate::vmm::host_topology::acquire_llc_plan(&host_topo, &test_topo, cpu_cap)?;
crate::vmm::host_topology::warn_if_cross_node_spill(&acquired_plan, &host_topo);
Some(acquired_plan)
} else {
if cpu_cap.is_some() {
anyhow::bail!(
"{cli_label}: --cpu-cap set but host LLC topology unreadable \
from sysfs — cannot enforce the resource budget. Run on a \
host with /sys/devices/system/cpu populated, or drop \
--cpu-cap to build without enforcement."
);
}
tracing::warn!(
"{cli_label}: could not read host LLC topology from sysfs; \
skipping kernel-build LLC reservation. Concurrent perf-mode \
runs on this host will NOT be serialized against this build"
);
None
};
let sandbox: Option<crate::vmm::cgroup_sandbox::BuildSandbox> = match plan.as_ref() {
Some(p) => Some(crate::vmm::cgroup_sandbox::BuildSandbox::try_create(
&p.cpus,
&p.mems,
cpu_cap.is_some(),
)?),
None => None,
};
let make_jobs = plan
.as_ref()
.map(crate::vmm::host_topology::make_jobs_for_plan);
Ok(BuildReservation {
plan,
_sandbox: sandbox,
make_jobs,
})
}
pub(crate) fn acquire_source_tree_lock(
canonical: &Path,
cli_label: &str,
) -> Result<std::os::fd::OwnedFd> {
use anyhow::Context;
let path_hash = crate::fetch::canonical_path_hash(canonical);
let cache = crate::cache::CacheDir::new()
.with_context(|| "open cache root for source-tree lockfile placement")?;
cache
.ensure_lock_dir()
.with_context(|| "create cache `.locks/` subdir for source-tree lock")?;
let lock_path = cache.lock_path(&format!("source-{path_hash}"));
let fd = crate::flock::try_flock(&lock_path, crate::flock::FlockMode::Exclusive)
.with_context(|| format!("acquire source-tree flock {}", lock_path.display()))?
.ok_or_else(|| {
let holders = crate::flock::read_holders(&lock_path).unwrap_or_default();
let holder_text = if holders.is_empty() {
String::new()
} else {
format!("\n{}", crate::flock::format_holder_list(&holders))
};
anyhow::anyhow!(
"{cli_label}: source tree {} is locked by a concurrent ktstr build \
(lockfile {}). Wait for the peer to finish, or run \
`cargo ktstr locks` to identify it.{holder_text}",
canonical.display(),
lock_path.display(),
)
})?;
Ok(fd)
}
pub fn kernel_build_pipeline(
acquired: &crate::fetch::AcquiredSource,
cache: &crate::cache::CacheDir,
cli_label: &str,
clean: bool,
is_local_source: bool,
cpu_cap: Option<crate::vmm::host_topology::CpuCap>,
) -> Result<KernelBuildResult> {
let source_dir = &acquired.source_dir;
let (arch, image_name) = crate::fetch::arch_info();
let BuildReservation {
plan: _plan,
_sandbox,
make_jobs,
} = acquire_build_reservation(cli_label, cpu_cap)?;
let _source_lock = if is_local_source
&& std::env::var("KTSTR_BYPASS_LLC_LOCKS")
.ok()
.is_none_or(|v| v.is_empty())
{
Some(acquire_source_tree_lock(source_dir, cli_label)?)
} else {
None
};
if clean {
if !is_local_source {
eprintln!(
"{cli_label}: --clean is only meaningful with --source (downloaded sources start clean)"
);
} else {
eprintln!("{cli_label}: make mrproper");
run_make(source_dir, &["mrproper"])?;
}
}
if !has_sched_ext(source_dir) {
Spinner::with_progress("Configuring kernel...", "Kernel configured", |_| {
configure_kernel(source_dir, EMBEDDED_KCONFIG)
})?;
}
Spinner::with_progress("Building kernel...", "Kernel built", |sp| {
make_kernel_with_output(source_dir, Some(sp), make_jobs)
})?;
validate_kernel_config(source_dir)?;
if !acquired.is_temp {
Spinner::with_progress(
"Generating compile_commands.json...",
"compile_commands.json generated",
|sp| run_make_with_output(source_dir, &["compile_commands.json"], Some(sp)),
)?;
}
let image_path = crate::kernel_path::find_image_in_dir(source_dir)
.ok_or_else(|| anyhow::anyhow!("no kernel image found in {}", source_dir.display()))?;
let vmlinux_path = source_dir.join("vmlinux");
let vmlinux_ref = if vmlinux_path.exists() {
let orig_mb = std::fs::metadata(&vmlinux_path)
.map(|m| m.len() as f64 / (1024.0 * 1024.0))
.unwrap_or(0.0);
eprintln!("{cli_label}: caching vmlinux ({orig_mb:.0} MB, will be stripped)");
Some(vmlinux_path.as_path())
} else {
eprintln!("{cli_label}: warning: vmlinux not found, BTF will not be cached");
None
};
if acquired.is_dirty {
eprintln!("{cli_label}: kernel built at {}", image_path.display());
let hint = if acquired.is_git {
DIRTY_TREE_CACHE_SKIP_HINT
} else {
NON_GIT_TREE_CACHE_SKIP_HINT
};
eprintln!("{cli_label}: {hint}");
return Ok(KernelBuildResult {
entry: None,
image_path,
post_build_is_dirty: true,
});
}
if is_local_source {
match crate::fetch::inspect_local_source_state(source_dir) {
Ok(post) => {
let hash_changed = post.short_hash
!= acquired
.kernel_source
.as_local_git_hash()
.map(str::to_string);
if post.is_dirty || hash_changed {
eprintln!(
"{cli_label}: source tree changed during build \
(acquire-time dirty={}, post-build dirty={}; \
hash_changed={hash_changed}); skipping cache store \
to avoid recording a stale identity. Re-run after \
the working tree settles to populate the cache.",
acquired.is_dirty, post.is_dirty,
);
return Ok(KernelBuildResult {
entry: None,
image_path,
post_build_is_dirty: true,
});
}
}
Err(e) => {
tracing::warn!(
cli_label = cli_label,
err = %format!("{e:#}"),
"post-build dirty re-check failed; proceeding to cache store",
);
}
}
}
let config_path = source_dir.join(".config");
let config_hash = if config_path.exists() {
let data = std::fs::read(&config_path)?;
Some(format!("{:08x}", crc32fast::hash(&data)))
} else {
None
};
let kconfig_hash = embedded_kconfig_hash();
let source_vmlinux_stat = vmlinux_ref.and_then(|v| {
let stat = std::fs::metadata(v).ok()?;
let mtime_secs = stat.modified().ok().and_then(|t| {
t.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_secs() as i64)
.ok()
.or_else(|| {
std::time::UNIX_EPOCH
.duration_since(t)
.ok()
.map(|d| -(d.as_secs() as i64))
})
})?;
Some((stat.len(), mtime_secs))
});
let mut metadata = crate::cache::KernelMetadata::new(
acquired.kernel_source.clone(),
arch.to_string(),
image_name.to_string(),
crate::test_support::now_iso8601(),
)
.with_version(acquired.version.clone())
.with_config_hash(config_hash)
.with_ktstr_kconfig_hash(Some(kconfig_hash));
if is_local_source && let Some((size, mtime_secs)) = source_vmlinux_stat {
metadata = metadata.with_source_vmlinux_stat(size, mtime_secs);
}
let mut artifacts = crate::cache::CacheArtifacts::new(&image_path);
if let Some(v) = vmlinux_ref {
artifacts = artifacts.with_vmlinux(v);
}
let entry = match cache.store(&acquired.cache_key, &artifacts, &metadata) {
Ok(entry) => {
success(&format!("\u{2713} Kernel cached: {}", acquired.cache_key));
eprintln!("{cli_label}: image: {}", entry.image_path().display());
if crate::remote_cache::is_enabled() {
crate::remote_cache::remote_store(&entry, cli_label);
}
Some(entry)
}
Err(e) => {
warn(&format!("{cli_label}: cache store failed: {e:#}"));
None
}
};
Ok(KernelBuildResult {
entry,
image_path,
post_build_is_dirty: false,
})
}
fn build_make_args(nproc: usize) -> Vec<String> {
vec![format!("-j{nproc}"), "KCFLAGS=-Wno-error".into()]
}
pub fn print_stats_report() -> Option<String> {
let dir = match std::env::var("KTSTR_SIDECAR_DIR") {
Ok(d) if !d.is_empty() => Some(std::path::PathBuf::from(d)),
_ => crate::test_support::newest_run_dir(),
};
let report = dir
.as_deref()
.map(|d| crate::test_support::analyze_sidecars(Some(d)))
.filter(|r| !r.is_empty());
if report.is_none() {
eprintln!("cargo ktstr: no sidecar data found (skipped)");
}
report
}
pub fn list_runs() -> Result<()> {
crate::stats::list_runs()
}
pub fn list_metrics(json: bool) -> Result<String> {
crate::stats::list_metrics(json)
}
pub fn list_values(json: bool, dir: Option<&Path>) -> Result<String> {
crate::stats::list_values(json, dir)
}
pub fn compare_partitions(
filter_a: &RowFilter,
filter_b: &RowFilter,
filter: Option<&str>,
policy: &ComparisonPolicy,
dir: Option<&Path>,
no_average: bool,
) -> Result<i32> {
crate::stats::compare_partitions(filter_a, filter_b, filter, policy, dir, no_average)
}
pub use crate::stats::{AveragedGroup, ComparisonPolicy, RowFilter};
pub fn show_host() -> String {
crate::host_context::collect_host_context().format_human()
}
pub fn restore_sigpipe_default() {
unsafe {
libc::signal(libc::SIGPIPE, libc::SIG_DFL);
}
}
fn suggest_closest_run_key(query: &str, root: &Path) -> Option<String> {
let threshold = std::cmp::max(3, query.len() / 3);
let entries = std::fs::read_dir(root).ok()?;
let mut best: Option<(usize, String)> = None;
for entry in entries.flatten() {
if !crate::test_support::is_run_directory(&entry) {
continue;
}
let name = match entry.file_name().to_str() {
Some(s) => s.to_string(),
None => continue,
};
let d = strsim::levenshtein(query, &name);
if d > threshold {
continue;
}
match best {
Some((best_d, _)) if best_d <= d => continue,
_ => best = Some((d, name)),
}
}
best.map(|(_, name)| name)
}
pub fn show_run_host(run: &str, dir: Option<&Path>) -> Result<String> {
let root: std::path::PathBuf = match dir {
Some(d) => d.to_path_buf(),
None => crate::test_support::runs_root(),
};
let run_dir = root.join(run);
if !run_dir.exists() {
let suggestion = suggest_closest_run_key(run, &root)
.map(|name| format!(" Did you mean `{name}`?"))
.unwrap_or_default();
bail!(
"run '{run}' not found under {}.{suggestion} \
Run `cargo ktstr stats list` to enumerate available run keys.",
root.display(),
);
}
let sidecars = crate::test_support::collect_sidecars(&run_dir);
if sidecars.is_empty() {
bail!("run '{run}' has no sidecar data");
}
let host = sidecars
.iter()
.find_map(|sc| sc.host.as_ref())
.ok_or_else(|| {
anyhow!(
"run '{run}' has {} sidecar(s) but none carries a populated \
host context; this usually means the run predates host-context \
enrichment. Re-run the test to produce a sidecar with the \
current schema.",
sidecars.len(),
)
})?;
Ok(host.format_human())
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum NoneClassification {
Expected,
Actionable,
}
impl NoneClassification {
fn as_str(self) -> &'static str {
match self {
Self::Expected => "expected",
Self::Actionable => "actionable",
}
}
}
struct NoneCatalogEntry {
field: &'static str,
classification: NoneClassification,
causes: &'static [&'static str],
fix: Option<&'static str>,
}
const SIDECAR_NONE_CATALOG: &[NoneCatalogEntry] = &[
NoneCatalogEntry {
field: "scheduler_commit",
classification: NoneClassification::Expected,
causes: &["no SchedulerSpec variant currently exposes a reliable \
commit source — reserved on the schema for future \
enrichment (e.g. --version probe or ELF-note read on \
the resolved scheduler binary)"],
fix: None,
},
NoneCatalogEntry {
field: "project_commit",
classification: NoneClassification::Actionable,
causes: &[
"current_dir() could not be resolved at sidecar-write \
time (process cwd was rmdir'd while alive)",
"test process cwd was not inside any git repository",
"HEAD could not be read (unborn HEAD on a fresh \
`git init` with zero commits, or a corrupt repository)",
],
fix: Some(
"run from inside a git-tracked source tree with at \
least one commit",
),
},
NoneCatalogEntry {
field: "payload",
classification: NoneClassification::Expected,
causes: &["test declared no binary payload (scheduler-only test \
or pure-scenario test that never invokes \
ctx.payload(...))"],
fix: None,
},
NoneCatalogEntry {
field: "monitor",
classification: NoneClassification::Actionable,
causes: &[
"host-only test path: monitor loop never started",
"early VM failure: monitor loop terminated before \
producing samples",
"sample collection produced no valid data",
],
fix: None,
},
NoneCatalogEntry {
field: "kvm_stats",
classification: NoneClassification::Actionable,
causes: &[
"host-only test path: VM did not run",
"KVM stats were unavailable on this host (e.g. KVM \
module not loaded, /dev/kvm permissions, or kernel \
missing the stats interface)",
],
fix: None,
},
NoneCatalogEntry {
field: "kernel_version",
classification: NoneClassification::Actionable,
causes: &[
"host-only test path: no kernel under test",
"neither cache metadata nor `include/config/kernel.release` \
yielded a version string",
],
fix: None,
},
NoneCatalogEntry {
field: "kernel_commit",
classification: NoneClassification::Actionable,
causes: &[
"KTSTR_KERNEL is unset or empty",
"kernel source is a Tarball or Git transient cache \
entry (no on-disk source tree to probe)",
"resolved kernel directory is not a git repository \
(gix::open failed)",
"HEAD cannot be read (unborn HEAD on a fresh `git init` \
with zero commits)",
"gix probe failed for another reason — metadata, not \
a gate",
],
fix: Some(
"set KTSTR_KERNEL to a local kernel source tree that \
is a git repository (e.g. a git clone of the kernel)",
),
},
NoneCatalogEntry {
field: "host",
classification: NoneClassification::Actionable,
causes: &[
"test-fixture path: not the production sidecar \
writer (production writers always populate `host`)",
"pre-enrichment archive: sidecar predates the \
host-context landing — re-run the test to \
regenerate under the current schema",
],
fix: Some(
"for pre-enrichment archives, re-run the test to \
regenerate under the current schema; test-fixture \
sidecars are not production runs and cannot be \
recovered by re-running",
),
},
NoneCatalogEntry {
field: "cleanup_duration_ms",
classification: NoneClassification::Actionable,
causes: &[
"host-only / host-only-stub test path: no VM teardown \
window to time",
"run was killed by the watchdog before \
`KtstrVm::collect_results` returned",
],
fix: None,
},
NoneCatalogEntry {
field: "run_source",
classification: NoneClassification::Actionable,
causes: &["pre-rename archive: sidecar carries the old `source` \
key which the current schema drops as an unknown \
field, leaving `run_source` to fall back to None via \
serde's tolerate-absence rule. Re-run the test to \
regenerate under the new schema, or rename the key \
in-place before deserialize"],
fix: Some(
"re-run the test to regenerate, or rename the on-disk \
`source` key to `run_source`",
),
},
];
fn project_optional_fields(sc: &crate::test_support::SidecarResult) -> [(&'static str, bool); 10] {
[
("scheduler_commit", sc.scheduler_commit.is_some()),
("project_commit", sc.project_commit.is_some()),
("payload", sc.payload.is_some()),
("monitor", sc.monitor.is_some()),
("kvm_stats", sc.kvm_stats.is_some()),
("kernel_version", sc.kernel_version.is_some()),
("kernel_commit", sc.kernel_commit.is_some()),
("host", sc.host.is_some()),
("cleanup_duration_ms", sc.cleanup_duration_ms.is_some()),
("run_source", sc.run_source.is_some()),
]
}
struct WalkStats {
walked: usize,
valid: usize,
errors: Vec<crate::test_support::SidecarParseError>,
io_errors: Vec<crate::test_support::SidecarIoError>,
}
fn count_sidecar_files(run_dir: &Path) -> usize {
let mut count = 0usize;
let entries = match std::fs::read_dir(run_dir) {
Ok(e) => e,
Err(_) => return 0,
};
let mut subdirs = Vec::new();
for entry in entries.flatten() {
let path = entry.path();
if path.is_dir() {
subdirs.push(path);
continue;
}
if crate::test_support::is_sidecar_filename(&path) {
count += 1;
}
}
for sub in subdirs {
if let Ok(entries) = std::fs::read_dir(&sub) {
for entry in entries.flatten() {
if crate::test_support::is_sidecar_filename(&entry.path()) {
count += 1;
}
}
}
}
count
}
fn walk_run_with_stats(run_dir: &Path) -> (Vec<crate::test_support::SidecarResult>, WalkStats) {
let walked = count_sidecar_files(run_dir);
let (sidecars, errors, io_errors) = crate::test_support::collect_sidecars_with_errors(run_dir);
let valid = sidecars.len();
(
sidecars,
WalkStats {
walked,
valid,
errors,
io_errors,
},
)
}
pub fn explain_sidecar(run: &str, dir: Option<&Path>, json: bool) -> Result<String> {
if run.is_empty() {
bail!(
"run argument must not be empty. The run argument is \
joined onto the run-root via `Path::join` and must \
contain at least one `Normal` path component — i.e. \
must not be empty, `.`, `..`, or absolute (e.g. a \
typical run key shape: `6.14-abc1234` or \
`6.14-abc1234-dirty`). To point at a different pool \
root, use `--dir`. Run `cargo ktstr stats list` to \
enumerate available run keys.",
);
}
for component in std::path::Path::new(run).components() {
match component {
std::path::Component::CurDir
| std::path::Component::ParentDir
| std::path::Component::RootDir
| std::path::Component::Prefix(_) => {
bail!(
"run '{run}' contains pool-root-aliasing or \
path-traversal components (`.`, `..`, or absolute \
path). The run argument is joined onto the \
run-root via `Path::join` and must contain only \
`Normal` path components — no `.`, `..`, or \
absolute prefix (e.g. a typical run key shape: \
`6.14-abc1234` or `6.14-abc1234-dirty`; \
multi-component paths like `gauntlet/job-1` are \
also accepted). To point at a different pool \
root, use `--dir`. Run `cargo ktstr stats list` \
to enumerate available run keys.",
);
}
std::path::Component::Normal(_) => {}
}
}
let root: std::path::PathBuf = match dir {
Some(d) => d.to_path_buf(),
None => crate::test_support::runs_root(),
};
let run_dir = root.join(run);
if !run_dir.exists() {
let suggestion = suggest_closest_run_key(run, &root)
.map(|name| format!(" Did you mean `{name}`?"))
.unwrap_or_default();
bail!(
"run '{run}' not found under {}.{suggestion} \
Run `cargo ktstr stats list` to enumerate available run keys.",
root.display(),
);
}
let (sidecars, walk_stats) = walk_run_with_stats(&run_dir);
if walk_stats.walked == 0 {
bail!(
"run '{run}' has no sidecar data (searched {})",
run_dir.display(),
);
}
if json {
Ok(render_explain_sidecar_json(&sidecars, &walk_stats))
} else {
Ok(render_explain_sidecar_text(&sidecars, &walk_stats))
}
}
fn render_explain_sidecar_text(
sidecars: &[crate::test_support::SidecarResult],
walk_stats: &WalkStats,
) -> String {
use std::fmt::Write as _;
let mut sorted: Vec<&crate::test_support::SidecarResult> = sidecars.iter().collect();
sorted.sort_by(|a, b| {
a.test_name
.cmp(&b.test_name)
.then_with(|| a.run_id.cmp(&b.run_id))
});
let mut out = String::new();
let _ = writeln!(
out,
"walked {} sidecar file(s), parsed {} valid\n",
walk_stats.walked, walk_stats.valid,
);
for sc in &sorted {
let _ = writeln!(out, "test: {}", sc.test_name);
let _ = writeln!(out, " topology: {}", sc.topology);
let _ = writeln!(out, " scheduler: {}", sc.scheduler);
let _ = writeln!(out, " run_id: {}", sc.run_id);
let arch = sc
.host
.as_ref()
.and_then(|h| h.arch.as_deref())
.unwrap_or("-");
let _ = writeln!(out, " arch: {arch}");
let projected = project_optional_fields(sc);
let populated: Vec<&'static str> = projected
.iter()
.filter(|(_, b)| *b)
.map(|(n, _)| *n)
.collect();
let none_fields: Vec<&'static str> = projected
.iter()
.filter(|(_, b)| !*b)
.map(|(n, _)| *n)
.collect();
let populated_text = if populated.is_empty() {
"<none>".to_string()
} else {
populated.join(", ")
};
let _ = writeln!(
out,
" populated optional fields ({}): {populated_text}",
populated.len(),
);
if none_fields.is_empty() {
let _ = writeln!(out, " none fields: <all populated>\n");
continue;
}
let _ = writeln!(out, " none fields ({}):", none_fields.len());
for field in none_fields {
let entry = SIDECAR_NONE_CATALOG
.iter()
.find(|e| e.field == field)
.expect(
"catalog must cover every projected field — \
guarded by none_catalog_covers_every_option_field",
);
let _ = writeln!(
out,
" {} [{}]",
entry.field,
entry.classification.as_str(),
);
for cause in entry.causes {
let _ = writeln!(out, " - {cause}");
}
if let Some(fix) = entry.fix {
let _ = writeln!(out, " fix: {fix}");
}
}
out.push('\n');
}
if !walk_stats.errors.is_empty() {
let _ = writeln!(out, "corrupt sidecars ({}):", walk_stats.errors.len());
for err in &walk_stats.errors {
let _ = writeln!(out, " {}", err.path.display());
let _ = writeln!(out, " error: {}", err.raw_error);
if let Some(prose) = &err.enriched_message {
let _ = writeln!(out, " enriched: {prose}");
}
}
out.push('\n');
}
if !walk_stats.io_errors.is_empty() {
let _ = writeln!(out, "io errors ({}):", walk_stats.io_errors.len());
for err in &walk_stats.io_errors {
let _ = writeln!(out, " {}", err.path.display());
let _ = writeln!(out, " error: {}", err.raw_error);
}
out.push('\n');
}
out
}
const EXPLAIN_SIDECAR_SCHEMA_VERSION: &str = "1";
#[derive(serde::Serialize)]
struct ExplainOutput<'a> {
_schema_version: &'a str,
_walk: WalkStatsJson<'a>,
fields: std::collections::BTreeMap<&'a str, FieldDiagnostic<'a>>,
}
#[derive(serde::Serialize)]
struct WalkStatsJson<'a> {
walked: usize,
valid: usize,
errors: Vec<WalkError<'a>>,
io_errors: Vec<WalkIoError<'a>>,
}
#[derive(serde::Serialize)]
struct WalkError<'a> {
path: String,
error: &'a str,
enriched_message: Option<&'a str>,
}
#[derive(serde::Serialize)]
struct WalkIoError<'a> {
path: String,
error: &'a str,
}
#[derive(serde::Serialize)]
struct FieldDiagnostic<'a> {
none_count: usize,
some_count: usize,
classification: &'a str,
causes: &'a [&'a str],
fix: Option<&'a str>,
}
fn render_explain_sidecar_json(
sidecars: &[crate::test_support::SidecarResult],
walk_stats: &WalkStats,
) -> String {
let fields: std::collections::BTreeMap<&str, FieldDiagnostic<'_>> = SIDECAR_NONE_CATALOG
.iter()
.map(|entry| {
let none_count = sidecars
.iter()
.filter(|sc| {
project_optional_fields(sc)
.iter()
.any(|(n, b)| *n == entry.field && !*b)
})
.count();
let some_count = sidecars.len().saturating_sub(none_count);
(
entry.field,
FieldDiagnostic {
none_count,
some_count,
classification: entry.classification.as_str(),
causes: entry.causes,
fix: entry.fix,
},
)
})
.collect();
let errors: Vec<WalkError<'_>> = walk_stats
.errors
.iter()
.map(|err| WalkError {
path: err.path.display().to_string(),
error: &err.raw_error,
enriched_message: err.enriched_message.as_deref(),
})
.collect();
let io_errors: Vec<WalkIoError<'_>> = walk_stats
.io_errors
.iter()
.map(|err| WalkIoError {
path: err.path.display().to_string(),
error: &err.raw_error,
})
.collect();
let output = ExplainOutput {
_schema_version: EXPLAIN_SIDECAR_SCHEMA_VERSION,
_walk: WalkStatsJson {
walked: walk_stats.walked,
valid: walk_stats.valid,
errors,
io_errors,
},
fields,
};
serde_json::to_string_pretty(&output).expect(
"static-shape JSON serialization is infallible — every \
field in ExplainOutput / WalkStatsJson / WalkError / WalkIoError / \
FieldDiagnostic is a primitive, &str, or Vec/BTreeMap \
of those — no NaN, no non-string keys, no unsupported \
types",
)
}
fn suggest_closest_test_name(query: &str) -> Option<&'static str> {
let threshold = std::cmp::max(3, query.len() / 3);
let mut best: Option<(usize, &'static str)> = None;
for entry in crate::test_support::KTSTR_TESTS.iter() {
let d = strsim::levenshtein(query, entry.name);
if d > threshold {
continue;
}
match best {
Some((best_d, _)) if best_d <= d => continue,
_ => best = Some((d, entry.name)),
}
}
best.map(|(_, name)| name)
}
fn suggest_closest_scenario_name(query: &str) -> Option<&'static str> {
let threshold = std::cmp::max(3, query.len() / 3);
let mut best: Option<(usize, &'static str)> = None;
for s in crate::scenario::all_scenarios() {
let d = strsim::levenshtein(query, s.name);
if d > threshold {
continue;
}
match best {
Some((best_d, _)) if best_d <= d => continue,
_ => best = Some((d, s.name)),
}
}
best.map(|(_, name)| name)
}
pub fn scenario_filter_hint(filter: &str) -> Option<String> {
suggest_closest_scenario_name(filter).map(|s| format!(" Did you mean `{s}`?"))
}
pub fn show_thresholds(test_name: &str) -> Result<String> {
let entry = crate::test_support::find_test(test_name).ok_or_else(|| {
let suggestion = suggest_closest_test_name(test_name)
.map(|s| format!(" Did you mean `{s}`?"))
.unwrap_or_default();
anyhow!(
"no registered ktstr test named '{test_name}'.{suggestion} \
Run `cargo nextest list` to see the available test names \
— then pass just the function-name component to \
`show-thresholds`, not the `<binary>::` prefix that \
nextest prepends to each line."
)
})?;
let merged = crate::assert::Assert::default_checks()
.merge(entry.scheduler.assert())
.merge(&entry.assert);
let mut out = format!("Test: {}\n", entry.name);
out.push_str(&format!(
"Scheduler: {}\n",
entry.scheduler.scheduler_name(),
));
out.push_str("Resolved assertion thresholds:\n");
out.push_str(&merged.format_human());
Ok(out)
}
pub fn check_kvm() -> Result<()> {
use std::path::Path;
if !Path::new("/dev/kvm").exists() {
bail!(
"/dev/kvm not found. KVM requires:\n \
- Linux kernel with KVM support (CONFIG_KVM)\n \
- Access to /dev/kvm (check permissions or add user to 'kvm' group)\n \
- Hardware virtualization enabled in BIOS (VT-x/AMD-V)"
);
}
if let Err(e) = std::fs::File::open("/dev/kvm") {
if e.kind() == std::io::ErrorKind::PermissionDenied {
bail!(
"/dev/kvm: permission denied. Add your user to the 'kvm' group:\n \
sudo usermod -aG kvm $USER\n \
then log out and back in."
);
}
bail!("/dev/kvm: {e}");
}
Ok(())
}
pub fn default_cleanup_parents() -> Vec<std::path::PathBuf> {
let root = std::path::Path::new("/sys/fs/cgroup");
let entries = match std::fs::read_dir(root) {
Ok(e) => e,
Err(_) => return Vec::new(),
};
let mut out = Vec::new();
for entry in entries.flatten() {
let Ok(ty) = entry.file_type() else { continue };
if !ty.is_dir() {
continue;
}
let name = entry.file_name();
let Some(name) = name.to_str() else { continue };
if name == "ktstr" {
out.push(entry.path());
continue;
}
if let Some(pid_str) = name.strip_prefix("ktstr-")
&& !pid_str.is_empty()
&& pid_str.bytes().all(|b| b.is_ascii_digit())
{
if is_ktstr_pid_alive(pid_str) {
eprintln!("ktstr: skipping {} (live process)", entry.path().display());
continue;
}
out.push(entry.path());
}
}
out.sort();
out
}
pub fn is_ktstr_pid_alive(pid: &str) -> bool {
let comm_path = format!("/proc/{pid}/comm");
let Ok(comm) = std::fs::read_to_string(&comm_path) else {
return false;
};
let comm = comm.trim();
comm == "ktstr" || comm == "cargo-ktstr"
}
pub fn cleanup(parent_cgroup: Option<String>) -> Result<()> {
use crate::cgroup::CgroupManager;
match parent_cgroup {
Some(path) => {
if !std::path::Path::new(&path).exists() {
bail!("cgroup path not found: {path}");
}
let cgroups = CgroupManager::new(&path);
cgroups.cleanup_all()?;
println!("cleaned up {path}");
}
None => {
let parents = default_cleanup_parents();
if parents.is_empty() {
println!("no leftover cgroups found");
} else {
for path in parents {
let cgroups = CgroupManager::new(path.to_str().unwrap_or_default());
if let Err(e) = cgroups.cleanup_all() {
eprintln!("ktstr: cleanup_all failed on {}: {e}", path.display());
continue;
}
match std::fs::remove_dir(&path) {
Ok(()) => println!("cleaned up {}", path.display()),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
println!("cleaned up {}", path.display());
}
Err(e) => {
eprintln!("ktstr: failed to remove {}: {e}", path.display());
}
}
}
}
}
}
Ok(())
}
pub fn resolve_kernel_parallelism() -> usize {
if let Ok(raw) = std::env::var(crate::KTSTR_KERNEL_PARALLELISM_ENV) {
let trimmed = raw.trim();
match trimmed.parse::<usize>() {
Ok(n) if n > 0 => return n,
_ => {
tracing::warn!(
env_var = crate::KTSTR_KERNEL_PARALLELISM_ENV,
value = %raw,
"KTSTR_KERNEL_PARALLELISM={raw:?} failed to parse, using default",
);
}
}
}
std::thread::available_parallelism()
.map(|n| n.get())
.unwrap_or(1)
}
fn resolve_in_path(name: &std::path::Path) -> Option<std::path::PathBuf> {
use std::os::unix::fs::PermissionsExt;
let path_var = std::env::var_os("PATH")?;
for dir in std::env::split_paths(&path_var) {
let candidate = dir.join(name);
if let Ok(meta) = std::fs::metadata(&candidate)
&& meta.is_file()
&& meta.permissions().mode() & 0o111 != 0
{
return Some(candidate);
}
}
None
}
pub fn resolve_include_files(
paths: &[std::path::PathBuf],
) -> Result<Vec<(String, std::path::PathBuf)>> {
use std::path::{Component, PathBuf};
let mut resolved_includes: Vec<(String, PathBuf)> = Vec::new();
for path in paths {
let is_explicit_path = {
matches!(
path.components().next(),
Some(Component::RootDir | Component::CurDir | Component::ParentDir)
) || path.components().count() > 1
};
let resolved = if is_explicit_path {
anyhow::ensure!(
path.exists(),
"--include-files path not found: {}",
path.display()
);
path.clone()
} else {
if path.exists() {
path.clone()
} else {
resolve_in_path(path).ok_or_else(|| {
anyhow::anyhow!("-i {}: not found in filesystem or PATH", path.display())
})?
}
};
if resolved.is_dir() {
let dir_name = resolved
.file_name()
.ok_or_else(|| {
anyhow::anyhow!("include directory has no name: {}", resolved.display())
})?
.to_string_lossy()
.to_string();
let prefix = format!("include-files/{dir_name}");
let mut count = 0usize;
for entry in walkdir::WalkDir::new(&resolved).follow_links(true) {
let entry = entry.map_err(|e| anyhow::anyhow!("-i {}: {e}", resolved.display()))?;
if !entry.file_type().is_file() {
continue;
}
let rel = entry
.path()
.strip_prefix(&resolved)
.expect("walkdir entry is under root");
let archive_path = format!("{prefix}/{}", rel.display());
resolved_includes.push((archive_path, entry.into_path()));
count += 1;
}
if count == 0 {
eprintln!(
"warning: -i {}: directory contains no regular files",
resolved.display()
);
}
} else {
let file_name = resolved
.file_name()
.ok_or_else(|| {
anyhow::anyhow!("include file has no filename: {}", resolved.display())
})?
.to_string_lossy();
let archive_path = format!("include-files/{file_name}");
resolved_includes.push((archive_path, resolved));
}
}
let mut seen = std::collections::HashMap::<&str, &std::path::Path>::new();
for (archive_path, host_path) in &resolved_includes {
if let Some(prev) = seen.insert(archive_path.as_str(), host_path.as_path()) {
anyhow::bail!(
"duplicate include path '{}': provided by both {} and {}",
archive_path,
prev.display(),
host_path.display(),
);
}
}
Ok(resolved_includes)
}
pub fn cache_lookup(
cache: &crate::cache::CacheDir,
cache_key: &str,
cli_label: &str,
) -> Option<crate::cache::CacheEntry> {
if let Some(entry) = cache.lookup(cache_key) {
return Some(entry);
}
if crate::remote_cache::is_enabled() {
return crate::remote_cache::remote_lookup(cache, cache_key, cli_label);
}
None
}
pub fn resolve_cached_kernel(
id: &crate::kernel_path::KernelId,
cli_label: &str,
) -> Result<std::path::PathBuf> {
use crate::kernel_path::KernelId;
match id {
KernelId::Version(ver) => {
let resolved = if crate::fetch::is_major_minor_prefix(ver) {
crate::fetch::fetch_version_for_prefix(
crate::fetch::shared_client(),
ver,
cli_label,
)?
} else {
ver.clone()
};
let cache = crate::cache::CacheDir::new()?;
let (arch, _) = crate::fetch::arch_info();
let cache_key = format!("{resolved}-tarball-{arch}-kc{}", crate::cache_key_suffix());
if let Some(entry) = cache_lookup(&cache, &cache_key, cli_label) {
return Ok(entry.path);
}
download_and_cache_version(&resolved, cli_label, None)
}
KernelId::CacheKey(key) => {
let cache = crate::cache::CacheDir::new()?;
if let Some(entry) = cache_lookup(&cache, key, cli_label) {
return Ok(entry.path);
}
bail!(
"cache key {key} not found. \
Run `{cli_label} kernel list` to see available entries."
)
}
KernelId::Path(_) => bail!("resolve_cached_kernel called with Path variant"),
KernelId::Range { .. } | KernelId::Git { .. } => {
id.validate()
.map_err(|e| anyhow::anyhow!("--kernel {id}: {e}"))?;
bail!(
"--kernel {id}: kernel ranges and git sources are not \
yet supported in this context — use a single kernel \
version, cache key, or path"
)
}
}
}
pub struct KernelResolvePolicy<'a> {
pub accept_raw_image: bool,
pub cli_label: &'a str,
}
pub fn resolve_kernel_image(
kernel: Option<&str>,
policy: &KernelResolvePolicy<'_>,
) -> Result<std::path::PathBuf> {
use crate::kernel_path::KernelId;
if let Some(val) = kernel {
match KernelId::parse(val) {
KernelId::Path(p) => {
let path = std::path::PathBuf::from(&p);
if path.is_dir() {
resolve_kernel_dir(&path, policy.cli_label, None)
} else if path.is_file() {
if policy.accept_raw_image {
Ok(path)
} else {
bail!(
"--kernel {}: raw image files are not supported. \
Pass a source directory, version, or cache key.",
path.display()
)
}
} else {
bail!("kernel path not found: {}", path.display())
}
}
id @ (KernelId::Version(_) | KernelId::CacheKey(_)) => {
let cache_dir = resolve_cached_kernel(&id, policy.cli_label)?;
crate::kernel_path::find_image_in_dir(&cache_dir).ok_or_else(|| {
anyhow::anyhow!("no kernel image found in {}", cache_dir.display())
})
}
id @ (KernelId::Range { .. } | KernelId::Git { .. }) => {
id.validate()
.map_err(|e| anyhow::anyhow!("--kernel {val}: {e}"))?;
bail!(
"--kernel {val}: kernel ranges and git sources are not \
yet supported in this context — use a single kernel \
version, cache key, or path"
)
}
}
} else {
match crate::find_kernel()? {
Some(image) => Ok(image),
None => auto_download_kernel(policy.cli_label),
}
}
}
pub fn auto_download_kernel(cli_label: &str) -> Result<std::path::PathBuf> {
status(&format!(
"{cli_label}: no kernel found, downloading latest stable"
));
let sp = Spinner::start("Fetching latest kernel version...");
let ver = crate::fetch::fetch_latest_stable_version(crate::fetch::shared_client(), cli_label)?;
sp.finish(format!("Latest stable: {ver}"));
let cache_dir = download_and_cache_version(&ver, cli_label, None)?;
let (_, image_name) = crate::fetch::arch_info();
Ok(cache_dir.join(image_name))
}
pub fn download_and_cache_version(
version: &str,
cli_label: &str,
cpu_cap: Option<crate::vmm::host_topology::CpuCap>,
) -> Result<std::path::PathBuf> {
let (arch, _) = crate::fetch::arch_info();
let cache_key = format!("{version}-tarball-{arch}-kc{}", crate::cache_key_suffix());
if let Ok(cache) = crate::cache::CacheDir::new()
&& let Some(entry) = cache_lookup(&cache, &cache_key, cli_label)
{
return Ok(entry.path);
}
let tmp_dir = tempfile::TempDir::new()?;
let sp = Spinner::start("Downloading kernel...");
let acquired = crate::fetch::download_tarball(
crate::fetch::shared_client(),
version,
tmp_dir.path(),
cli_label,
)?;
sp.finish("Downloaded");
let cache = crate::cache::CacheDir::new()?;
let result = kernel_build_pipeline(&acquired, &cache, cli_label, false, false, cpu_cap)?;
match result.entry {
Some(entry) => Ok(entry.path),
None => bail!(
"kernel built but cache store failed — cannot return image from temporary directory"
),
}
}
pub fn expand_kernel_range(start: &str, end: &str, cli_label: &str) -> Result<Vec<String>> {
use crate::kernel_path::decompose_version_for_compare;
let start_key = decompose_version_for_compare(start).ok_or_else(|| {
anyhow!(
"kernel range start `{start}` is not a parseable version. \
Endpoints must match `MAJOR.MINOR[.PATCH][-rcN]`."
)
})?;
let end_key = decompose_version_for_compare(end).ok_or_else(|| {
anyhow!(
"kernel range end `{end}` is not a parseable version. \
Endpoints must match `MAJOR.MINOR[.PATCH][-rcN]`."
)
})?;
eprintln!("{cli_label}: expanding kernel range {start}..{end}");
let releases = crate::fetch::cached_releases()?;
let versions = filter_and_sort_range(&releases, start_key, end_key);
if versions.is_empty() {
bail!(
"kernel range {start}..{end} expanded to 0 stable releases. \
releases.json has no `stable` or `longterm` rows in this \
interval — verify the endpoints, or use a single \
`--kernel <version>` if you want a pre-release or \
archived version."
);
}
eprintln!(
"{cli_label}: range expanded to {n} kernel(s): {list}",
n = versions.len(),
list = versions.join(", "),
);
Ok(versions)
}
fn filter_and_sort_range(
releases: &[crate::fetch::Release],
start_key: (u64, u64, u64, u64),
end_key: (u64, u64, u64, u64),
) -> Vec<String> {
use crate::kernel_path::decompose_version_for_compare;
let mut selected: Vec<(String, (u64, u64, u64, u64))> = Vec::new();
for r in releases {
if r.moniker != "stable" && r.moniker != "longterm" {
continue;
}
let Some(key) = decompose_version_for_compare(&r.version) else {
continue;
};
if key < start_key || key > end_key {
continue;
}
selected.push((r.version.clone(), key));
}
selected.sort_by_key(|s| s.1);
selected.into_iter().map(|(v, _)| v).collect()
}
pub fn resolve_git_kernel(url: &str, git_ref: &str, cli_label: &str) -> Result<std::path::PathBuf> {
let tmp_dir = tempfile::TempDir::new()?;
let acquired = crate::fetch::git_clone(url, git_ref, tmp_dir.path(), cli_label)?;
let cache = crate::cache::CacheDir::new()?;
if let Some(entry) = cache_lookup(&cache, &acquired.cache_key, cli_label) {
return Ok(entry.path);
}
let result = kernel_build_pipeline(&acquired, &cache, cli_label, false, false, None)?;
match result.entry {
Some(entry) => Ok(entry.path),
None => bail!(
"kernel built from git+{url}#{git_ref} but cache store failed — \
cannot return image from temporary directory"
),
}
}
#[derive(Debug, Clone)]
pub struct KernelDirCacheHit {
pub cache_key: String,
pub built_at: String,
}
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct KernelDirOutcome {
pub dir: std::path::PathBuf,
pub cache_hit: Option<KernelDirCacheHit>,
pub is_dirty: bool,
}
pub fn resolve_kernel_dir_to_entry(
path: &std::path::Path,
cli_label: &str,
cpu_cap: Option<crate::vmm::host_topology::CpuCap>,
) -> Result<KernelDirOutcome> {
let acquired = acquire_local_source_tree(path)?;
let cache_key = acquired.cache_key.clone();
let is_dirty = acquired.is_dirty;
let cache = crate::cache::CacheDir::new()?;
if !is_dirty && let Some(entry) = cache_lookup(&cache, &cache_key, cli_label) {
if entry.image_path().exists() {
let hit = KernelDirCacheHit {
cache_key: cache_key.clone(),
built_at: entry.metadata.built_at.clone(),
};
return Ok(KernelDirOutcome {
dir: entry.path,
cache_hit: Some(hit),
is_dirty: false,
});
}
}
let result = kernel_build_pipeline(&acquired, &cache, cli_label, false, true, cpu_cap)?;
let dir = match result.entry {
Some(entry) => entry.path,
None => acquired.source_dir,
};
Ok(KernelDirOutcome {
dir,
cache_hit: None,
is_dirty: is_dirty || result.post_build_is_dirty,
})
}
pub fn resolve_kernel_dir(
path: &std::path::Path,
cli_label: &str,
cpu_cap: Option<crate::vmm::host_topology::CpuCap>,
) -> Result<std::path::PathBuf> {
let acquired = acquire_local_source_tree(path)?;
let cache_key = acquired.cache_key.clone();
let cache = crate::cache::CacheDir::new()?;
if !acquired.is_dirty
&& let Some(entry) = cache_lookup(&cache, &cache_key, cli_label)
{
let image = entry.image_path();
if image.exists() {
success(&format!("{cli_label}: using cached kernel {cache_key}"));
return Ok(image);
}
}
let result = kernel_build_pipeline(&acquired, &cache, cli_label, false, true, cpu_cap)?;
match result.entry {
Some(entry) => Ok(entry.image_path()),
None => Ok(result.image_path),
}
}
fn acquire_local_source_tree(path: &std::path::Path) -> Result<crate::fetch::AcquiredSource> {
let is_source_tree = path.join("Makefile").exists() && path.join("Kconfig").exists();
if !is_source_tree {
bail!(
"no kernel image found in {} (not a kernel source tree — \
missing Makefile or Kconfig)",
path.display()
);
}
crate::fetch::local_source(path).map_err(|e| anyhow::anyhow!("{e}"))
}
pub fn stderr_color() -> bool {
use std::io::IsTerminal;
static COLOR: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
*COLOR.get_or_init(|| std::io::stderr().is_terminal())
}
pub fn stdout_color() -> bool {
use std::io::IsTerminal;
static COLOR: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
*COLOR.get_or_init(|| std::io::stdout().is_terminal())
}
pub fn new_table() -> comfy_table::Table {
use comfy_table::{ContentArrangement, Table, presets::NOTHING};
let mut t = Table::new();
t.load_preset(NOTHING);
t.set_content_arrangement(ContentArrangement::Disabled);
if !stdout_color() {
t.force_no_tty();
}
t
}
pub fn new_wrapped_table() -> comfy_table::Table {
use comfy_table::{ContentArrangement, Table, presets::NOTHING};
let mut t = Table::new();
t.load_preset(NOTHING);
t.set_content_arrangement(ContentArrangement::Dynamic);
if !stdout_color() {
t.force_no_tty();
}
t
}
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct LlcLockRow {
pub(crate) llc_idx: usize,
pub(crate) numa_node: Option<usize>,
pub(crate) lockfile: String,
pub(crate) holders: Vec<crate::flock::HolderInfo>,
}
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct CpuLockRow {
pub(crate) cpu: usize,
pub(crate) numa_node: Option<usize>,
pub(crate) lockfile: String,
pub(crate) holders: Vec<crate::flock::HolderInfo>,
}
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct CacheLockRow {
pub(crate) cache_key: String,
pub(crate) lockfile: String,
pub(crate) holders: Vec<crate::flock::HolderInfo>,
}
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct RunDirLockRow {
pub(crate) run_key: String,
pub(crate) lockfile: String,
pub(crate) holders: Vec<crate::flock::HolderInfo>,
}
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct LocksSnapshot {
pub(crate) llcs: Vec<LlcLockRow>,
pub(crate) cpus: Vec<CpuLockRow>,
pub(crate) cache: Vec<CacheLockRow>,
pub(crate) run_dirs: Vec<RunDirLockRow>,
}
fn collect_locks_snapshot() -> Result<LocksSnapshot> {
let cache_root = CacheDir::default_root().ok();
let runs_root = crate::test_support::runs_root();
collect_locks_snapshot_from(Path::new("/tmp"), cache_root.as_deref(), Some(&runs_root))
}
pub(crate) fn collect_locks_snapshot_from(
tmp_root: &Path,
cache_root: Option<&Path>,
runs_root: Option<&Path>,
) -> Result<LocksSnapshot> {
use crate::vmm::host_topology::HostTopology;
let host_topo = HostTopology::from_sysfs().ok();
let llc_pattern = format!("{}/ktstr-llc-*.lock", tmp_root.display());
let mut llcs: Vec<LlcLockRow> = Vec::new();
for entry in glob::glob(&llc_pattern)
.map_err(|e| anyhow!("glob {llc_pattern}: {e}"))?
.flatten()
{
let Some(stem) = entry.file_stem().and_then(|s| s.to_str()) else {
continue;
};
let Some(idx_str) = stem.strip_prefix("ktstr-llc-") else {
continue;
};
let Ok(llc_idx) = idx_str.parse::<usize>() else {
continue;
};
let holders = crate::flock::read_holders(&entry).unwrap_or_default();
let numa_node = host_topo.as_ref().and_then(|t| {
if llc_idx < t.llc_groups.len() {
Some(t.llc_numa_node(llc_idx))
} else {
None
}
});
llcs.push(LlcLockRow {
llc_idx,
numa_node,
lockfile: entry.display().to_string(),
holders,
});
}
llcs.sort_by_key(|r| r.llc_idx);
let cpu_pattern = format!("{}/ktstr-cpu-*.lock", tmp_root.display());
let mut cpus: Vec<CpuLockRow> = Vec::new();
for entry in glob::glob(&cpu_pattern)
.map_err(|e| anyhow!("glob {cpu_pattern}: {e}"))?
.flatten()
{
let Some(stem) = entry.file_stem().and_then(|s| s.to_str()) else {
continue;
};
let Some(idx_str) = stem.strip_prefix("ktstr-cpu-") else {
continue;
};
let Ok(cpu) = idx_str.parse::<usize>() else {
continue;
};
let holders = crate::flock::read_holders(&entry).unwrap_or_default();
let numa_node = host_topo
.as_ref()
.and_then(|t| t.cpu_to_node.get(&cpu).copied());
cpus.push(CpuLockRow {
cpu,
numa_node,
lockfile: entry.display().to_string(),
holders,
});
}
cpus.sort_by_key(|r| r.cpu);
let mut cache: Vec<CacheLockRow> = Vec::new();
if let Some(cache_root) = cache_root {
let locks_dir = cache_root.join(crate::flock::LOCK_DIR_NAME);
let pattern = format!("{}/*.lock", locks_dir.display());
if let Ok(expanded) = glob::glob(&pattern) {
for entry in expanded.flatten() {
let Some(stem) = entry.file_stem().and_then(|s| s.to_str()) else {
continue;
};
let holders = crate::flock::read_holders(&entry).unwrap_or_default();
cache.push(CacheLockRow {
cache_key: stem.to_string(),
lockfile: entry.display().to_string(),
holders,
});
}
}
}
cache.sort_by(|a, b| a.cache_key.cmp(&b.cache_key));
let mut run_dirs: Vec<RunDirLockRow> = Vec::new();
if let Some(runs_root) = runs_root {
let locks_dir = runs_root.join(crate::flock::LOCK_DIR_NAME);
let pattern = format!("{}/*.lock", locks_dir.display());
if let Ok(expanded) = glob::glob(&pattern) {
for entry in expanded.flatten() {
let Some(stem) = entry.file_stem().and_then(|s| s.to_str()) else {
continue;
};
let holders = crate::flock::read_holders(&entry).unwrap_or_default();
run_dirs.push(RunDirLockRow {
run_key: stem.to_string(),
lockfile: entry.display().to_string(),
holders,
});
}
}
}
run_dirs.sort_by(|a, b| a.run_key.cmp(&b.run_key));
Ok(LocksSnapshot {
llcs,
cpus,
cache,
run_dirs,
})
}
fn render_locks_human(snap: &LocksSnapshot) -> String {
use std::fmt::Write;
let mut out = String::new();
let fmt_holders = |hs: &[crate::flock::HolderInfo]| -> String {
if hs.is_empty() {
crate::flock::NO_HOLDERS_RECORDED.to_string()
} else {
hs.iter()
.map(|h| format!("{} ({})", h.pid, h.cmdline))
.collect::<Vec<_>>()
.join("\n")
}
};
let fmt_node = |n: Option<usize>| -> String {
match n {
Some(v) => v.to_string(),
None => "?".to_string(),
}
};
writeln!(out, "LLC locks:").unwrap();
if snap.llcs.is_empty() {
writeln!(out, " (none)").unwrap();
} else {
let mut t = new_table();
t.set_header(["LLC", "NODE", "LOCKFILE", "HOLDERS"]);
for r in &snap.llcs {
t.add_row([
r.llc_idx.to_string(),
fmt_node(r.numa_node),
r.lockfile.clone(),
fmt_holders(&r.holders),
]);
}
writeln!(out, "{t}").unwrap();
}
writeln!(out, "\nPer-CPU locks:").unwrap();
if snap.cpus.is_empty() {
writeln!(out, " (none)").unwrap();
} else {
let mut t = new_table();
t.set_header(["CPU", "NODE", "LOCKFILE", "HOLDERS"]);
for r in &snap.cpus {
t.add_row([
r.cpu.to_string(),
fmt_node(r.numa_node),
r.lockfile.clone(),
fmt_holders(&r.holders),
]);
}
writeln!(out, "{t}").unwrap();
}
writeln!(out, "\nCache-entry locks:").unwrap();
if snap.cache.is_empty() {
writeln!(out, " (none)").unwrap();
} else {
let mut t = new_table();
t.set_header(["CACHE KEY", "LOCKFILE", "HOLDERS"]);
for r in &snap.cache {
t.add_row([
r.cache_key.clone(),
r.lockfile.clone(),
fmt_holders(&r.holders),
]);
}
writeln!(out, "{t}").unwrap();
}
writeln!(out, "\nRun-dir locks:").unwrap();
if snap.run_dirs.is_empty() {
writeln!(out, " (none)").unwrap();
} else {
let mut t = new_table();
t.set_header(["RUN KEY", "LOCKFILE", "HOLDERS"]);
for r in &snap.run_dirs {
t.add_row([
r.run_key.clone(),
r.lockfile.clone(),
fmt_holders(&r.holders),
]);
}
writeln!(out, "{t}").unwrap();
}
out
}
static LOCKS_WATCH_KILL: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);
extern "C" fn locks_watch_sigint_handler(_sig: libc::c_int) {
LOCKS_WATCH_KILL.store(true, std::sync::atomic::Ordering::SeqCst);
}
pub fn list_locks(json: bool, watch: Option<std::time::Duration>) -> Result<()> {
if watch.is_none() {
let snap = collect_locks_snapshot()?;
if json {
println!("{}", serde_json::to_string_pretty(&snap)?);
} else {
print!("{}", render_locks_human(&snap));
}
return Ok(());
}
let interval = watch.unwrap();
unsafe {
libc::signal(
libc::SIGINT,
locks_watch_sigint_handler as *const () as libc::sighandler_t,
);
}
loop {
if LOCKS_WATCH_KILL.load(std::sync::atomic::Ordering::SeqCst) {
break;
}
let snap = collect_locks_snapshot()?;
if json {
println!("{}", serde_json::to_string(&snap)?);
} else {
print!("\x1b[2J\x1b[H{}", render_locks_human(&snap));
}
std::thread::sleep(interval);
}
Ok(())
}
fn status(msg: &str) {
if stderr_color() {
eprintln!("\x1b[1m{msg}\x1b[0m");
} else {
eprintln!("{msg}");
}
}
fn success(msg: &str) {
if stderr_color() {
eprintln!("\x1b[32m{msg}\x1b[0m");
} else {
eprintln!("{msg}");
}
}
fn warn(msg: &str) {
if stderr_color() {
eprintln!("\x1b[34m{msg}\x1b[0m");
} else {
eprintln!("{msg}");
}
}
static SPINNER_SAVED_TERMIOS: std::sync::Mutex<Option<libc::termios>> = std::sync::Mutex::new(None);
static SPINNER_ACTIVE: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);
fn install_spinner_termios_panic_hook() {
static INSTALLED: std::sync::Once = std::sync::Once::new();
INSTALLED.call_once(|| {
let default = std::panic::take_hook();
std::panic::set_hook(Box::new(move |info| {
if let Ok(guard) = SPINNER_SAVED_TERMIOS.try_lock()
&& let Some(termios) = *guard
{
unsafe {
libc::tcsetattr(libc::STDIN_FILENO, libc::TCSANOW, &termios);
}
}
default(info);
}));
});
}
pub struct Spinner {
pb: Option<indicatif::ProgressBar>,
saved_termios: Option<libc::termios>,
}
impl Spinner {
pub fn start(msg: impl Into<std::borrow::Cow<'static, str>>) -> Self {
debug_assert!(
!SPINNER_ACTIVE.swap(true, std::sync::atomic::Ordering::SeqCst),
"Spinner::start called while another Spinner is already \
active. Nested spinners clobber SPINNER_SAVED_TERMIOS — \
the outer spinner's restore path would reset to the \
already-modified termios state instead of the original. \
If nesting is genuinely needed, refactor the save/restore \
path to depth-count before lifting this assertion.",
);
if !stderr_color() {
return Spinner {
pb: None,
saved_termios: None,
};
}
let pb = indicatif::ProgressBar::new_spinner();
pb.set_style(
indicatif::ProgressStyle::with_template("{spinner:.cyan} {msg}")
.expect("valid template"),
);
pb.set_message(msg);
pb.enable_steady_tick(Duration::from_millis(80));
if pb.is_hidden() {
return Spinner {
pb: None,
saved_termios: None,
};
}
let saved_termios = Self::disable_echo();
Spinner {
pb: Some(pb),
saved_termios,
}
}
fn disable_echo() -> Option<libc::termios> {
use std::io::IsTerminal;
if !std::io::stdin().is_terminal() {
return None;
}
unsafe {
let fd = libc::STDIN_FILENO;
let mut termios: libc::termios = std::mem::zeroed();
if libc::tcgetattr(fd, &mut termios) != 0 {
return None;
}
let saved = termios;
install_spinner_termios_panic_hook();
*SPINNER_SAVED_TERMIOS.lock().unwrap() = Some(saved);
termios.c_lflag &= !libc::ECHO;
libc::tcsetattr(fd, libc::TCSANOW, &termios);
Some(saved)
}
}
fn teardown(&mut self) {
if let Some(termios) = self.saved_termios.take() {
unsafe {
libc::tcsetattr(libc::STDIN_FILENO, libc::TCSANOW, &termios);
}
*SPINNER_SAVED_TERMIOS.lock().unwrap() = None;
}
}
pub fn set_message(&self, msg: impl Into<std::borrow::Cow<'static, str>>) {
if let Some(ref pb) = self.pb {
pb.set_message(msg);
}
}
pub fn finish(mut self, msg: impl Into<std::borrow::Cow<'static, str>>) {
self.teardown();
match self.pb.take() {
Some(pb) => pb.finish_with_message(msg),
None => eprintln!("{}", msg.into()),
}
}
pub fn println(&self, msg: impl AsRef<str>) {
match self.pb {
Some(ref pb) => pb.println(msg),
None => eprintln!("{}", msg.as_ref()),
}
}
pub fn suspend<F: FnOnce() -> R, R>(&self, f: F) -> R {
match self.pb {
Some(ref pb) => pb.suspend(f),
None => f(),
}
}
pub fn with_progress<T, E, F>(
start_msg: impl Into<std::borrow::Cow<'static, str>>,
success_msg: impl Into<std::borrow::Cow<'static, str>>,
f: F,
) -> Result<T, E>
where
F: FnOnce(&Spinner) -> Result<T, E>,
{
let sp = Spinner::start(start_msg);
let result = f(&sp);
match result {
Ok(v) => {
sp.finish(success_msg);
Ok(v)
}
Err(e) => {
drop(sp);
Err(e)
}
}
}
}
impl Drop for Spinner {
fn drop(&mut self) {
self.teardown();
if let Some(pb) = self.pb.take() {
pb.finish_and_clear();
}
SPINNER_ACTIVE.store(false, std::sync::atomic::Ordering::SeqCst);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::scenario;
fn make_test_run(name: &str) -> (tempfile::TempDir, std::path::PathBuf) {
let tmp = tempfile::tempdir().expect("tempdir must succeed");
let run_dir = tmp.path().join(name);
std::fs::create_dir(&run_dir).expect("create run dir");
(tmp, run_dir)
}
fn write_sidecar(
dir: &std::path::Path,
key: &str,
sc: &crate::test_support::SidecarResult,
) -> std::path::PathBuf {
let path = dir.join(format!("{key}.ktstr.json"));
let json = serde_json::to_string(sc).expect("fixture must serialize");
std::fs::write(&path, json).expect("write sidecar");
path
}
fn write_corrupt_sidecar(dir: &std::path::Path, key: &str, body: &str) -> std::path::PathBuf {
let path = dir.join(format!("{key}.ktstr.json"));
std::fs::write(&path, body).expect("write corrupt sidecar");
path
}
pub(super) const SIDECAR_VEC_FIELDS: &[&str] = &[
"metrics",
"stimulus_events",
"active_flags",
"verifier_stats",
"sysctls",
"kargs",
];
#[test]
fn sidecar_vec_fields_drift_guard() {
let sc = crate::test_support::SidecarResult::test_fixture();
let value = serde_json::to_value(&sc).expect("fixture must serialize");
let obj = value.as_object().expect("fixture is an Object");
for name in SIDECAR_VEC_FIELDS {
let v = obj.get(*name).unwrap_or_else(|| {
panic!(
"SIDECAR_VEC_FIELDS lists `{name}` but \
it is not on the serialized fixture — \
schema rename or removal not propagated \
to the constant"
)
});
assert!(
v.is_array(),
"SIDECAR_VEC_FIELDS lists `{name}` but it is not a \
JSON array on the fixture — schema flipped Vec→Option \
or another shape; update the constant",
);
}
let array_keys: Vec<&str> = obj
.iter()
.filter(|(_, v)| v.is_array())
.map(|(k, _)| k.as_str())
.collect();
assert_eq!(
array_keys.len(),
SIDECAR_VEC_FIELDS.len(),
"SidecarResult has {} JSON-array fields, SIDECAR_VEC_FIELDS \
lists {}. Drift detected — update the constant. \
Live array keys: {array_keys:?}; constant: {SIDECAR_VEC_FIELDS:?}",
array_keys.len(),
SIDECAR_VEC_FIELDS.len(),
);
}
#[test]
fn parse_topology_string_happy_path() {
let (n, l, c, t) = parse_topology_string("1,2,4,8").expect("valid");
assert_eq!((n, l, c, t), (1, 2, 4, 8));
}
#[test]
fn parse_topology_string_rejects_too_few_parts() {
let err = parse_topology_string("1,2,4").expect_err("3 parts must fail");
let rendered = format!("{err:#}");
assert!(
rendered.contains("invalid topology '1,2,4'"),
"error must echo the bad input: {rendered}",
);
assert!(
rendered.contains("numa_nodes,llcs,cores,threads"),
"error must name the expected shape: {rendered}",
);
}
#[test]
fn parse_topology_string_rejects_too_many_parts() {
let err = parse_topology_string("1,2,4,8,16").expect_err("5 parts must fail");
assert!(format!("{err:#}").contains("invalid topology"));
}
#[test]
fn parse_topology_string_names_failing_field() {
for (pos, field) in [(0, "numa_nodes"), (1, "llcs"), (2, "cores"), (3, "threads")] {
let mut parts = ["1"; 4];
parts[pos] = "abc";
let input = parts.join(",");
let err = parse_topology_string(&input).expect_err("non-numeric must fail");
let rendered = format!("{err:#}");
assert!(
rendered.contains(&format!("invalid {field} value: 'abc'")),
"pos {pos}: error must name the `{field}` field, got: {rendered}",
);
}
}
#[test]
fn parse_topology_string_rejects_zero_dimensions() {
for pos in 0..4 {
let mut parts = ["1"; 4];
parts[pos] = "0";
let input = parts.join(",");
let err = parse_topology_string(&input).expect_err("zero must fail");
let rendered = format!("{err:#}");
assert!(
rendered.contains(">= 1"),
"pos {pos}: error must cite the >=1 rule: {rendered}",
);
}
}
#[test]
fn parse_topology_string_accepts_u32_max() {
let big = u32::MAX;
let input = format!("{big},{big},{big},{big}");
let (n, l, c, t) = parse_topology_string(&input).expect("u32::MAX valid");
assert_eq!((n, l, c, t), (big, big, big, big));
}
#[test]
fn parse_topology_string_rejects_u32_overflow() {
let too_big = (u32::MAX as u64) + 1;
let input = format!("1,{too_big},4,1");
let err = parse_topology_string(&input).expect_err("overflow must fail");
assert!(
format!("{err:#}").contains(&format!("invalid llcs value: '{too_big}'")),
"overflow must surface field + bad token: {err:#}",
);
}
#[test]
fn show_host_returns_populated_report() {
let out = show_host();
assert!(!out.is_empty(), "show_host must return non-empty output");
assert!(
out.ends_with('\n'),
"show_host output must end with a newline for print! use: {out:?}",
);
assert!(
out.contains("kernel_name"),
"show_host must surface the kernel_name field: {out}",
);
}
#[test]
fn show_run_host_missing_run_returns_error() {
let tmp = tempfile::tempdir().unwrap();
let err = show_run_host("nonexistent-run", Some(tmp.path())).unwrap_err();
let msg = format!("{err:#}");
assert!(
msg.contains("run 'nonexistent-run' not found"),
"missing-run error must name the run: {msg}",
);
assert!(
msg.contains("cargo ktstr stats list"),
"missing-run error must name the `stats list` discovery \
command so operators can enumerate available run keys \
without extra lookups: {msg}",
);
}
#[test]
fn show_run_host_empty_run_returns_error() {
let tmp = tempfile::tempdir().unwrap();
std::fs::create_dir(tmp.path().join("run-empty")).unwrap();
let err = show_run_host("run-empty", Some(tmp.path())).unwrap_err();
let msg = format!("{err:#}");
assert!(
msg.contains("no sidecar data"),
"empty-run error must name the condition: {msg}",
);
}
#[test]
fn show_run_host_all_host_none_returns_error() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-no-host");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
let json = serde_json::to_string(&sc).unwrap();
std::fs::write(run_dir.join("t-0000000000000000.ktstr.json"), json).unwrap();
let err = show_run_host("run-no-host", Some(tmp.path())).unwrap_err();
let msg = format!("{err:#}");
assert!(
msg.contains("no sidecar with a populated host")
|| msg.contains("none carries a populated host context"),
"all-host-None error must name the pre-enrichment likely cause: {msg}",
);
}
#[test]
fn show_run_host_populated_sidecar_returns_format_human() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-with-host");
std::fs::create_dir(&run_dir).unwrap();
let mut sc = crate::test_support::SidecarResult::test_fixture();
sc.host = Some(crate::host_context::HostContext::test_fixture());
let json = serde_json::to_string(&sc).unwrap();
std::fs::write(run_dir.join("t-0000000000000000.ktstr.json"), json).unwrap();
let out = show_run_host("run-with-host", Some(tmp.path())).unwrap();
assert!(
out.contains("kernel_name"),
"populated host output must include the kernel_name row: {out}",
);
assert!(
out.ends_with('\n'),
"output must end with newline for print!: {out:?}",
);
}
#[test]
fn show_run_host_forward_scans_past_none_sidecars() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-mixed");
std::fs::create_dir(&run_dir).unwrap();
let sc_none = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("a-0000000000000000.ktstr.json"),
serde_json::to_string(&sc_none).unwrap(),
)
.unwrap();
let mut sc_host = crate::test_support::SidecarResult::test_fixture();
sc_host.host = Some(crate::host_context::HostContext::test_fixture());
std::fs::write(
run_dir.join("b-0000000000000000.ktstr.json"),
serde_json::to_string(&sc_host).unwrap(),
)
.unwrap();
let out = show_run_host("run-mixed", Some(tmp.path()))
.expect("forward scan must find the populated sidecar");
assert!(
out.contains("kernel_name"),
"output from populated sidecar must include kernel_name: {out}",
);
}
#[test]
fn none_catalog_covers_every_option_field() {
const EXPECTED_OPTION_FIELD_COUNT: usize = 10;
assert_eq!(
super::SIDECAR_NONE_CATALOG.len(),
EXPECTED_OPTION_FIELD_COUNT,
"SIDECAR_NONE_CATALOG must cover every Option<T> field on \
SidecarResult; expected {EXPECTED_OPTION_FIELD_COUNT}, got \
{}. A schema change must update the catalog in lockstep.",
super::SIDECAR_NONE_CATALOG.len(),
);
let sc = crate::test_support::SidecarResult::test_fixture();
let projected = super::project_optional_fields(&sc);
assert_eq!(
projected.len(),
EXPECTED_OPTION_FIELD_COUNT,
"project_optional_fields must enumerate every Option<T> \
field; expected {EXPECTED_OPTION_FIELD_COUNT}, got {}. Co-update \
with the catalog when adding a new Option field.",
projected.len(),
);
for (i, (name, _)) in projected.iter().enumerate() {
let catalog = &super::SIDECAR_NONE_CATALOG[i];
assert_eq!(
*name, catalog.field,
"projected field {i} ({name:?}) must match catalog \
entry at the same index ({:?}) — order drift breaks \
the renderer's catalog-lookup expectation",
catalog.field,
);
}
}
#[test]
fn none_catalog_every_entry_has_causes() {
for entry in super::SIDECAR_NONE_CATALOG {
assert!(
!entry.causes.is_empty(),
"catalog entry for {} has no causes — every field's \
None case must document at least one cause",
entry.field,
);
}
}
#[test]
fn none_catalog_expected_entries_have_no_fix() {
for entry in super::SIDECAR_NONE_CATALOG {
if matches!(entry.classification, super::NoneClassification::Expected) {
assert!(
entry.fix.is_none(),
"Expected-classified field {} must not carry a `fix:` \
— there is no operator action that recovers a \
steady-state None",
entry.field,
);
}
}
}
#[test]
fn none_catalog_fix_assignments_match_design_ruling() {
let by_field: std::collections::HashMap<&'static str, Option<&'static str>> =
super::SIDECAR_NONE_CATALOG
.iter()
.map(|e| (e.field, e.fix))
.collect();
let must_fix = ["project_commit", "kernel_commit", "host", "run_source"];
let must_not_fix = [
"scheduler_commit",
"payload",
"monitor",
"kvm_stats",
"kernel_version",
"cleanup_duration_ms",
];
assert_eq!(
must_fix.len() + must_not_fix.len(),
super::SIDECAR_NONE_CATALOG.len(),
"every catalog entry must be classified as either \
must-fix or must-not-fix; expected sum = catalog len \
({}), got must_fix={} + must_not_fix={}",
super::SIDECAR_NONE_CATALOG.len(),
must_fix.len(),
must_not_fix.len(),
);
for field in &must_fix {
let fix = by_field.get(field).copied().flatten();
assert!(
fix.is_some(),
"field {field} must carry a `fix:` per the design ruling",
);
}
for field in &must_not_fix {
let fix = by_field.get(field).copied().flatten();
assert!(
fix.is_none(),
"field {field} must NOT carry a `fix:` (multi-cause or \
steady-state None) — got: {fix:?}",
);
}
}
#[test]
fn explain_sidecar_missing_run_returns_error() {
let tmp = tempfile::tempdir().unwrap();
let err = super::explain_sidecar("nonexistent-run", Some(tmp.path()), false).unwrap_err();
let msg = format!("{err:#}");
assert!(
msg.contains("run 'nonexistent-run' not found"),
"missing-run error must name the run: {msg}",
);
assert!(
msg.contains("cargo ktstr stats list"),
"missing-run error must name the discovery command: {msg}",
);
}
#[test]
fn explain_sidecar_empty_run_returns_error() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-empty");
std::fs::create_dir(&run_dir).unwrap();
let err = super::explain_sidecar("run-empty", Some(tmp.path()), false).unwrap_err();
let msg = format!("{err:#}");
assert!(
msg.contains("no sidecar data"),
"empty-run error must use the canonical message: {msg}",
);
assert!(
msg.contains("searched"),
"empty-run error must name the searched directory: {msg}",
);
assert!(
msg.contains(&run_dir.display().to_string()),
"empty-run error must include the resolved run_dir path \
({}): {msg}",
run_dir.display(),
);
}
#[test]
fn explain_sidecar_all_corrupt_renders_structured_diagnostic() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-corrupt");
std::fs::create_dir(&run_dir).unwrap();
std::fs::write(run_dir.join("a-0000000000000000.ktstr.json"), "not json {").unwrap();
std::fs::write(
run_dir.join("b-0000000000000000.ktstr.json"),
"{\"missing\": \"required-fields\"}",
)
.unwrap();
let out = super::explain_sidecar("run-corrupt", Some(tmp.path()), false)
.expect("all-corrupt is no longer a hard error — must render");
assert!(
out.contains("walked 2"),
"header must name the walked count: {out}",
);
assert!(
out.contains("parsed 0 valid"),
"header must distinguish walked-vs-parsed (zero valid): {out}",
);
assert!(
out.contains("corrupt sidecars (2):"),
"all-corrupt run must surface the corrupt-sidecars \
block listing every parse failure: {out}",
);
assert!(
!out.contains("test:"),
"no sidecar parsed — must not emit any per-sidecar \
block: {out}",
);
}
#[test]
fn explain_sidecar_text_lists_all_none_fields_for_fixture() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-all-none");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-all-none", Some(tmp.path()), false).unwrap();
assert!(out.contains("walked 1"), "header must report walked: {out}");
assert!(out.contains("parsed 1"), "header must report parsed: {out}");
assert!(
out.contains("none fields (10)"),
"fixture has every Option as None — count must be 10: {out}",
);
for entry in super::SIDECAR_NONE_CATALOG {
assert!(
out.contains(entry.field),
"output must mention field {}: {out}",
entry.field,
);
}
assert!(
out.contains("[expected]"),
"expected-class fields must surface their tag: {out}",
);
assert!(
out.contains("[actionable]"),
"actionable-class fields must surface their tag: {out}",
);
let project_commit_fix = super::SIDECAR_NONE_CATALOG
.iter()
.find(|e| e.field == "project_commit")
.and_then(|e| e.fix)
.expect("project_commit must carry a fix per the design ruling");
assert!(
out.contains(&format!("fix: {project_commit_fix}")),
"project_commit's fix: line must render its catalog \
prose verbatim ({project_commit_fix:?}): {out}",
);
let fix_line_count = out.matches("\n fix:").count();
let expected_fix_count = super::SIDECAR_NONE_CATALOG
.iter()
.filter(|e| e.fix.is_some())
.count();
assert_eq!(
fix_line_count, expected_fix_count,
"exactly {expected_fix_count} entries carry a fix: in \
the catalog (count derived via \
SIDECAR_NONE_CATALOG.iter().filter(|e| e.fix.is_some()).count()); \
output emitted {fix_line_count}: {out}",
);
}
#[test]
fn explain_sidecar_json_shape_aggregates_none_counts() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-json");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-json", Some(tmp.path()), true).unwrap();
let parsed: serde_json::Value =
serde_json::from_str(&out).expect("json output must round-trip parse");
let walk = parsed.get("_walk").expect("must have _walk key");
assert_eq!(walk.get("walked").and_then(|v| v.as_u64()), Some(1));
assert_eq!(walk.get("valid").and_then(|v| v.as_u64()), Some(1));
let fields = parsed.get("fields").expect("must have fields key");
for entry in super::SIDECAR_NONE_CATALOG {
let f = fields
.get(entry.field)
.unwrap_or_else(|| panic!("missing field {}", entry.field));
let none_count = f
.get("none_count")
.and_then(|v| v.as_u64())
.unwrap_or_else(|| panic!("missing none_count for {}", entry.field));
let some_count = f
.get("some_count")
.and_then(|v| v.as_u64())
.unwrap_or_else(|| panic!("missing some_count for {}", entry.field));
assert_eq!(
none_count, 1,
"every field in fixture is None — none_count must be 1 for {}",
entry.field,
);
assert_eq!(
some_count, 0,
"every field in fixture is None — some_count must be 0 for {}",
entry.field,
);
assert_eq!(
none_count + some_count,
1,
"none_count + some_count must sum to _walk.valid (1) for {}",
entry.field,
);
assert_eq!(
f.get("classification").and_then(|v| v.as_str()),
Some(entry.classification.as_str()),
"classification must round-trip for {}",
entry.field,
);
let causes = f
.get("causes")
.and_then(|v| v.as_array())
.unwrap_or_else(|| panic!("missing causes for {}", entry.field));
assert_eq!(
causes.len(),
entry.causes.len(),
"causes array length must match catalog for {}",
entry.field,
);
let fix_value = f
.get("fix")
.unwrap_or_else(|| panic!("missing fix for {}", entry.field));
match entry.fix {
Some(expected) => {
assert_eq!(
fix_value.as_str(),
Some(expected),
"fix string must round-trip for {}",
entry.field,
);
}
None => {
assert!(
fix_value.is_null(),
"fix must be JSON null for fix=None entry {}: \
got {fix_value:?}",
entry.field,
);
}
}
}
}
#[test]
fn explain_sidecar_text_distinguishes_populated_from_none() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-mixed");
std::fs::create_dir(&run_dir).unwrap();
let mut sc = crate::test_support::SidecarResult::test_fixture();
sc.payload = Some("ipc_pingpong".to_string());
sc.kernel_version = Some("6.14.2".to_string());
sc.run_source = Some("local".to_string());
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-mixed", Some(tmp.path()), false).unwrap();
assert!(
out.contains("populated optional fields (3)"),
"must report 3 populated: {out}",
);
assert!(
out.contains("payload"),
"populated `payload` must appear: {out}",
);
assert!(out.contains("none fields (7)"), "must report 7 None: {out}",);
}
#[test]
fn explain_sidecar_text_renders_arch_line() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-arch");
std::fs::create_dir(&run_dir).unwrap();
let mut sc = crate::test_support::SidecarResult::test_fixture();
sc.host = Some(crate::host_context::HostContext::test_fixture());
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-arch", Some(tmp.path()), false).unwrap();
assert!(
out.contains("arch: x86_64"),
"host-populated sidecar must surface `arch: x86_64` per the \
test_fixture default: {out}",
);
}
#[test]
fn explain_sidecar_text_arch_line_falls_back_to_dash_when_host_none() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-arch-none");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-arch-none", Some(tmp.path()), false).unwrap();
assert!(
out.contains("arch: -"),
"host-None sidecar must surface `arch: -` (consistent \
sentinel with `list_runs`'s arch column): {out}",
);
}
#[test]
fn explain_sidecar_text_emits_one_block_per_sidecar() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-two");
std::fs::create_dir(&run_dir).unwrap();
let mut a = crate::test_support::SidecarResult::test_fixture();
a.test_name = "test_a".to_string();
let mut b = crate::test_support::SidecarResult::test_fixture();
b.test_name = "test_b".to_string();
b.payload = Some("ipc_pingpong".to_string());
std::fs::write(
run_dir.join("a-0000000000000000.ktstr.json"),
serde_json::to_string(&a).unwrap(),
)
.unwrap();
std::fs::write(
run_dir.join("b-0000000000000000.ktstr.json"),
serde_json::to_string(&b).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-two", Some(tmp.path()), false).unwrap();
assert!(out.contains("test: test_a"), "test_a block missing: {out}");
assert!(out.contains("test: test_b"), "test_b block missing: {out}");
assert!(out.contains("walked 2"), "walked count must be 2: {out}");
assert!(out.contains("parsed 2"), "parsed count must be 2: {out}");
}
#[test]
fn explain_sidecar_json_aggregates_partial_none_correctly() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-partial");
std::fs::create_dir(&run_dir).unwrap();
let a = crate::test_support::SidecarResult::test_fixture();
let mut b = crate::test_support::SidecarResult::test_fixture();
b.payload = Some("ipc_pingpong".to_string());
std::fs::write(
run_dir.join("a-0000000000000000.ktstr.json"),
serde_json::to_string(&a).unwrap(),
)
.unwrap();
std::fs::write(
run_dir.join("b-0000000000000000.ktstr.json"),
serde_json::to_string(&b).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-partial", Some(tmp.path()), true).unwrap();
let parsed: serde_json::Value = serde_json::from_str(&out).unwrap();
let payload = parsed
.get("fields")
.and_then(|f| f.get("payload"))
.expect("payload field must be present");
assert_eq!(
payload.get("none_count").and_then(|v| v.as_u64()),
Some(1),
"payload None in 1 of 2 sidecars — none_count must be 1",
);
assert_eq!(
payload.get("some_count").and_then(|v| v.as_u64()),
Some(1),
"payload Some in 1 of 2 sidecars — some_count must be 1",
);
let host = parsed
.get("fields")
.and_then(|f| f.get("host"))
.expect("host field must be present");
assert_eq!(
host.get("none_count").and_then(|v| v.as_u64()),
Some(2),
"host None in 2 of 2 sidecars — none_count must be 2",
);
assert_eq!(
host.get("some_count").and_then(|v| v.as_u64()),
Some(0),
"host Some in 0 of 2 sidecars — some_count must be 0",
);
}
#[test]
fn explain_sidecar_walks_corrupt_files_into_count() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-mixed-parse");
std::fs::create_dir(&run_dir).unwrap();
let valid = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("a-0000000000000000.ktstr.json"),
serde_json::to_string(&valid).unwrap(),
)
.unwrap();
std::fs::write(run_dir.join("b-0000000000000000.ktstr.json"), "garbage{").unwrap();
let out = super::explain_sidecar("run-mixed-parse", Some(tmp.path()), false).unwrap();
assert!(
out.contains("walked 2"),
"walker must visit both files: {out}",
);
assert!(
out.contains("parsed 1"),
"only the valid file parses: {out}",
);
}
#[test]
fn explain_sidecar_walks_one_level_subdirectory() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-sub");
let sub = run_dir.join("job-x");
std::fs::create_dir_all(&sub).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
sub.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-sub", Some(tmp.path()), false).unwrap();
assert!(out.contains("walked 1"), "must walk into job-x: {out}");
assert!(
out.contains("parsed 1"),
"must parse the nested file: {out}"
);
}
#[test]
fn explain_sidecar_ignores_non_ktstr_json() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-with-other-json");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
std::fs::write(run_dir.join("metadata.json"), "{}").unwrap();
let out = super::explain_sidecar("run-with-other-json", Some(tmp.path()), false).unwrap();
assert!(
out.contains("walked 1"),
"non-ktstr JSON must not inflate the walked count: {out}",
);
}
#[test]
fn explain_sidecar_json_is_valid_document() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-roundtrip");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-roundtrip", Some(tmp.path()), true).unwrap();
let _: serde_json::Value = serde_json::from_str(&out).expect("output must be valid JSON");
}
#[test]
fn explain_sidecar_text_handles_partial_population() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-partial-pop");
std::fs::create_dir(&run_dir).unwrap();
let mut sc = crate::test_support::SidecarResult::test_fixture();
sc.scheduler_commit = Some("aaaa111".to_string());
sc.project_commit = Some("bbbb222".to_string());
sc.payload = Some("payload".to_string());
sc.kernel_version = Some("6.14.2".to_string());
sc.kernel_commit = Some("cccc333".to_string());
sc.cleanup_duration_ms = Some(123);
sc.run_source = Some("local".to_string());
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-partial-pop", Some(tmp.path()), false).unwrap();
assert!(
out.contains("populated optional fields (7)"),
"7 of 10 Options populated must be reflected in the count: {out}",
);
assert!(
out.contains("none fields (3)"),
"3 of 10 Options remain None — must report (3): {out}",
);
}
#[test]
fn none_classification_as_str_returns_stable_tokens() {
assert_eq!(super::NoneClassification::Expected.as_str(), "expected");
assert_eq!(super::NoneClassification::Actionable.as_str(), "actionable",);
}
#[test]
fn kernel_commit_catalog_lists_five_causes() {
let entry = super::SIDECAR_NONE_CATALOG
.iter()
.find(|e| e.field == "kernel_commit")
.expect("kernel_commit must be in the catalog");
assert_eq!(
entry.causes.len(),
5,
"kernel_commit rustdoc enumerates 5 None causes; catalog \
must mirror that",
);
}
#[test]
fn explain_sidecar_schema_version_constant_is_one() {
assert_eq!(super::EXPLAIN_SIDECAR_SCHEMA_VERSION, "1");
}
#[test]
fn explain_sidecar_json_includes_schema_version() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-schema");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-schema", Some(tmp.path()), true).unwrap();
let parsed: serde_json::Value =
serde_json::from_str(&out).expect("json output must round-trip parse");
assert_eq!(
parsed.get("_schema_version").and_then(|v| v.as_str()),
Some(super::EXPLAIN_SIDECAR_SCHEMA_VERSION),
"JSON output must stamp _schema_version: {out}",
);
}
#[test]
fn explain_sidecar_json_walk_errors_empty_when_all_valid() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-clean-walk");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-clean-walk", Some(tmp.path()), true).unwrap();
let parsed: serde_json::Value =
serde_json::from_str(&out).expect("json output must round-trip parse");
let errors = parsed
.get("_walk")
.and_then(|w| w.get("errors"))
.and_then(|e| e.as_array())
.expect("_walk.errors must be a JSON array");
assert!(
errors.is_empty(),
"no parse failures — _walk.errors must be empty: {out}",
);
}
#[test]
fn explain_sidecar_json_walk_errors_lists_corrupt_files() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-mixed-errs-json");
std::fs::create_dir(&run_dir).unwrap();
let valid = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("a-0000000000000000.ktstr.json"),
serde_json::to_string(&valid).unwrap(),
)
.unwrap();
let corrupt_path = run_dir.join("b-0000000000000000.ktstr.json");
std::fs::write(&corrupt_path, "garbage{").unwrap();
let out = super::explain_sidecar("run-mixed-errs-json", Some(tmp.path()), true).unwrap();
let parsed: serde_json::Value =
serde_json::from_str(&out).expect("json output must round-trip parse");
let walk = parsed.get("_walk").expect("must have _walk key");
assert_eq!(walk.get("walked").and_then(|v| v.as_u64()), Some(2));
assert_eq!(walk.get("valid").and_then(|v| v.as_u64()), Some(1));
let errors = walk
.get("errors")
.and_then(|e| e.as_array())
.expect("_walk.errors must be a JSON array");
assert_eq!(errors.len(), 1, "exactly one parse failure expected: {out}",);
let entry = &errors[0];
let path = entry
.get("path")
.and_then(|v| v.as_str())
.expect("each error entry must carry a string `path`");
assert_eq!(
path,
corrupt_path.display().to_string(),
"error path must match the corrupt file's resolved path",
);
let error = entry
.get("error")
.and_then(|v| v.as_str())
.expect("each error entry must carry a string `error`");
assert!(
!error.is_empty(),
"error message must not be empty (serde_json should produce \
a parse-error message for `garbage{{`): {out}",
);
let enriched = entry
.get("enriched_message")
.expect("each error entry must carry an enriched_message key");
assert!(
enriched.is_null(),
"generic parse failure has no schema-drift remediation; \
enriched_message must be JSON null: {enriched:?}",
);
}
#[test]
fn enriched_parse_error_message_returns_prose_for_host_missing_pattern() {
let raw = "missing field `host` at line 1 column 100";
let path = std::path::Path::new("/tmp/example-run/sidecar.ktstr.json");
let enriched = crate::test_support::enriched_parse_error_message_for_test(path, raw)
.expect("host-missing pattern must produce enrichment prose");
assert!(
enriched.contains("host"),
"enrichment must mention the host field: {enriched}",
);
assert!(
enriched.contains("re-run"),
"enrichment must point at the re-run remediation: {enriched}",
);
assert!(
enriched.contains("disposable-sidecar"),
"enrichment must reference the pre-1.0 disposable-sidecar \
policy: {enriched}",
);
let raw_generic = "expected ident at line 1 column 2";
let no_enrichment =
crate::test_support::enriched_parse_error_message_for_test(path, raw_generic);
assert!(
no_enrichment.is_none(),
"generic parse error must produce no enrichment: {no_enrichment:?}",
);
}
#[test]
fn explain_sidecar_all_corrupt_json_renders_structured_diagnostic() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-all-corrupt-json");
std::fs::create_dir(&run_dir).unwrap();
std::fs::write(run_dir.join("a-0000000000000000.ktstr.json"), "{").unwrap();
std::fs::write(run_dir.join("b-0000000000000000.ktstr.json"), "garbage{").unwrap();
let out = super::explain_sidecar("run-all-corrupt-json", Some(tmp.path()), true)
.expect("all-corrupt JSON must render, not bail");
let parsed: serde_json::Value =
serde_json::from_str(&out).expect("json output must round-trip parse");
let walk = parsed.get("_walk").expect("must have _walk key");
assert_eq!(walk.get("walked").and_then(|v| v.as_u64()), Some(2));
assert_eq!(
walk.get("valid").and_then(|v| v.as_u64()),
Some(0),
"all-corrupt run must report valid=0: {out}",
);
let errors = walk
.get("errors")
.and_then(|e| e.as_array())
.expect("_walk.errors must be present");
assert_eq!(
errors.len(),
2,
"every parse failure must surface in _walk.errors: {out}",
);
let fields = parsed
.get("fields")
.and_then(|f| f.as_object())
.expect("fields must be present");
for entry in super::SIDECAR_NONE_CATALOG {
let f = fields
.get(entry.field)
.unwrap_or_else(|| panic!("field {} must be present", entry.field));
assert_eq!(
f.get("none_count").and_then(|v| v.as_u64()),
Some(0),
"{}: zero valid sidecars — none_count must be 0",
entry.field,
);
assert_eq!(
f.get("some_count").and_then(|v| v.as_u64()),
Some(0),
"{}: zero valid sidecars — some_count must be 0",
entry.field,
);
}
assert_eq!(
parsed.get("_schema_version").and_then(|v| v.as_str()),
Some(super::EXPLAIN_SIDECAR_SCHEMA_VERSION),
"schema_version must stamp on every render: {out}",
);
}
#[test]
fn explain_sidecar_text_omits_enriched_line_for_generic_failure() {
let (tmp, run_dir) = make_test_run("run-generic-fail-text");
write_corrupt_sidecar(&run_dir, "a-0000000000000000", "garbage{");
let out = super::explain_sidecar("run-generic-fail-text", Some(tmp.path()), false).unwrap();
assert!(
out.contains("corrupt sidecars (1):"),
"generic parse failure must surface in the corrupt \
block: {out}",
);
assert!(
out.contains(" error:"),
"generic parse failure must emit raw `error:` line: {out}",
);
assert!(
!out.contains(" enriched:"),
"generic parse failure has no enrichment — `enriched:` \
line must NOT appear: {out}",
);
}
#[test]
fn explain_sidecar_text_appends_corrupt_sidecars_block() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-text-corrupt");
std::fs::create_dir(&run_dir).unwrap();
let mut valid = crate::test_support::SidecarResult::test_fixture();
valid.test_name = "valid_test".to_string();
std::fs::write(
run_dir.join("a-0000000000000000.ktstr.json"),
serde_json::to_string(&valid).unwrap(),
)
.unwrap();
let corrupt_path = run_dir.join("b-0000000000000000.ktstr.json");
std::fs::write(&corrupt_path, "garbage{").unwrap();
let out = super::explain_sidecar("run-text-corrupt", Some(tmp.path()), false).unwrap();
assert!(
out.contains("corrupt sidecars (1):"),
"text output must include trailing corrupt-sidecars block \
when errors exist: {out}",
);
assert!(
out.contains(&corrupt_path.display().to_string()),
"corrupt-sidecars block must list the corrupt file's path: {out}",
);
assert!(
out.contains(" error:"),
"corrupt-sidecars block must indent each error under its path: {out}",
);
let header_pos = out
.find("walked 2 sidecar file(s)")
.expect("walked-header must precede everything");
let test_block_pos = out
.find("test: valid_test")
.expect("per-sidecar block must emit for the valid file");
let corrupt_pos = out
.find("corrupt sidecars (1):")
.expect("corrupt-sidecars block must emit");
assert!(
header_pos < test_block_pos,
"header must precede per-sidecar blocks: {out}",
);
assert!(
test_block_pos < corrupt_pos,
"per-sidecar blocks must precede the trailing corrupt \
block — operators read top-down: {out}",
);
}
#[test]
fn explain_sidecar_text_omits_corrupt_block_when_no_errors() {
let (tmp, run_dir) = make_test_run("run-text-clean");
let sc = crate::test_support::SidecarResult::test_fixture();
write_sidecar(&run_dir, "t-0000000000000000", &sc);
let out = super::explain_sidecar("run-text-clean", Some(tmp.path()), false).unwrap();
assert!(
!out.contains("corrupt sidecars"),
"no parse failures — corrupt-sidecars block must be \
suppressed: {out}",
);
}
#[test]
fn explain_sidecar_does_not_flag_empty_vec_fields_as_none() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-vecs");
std::fs::create_dir(&run_dir).unwrap();
let mut sc = crate::test_support::SidecarResult::test_fixture();
sc.scheduler_commit = Some("aaaa111".to_string());
sc.project_commit = Some("bbbb222".to_string());
sc.payload = Some("payload".to_string());
sc.kernel_version = Some("6.14.2".to_string());
sc.kernel_commit = Some("cccc333".to_string());
sc.cleanup_duration_ms = Some(123);
sc.run_source = Some("local".to_string());
sc.monitor = Some(crate::monitor::MonitorSummary::default());
sc.kvm_stats = Some(crate::vmm::KvmStatsTotals::default());
sc.host = Some(crate::host_context::HostContext::test_fixture());
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-vecs", Some(tmp.path()), false).unwrap();
assert!(
out.contains("none fields: <all populated>"),
"all Options populated — must report no None fields: {out}",
);
for vec_field in SIDECAR_VEC_FIELDS {
assert!(
!out.contains(vec_field),
"Vec field '{vec_field}' is hard-required (not Option) and \
must never appear in explain-sidecar output: {out}",
);
}
}
#[test]
fn explain_sidecar_handles_old_source_key_sidecar() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-old-source-key");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
let mut value = serde_json::to_value(&sc).expect("fixture must serialize");
let obj = value.as_object_mut().expect("fixture is an Object");
obj.remove("run_source");
obj.insert(
"source".to_string(),
serde_json::Value::String("archive".to_string()),
);
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&value).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-old-source-key", Some(tmp.path()), false).unwrap();
assert!(
out.contains("run_source"),
"explain-sidecar must surface run_source as None for \
pre-rename archive: {out}",
);
assert!(
out.contains("rename"),
"run_source None cause must mention the rename: {out}",
);
}
#[test]
fn explain_sidecar_resolves_dir_default_to_runs_root() {
use crate::test_support::test_helpers::{EnvVarGuard, lock_env};
let _lock = lock_env();
let tmp = tempfile::tempdir().unwrap();
let _env_target = EnvVarGuard::set("CARGO_TARGET_DIR", tmp.path());
let _env_sidecar = EnvVarGuard::remove("KTSTR_SIDECAR_DIR");
let runs_root = tmp.path().join("ktstr");
let run_dir = runs_root.join("run-default-root");
std::fs::create_dir_all(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-default-root", None, false)
.expect("dir=None must resolve via runs_root() and succeed");
assert!(
out.contains("walked 1"),
"default-dir resolution must walk into the run dir: {out}",
);
assert!(
out.contains("parsed 1 valid"),
"default-dir resolution must parse the sidecar: {out}",
);
}
#[test]
fn explain_sidecar_handles_zero_byte_file() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-zero-byte");
std::fs::create_dir(&run_dir).unwrap();
let valid = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("a-0000000000000000.ktstr.json"),
serde_json::to_string(&valid).unwrap(),
)
.unwrap();
std::fs::write(run_dir.join("b-0000000000000000.ktstr.json"), "").unwrap();
let out = super::explain_sidecar("run-zero-byte", Some(tmp.path()), false).unwrap();
assert!(
out.contains("walked 2"),
"walker must visit both files (valid + zero-byte): {out}",
);
assert!(
out.contains("parsed 1"),
"only the valid file parses; zero-byte is a parse \
failure: {out}",
);
assert!(
out.contains("corrupt sidecars (1):"),
"zero-byte file must surface in the corrupt-sidecars \
block as a parse failure, not be silently dropped: {out}",
);
}
#[test]
fn explain_sidecar_tolerates_unknown_extra_fields() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-extra-fields");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
let mut value = serde_json::to_value(&sc).expect("fixture must serialize");
let obj = value.as_object_mut().expect("fixture is an Object");
obj.insert(
"future_field".to_string(),
serde_json::Value::String("hypothetical".to_string()),
);
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&value).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-extra-fields", Some(tmp.path()), false).unwrap();
assert!(
out.contains("walked 1"),
"walker must visit the file: {out}",
);
assert!(
out.contains("parsed 1 valid"),
"extra `future_field` key must NOT block deserialize \
(SidecarResult does not deny_unknown_fields): {out}",
);
assert!(
out.contains("test: t"),
"parsed sidecar must render its test_name: {out}",
);
}
#[test]
fn explain_sidecar_classification_accuracy_per_field() {
let by_field: std::collections::HashMap<&'static str, super::NoneClassification> =
super::SIDECAR_NONE_CATALOG
.iter()
.map(|e| (e.field, e.classification))
.collect();
assert_eq!(
by_field.len(),
super::SIDECAR_NONE_CATALOG.len(),
"SIDECAR_NONE_CATALOG must have unique `field` values \
— HashMap collected {} entries, catalog has {}. Two \
entries sharing a name would silently overwrite during \
collect.",
by_field.len(),
super::SIDECAR_NONE_CATALOG.len(),
);
let expected_pairs: &[(&str, super::NoneClassification)] = &[
("scheduler_commit", super::NoneClassification::Expected),
("payload", super::NoneClassification::Expected),
("project_commit", super::NoneClassification::Actionable),
("monitor", super::NoneClassification::Actionable),
("kvm_stats", super::NoneClassification::Actionable),
("kernel_version", super::NoneClassification::Actionable),
("kernel_commit", super::NoneClassification::Actionable),
("host", super::NoneClassification::Actionable),
("cleanup_duration_ms", super::NoneClassification::Actionable),
("run_source", super::NoneClassification::Actionable),
];
assert_eq!(
expected_pairs.len(),
super::SIDECAR_NONE_CATALOG.len(),
"every catalog entry must have a pinned classification \
in this test (catalog len {}, pinned len {})",
super::SIDECAR_NONE_CATALOG.len(),
expected_pairs.len(),
);
for (field, expected) in expected_pairs {
let actual = by_field
.get(field)
.copied()
.unwrap_or_else(|| panic!("catalog must contain field {field}"));
assert_eq!(
actual, *expected,
"field {field}: classification mismatch — expected \
{expected:?}, got {actual:?}",
);
}
}
#[test]
fn explain_sidecar_io_errors_surface_in_text_block_and_json() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-io-err");
std::fs::create_dir(&run_dir).unwrap();
let sub = run_dir.join("sub");
std::fs::create_dir(&sub).unwrap();
std::fs::create_dir(sub.join("eisdir.ktstr.json")).unwrap();
let text_out = super::explain_sidecar("run-io-err", Some(tmp.path()), false).unwrap();
assert!(
text_out.contains("walked 1"),
"predicate-matching dir must count as walked: {text_out}",
);
assert!(
text_out.contains("parsed 0 valid"),
"no parsed sidecar — header must report 0 valid: {text_out}",
);
assert!(
text_out.contains("io errors (1):"),
"IO failure must surface in the trailing io-errors block: {text_out}",
);
assert!(
text_out.contains("eisdir.ktstr.json"),
"io-errors block must name the failing path: {text_out}",
);
assert!(
!text_out.contains("corrupt sidecars"),
"no parse failures — corrupt-sidecars block must be \
absent (IO and parse channels are distinct): {text_out}",
);
let json_out = super::explain_sidecar("run-io-err", Some(tmp.path()), true).unwrap();
let parsed: serde_json::Value =
serde_json::from_str(&json_out).expect("json output must round-trip parse");
let walk = parsed.get("_walk").expect("must have _walk");
assert_eq!(walk.get("walked").and_then(|v| v.as_u64()), Some(1));
assert_eq!(walk.get("valid").and_then(|v| v.as_u64()), Some(0));
let parse_errs = walk
.get("errors")
.and_then(|e| e.as_array())
.expect("_walk.errors must be present as array");
assert!(
parse_errs.is_empty(),
"no parse failures — _walk.errors must be empty: {json_out}",
);
let io_errs = walk
.get("io_errors")
.and_then(|e| e.as_array())
.expect("_walk.io_errors must be present as array (#124)");
assert_eq!(
io_errs.len(),
1,
"exactly one IO failure expected: {json_out}",
);
let entry = &io_errs[0];
let path = entry
.get("path")
.and_then(|v| v.as_str())
.expect("each io-error entry must carry a string `path`");
assert!(
path.ends_with("eisdir.ktstr.json"),
"io-error path must name the failing file: got {path}",
);
let error = entry
.get("error")
.and_then(|v| v.as_str())
.expect("each io-error entry must carry a string `error`");
assert!(
!error.is_empty(),
"io-error message must not be empty: {json_out}",
);
assert!(
entry.get("enriched_message").is_none(),
"io-error entries must NOT have enriched_message: {json_out}",
);
}
#[test]
fn explain_sidecar_walk_counts_reconcile_across_outcomes() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-mixed-outcomes");
std::fs::create_dir(&run_dir).unwrap();
let valid = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("a-0000000000000000.ktstr.json"),
serde_json::to_string(&valid).unwrap(),
)
.unwrap();
std::fs::write(run_dir.join("b-0000000000000000.ktstr.json"), "garbage{").unwrap();
let sub = run_dir.join("sub");
std::fs::create_dir(&sub).unwrap();
std::fs::create_dir(sub.join("c-0000000000000000.ktstr.json")).unwrap();
let json_out =
super::explain_sidecar("run-mixed-outcomes", Some(tmp.path()), true).unwrap();
let parsed: serde_json::Value =
serde_json::from_str(&json_out).expect("json output must round-trip parse");
let walk = parsed.get("_walk").expect("must have _walk");
let walked = walk.get("walked").and_then(|v| v.as_u64()).unwrap();
let valid_n = walk.get("valid").and_then(|v| v.as_u64()).unwrap();
let parse_errs = walk.get("errors").and_then(|e| e.as_array()).unwrap().len() as u64;
let io_errs = walk
.get("io_errors")
.and_then(|e| e.as_array())
.unwrap()
.len() as u64;
assert_eq!(
walked,
valid_n + parse_errs + io_errs,
"walked must equal valid + errors + io_errors — every \
predicate-matching file lands in exactly one bucket. \
walked={walked}, valid={valid_n}, errors={parse_errs}, \
io_errors={io_errs}: {json_out}",
);
assert_eq!(walked, 3, "three predicate-matching entries");
assert_eq!(valid_n, 1, "one valid sidecar");
assert_eq!(parse_errs, 1, "one parse failure");
assert_eq!(io_errs, 1, "one io failure");
}
#[test]
fn explain_sidecar_json_walk_io_errors_empty_when_no_io_failures() {
let tmp = tempfile::tempdir().unwrap();
let run_dir = tmp.path().join("run-clean-io");
std::fs::create_dir(&run_dir).unwrap();
let sc = crate::test_support::SidecarResult::test_fixture();
std::fs::write(
run_dir.join("t-0000000000000000.ktstr.json"),
serde_json::to_string(&sc).unwrap(),
)
.unwrap();
let out = super::explain_sidecar("run-clean-io", Some(tmp.path()), true).unwrap();
let parsed: serde_json::Value =
serde_json::from_str(&out).expect("json output must round-trip parse");
let io_errs = parsed
.get("_walk")
.and_then(|w| w.get("io_errors"))
.and_then(|e| e.as_array())
.expect("_walk.io_errors must be present as array even when empty");
assert!(
io_errs.is_empty(),
"no IO failures — _walk.io_errors must be empty: {out}",
);
}
#[test]
fn explain_sidecar_text_e2e_enrichment_renders_in_corrupt_block() {
let parse_err = crate::test_support::SidecarParseError {
path: std::path::PathBuf::from("/tmp/example-run/sidecar.ktstr.json"),
raw_error: "missing field `host` at line 1 column 100".to_string(),
enriched_message: Some(
"ktstr_test: skipping /tmp/example-run/sidecar.ktstr.json: \
missing field `host` ... — re-run the test"
.to_string(),
),
};
let walk = super::WalkStats {
walked: 1,
valid: 0,
errors: vec![parse_err],
io_errors: Vec::new(),
};
let out = super::render_explain_sidecar_text(&[], &walk);
assert!(
out.contains("corrupt sidecars (1):"),
"non-empty errors must surface the trailing block: {out}",
);
assert!(
out.contains(" error: missing field `host`"),
"raw serde error must render verbatim: {out}",
);
assert!(
out.contains(" enriched: "),
"enriched line must render below the raw error: {out}",
);
let error_pos = out
.find(" error: ")
.expect("error: substring must be present");
let enriched_pos = out
.find(" enriched: ")
.expect("enriched: substring must be present");
assert!(
error_pos < enriched_pos,
"raw `error:` line must precede `enriched:` line in \
the rendered text — operator reads grep-friendly raw \
first, then human remediation: {out}",
);
}
#[test]
fn explain_sidecar_json_e2e_enrichment_renders_in_walk_errors() {
let prose = "ktstr_test: skipping path: missing field `host` \
— re-run the test to regenerate";
let parse_err = crate::test_support::SidecarParseError {
path: std::path::PathBuf::from("/tmp/example-run/sidecar.ktstr.json"),
raw_error: "missing field `host` at line 1 column 100".to_string(),
enriched_message: Some(prose.to_string()),
};
let walk = super::WalkStats {
walked: 1,
valid: 0,
errors: vec![parse_err],
io_errors: Vec::new(),
};
let out = super::render_explain_sidecar_json(&[], &walk);
let parsed: serde_json::Value =
serde_json::from_str(&out).expect("json output must round-trip parse");
let errors = parsed
.get("_walk")
.and_then(|w| w.get("errors"))
.and_then(|e| e.as_array())
.expect("_walk.errors must be a JSON array");
assert_eq!(
errors.len(),
1,
"synthetic input has exactly one parse error: {out}",
);
let entry = &errors[0];
let enriched = entry
.get("enriched_message")
.and_then(|v| v.as_str())
.expect("enriched_message must be a JSON string for enriched failures");
assert_eq!(
enriched, prose,
"enriched_message must round-trip the catalog prose verbatim: {out}",
);
let raw = entry
.get("error")
.and_then(|v| v.as_str())
.expect("error must be a JSON string");
assert!(
raw.contains("missing field"),
"raw error must round-trip verbatim alongside enriched: {out}",
);
}
#[test]
fn explain_sidecar_rejects_parent_dir_traversal_in_run() {
let tmp = tempfile::tempdir().unwrap();
for traversal in ["../escape", "subdir/../../escape"] {
let err = super::explain_sidecar(traversal, Some(tmp.path()), false).expect_err(
"path-traversal `..` in --run must be rejected before \
resolution",
);
let msg = format!("{err:#}");
assert!(
msg.contains("path-traversal"),
"rejection message must name the cause for {traversal}: \
{msg}",
);
assert!(
msg.contains(traversal),
"rejection message must include the offending input \
({traversal}): {msg}",
);
}
}
#[test]
fn explain_sidecar_rejects_absolute_path_in_run() {
let tmp = tempfile::tempdir().unwrap();
let err = super::explain_sidecar("/etc/passwd", Some(tmp.path()), false)
.expect_err("absolute path in --run must be rejected");
let msg = format!("{err:#}");
assert!(
msg.contains("path-traversal"),
"absolute-path rejection must name the cause: {msg}",
);
}
#[test]
fn explain_sidecar_rejects_empty_run() {
let tmp = tempfile::tempdir().unwrap();
let err = super::explain_sidecar("", Some(tmp.path()), false)
.expect_err("empty --run must be rejected");
let msg = format!("{err:#}");
assert!(
msg.contains("must not be empty"),
"empty-string rejection must name the cause: {msg}",
);
}
#[test]
fn explain_sidecar_rejects_curdir_run() {
let tmp = tempfile::tempdir().unwrap();
let err = super::explain_sidecar(".", Some(tmp.path()), false)
.expect_err("`.` --run must be rejected");
let msg = format!("{err:#}");
assert!(
msg.contains("path-traversal"),
"`.` rejection must surface the pool-root-aliasing \
cause: {msg}",
);
}
#[test]
fn explain_sidecar_accepts_bare_run_key_after_traversal_check() {
let tmp = tempfile::tempdir().unwrap();
let err = super::explain_sidecar("6.14-abc1234", Some(tmp.path()), false)
.expect_err("non-existent run must surface the not-found error");
let msg = format!("{err:#}");
assert!(
msg.contains("not found"),
"bare run key must reach the not-found gate, not the \
traversal gate: {msg}",
);
assert!(
!msg.contains("path-traversal"),
"bare run key must NOT trip the traversal check: {msg}",
);
}
#[test]
fn spinner_drop_without_finish_does_not_panic_in_non_tty() {
let sp = Spinner::start("test");
drop(sp);
}
#[test]
fn spinner_finish_then_drop_is_idempotent() {
let sp = Spinner::start("test");
sp.finish("done");
}
#[test]
#[cfg(debug_assertions)]
#[should_panic(expected = "Spinner::start called while another Spinner is already active")]
fn spinner_nested_start_panics_under_debug_assertions() {
let _outer = Spinner::start("outer");
let _inner = Spinner::start("inner");
}
#[test]
fn spinner_start_releases_guard_on_drop() {
{
let _sp = Spinner::start("first");
}
let _sp = Spinner::start("second");
}
#[test]
fn drain_lines_lossy_eof_terminated_happy_path() {
let input: &[u8] = b"alpha\nbeta\ngamma\n";
let mut seen = Vec::new();
let captured = drain_lines_lossy(std::io::Cursor::new(input), |line| {
seen.push(line.to_string())
})
.unwrap();
assert_eq!(captured, vec!["alpha", "beta", "gamma"]);
assert_eq!(seen, captured);
}
#[test]
fn drain_lines_lossy_strips_crlf() {
let input: &[u8] = b"one\r\ntwo\r\nthree\r\n";
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_| {}).unwrap();
assert_eq!(captured, vec!["one", "two", "three"]);
}
#[test]
fn drain_lines_lossy_non_utf8_bytes_survive_via_replacement() {
let input: &[u8] = b"valid\n\xffbroken\ntail\n";
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_| {}).unwrap();
assert_eq!(captured, vec!["valid", "\u{FFFD}broken", "tail"]);
}
#[test]
fn drain_lines_lossy_empty_stream_yields_empty_vec() {
let input: &[u8] = b"";
let mut calls = 0usize;
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_| calls += 1).unwrap();
assert!(captured.is_empty());
assert_eq!(calls, 0);
}
#[test]
fn drain_lines_lossy_single_line_without_trailing_newline() {
let input: &[u8] = b"no-newline";
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_| {}).unwrap();
assert_eq!(captured, vec!["no-newline"]);
}
#[test]
fn drain_lines_lossy_lone_cr_at_eof_is_preserved() {
let input: &[u8] = b"foo\r";
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_| {}).unwrap();
assert_eq!(captured, vec!["foo\r"]);
}
#[test]
fn drain_lines_lossy_interior_cr_is_preserved() {
let input: &[u8] = b"ab\rcd\n";
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_| {}).unwrap();
assert_eq!(captured, vec!["ab\rcd"]);
}
#[test]
fn drain_lines_lossy_propagates_io_error_after_first_read() {
use std::io::{BufReader, ErrorKind, Read};
struct FlakyReader {
calls: usize,
}
impl Read for FlakyReader {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.calls += 1;
match self.calls {
1 => {
let data = b"line1\n";
let n = data.len().min(buf.len());
buf[..n].copy_from_slice(&data[..n]);
Ok(n)
}
_ => Err(std::io::Error::new(ErrorKind::BrokenPipe, "pipe closed")),
}
}
}
let err = drain_lines_lossy(BufReader::new(FlakyReader { calls: 0 }), |_| {})
.expect_err("flaky reader must surface Err");
assert_eq!(err.kind(), ErrorKind::BrokenPipe);
}
#[test]
fn drain_lines_lossy_mixed_lf_and_crlf() {
let input: &[u8] = b"lf-line\ncrlf-line\r\nlf-again\n";
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_| {}).unwrap();
assert_eq!(captured, vec!["lf-line", "crlf-line", "lf-again"]);
}
#[test]
fn drain_lines_lossy_empty_lines_lf() {
let input: &[u8] = b"a\n\nb\n";
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_| {}).unwrap();
assert_eq!(captured, vec!["a", "", "b"]);
}
#[test]
fn drain_lines_lossy_empty_lines_crlf() {
let input: &[u8] = b"\r\n\r\n";
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_| {}).unwrap();
assert_eq!(captured, vec!["", ""]);
}
#[test]
fn drain_lines_lossy_callback_fires_once_per_line_in_order() {
let input: &[u8] = b"a\nb\nc\n";
let lens = std::cell::RefCell::new(Vec::<usize>::new());
let captured = drain_lines_lossy(std::io::Cursor::new(input), |_line| {
let mut v = lens.borrow_mut();
let current = v.len();
v.push(current);
})
.unwrap();
assert_eq!(captured, vec!["a", "b", "c"]);
assert_eq!(lens.into_inner(), vec![0, 1, 2]);
}
#[test]
fn run_make_with_output_surfaces_actionable_error_when_kernel_dir_missing() {
let tmp = tempfile::TempDir::new().unwrap();
let missing = tmp.path().join("nonexistent_child");
let err = run_make_with_output(&missing, &["foo"], None)
.expect_err("nonexistent kernel_dir must surface a spawn failure");
let rendered = format!("{err:#}");
assert!(
rendered.contains("spawn make foo"),
"expected `spawn make foo` context layer, got: {rendered}"
);
let has_not_found = err.chain().any(|e| {
e.downcast_ref::<std::io::Error>()
.is_some_and(|io| io.kind() == std::io::ErrorKind::NotFound)
});
assert!(
has_not_found,
"expected underlying io::Error with ErrorKind::NotFound in anyhow chain, \
got: {rendered}"
);
}
#[test]
fn run_make_with_output_drains_high_volume_failing_make_without_deadlock() {
if resolve_in_path(std::path::Path::new("make")).is_none() {
skip!("make not in PATH");
}
let dir = tempfile::TempDir::new().unwrap();
let stdout_chunk: String = "S".repeat(1024);
let stderr_chunk: String = "E".repeat(1024);
let mut recipe = String::new();
for _ in 0..100 {
recipe.push_str(&format!("\t@printf '%s\\n' '{stdout_chunk}'\n"));
recipe.push_str(&format!("\t@printf '%s\\n' '{stderr_chunk}' >&2\n"));
}
let makefile = format!("default:\n{recipe}\t@false\n");
std::fs::write(dir.path().join("Makefile"), makefile).unwrap();
let err = run_make_with_output(dir.path(), &["default"], None)
.expect_err("non-zero exit must surface as Err");
let rendered = format!("{err:#}");
assert!(
rendered.contains("make default failed"),
"expected `make default failed` wording from bail!, got: {rendered}"
);
}
use crate::test_support::test_helpers::capture_stderr as capture_test_stderr;
#[test]
fn run_make_with_output_merges_stderr_into_captured_output() {
if resolve_in_path(std::path::Path::new("make")).is_none() {
skip!("make not in PATH");
}
let dir = tempfile::TempDir::new().unwrap();
let stdout_marker = "KTSTR_STDOUT_MARKER_e7f9";
let stderr_marker = "KTSTR_STDERR_MARKER_a1b2";
let makefile = format!(
"default:\n\
\t@printf '%s\\n' '{stdout_marker}'\n\
\t@printf '%s\\n' '{stderr_marker}' >&2\n\
\t@false\n"
);
std::fs::write(dir.path().join("Makefile"), makefile).unwrap();
let (result, captured_bytes) =
capture_test_stderr(|| run_make_with_output(dir.path(), &["default"], None));
let err = result.expect_err("non-zero exit must surface as Err");
let rendered = format!("{err:#}");
assert!(
rendered.contains("make default failed"),
"expected `make default failed` wording, got: {rendered}"
);
let captured = String::from_utf8_lossy(&captured_bytes);
assert!(
captured.contains(stdout_marker),
"stdout marker missing from captured output (eprintln'd via failure path) — \
expected `{stdout_marker}` in: {captured:?}"
);
assert!(
captured.contains(stderr_marker),
"stderr marker missing from captured output — proves the merge is BROKEN: \
stderr did not reach the captured Vec. expected `{stderr_marker}` in: {captured:?}"
);
}
#[test]
fn run_make_with_output_drains_stderr_only_high_volume_without_deadlock() {
if resolve_in_path(std::path::Path::new("make")).is_none() {
skip!("make not in PATH");
}
let dir = tempfile::TempDir::new().unwrap();
let chunk: String = "X".repeat(1024);
let mut recipe = String::new();
for _ in 0..128 {
recipe.push_str(&format!("\t@printf '%s\\n' '{chunk}' >&2\n"));
}
let makefile = format!("default:\n{recipe}\t@false\n");
std::fs::write(dir.path().join("Makefile"), makefile).unwrap();
let err = run_make_with_output(dir.path(), &["default"], None)
.expect_err("non-zero exit must surface as Err");
let rendered = format!("{err:#}");
assert!(
rendered.contains("make default failed"),
"expected `make default failed` wording, got: {rendered}"
);
}
#[test]
fn run_make_with_output_releases_fds_on_spawn_failure() {
let proc_fd = std::path::Path::new("/proc/self/fd");
if !proc_fd.is_dir() {
skip!("/proc/self/fd not available");
}
let count_fds = || -> usize {
std::fs::read_dir(proc_fd)
.expect("read /proc/self/fd")
.filter_map(|e| e.ok())
.count()
};
let tmp = tempfile::TempDir::new().unwrap();
let missing = tmp.path().join("nonexistent_child");
let _ = run_make_with_output(&missing, &["foo"], None);
let before = count_fds();
const FD_LEAK_ITERATIONS: u32 = 128;
for _ in 0..FD_LEAK_ITERATIONS {
let _ = run_make_with_output(&missing, &["foo"], None);
}
let after = count_fds();
assert!(
after <= before,
"fd leak on spawn failure: {before} -> {after} \
({FD_LEAK_ITERATIONS} calls, expected no growth)"
);
}
fn spawn_sleeping_child(seconds: u64) -> (std::process::Child, u32) {
let child = std::process::Command::new("sh")
.arg("-c")
.arg(format!("sleep {seconds}"))
.spawn()
.expect("spawn sh -c sleep N");
let pid = child.id();
(child, pid)
}
fn pid_is_alive(pid: u32) -> bool {
use nix::sys::signal::kill;
use nix::unistd::Pid;
kill(Pid::from_raw(pid as i32), None).is_ok()
}
#[test]
fn poll_child_with_timeout_bails_and_reaps_on_timeout() {
let (child, pid) = spawn_sleeping_child(60);
assert!(
pid_is_alive(pid),
"fixture precondition: spawned child pid {pid} must be \
alive before the helper runs",
);
let start = std::time::Instant::now();
let result = super::poll_child_with_timeout(
child,
Duration::from_millis(100),
Duration::from_millis(1),
"make wedged-target",
);
let elapsed = start.elapsed();
let err = result.expect_err("timed-out child must surface as Err");
let rendered = format!("{err:#}");
assert!(
rendered.contains("make wedged-target"),
"timeout bail must include the label parameter; got: {rendered}",
);
assert!(
rendered.contains("timed out after"),
"timeout bail must include the literal `timed out after` \
phrase so CI log scrapers can pattern-match wedged builds; \
got: {rendered}",
);
assert!(
elapsed < Duration::from_secs(5),
"helper must return within a small multiple of the \
configured timeout (100ms); took {elapsed:?} which \
suggests the deadline check is broken",
);
let zombie_check_deadline = std::time::Instant::now() + Duration::from_secs(1);
loop {
if !pid_is_alive(pid) {
break;
}
if std::time::Instant::now() >= zombie_check_deadline {
panic!(
"child pid {pid} still alive 1s after helper returned — \
timeout path leaked a zombie (missing child.wait() \
after child.kill()?)",
);
}
std::thread::sleep(Duration::from_millis(10));
}
}
#[test]
fn poll_child_with_timeout_succeeds_when_child_exits_clean() {
let child = std::process::Command::new("true")
.spawn()
.expect("spawn true");
let pid = child.id();
let result = super::poll_child_with_timeout(
child,
Duration::from_secs(5),
Duration::from_millis(1),
"make happy-target",
);
assert!(
result.is_ok(),
"child that exits 0 must surface as Ok; got: {result:?}",
);
let zombie_check_deadline = std::time::Instant::now() + Duration::from_secs(1);
loop {
if !pid_is_alive(pid) {
break;
}
if std::time::Instant::now() >= zombie_check_deadline {
panic!(
"child pid {pid} still alive 1s after Ok return — \
successful-exit path leaked a zombie",
);
}
std::thread::sleep(Duration::from_millis(10));
}
}
#[test]
fn poll_child_with_timeout_surfaces_nonzero_exit_as_err() {
let child = std::process::Command::new("false")
.spawn()
.expect("spawn false");
let result = super::poll_child_with_timeout(
child,
Duration::from_secs(5),
Duration::from_millis(1),
"make broken-target",
);
let err = result.expect_err("child that exits non-zero must surface as Err");
let rendered = format!("{err:#}");
assert!(
rendered.contains("make broken-target"),
"non-zero-exit bail must include the label; got: {rendered}",
);
assert!(
rendered.contains("failed"),
"non-zero-exit bail must use the `failed` wording so it is \
distinguishable from the timeout-path's `timed out after`; \
got: {rendered}",
);
assert!(
!rendered.contains("timed out"),
"non-zero-exit bail must NOT contain `timed out` — that \
phrase belongs to the deadline-fired path only; got: {rendered}",
);
}
#[test]
fn cli_resolve_flags_none_returns_none() {
assert!(resolve_flags(None).unwrap().is_none());
}
#[test]
fn cli_resolve_flags_valid_single() {
let result = resolve_flags(Some(vec!["llc".into()])).unwrap().unwrap();
assert_eq!(result, vec!["llc"]);
}
#[test]
fn cli_resolve_flags_valid_multiple() {
let result = resolve_flags(Some(vec!["llc".into(), "borrow".into()]))
.unwrap()
.unwrap();
assert_eq!(result, vec!["llc", "borrow"]);
}
#[test]
fn cli_resolve_flags_all_valid() {
let all: Vec<String> = flags::ALL.iter().map(|s| s.to_string()).collect();
let result = resolve_flags(Some(all)).unwrap().unwrap();
assert_eq!(result.len(), flags::ALL.len());
}
#[test]
fn cli_resolve_flags_unknown_errors() {
let err = resolve_flags(Some(vec!["nonexistent".into()])).unwrap_err();
let msg = format!("{err}");
assert!(msg.contains("unknown flag: 'nonexistent'"), "{msg}");
assert!(msg.contains("valid flags:"), "{msg}");
}
#[test]
fn cli_resolve_flags_mixed_valid_and_unknown_errors() {
let err = resolve_flags(Some(vec!["llc".into(), "bogus".into()])).unwrap_err();
let msg = format!("{err}");
assert!(msg.contains("unknown flag: 'bogus'"), "{msg}");
}
#[test]
fn cli_parse_work_type_none_returns_none() {
assert!(parse_work_type(None).unwrap().is_none());
}
#[test]
fn cli_parse_work_type_cpu_spin() {
let wt = parse_work_type(Some("CpuSpin")).unwrap().unwrap();
assert_eq!(wt.name(), "CpuSpin");
}
#[test]
fn cli_parse_work_type_yield_heavy() {
let wt = parse_work_type(Some("YieldHeavy")).unwrap().unwrap();
assert_eq!(wt.name(), "YieldHeavy");
}
#[test]
fn cli_parse_work_type_all_valid() {
for &name in WorkType::ALL_NAMES {
if name == "Sequence" || name == "Custom" {
continue;
}
let wt = parse_work_type(Some(name)).unwrap().unwrap();
assert_eq!(wt.name(), name);
}
}
#[test]
fn cli_parse_work_type_unknown_errors() {
let err = parse_work_type(Some("Nonexistent")).unwrap_err();
let msg = format!("{err}");
assert!(msg.contains("unknown work type: 'Nonexistent'"), "{msg}");
assert!(msg.contains("valid types:"), "{msg}");
}
#[test]
fn cli_parse_work_type_sequence_errors() {
let err = parse_work_type(Some("Sequence")).unwrap_err();
let msg = format!("{err}");
assert!(msg.contains("unknown work type: 'Sequence'"), "{msg}");
}
#[test]
fn cli_parse_work_type_case_sensitive() {
let err = parse_work_type(Some("cpuspin")).unwrap_err();
assert!(format!("{err}").contains("unknown work type:"));
}
#[test]
fn cli_filter_scenarios_no_filter_returns_all() {
let scenarios = scenario::all_scenarios();
let result = filter_scenarios(&scenarios, None).unwrap();
assert_eq!(result.len(), scenarios.len());
}
#[test]
fn cli_filter_scenarios_matching_filter() {
let scenarios = scenario::all_scenarios();
let first_name = scenarios[0].name;
let result = filter_scenarios(&scenarios, Some(first_name)).unwrap();
assert!(!result.is_empty());
for s in &result {
assert!(s.name.contains(first_name));
}
}
#[test]
fn cli_filter_scenarios_no_match_errors() {
let scenarios = scenario::all_scenarios();
let err = filter_scenarios(&scenarios, Some("__nonexistent_scenario_xyz__")).unwrap_err();
let msg = format!("{err}");
assert!(msg.contains("no scenarios matched"), "{msg}");
assert!(msg.contains("ktstr list"), "{msg}");
}
#[test]
fn cli_filter_scenarios_partial_match() {
let scenarios = scenario::all_scenarios();
let result = filter_scenarios(&scenarios, Some("steady")).unwrap();
assert!(!result.is_empty());
}
#[test]
fn cli_build_run_config_defaults() {
let config = build_run_config(
"/sys/fs/cgroup/ktstr".into(),
20,
4,
None,
false,
None,
false,
None,
None,
);
assert_eq!(config.parent_cgroup, "/sys/fs/cgroup/ktstr");
assert_eq!(config.duration, Duration::from_secs(20));
assert_eq!(config.workers_per_cgroup, 4);
assert!(config.active_flags.is_none());
assert!(!config.repro);
assert!(config.probe_stack.is_none());
assert!(!config.auto_repro);
assert!(config.kernel_dir.is_none());
assert!(config.work_type_override.is_none());
}
#[test]
fn cli_build_run_config_all_fields() {
let config = build_run_config(
"/sys/fs/cgroup/test".into(),
30,
8,
Some(vec!["llc", "borrow"]),
true,
Some("do_enqueue_task".into()),
true,
Some("/usr/src/linux".into()),
Some(WorkType::Mixed),
);
assert_eq!(config.parent_cgroup, "/sys/fs/cgroup/test");
assert_eq!(config.duration, Duration::from_secs(30));
assert_eq!(config.workers_per_cgroup, 8);
let af = config.active_flags.unwrap();
assert_eq!(af, vec!["llc", "borrow"]);
assert!(config.repro);
assert_eq!(config.probe_stack.as_deref(), Some("do_enqueue_task"));
assert!(config.auto_repro);
assert_eq!(config.kernel_dir.as_deref(), Some("/usr/src/linux"));
assert!(config.work_type_override.is_some());
}
#[test]
fn cli_build_run_config_duration_converts() {
let config = build_run_config("cg".into(), 60, 1, None, false, None, false, None, None);
assert_eq!(config.duration, Duration::from_secs(60));
}
#[test]
fn cli_all_scenarios_non_empty() {
let scenarios = scenario::all_scenarios();
assert!(!scenarios.is_empty());
}
#[test]
fn cli_all_scenarios_have_names() {
for s in &scenario::all_scenarios() {
assert!(!s.name.is_empty());
assert!(!s.category.is_empty());
}
}
#[test]
fn cli_has_sched_ext_present() {
let tmp = tempfile::TempDir::new().unwrap();
std::fs::write(
tmp.path().join(".config"),
"CONFIG_SOMETHING=y\nCONFIG_SCHED_CLASS_EXT=y\nCONFIG_OTHER=m\n",
)
.unwrap();
assert!(has_sched_ext(tmp.path()));
}
#[test]
fn cli_has_sched_ext_absent() {
let tmp = tempfile::TempDir::new().unwrap();
std::fs::write(
tmp.path().join(".config"),
"CONFIG_SOMETHING=y\nCONFIG_OTHER=m\n",
)
.unwrap();
assert!(!has_sched_ext(tmp.path()));
}
#[test]
fn cli_has_sched_ext_module_not_builtin() {
let tmp = tempfile::TempDir::new().unwrap();
std::fs::write(tmp.path().join(".config"), "CONFIG_SCHED_CLASS_EXT=m\n").unwrap();
assert!(!has_sched_ext(tmp.path()));
}
#[test]
fn cli_has_sched_ext_commented_out() {
let tmp = tempfile::TempDir::new().unwrap();
std::fs::write(
tmp.path().join(".config"),
"# CONFIG_SCHED_CLASS_EXT is not set\n",
)
.unwrap();
assert!(!has_sched_ext(tmp.path()));
}
#[test]
fn cli_has_sched_ext_no_config_file() {
let tmp = tempfile::TempDir::new().unwrap();
assert!(!has_sched_ext(tmp.path()));
}
#[test]
fn cli_has_sched_ext_empty_config() {
let tmp = tempfile::TempDir::new().unwrap();
std::fs::write(tmp.path().join(".config"), "").unwrap();
assert!(!has_sched_ext(tmp.path()));
}
#[test]
fn cli_build_make_args_single_core() {
let args = build_make_args(1);
assert_eq!(args, vec!["-j1", "KCFLAGS=-Wno-error"]);
}
#[test]
fn cli_build_make_args_multi_core() {
let args = build_make_args(16);
assert_eq!(args, vec!["-j16", "KCFLAGS=-Wno-error"]);
}
#[test]
fn cli_analyze_sidecars_empty_dir() {
let tmp = tempfile::TempDir::new().unwrap();
let result = crate::test_support::analyze_sidecars(Some(tmp.path()));
assert!(result.is_empty());
}
#[test]
fn cli_analyze_sidecars_nonexistent_dir() {
let result =
crate::test_support::analyze_sidecars(Some(std::path::Path::new("/nonexistent/path")));
assert!(result.is_empty());
}
#[test]
fn critical_options_are_in_embedded_kconfig() {
let fragment = crate::EMBEDDED_KCONFIG;
for &(option, _) in VALIDATE_CONFIG_CRITICAL {
let enabled = format!("{option}=y");
assert!(
fragment.lines().any(|l| l.trim() == enabled),
"VALIDATE_CONFIG_CRITICAL lists {option:?} but ktstr.kconfig does not \
enable it; either add `{option}=y` to the fragment or drop the entry \
from VALIDATE_CONFIG_CRITICAL",
);
}
}
#[test]
fn validate_kernel_config_all_present() {
let dir = tempfile::TempDir::new().unwrap();
std::fs::write(
dir.path().join(".config"),
"CONFIG_SCHED_CLASS_EXT=y\n\
CONFIG_DEBUG_INFO_BTF=y\n\
CONFIG_BPF_SYSCALL=y\n\
CONFIG_FTRACE=y\n\
CONFIG_KPROBE_EVENTS=y\n\
CONFIG_BPF_EVENTS=y\n",
)
.unwrap();
assert!(validate_kernel_config(dir.path()).is_ok());
}
#[test]
fn validate_kernel_config_missing_btf() {
let dir = tempfile::TempDir::new().unwrap();
std::fs::write(
dir.path().join(".config"),
"CONFIG_SCHED_CLASS_EXT=y\n\
CONFIG_BPF_SYSCALL=y\n\
CONFIG_FTRACE=y\n\
CONFIG_KPROBE_EVENTS=y\n\
CONFIG_BPF_EVENTS=y\n",
)
.unwrap();
let err = validate_kernel_config(dir.path()).unwrap_err();
let msg = format!("{err}");
assert!(msg.contains("CONFIG_DEBUG_INFO_BTF"), "got: {msg}");
}
#[test]
fn validate_kernel_config_missing_multiple() {
let dir = tempfile::TempDir::new().unwrap();
std::fs::write(dir.path().join(".config"), "CONFIG_BPF_SYSCALL=y\n").unwrap();
let err = validate_kernel_config(dir.path()).unwrap_err();
let msg = format!("{err}");
assert!(msg.contains("CONFIG_SCHED_CLASS_EXT"), "got: {msg}");
assert!(msg.contains("CONFIG_DEBUG_INFO_BTF"), "got: {msg}");
}
#[test]
fn validate_kernel_config_no_config_file() {
let dir = tempfile::TempDir::new().unwrap();
assert!(validate_kernel_config(dir.path()).is_err());
}
#[test]
fn validate_kernel_config_trim_handles_crlf_and_trailing_whitespace() {
let dir = tempfile::TempDir::new().unwrap();
std::fs::write(
dir.path().join(".config"),
"CONFIG_SCHED_CLASS_EXT=y\r\n\
CONFIG_DEBUG_INFO_BTF=y \n\
CONFIG_BPF_SYSCALL=y\r\n\
CONFIG_FTRACE=y \n\
CONFIG_KPROBE_EVENTS=y\r\n\
CONFIG_BPF_EVENTS=y \n",
)
.unwrap();
let result = validate_kernel_config(dir.path());
assert!(
result.is_ok(),
"validate_kernel_config must trim per-line whitespace \
before the HashSet probe — a regression dropping \
`.map(str::trim)` would treat \\r-suffixed and \
trailing-space lines as distinct from the bare \
`CONFIG_X=y` form and report every option as \
missing; got: {result:?}",
);
}
#[test]
fn configure_kernel_appends_missing() {
let dir = tempfile::TempDir::new().unwrap();
std::fs::write(dir.path().join(".config"), "CONFIG_BPF=y\n").unwrap();
std::fs::write(dir.path().join("Makefile"), "olddefconfig:\n\t@true\n").unwrap();
let fragment = "CONFIG_EXTRA=y\n";
configure_kernel(dir.path(), fragment).unwrap();
let config = std::fs::read_to_string(dir.path().join(".config")).unwrap();
assert!(config.contains("CONFIG_EXTRA=y"));
assert!(config.contains("CONFIG_BPF=y"));
}
#[test]
fn configure_kernel_skips_when_present() {
let dir = tempfile::TempDir::new().unwrap();
let initial = "CONFIG_BPF=y\nCONFIG_EXTRA=y\n";
std::fs::write(dir.path().join(".config"), initial).unwrap();
let fragment = "CONFIG_EXTRA=y\n";
configure_kernel(dir.path(), fragment).unwrap();
let config = std::fs::read_to_string(dir.path().join(".config")).unwrap();
assert_eq!(config, initial);
}
#[test]
fn configure_kernel_rejects_numeric_prefix_false_match() {
let dir = tempfile::TempDir::new().unwrap();
let initial = "CONFIG_NR_CPUS=128\n";
std::fs::write(dir.path().join(".config"), initial).unwrap();
std::fs::write(dir.path().join("Makefile"), "olddefconfig:\n\t@true\n").unwrap();
let fragment = "CONFIG_NR_CPUS=1\n";
configure_kernel(dir.path(), fragment).unwrap();
let config = std::fs::read_to_string(dir.path().join(".config")).unwrap();
assert!(
config.lines().any(|l| l.trim() == "CONFIG_NR_CPUS=1"),
"CONFIG_NR_CPUS=1 must be appended as its own line: {config:?}"
);
assert!(
config.lines().any(|l| l.trim() == "CONFIG_NR_CPUS=128"),
"original CONFIG_NR_CPUS=128 must be preserved: {config:?}"
);
}
#[test]
fn all_fragment_lines_present_exact_match() {
let config = "CONFIG_FOO=y\nCONFIG_BAR=m\n";
assert!(all_fragment_lines_present("CONFIG_FOO=y\n", config));
assert!(all_fragment_lines_present("CONFIG_BAR=m\n", config));
assert!(all_fragment_lines_present(
"CONFIG_FOO=y\nCONFIG_BAR=m\n",
config
));
}
#[test]
fn all_fragment_lines_present_numeric_prefix_not_present() {
let config = "CONFIG_NR_CPUS=128\n";
assert!(!all_fragment_lines_present("CONFIG_NR_CPUS=1\n", config));
assert!(!all_fragment_lines_present("CONFIG_NR_CPUS=12\n", config));
}
#[test]
fn all_fragment_lines_present_disable_directive_participates() {
let config = "CONFIG_BPF=y\n";
assert!(!all_fragment_lines_present(
"# CONFIG_BPF is not set\n",
config
));
}
#[test]
fn all_fragment_lines_present_empty_lines_skipped() {
let config = "CONFIG_FOO=y\n";
assert!(all_fragment_lines_present("\n\nCONFIG_FOO=y\n\n", config));
}
#[test]
fn resolve_kernel_parallelism_unset_returns_host_default() {
use crate::test_support::test_helpers::{EnvVarGuard, lock_env};
let _lock = lock_env();
let _guard = EnvVarGuard::remove(crate::KTSTR_KERNEL_PARALLELISM_ENV);
let n = super::resolve_kernel_parallelism();
assert!(
n >= 1,
"fallback must yield at least 1; got {n} which would defeat \
ThreadPoolBuilder::num_threads",
);
}
#[test]
fn resolve_kernel_parallelism_valid_override_wins() {
use crate::test_support::test_helpers::{EnvVarGuard, lock_env};
let _lock = lock_env();
let _guard = EnvVarGuard::set(crate::KTSTR_KERNEL_PARALLELISM_ENV, "4");
assert_eq!(
super::resolve_kernel_parallelism(),
4,
"valid usize env value must override the host-CPU default; \
a regression that ignored the env var would yield \
available_parallelism() instead",
);
}
#[test]
fn resolve_kernel_parallelism_zero_falls_through_to_default() {
use crate::test_support::test_helpers::{EnvVarGuard, lock_env};
let _lock = lock_env();
let _guard = EnvVarGuard::set(crate::KTSTR_KERNEL_PARALLELISM_ENV, "0");
let n = super::resolve_kernel_parallelism();
assert!(
n >= 1,
"zero env value must fall through to host-CPU default \
(always ≥ 1); got {n} which would crash the pool builder",
);
}
#[test]
fn resolve_kernel_parallelism_unparseable_falls_through_to_default() {
use crate::test_support::test_helpers::{EnvVarGuard, lock_env};
let _lock = lock_env();
let _guard = EnvVarGuard::set(crate::KTSTR_KERNEL_PARALLELISM_ENV, "abc");
let n = super::resolve_kernel_parallelism();
assert!(
n >= 1,
"unparseable env value must fall through to host-CPU \
default (always ≥ 1); got {n}",
);
}
#[test]
fn resolve_kernel_parallelism_negative_falls_through_to_default() {
use crate::test_support::test_helpers::{EnvVarGuard, lock_env};
let _lock = lock_env();
let _guard = EnvVarGuard::set(crate::KTSTR_KERNEL_PARALLELISM_ENV, "-1");
let n = super::resolve_kernel_parallelism();
assert!(
n >= 1,
"negative env value must fall through to host-CPU \
default (usize::from_str rejects leading `-`); got {n}",
);
}
#[test]
fn resolve_kernel_parallelism_trims_surrounding_whitespace() {
use crate::test_support::test_helpers::{EnvVarGuard, lock_env};
let _lock = lock_env();
let _guard = EnvVarGuard::set(crate::KTSTR_KERNEL_PARALLELISM_ENV, " 8 ");
assert_eq!(
super::resolve_kernel_parallelism(),
8,
"trimmed env value must parse; whitespace tolerance \
matches the rest of the KTSTR_* env-reading suite",
);
}
#[test]
fn ktstr_kernel_parallelism_env_const_matches_literal() {
assert_eq!(
crate::KTSTR_KERNEL_PARALLELISM_ENV,
"KTSTR_KERNEL_PARALLELISM",
);
}
#[test]
fn resolve_in_path_finds_sh() {
let result = resolve_in_path(std::path::Path::new("sh"));
assert!(result.is_some(), "sh should be in PATH");
assert!(result.unwrap().exists());
}
#[test]
fn resolve_in_path_nonexistent() {
let result = resolve_in_path(std::path::Path::new("nonexistent_binary_xyz_12345"));
assert!(result.is_none());
}
#[test]
fn resolve_include_files_single_file() {
let dir = tempfile::TempDir::new().unwrap();
let file = dir.path().join("test.txt");
std::fs::write(&file, "hello").unwrap();
let result = resolve_include_files(&[file]).unwrap();
assert_eq!(result.len(), 1);
assert!(result[0].0.contains("test.txt"));
}
#[test]
fn resolve_include_files_nonexistent() {
let result = resolve_include_files(&[std::path::PathBuf::from("/nonexistent/file.txt")]);
assert!(result.is_err());
}
#[test]
fn resolve_include_files_bare_name_in_path() {
let result = resolve_include_files(&[std::path::PathBuf::from("sh")]);
assert!(result.is_ok());
let entries = result.unwrap();
assert_eq!(entries.len(), 1);
assert!(entries[0].0.contains("sh"));
}
#[test]
fn kernel_list_stale_kconfig_json_human_parity() {
use crate::cache::{CacheArtifacts, CacheDir, KernelMetadata, KernelSource};
fn metadata_with_hash(hash: Option<&str>) -> KernelMetadata {
KernelMetadata::new(
KernelSource::Tarball,
"x86_64".to_string(),
"bzImage".to_string(),
"2026-04-12T10:00:00Z".to_string(),
)
.with_version(Some("6.14.2".to_string()))
.with_ktstr_kconfig_hash(hash.map(str::to_string))
}
let cases: &[(&str, Option<&str>, &str)] = &[
("matches", Some("same"), "same"),
("stale", Some("old"), "new"),
("untracked", None, "anything"),
];
for &(label, entry_hash, current_hash) in cases {
let tmp = tempfile::TempDir::new().unwrap();
let cache = CacheDir::with_root(tmp.path().join("cache"));
let src = tempfile::TempDir::new().unwrap();
let image = src.path().join("bzImage");
std::fs::write(&image, b"fake kernel").unwrap();
let meta = metadata_with_hash(entry_hash);
let entry = cache
.store(label, &CacheArtifacts::new(&image), &meta)
.unwrap();
let json_stale = entry.kconfig_status(current_hash).is_stale();
let human_row = format_entry_row(&entry, current_hash, &[]);
let human_stale = human_row.contains("stale kconfig");
assert_eq!(
json_stale, human_stale,
"kernel_list JSON/human stale-kconfig disagreement on `{label}` \
(entry_hash={entry_hash:?}, current_hash={current_hash:?}); \
json_stale={json_stale}, human_row={human_row:?}"
);
}
}
#[test]
fn version_prefix_stable_release() {
assert_eq!(version_prefix("6.14.2").as_deref(), Some("6.14"));
assert_eq!(version_prefix("6.12.81").as_deref(), Some("6.12"));
assert_eq!(version_prefix("7.0").as_deref(), Some("7.0"));
}
#[test]
fn version_prefix_strips_rc_suffix() {
assert_eq!(version_prefix("6.15-rc1").as_deref(), Some("6.15"));
assert_eq!(version_prefix("6.15-rc3").as_deref(), Some("6.15"));
assert_eq!(version_prefix("7.0-rc1").as_deref(), Some("7.0"));
}
#[test]
fn version_prefix_strips_linux_next_suffix() {
assert_eq!(
version_prefix("6.16-rc2-next-20260420").as_deref(),
Some("6.16"),
);
assert_eq!(
version_prefix("7.1-rc1-next-20260501").as_deref(),
Some("7.1"),
);
}
#[test]
fn version_prefix_rejects_no_dot() {
assert!(version_prefix("abc").is_none());
assert!(version_prefix("6").is_none());
assert!(version_prefix("").is_none());
}
#[test]
fn version_prefix_rejects_non_numeric_minor() {
assert!(version_prefix("6.x").is_none());
assert!(version_prefix("6.-rc1").is_none());
assert!(version_prefix("6.").is_none());
}
#[test]
fn is_eol_empty_active_prefixes_returns_false() {
assert!(!is_eol("6.14.2", &[]));
}
#[test]
fn is_eol_prefix_in_active_list_returns_false() {
assert!(!is_eol("6.14.2", &["6.14".to_string()]));
}
#[test]
fn is_eol_prefix_absent_from_active_list_returns_true() {
assert!(is_eol(
"5.10.200",
&["6.14".to_string(), "6.12".to_string()],
));
}
#[test]
fn is_eol_unparseable_version_returns_false() {
assert!(!is_eol("abc", &["6.14".to_string()]));
}
#[test]
fn is_eol_rc_suffix_mismatch_does_not_flag() {
let active = ["6.15".to_string()];
assert!(!is_eol("6.15-rc1", &active));
assert!(!is_eol("6.15-rc4", &active));
}
#[test]
fn is_eol_linux_next_matches_mainline_prefix() {
let active = ["6.16".to_string()];
assert!(!is_eol("6.16-rc2-next-20260420", &active));
}
#[test]
fn is_eol_brand_new_major_matches_rc_variant() {
assert!(!is_eol("7.0", &["7.0".to_string()]));
assert!(!is_eol("7.0-rc1", &["7.0".to_string()]));
}
#[test]
fn is_eol_brand_new_zero_release_in_active_list() {
let active = ["7.0".to_string()];
assert!(
!is_eol("7.0", &active),
"brand-new 7.0 release matching active prefix 7.0 must not be EOL",
);
assert!(
!is_eol("7.0.0", &active),
"7.0.0 carries prefix 7.0 via version_prefix and must not be EOL",
);
}
#[test]
fn is_eol_linux_next_version_not_falsely_tagged() {
assert!(
is_eol("6.16-rc1", &["6.14".to_string(), "6.13".to_string()]),
"linux-next cache entry whose merge-window target is ahead \
of every stable series must be tagged EOL",
);
}
fn owned(pairs: &[(&str, &str)]) -> Vec<crate::fetch::Release> {
pairs
.iter()
.map(|(m, v)| crate::fetch::Release {
moniker: (*m).to_string(),
version: (*v).to_string(),
})
.collect()
}
#[test]
fn active_prefixes_from_releases_normalizes_rc_versions() {
let releases = owned(&[
("mainline", "6.16-rc3"),
("stable", "6.15.2"),
("longterm", "6.12.81"),
]);
let prefixes = active_prefixes_from_releases(&releases);
assert_eq!(
prefixes,
vec!["6.16".to_string(), "6.15".to_string(), "6.12".to_string()],
"RC-suffixed mainline entry must normalize to its merge-window series",
);
}
#[test]
fn active_prefixes_from_releases_skips_linux_next_moniker() {
let releases = owned(&[
("linux-next", "6.17-rc2-next-20260421"),
("mainline", "6.16-rc3"),
("stable", "6.15.2"),
]);
let prefixes = active_prefixes_from_releases(&releases);
assert!(
!prefixes.contains(&"6.17".to_string()),
"linux-next moniker must not seed a 6.17 prefix, got {prefixes:?}",
);
assert_eq!(
prefixes,
vec!["6.16".to_string(), "6.15".to_string()],
"surviving prefixes come from mainline + stable only",
);
}
#[test]
fn active_prefixes_from_releases_dedups_in_input_order() {
let releases = owned(&[
("stable", "6.14.2"),
("longterm", "6.14.1"),
("longterm", "6.12.81"),
]);
let prefixes = active_prefixes_from_releases(&releases);
assert_eq!(
prefixes,
vec!["6.14".to_string(), "6.12".to_string()],
"dedup preserves first-seen order; 6.14 appears once",
);
}
#[test]
fn kernel_list_eol_json_human_parity() {
use crate::cache::{CacheArtifacts, CacheDir, KernelMetadata, KernelSource};
let tmp = tempfile::TempDir::new().unwrap();
let cache = CacheDir::with_root(tmp.path().join("cache"));
let src_dir = tmp.path().join("src");
std::fs::create_dir_all(&src_dir).unwrap();
let image = src_dir.join("bzImage");
std::fs::write(&image, b"fake kernel").unwrap();
let make_entry = |key: &str, version: &str| {
let meta = KernelMetadata::new(
KernelSource::Tarball,
"x86_64".to_string(),
"bzImage".to_string(),
"2026-04-12T10:00:00Z".to_string(),
)
.with_version(Some(version.to_string()));
cache
.store(key, &CacheArtifacts::new(&image), &meta)
.unwrap()
};
let cases: &[(&str, &str, &[&str])] = &[
("active", "6.14.2", &["6.14"]),
("eol", "2.6.32", &["6.14"]),
("fetch-fail", "2.6.32", &[]),
];
for (label, version, active) in cases {
let entry = make_entry(&format!("parity-{label}"), version);
let active_vec: Vec<String> = active.iter().map(|s| s.to_string()).collect();
let row = format_entry_row(&entry, "kconfig_hash", &active_vec);
let json_eol = entry_is_eol(&entry, &active_vec);
let human_eol = row.contains("(EOL)");
assert_eq!(
json_eol, human_eol,
"JSON/human parity broken for case {label}: \
json_eol={json_eol}, human_eol={human_eol}, row={row:?}",
);
}
}
#[test]
fn kernel_list_corrupt_footer_fires_iff_any_corrupt() {
use crate::cache::{CacheArtifacts, CacheDir, KernelMetadata, KernelSource};
let tmp = tempfile::TempDir::new().unwrap();
let cache = CacheDir::with_root(tmp.path().join("cache"));
let src_dir = tmp.path().join("src");
std::fs::create_dir_all(&src_dir).unwrap();
let image = src_dir.join("bzImage");
std::fs::write(&image, b"fake kernel").unwrap();
let meta = KernelMetadata::new(
KernelSource::Tarball,
"x86_64".to_string(),
"bzImage".to_string(),
"2026-04-22T00:00:00Z".to_string(),
)
.with_version(Some("6.14.2".to_string()));
let valid_1 = cache
.store("valid-entry-a", &CacheArtifacts::new(&image), &meta)
.unwrap();
let valid_2 = cache
.store("valid-entry-b", &CacheArtifacts::new(&image), &meta)
.unwrap();
let corrupt_entry = crate::cache::ListedEntry::Corrupt {
key: "corrupt-entry".to_string(),
path: cache.root().join("corrupt-entry"),
reason: "metadata.json missing".to_string(),
};
let entries_with_corrupt = [
crate::cache::ListedEntry::Valid(Box::new(valid_1)),
corrupt_entry,
];
let entries_clean_only = [crate::cache::ListedEntry::Valid(Box::new(valid_2))];
fn any_corrupt(entries: &[crate::cache::ListedEntry]) -> bool {
entries
.iter()
.any(|e| matches!(e, crate::cache::ListedEntry::Corrupt { .. }))
}
assert!(
any_corrupt(&entries_with_corrupt),
"mixed list must trip the footer",
);
assert!(
!any_corrupt(&entries_clean_only),
"clean-only list must not trip the footer",
);
let footer = format_corrupt_footer(cache.root());
assert!(
footer.contains("(corrupt)"),
"footer must reference the tag users see",
);
assert!(
footer.contains("kernel clean --force"),
"footer must offer a remediation command",
);
assert!(
footer.contains("ALL cached entries"),
"footer must spell out that `kernel clean --force` is not surgical",
);
assert!(
footer.contains("kernel clean --keep N --force"),
"footer must offer a partial-cleanup alternative",
);
assert!(
footer.contains(&cache.root().display().to_string()),
"footer must name the cache root so operators know where to inspect",
);
}
#[test]
fn eol_legend_if_any_branches() {
assert_eq!(eol_legend_if_any(true), Some(EOL_EXPLANATION));
assert_eq!(eol_legend_if_any(false), None);
}
#[test]
fn kernel_list_long_about_exposes_json_schema() {
assert!(
KERNEL_LIST_LONG_ABOUT.starts_with(EOL_EXPLANATION),
"KERNEL_LIST_LONG_ABOUT must embed EOL_EXPLANATION verbatim at its \
head so the --help and post-table legend share one source of \
truth; got: {KERNEL_LIST_LONG_ABOUT:?}",
);
for wrapper_field in [
"current_ktstr_kconfig_hash",
"active_prefixes_fetch_error",
"entries",
] {
assert!(
KERNEL_LIST_LONG_ABOUT.contains(wrapper_field),
"KERNEL_LIST_LONG_ABOUT must mention top-level wrapper field \
`{wrapper_field}` so scripted consumers discover the \
schema without `cargo doc`; got: {KERNEL_LIST_LONG_ABOUT:?}",
);
}
for valid_entry_field in [
"key",
"path",
"version",
"source",
"arch",
"built_at",
"ktstr_kconfig_hash",
"kconfig_status",
"eol",
"config_hash",
"image_name",
"image_path",
"has_vmlinux",
"vmlinux_stripped",
"git_hash",
"\"ref\"",
"source_tree_path",
] {
assert!(
KERNEL_LIST_LONG_ABOUT.contains(valid_entry_field),
"KERNEL_LIST_LONG_ABOUT must mention valid-entry JSON \
field `{valid_entry_field}`; a JSON emitter that adds \
a field without updating the help copy silently \
breaks the discoverability contract; got: \
{KERNEL_LIST_LONG_ABOUT:?}",
);
}
{
let corrupt_entry_field = "error";
assert!(
KERNEL_LIST_LONG_ABOUT.contains(corrupt_entry_field),
"KERNEL_LIST_LONG_ABOUT must mention corrupt-entry JSON \
field `{corrupt_entry_field}` so consumers know the \
corrupt-entry shape and can branch on its presence; \
got: {KERNEL_LIST_LONG_ABOUT:?}",
);
}
for nullable_field in ["version", "ktstr_kconfig_hash", "config_hash"] {
let marker = format!("{nullable_field} (nullable)");
assert!(
KERNEL_LIST_LONG_ABOUT.contains(&marker),
"KERNEL_LIST_LONG_ABOUT must mark `{nullable_field}` \
as `(nullable)` (expected substring `{marker}`) so \
consumers know to handle `null`; got: \
{KERNEL_LIST_LONG_ABOUT:?}",
);
}
for source_variant_tag in ["\"tarball\"", "\"git\"", "\"local\""] {
assert!(
KERNEL_LIST_LONG_ABOUT.contains(source_variant_tag),
"KERNEL_LIST_LONG_ABOUT must list source variant tag \
`{source_variant_tag}` so consumers can dispatch on \
the internally-tagged `source.type` field; got: \
{KERNEL_LIST_LONG_ABOUT:?}",
);
}
for status_variant in ["\"matches\"", "\"stale\"", "\"untracked\""] {
assert!(
KERNEL_LIST_LONG_ABOUT.contains(status_variant),
"KERNEL_LIST_LONG_ABOUT must list kconfig_status variant \
`{status_variant}` so consumers can branch on the \
three-value enum without reading source; got: \
{KERNEL_LIST_LONG_ABOUT:?}",
);
}
}
#[test]
fn kernel_list_long_about_wired_via_clap() {
use clap::CommandFactory as _;
#[derive(clap::Parser, Debug)]
struct TestCli {
#[command(subcommand)]
cmd: KernelCommand,
}
let cmd = TestCli::command();
let list = cmd
.find_subcommand("list")
.expect("clap must register a `list` subcommand on KernelCommand");
let long_about = list
.get_long_about()
.expect(
"`list` subcommand must have a long_about set (drives \
`kernel list --help`)",
)
.to_string();
assert_eq!(
long_about, KERNEL_LIST_LONG_ABOUT,
"clap's registered long_about for `list` must equal \
KERNEL_LIST_LONG_ABOUT byte-for-byte; a mismatch means \
the `#[command(long_about = ...)]` attribute is missing, \
pointing at a different const, or clap mutated the \
content on its way into the registry",
);
}
#[test]
fn untracked_legend_if_any_branches() {
assert_eq!(
untracked_legend_if_any(true),
Some(UNTRACKED_KCONFIG_EXPLANATION),
);
assert_eq!(untracked_legend_if_any(false), None);
}
#[test]
fn stale_legend_if_any_branches() {
assert_eq!(stale_legend_if_any(true), Some(STALE_KCONFIG_EXPLANATION),);
assert_eq!(stale_legend_if_any(false), None);
}
#[test]
fn stale_kconfig_explanation_shape() {
assert!(
STALE_KCONFIG_EXPLANATION.starts_with("warning"),
"stale legend must keep the warning preamble: {STALE_KCONFIG_EXPLANATION}",
);
assert!(
STALE_KCONFIG_EXPLANATION.contains("(stale kconfig)"),
"stale legend must name the tag verbatim: {STALE_KCONFIG_EXPLANATION}",
);
assert!(
STALE_KCONFIG_EXPLANATION.contains("different ktstr.kconfig"),
"stale legend must name the cause: {STALE_KCONFIG_EXPLANATION}",
);
assert!(
STALE_KCONFIG_EXPLANATION.contains("kernel build --force <entry version>"),
"stale legend must name the rebuild remediation with the \
`<entry version>` placeholder: {STALE_KCONFIG_EXPLANATION}",
);
}
#[test]
fn corrupt_footer_if_any_branches() {
let root = std::path::Path::new("/tmp/ktstr-cache-test-root");
assert_eq!(corrupt_footer_if_any(0, root), None);
let one = corrupt_footer_if_any(1, root).expect("positive count must yield Some(footer)");
assert!(
one.contains("1 corrupt entry."),
"singular form (count == 1) must render as `1 corrupt entry.`; got: {one}",
);
assert!(
one.contains("cargo ktstr kernel clean --corrupt-only"),
"summary must name the surgical-cleanup command: {one}",
);
assert!(
one.contains(&format_corrupt_footer(root)),
"footer must embed the full format_corrupt_footer \
detail after the count summary: {one}",
);
let many = corrupt_footer_if_any(3, root).expect("positive count must yield Some(footer)");
assert!(
many.contains("3 corrupt entries."),
"plural form (count > 1) must render as `N corrupt entries.`; got: {many}",
);
}
#[test]
fn corrupt_footer_is_self_documenting() {
let root = std::path::Path::new("/tmp/ktstr-cache-test-root");
let footer = format_corrupt_footer(root);
let first_sentence = footer
.split_once(". ")
.map(|(head, _)| head)
.expect("footer must terminate legend sentence with period-space");
assert!(
first_sentence.contains("(corrupt)"),
"first sentence must name the tag so a reader who sees \
`(corrupt)` in a row finds the definition in the \
first line of the footer, not buried after the \
remediation block; got: {first_sentence:?}",
);
assert!(
first_sentence.contains("cannot be used"),
"first sentence must carry the definitional meaning \
(legend-equivalent wording); got: {first_sentence:?}",
);
for reason_token in ["metadata is missing", "malformed", "missing image"] {
assert!(
first_sentence.contains(reason_token),
"legend sentence must enumerate corruption modes; \
expected `{reason_token}`, got: {first_sentence:?}",
);
}
assert!(
footer.contains(&root.display().to_string()),
"footer must surface the cache-root path verbatim so \
operators know which directory to inspect; got: \
{footer:?}",
);
assert!(
footer.contains("kernel clean --corrupt-only --force"),
"footer must name the `kernel clean --corrupt-only \
--force` surgical variant — the zero-risk option for \
operators with valid alongside corrupt entries; got: \
{footer:?}",
);
assert!(
footer.contains("kernel clean --force"),
"footer must name the `kernel clean --force` escalation \
variant (removes ALL entries, valid and corrupt); \
got: {footer:?}",
);
assert!(
footer.contains("kernel clean --keep N --force"),
"footer must name the `kernel clean --keep N --force` \
escalation variant (preserves the N newest entries) \
alongside the surgical `--corrupt-only --force` and \
the broader `--force`, so every operator position \
(corrupt-only, preserve-N-newest, everything) has a \
documented command; got: {footer:?}",
);
assert!(
footer.contains("ALL cached entries"),
"footer must carry the safety wording that distinguishes \
the surgical `--corrupt-only --force` (leaves valid \
entries alone) from its escalation paths `--force` \
(removes ALL) and `--keep N --force` (preserves N \
newest); got: {footer:?}",
);
let pos_corrupt_only = footer
.find("kernel clean --corrupt-only --force")
.expect("`--corrupt-only --force` must appear in footer");
let pos_force = footer
.find("kernel clean --force")
.expect("`--force` must appear in footer");
let pos_keep = footer
.find("kernel clean --keep N --force")
.expect("`--keep N --force` must appear in footer");
assert!(
pos_corrupt_only < pos_force,
"`--corrupt-only --force` must precede `--force` in the footer \
(surgical option goes before escalation); got positions \
corrupt_only={pos_corrupt_only}, force={pos_force} in: {footer:?}",
);
assert!(
pos_force < pos_keep,
"`--force` must precede `--keep N --force` in the footer so the \
escalation path reads in widening order (surgical → broadest \
→ preserve-N); got positions force={pos_force}, keep={pos_keep} \
in: {footer:?}",
);
}
#[test]
fn dirty_tree_cache_skip_hint_shape() {
assert!(
DIRTY_TREE_CACHE_SKIP_HINT.contains("skipping cache"),
"dirty-tree hint must name the cache-skip outcome: {DIRTY_TREE_CACHE_SKIP_HINT}",
);
assert!(
DIRTY_TREE_CACHE_SKIP_HINT.contains("uncommitted changes"),
"dirty-tree hint must name the cause: {DIRTY_TREE_CACHE_SKIP_HINT}",
);
assert!(
DIRTY_TREE_CACHE_SKIP_HINT.contains("commit")
&& DIRTY_TREE_CACHE_SKIP_HINT.contains("stash"),
"dirty-tree hint must name the commit-or-stash remediation: {DIRTY_TREE_CACHE_SKIP_HINT}",
);
}
#[test]
fn non_git_tree_cache_skip_hint_shape() {
assert!(
NON_GIT_TREE_CACHE_SKIP_HINT.starts_with("skipping cache"),
"non-git hint must be left-anchored on the cache-skip outcome: {NON_GIT_TREE_CACHE_SKIP_HINT}",
);
assert!(
NON_GIT_TREE_CACHE_SKIP_HINT.contains("not a git repository"),
"non-git hint must name the cause: {NON_GIT_TREE_CACHE_SKIP_HINT}",
);
assert!(
NON_GIT_TREE_CACHE_SKIP_HINT.contains("put the source under git"),
"non-git hint must name the actionable remediation: {NON_GIT_TREE_CACHE_SKIP_HINT}",
);
assert!(
NON_GIT_TREE_CACHE_SKIP_HINT.contains("kernel build VERSION"),
"non-git hint must name the concrete tarball-fetch alternative: {NON_GIT_TREE_CACHE_SKIP_HINT}",
);
assert!(
NON_GIT_TREE_CACHE_SKIP_HINT.contains("kernel build --git URL --ref REF"),
"non-git hint must name the concrete git-clone alternative: {NON_GIT_TREE_CACHE_SKIP_HINT}",
);
assert!(
!NON_GIT_TREE_CACHE_SKIP_HINT.contains("stash"),
"non-git hint must NOT suggest stash (no git = no stash): {NON_GIT_TREE_CACHE_SKIP_HINT}",
);
assert!(
!NON_GIT_TREE_CACHE_SKIP_HINT.contains("commit"),
"non-git hint must NOT suggest committing existing tree changes (no git = no commit): {NON_GIT_TREE_CACHE_SKIP_HINT}",
);
}
#[test]
fn show_thresholds_known_test_returns_populated_report() {
let Some(entry) = crate::test_support::KTSTR_TESTS.iter().next() else {
eprintln!(
"ktstr: SKIP: show_thresholds_known_test_returns_populated_report — no entries in KTSTR_TESTS",
);
return;
};
let out = show_thresholds(entry.name).expect("show_thresholds must resolve known test");
assert!(
out.contains("Test:"),
"output missing `Test:` header: {out}",
);
assert!(
out.contains("Scheduler:"),
"output missing `Scheduler:` header: {out}",
);
assert!(
out.contains("Resolved assertion thresholds:"),
"output missing thresholds section: {out}",
);
let test_idx = out.find("Test:").unwrap();
let thresholds_idx = out.find("Resolved assertion thresholds:").unwrap();
assert!(
test_idx < thresholds_idx,
"`Test:` header must precede threshold dump",
);
}
#[test]
fn show_thresholds_unknown_test_returns_actionable_error() {
let err = show_thresholds("definitely_not_a_registered_test_xyz123").unwrap_err();
let msg = format!("{err:#}");
assert!(
msg.contains("no registered ktstr test named"),
"error must name the missing-test condition: {msg}",
);
assert!(
msg.contains("cargo nextest list"),
"error must point at the discovery command: {msg}",
);
assert!(
msg.contains("function-name component"),
"error must flag the nextest binary:: prefix caveat: {msg}",
);
}
#[test]
fn suggest_closest_test_name_finds_near_match() {
let Some(entry) = crate::test_support::KTSTR_TESTS.iter().find(|e| {
e.name.len() >= 10 && !(e.name.starts_with("__unit_test_") && e.name.ends_with("__"))
}) else {
skip!(
"no registered non-sentinel test with name >= 10 chars \
— cannot construct a positive strsim probe"
);
};
let mut mutated: Vec<u8> = entry.name.bytes().collect();
mutated[0] = if mutated[0] == b'z' { b'a' } else { b'z' };
let query = std::str::from_utf8(&mutated).expect("ASCII mutation stays UTF-8");
let suggestion = suggest_closest_test_name(query)
.expect("distance-1 typo on a registered name must yield a suggestion");
assert_eq!(
suggestion, entry.name,
"a single-byte typo must suggest the exact name it was derived from",
);
}
#[test]
fn suggest_closest_test_name_returns_none_for_unrelated_query() {
let unrelated = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
assert_eq!(
suggest_closest_test_name(unrelated),
None,
"a query with no lexical relationship to any registered \
test name must yield no suggestion (not an over-reach)",
);
}
#[test]
fn suggest_closest_test_name_accepts_at_threshold_boundary() {
let Some(entry) = crate::test_support::KTSTR_TESTS.iter().find(|e| {
e.name.len() >= 12 && !(e.name.starts_with("__unit_test_") && e.name.ends_with("__"))
}) else {
skip!(
"no registered non-sentinel test with name >= 12 chars \
— cannot construct a boundary strsim probe"
);
};
let mut mutated: Vec<u8> = entry.name.bytes().collect();
for &pos in &[2usize, 4, 6, 8] {
if pos >= mutated.len() {
skip!("entry.name too short for boundary probe");
}
mutated[pos] = if mutated[pos] == b'z' { b'a' } else { b'z' };
}
let query = std::str::from_utf8(&mutated).expect("ASCII mutation stays UTF-8");
if strsim::levenshtein(query, entry.name) != 4 {
skip!("mutation did not produce distance-4 against source");
}
let threshold = std::cmp::max(3, query.len() / 3);
assert_eq!(
threshold, 4,
"boundary test presumes threshold == 4 for 12-char query; \
got {threshold}. If query length changed, update the \
test OR the mutation count to maintain the boundary.",
);
let suggestion = suggest_closest_test_name(query)
.expect("distance equal to threshold must still yield a suggestion");
assert!(
!suggestion.is_empty(),
"boundary-distance query must yield a non-empty suggestion",
);
}
#[test]
fn suggest_closest_run_key_finds_near_match() {
let tmp = tempfile::tempdir().unwrap();
std::fs::create_dir(tmp.path().join("6.14-abc1234")).expect("plant run dir");
let suggestion = suggest_closest_run_key("6.14-abc1235", tmp.path())
.expect("distance-1 typo on a planted run dir must yield a suggestion");
assert_eq!(
suggestion, "6.14-abc1234",
"a single-character typo must suggest the planted dir name",
);
}
#[test]
fn suggest_closest_run_key_returns_none_for_distant_query() {
let tmp = tempfile::tempdir().unwrap();
std::fs::create_dir(tmp.path().join("6.14-abc1234")).expect("plant run dir");
assert_eq!(
suggest_closest_run_key("xxxxxxxxxxxxx", tmp.path()),
None,
"a query with no lexical relationship to any planted run \
dir must yield no suggestion",
);
}
#[test]
fn suggest_closest_run_key_returns_none_for_empty_root() {
let tmp = tempfile::tempdir().unwrap();
assert_eq!(
suggest_closest_run_key("6.14-abc1234", tmp.path()),
None,
"empty root must yield None — no candidates to match against",
);
}
#[test]
fn suggest_closest_run_key_skips_files() {
let tmp = tempfile::tempdir().unwrap();
std::fs::write(tmp.path().join("6.14-abc1234"), b"not a dir").expect("plant file");
std::fs::create_dir(tmp.path().join("6.14-abc1235")).expect("plant dir");
let suggestion = suggest_closest_run_key("6.14-abc1234", tmp.path())
.expect("the planted directory must yield a suggestion despite the same-name file");
assert_eq!(
suggestion, "6.14-abc1235",
"a regression that drops the is_dir() filter would surface \
here as `Some(\"6.14-abc1234\")` (the file at distance 0) \
instead of `Some(\"6.14-abc1235\")` (the dir at distance 1)",
);
}
#[test]
fn suggest_closest_scenario_name_finds_near_match() {
let scenarios = crate::scenario::all_scenarios();
let Some(s) = scenarios.iter().find(|s| s.name.len() >= 10) else {
skip!(
"no registered scenario with name >= 10 chars — cannot \
construct a positive strsim probe"
);
};
let mut mutated: Vec<u8> = s.name.bytes().collect();
mutated[0] = if mutated[0] == b'z' { b'a' } else { b'z' };
let query = std::str::from_utf8(&mutated).expect("ASCII mutation stays UTF-8");
let suggestion = suggest_closest_scenario_name(query)
.expect("distance-1 typo of a registered scenario must yield a suggestion");
assert_eq!(
suggestion, s.name,
"single-byte typo must resolve back to the exact scenario name",
);
}
#[test]
fn suggest_closest_scenario_name_returns_none_for_unrelated_query() {
let unrelated = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
assert_eq!(
suggest_closest_scenario_name(unrelated),
None,
"a query unrelated to every registered scenario must yield \
no suggestion (no over-reach)",
);
}
#[test]
fn suggest_closest_scenario_name_handles_any_registry_size() {
let _ = suggest_closest_scenario_name("arbitrary");
}
#[test]
fn scenario_filter_hint_formats_suffix_on_near_match() {
let scenarios = crate::scenario::all_scenarios();
let Some(s) = scenarios.iter().find(|s| s.name.len() >= 10) else {
skip!("no registered scenario with name >= 10 chars");
};
let mut mutated: Vec<u8> = s.name.bytes().collect();
mutated[0] = if mutated[0] == b'z' { b'a' } else { b'z' };
let query = std::str::from_utf8(&mutated).unwrap();
let hint = scenario_filter_hint(query).expect("near match must produce a hint");
assert!(
hint.starts_with(" Did you mean `"),
"hint must start with ` Did you mean \\`` prefix: {hint}",
);
assert!(
hint.ends_with("`?"),
"hint must end with the backtick-close + question mark: {hint}",
);
assert!(
hint.contains(s.name),
"hint must embed the matched scenario name: {hint}",
);
}
#[test]
fn scenario_filter_hint_returns_none_on_unrelated_query() {
assert!(
scenario_filter_hint("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx").is_none(),
"unrelated query must produce no hint, not a blank suffix",
);
}
#[test]
fn filter_scenarios_empty_match_includes_did_you_mean_hint() {
let scenarios = crate::scenario::all_scenarios();
let Some(s) = scenarios.iter().find(|s| s.name.len() >= 10) else {
skip!("no registered scenario with name >= 10 chars");
};
let mut mutated: Vec<u8> = s.name.bytes().collect();
mutated[0] = if mutated[0] == b'z' { b'a' } else { b'z' };
let query = std::str::from_utf8(&mutated).unwrap().to_string();
if scenarios.iter().any(|sc| sc.name.contains(&query)) {
skip!(
"mutated query accidentally substring-matches a \
registered scenario; cannot exercise the bail branch"
);
}
let err =
filter_scenarios(&scenarios, Some(&query)).expect_err("non-matching filter must bail");
let msg = format!("{err:#}");
assert!(
msg.contains("no scenarios matched filter"),
"bail must name the condition: {msg}",
);
assert!(
msg.contains("Did you mean"),
"bail must include the strsim suggestion on a near match: {msg}",
);
assert!(
msg.contains(s.name),
"bail must name the suggested scenario: {msg}",
);
}
#[test]
fn filter_scenarios_unrelated_filter_bails_without_hint() {
let scenarios = crate::scenario::all_scenarios();
let err = filter_scenarios(&scenarios, Some("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"))
.expect_err("unrelated filter must bail");
let msg = format!("{err:#}");
assert!(
msg.contains("no scenarios matched filter"),
"bail must still name the condition: {msg}",
);
assert!(
!msg.contains("Did you mean"),
"unrelated filter must NOT over-suggest a distant match: {msg}",
);
assert!(
msg.contains("ktstr list"),
"bail must fall back to the generic 'ktstr list' pointer: {msg}",
);
}
#[test]
fn untracked_legend_names_the_tag_word() {
assert!(
UNTRACKED_KCONFIG_EXPLANATION.contains("(untracked kconfig)"),
"legend must name the tag it explains: {UNTRACKED_KCONFIG_EXPLANATION}",
);
}
#[test]
fn format_entry_row_renders_eol_kconfig_matrix() {
use crate::cache::{CacheArtifacts, CacheDir, KernelMetadata, KernelSource};
let tmp = tempfile::TempDir::new().unwrap();
let cache = CacheDir::with_root(tmp.path().join("cache"));
let src_dir = tmp.path().join("src");
std::fs::create_dir_all(&src_dir).unwrap();
let image = src_dir.join("bzImage");
std::fs::write(&image, b"fake kernel").unwrap();
let current_hash = "a1b2c3d4";
let active_prefixes = ["6.14".to_string()];
let build_row = |key: &str, version: Option<&str>, entry_hash: Option<&str>| -> String {
let meta = KernelMetadata::new(
KernelSource::Tarball,
"x86_64".to_string(),
"bzImage".to_string(),
"2026-04-12T10:00:00Z".to_string(),
)
.with_version(version.map(str::to_string))
.with_ktstr_kconfig_hash(entry_hash.map(str::to_string));
let entry = cache
.store(key, &CacheArtifacts::new(&image), &meta)
.unwrap();
format_entry_row(&entry, current_hash, &active_prefixes)
};
let c8_key = "c8-long-key-exactly-forty-eight-chars-xxxxxxxxxx";
let c9_key = "c9-key-longer-than-forty-eight-chars-by-twelve-xxxxxxxxxxxx";
debug_assert_eq!(c8_key.len(), 48);
debug_assert_eq!(c9_key.len(), 59);
let rows = [
build_row("c1-active-matches", Some("6.14.2"), Some(current_hash)),
build_row("c2-active-stale", Some("6.14.2"), Some("deadbeef")),
build_row("c3-active-untracked", Some("6.14.2"), None),
build_row("c4-eol-matches", Some("2.6.32"), Some(current_hash)),
build_row("c5-eol-stale", Some("2.6.32"), Some("deadbeef")),
build_row("c6-eol-untracked", Some("2.6.32"), None),
build_row("c7-active-no-version", None, Some(current_hash)),
build_row(c8_key, Some("6.14.2"), Some(current_hash)),
build_row(c9_key, Some("6.14.2"), Some(current_hash)),
build_row("c10-active-rc", Some("6.14-rc2"), Some(current_hash)),
build_row("c11-eol-rc", Some("7.0-rc1"), Some(current_hash)),
];
let joined = rows.join("\n");
insta::assert_snapshot!(joined, @r"
c1-active-matches 6.14.2 tarball x86_64 2026-04-12T10:00:00Z
c2-active-stale 6.14.2 tarball x86_64 2026-04-12T10:00:00Z (stale kconfig)
c3-active-untracked 6.14.2 tarball x86_64 2026-04-12T10:00:00Z (untracked kconfig)
c4-eol-matches 2.6.32 tarball x86_64 2026-04-12T10:00:00Z (EOL)
c5-eol-stale 2.6.32 tarball x86_64 2026-04-12T10:00:00Z (stale kconfig) (EOL)
c6-eol-untracked 2.6.32 tarball x86_64 2026-04-12T10:00:00Z (untracked kconfig) (EOL)
c7-active-no-version - tarball x86_64 2026-04-12T10:00:00Z
c8-long-key-exactly-forty-eight-chars-xxxxxxxxxx 6.14.2 tarball x86_64 2026-04-12T10:00:00Z
c9-key-longer-than-forty-eight-chars-by-twelve-xxxxxxxxxxxx 6.14.2 tarball x86_64 2026-04-12T10:00:00Z
c10-active-rc 6.14-rc2 tarball x86_64 2026-04-12T10:00:00Z
c11-eol-rc 7.0-rc1 tarball x86_64 2026-04-12T10:00:00Z (EOL)
");
}
#[test]
fn format_entry_row_empty_active_prefixes_does_not_tag_eol() {
use crate::cache::{CacheArtifacts, CacheDir, KernelMetadata, KernelSource};
let tmp = tempfile::TempDir::new().unwrap();
let cache = CacheDir::with_root(tmp.path().join("cache"));
let src = tempfile::TempDir::new().unwrap();
let image = src.path().join("bzImage");
std::fs::write(&image, b"fake kernel").unwrap();
let meta = KernelMetadata::new(
KernelSource::Tarball,
"x86_64".to_string(),
"bzImage".to_string(),
"2026-04-12T10:00:00Z".to_string(),
)
.with_version(Some("2.6.32".to_string()));
let entry = cache
.store("fetch-failed-fallback", &CacheArtifacts::new(&image), &meta)
.unwrap();
let row_fallback = format_entry_row(&entry, "kconfig_hash", &[]);
assert!(
!row_fallback.contains("(EOL)"),
"empty active_prefixes (fetch-failed fallback) must not tag any entry EOL, \
got row: {row_fallback:?}",
);
let row_with_active = format_entry_row(&entry, "kconfig_hash", &["6.14".to_string()]);
assert!(
row_with_active.contains("(EOL)"),
"non-empty active_prefixes excluding entry's prefix must tag EOL, \
got row: {row_with_active:?}",
);
}
#[test]
fn format_entry_row_tags_appear_in_stable_order() {
use crate::cache::{CacheArtifacts, CacheDir, KernelMetadata, KernelSource};
let tmp = tempfile::TempDir::new().unwrap();
let cache = CacheDir::with_root(tmp.path().join("cache"));
let src = tempfile::TempDir::new().unwrap();
let image = src.path().join("bzImage");
std::fs::write(&image, b"fake kernel").unwrap();
let current_hash = "a1b2c3d4";
let active_prefixes = ["6.14".to_string()];
let stale_meta = KernelMetadata::new(
KernelSource::Tarball,
"x86_64".to_string(),
"bzImage".to_string(),
"2026-04-12T10:00:00Z".to_string(),
)
.with_version(Some("2.6.32".to_string()))
.with_ktstr_kconfig_hash(Some("deadbeef".to_string()));
let stale_entry = cache
.store("stale-eol", &CacheArtifacts::new(&image), &stale_meta)
.unwrap();
let stale_row = format_entry_row(&stale_entry, current_hash, &active_prefixes);
let stale_idx = stale_row
.find("(stale kconfig)")
.expect("stale-kconfig tag must appear on dual-tag row");
let eol_idx = stale_row
.find("(EOL)")
.expect("EOL tag must appear on dual-tag row");
assert!(
stale_idx < eol_idx,
"(stale kconfig) must precede (EOL) in the rendered row — a \
regression that reordered the two tags would break operator \
grep pipelines that key on the kconfig-tag being the first \
tag on a row:\n{stale_row}",
);
let untracked_meta = KernelMetadata::new(
KernelSource::Tarball,
"x86_64".to_string(),
"bzImage".to_string(),
"2026-04-12T10:00:00Z".to_string(),
)
.with_version(Some("2.6.32".to_string()))
.with_ktstr_kconfig_hash(None);
let untracked_entry = cache
.store(
"untracked-eol",
&CacheArtifacts::new(&image),
&untracked_meta,
)
.unwrap();
let untracked_row = format_entry_row(&untracked_entry, current_hash, &active_prefixes);
let untracked_idx = untracked_row
.find("(untracked kconfig)")
.expect("untracked-kconfig tag must appear on dual-tag row");
let eol_idx = untracked_row
.find("(EOL)")
.expect("EOL tag must appear on dual-tag row");
assert!(
untracked_idx < eol_idx,
"(untracked kconfig) must precede (EOL) — same ordering \
contract as the stale branch:\n{untracked_row}",
);
}
fn mk_valid(key: &str) -> crate::cache::ListedEntry {
use crate::cache::{CacheEntry, KernelMetadata, KernelSource};
let path = std::path::PathBuf::from(format!("/tmp/fixture/{key}"));
let metadata = KernelMetadata::new(
KernelSource::Tarball,
"x86_64".to_string(),
"bzImage".to_string(),
"2026-04-22T00:00:00Z".to_string(),
);
crate::cache::ListedEntry::Valid(Box::new(CacheEntry {
key: key.to_string(),
path,
metadata,
}))
}
fn mk_corrupt(key: &str) -> crate::cache::ListedEntry {
crate::cache::ListedEntry::Corrupt {
key: key.to_string(),
path: std::path::PathBuf::from(format!("/tmp/fixture/{key}")),
reason: "test fixture corrupt".to_string(),
}
}
#[test]
fn partition_clean_candidates_empty_input_yields_empty_output() {
let out = partition_clean_candidates(&[], None, false);
assert!(out.is_empty());
let out = partition_clean_candidates(&[], Some(5), true);
assert!(out.is_empty());
}
#[test]
fn partition_clean_candidates_corrupt_only_skips_valid_entries() {
let entries = vec![mk_valid("v1"), mk_corrupt("c1"), mk_valid("v2")];
let out = partition_clean_candidates(&entries, None, true);
assert_eq!(out.len(), 1);
assert_eq!(out[0].key(), "c1");
}
#[test]
fn partition_clean_candidates_no_keep_removes_every_entry() {
let entries = vec![mk_valid("v1"), mk_corrupt("c1"), mk_valid("v2")];
let out = partition_clean_candidates(&entries, None, false);
let keys: Vec<&str> = out.iter().map(|e| e.key()).collect();
assert_eq!(keys, vec!["v1", "c1", "v2"]);
}
#[test]
fn partition_clean_candidates_keep_retains_n_newest_valid_preserves_corrupt() {
let entries = vec![
mk_valid("v_new1"),
mk_corrupt("c_mid"),
mk_valid("v_new2"),
mk_valid("v_old"),
];
let out = partition_clean_candidates(&entries, Some(2), false);
let keys: Vec<&str> = out.iter().map(|e| e.key()).collect();
assert_eq!(keys, vec!["c_mid", "v_old"]);
}
#[test]
fn partition_clean_candidates_keep_never_preserves_corrupt() {
let entries = vec![mk_corrupt("c1"), mk_valid("v1"), mk_valid("v2")];
let out = partition_clean_candidates(&entries, Some(3), false);
let keys: Vec<&str> = out.iter().map(|e| e.key()).collect();
assert_eq!(keys, vec!["c1"]);
}
#[test]
fn partition_clean_candidates_corrupt_only_ignores_keep() {
let entries = vec![
mk_valid("v_new1"),
mk_corrupt("c_mid"),
mk_valid("v_new2"),
mk_valid("v_old"),
];
let out = partition_clean_candidates(&entries, Some(2), true);
let keys: Vec<&str> = out.iter().map(|e| e.key()).collect();
assert_eq!(
keys,
vec!["c_mid"],
"corrupt_only=true must make keep inert: valid entries preserved, only corrupt removed",
);
}
#[test]
fn kernel_clean_rejects_corrupt_only_with_keep() {
use clap::Parser as _;
#[derive(clap::Parser, Debug)]
struct TestCli {
#[command(subcommand)]
cmd: KernelCommand,
}
let err = TestCli::try_parse_from(["prog", "clean", "--keep", "2", "--corrupt-only"])
.expect_err("--keep together with --corrupt-only must fail parsing");
let msg = err.to_string();
assert!(
msg.to_ascii_lowercase().contains("cannot be used with")
|| msg.to_ascii_lowercase().contains("conflict"),
"clap error must surface the conflict between --keep and --corrupt-only, got: {msg}",
);
}
#[test]
fn kernel_clean_accepts_corrupt_only_alone() {
use clap::Parser as _;
#[derive(clap::Parser, Debug)]
struct TestCli {
#[command(subcommand)]
cmd: KernelCommand,
}
let parsed = TestCli::try_parse_from(["prog", "clean", "--corrupt-only"])
.expect("--corrupt-only without --keep must parse cleanly");
match parsed.cmd {
KernelCommand::Clean {
keep,
force,
corrupt_only,
} => {
assert_eq!(keep, None);
assert!(!force);
assert!(corrupt_only);
}
other => panic!("expected KernelCommand::Clean, got {other:?}"),
}
}
#[test]
fn kernel_build_parses_cpu_cap_without_extra_flags() {
use clap::Parser as _;
#[derive(clap::Parser, Debug)]
struct TestCli {
#[command(subcommand)]
cmd: KernelCommand,
}
let parsed = TestCli::try_parse_from(["prog", "build", "6.14.2", "--cpu-cap", "4"])
.expect("kernel build --cpu-cap N must parse");
match parsed.cmd {
KernelCommand::Build {
cpu_cap, version, ..
} => {
assert_eq!(cpu_cap, Some(4));
assert_eq!(version.as_deref(), Some("6.14.2"));
}
other => panic!("expected KernelCommand::Build, got {other:?}"),
}
}
#[test]
fn kernel_build_without_cpu_cap_defaults_to_none() {
use clap::Parser as _;
#[derive(clap::Parser, Debug)]
struct TestCli {
#[command(subcommand)]
cmd: KernelCommand,
}
let parsed = TestCli::try_parse_from(["prog", "build", "6.14.2"])
.expect("kernel build without --cpu-cap must parse");
match parsed.cmd {
KernelCommand::Build { cpu_cap, .. } => {
assert_eq!(cpu_cap, None, "no --cpu-cap must produce None, not Some(0)",);
}
other => panic!("expected KernelCommand::Build, got {other:?}"),
}
}
#[test]
fn kernel_build_cpu_cap_zero_passes_clap() {
use clap::Parser as _;
#[derive(clap::Parser, Debug)]
struct TestCli {
#[command(subcommand)]
cmd: KernelCommand,
}
let parsed = TestCli::try_parse_from(["prog", "build", "6.14.2", "--cpu-cap", "0"])
.expect("clap-level parse must accept 0; runtime validation rejects");
match parsed.cmd {
KernelCommand::Build { cpu_cap, .. } => {
assert_eq!(
cpu_cap,
Some(0),
"clap parses 0 verbatim; validation is downstream",
);
}
other => panic!("expected KernelCommand::Build, got {other:?}"),
}
}
#[test]
fn locks_snapshot_json_field_names_are_stable() {
let snap = LocksSnapshot {
llcs: vec![LlcLockRow {
llc_idx: 0,
numa_node: Some(1),
lockfile: "/tmp/ktstr-llc-0.lock".to_string(),
holders: Vec::new(),
}],
cpus: vec![CpuLockRow {
cpu: 3,
numa_node: None,
lockfile: "/tmp/ktstr-cpu-3.lock".to_string(),
holders: Vec::new(),
}],
cache: vec![CacheLockRow {
cache_key: "6.14.2-tarball-x86_64".to_string(),
lockfile: "/tmp/.locks/6.14.2-tarball-x86_64.lock".to_string(),
holders: Vec::new(),
}],
run_dirs: vec![RunDirLockRow {
run_key: "6.14-abc1234".to_string(),
lockfile: "/tmp/.locks/6.14-abc1234.lock".to_string(),
holders: Vec::new(),
}],
};
let val = serde_json::to_value(&snap).expect("serde serialize");
assert!(
val.get("llcs").is_some(),
"top-level must have 'llcs': {val}"
);
assert!(
val.get("cpus").is_some(),
"top-level must have 'cpus': {val}"
);
assert!(
val.get("cache").is_some(),
"top-level must have 'cache': {val}"
);
assert!(
val.get("run_dirs").is_some(),
"top-level must have 'run_dirs': {val}"
);
let llc0 = &val["llcs"][0];
assert!(
llc0.get("llc_idx").is_some(),
"llc_idx (snake_case): {llc0}"
);
assert!(llc0.get("numa_node").is_some(), "numa_node: {llc0}");
assert!(llc0.get("lockfile").is_some(), "lockfile: {llc0}");
assert!(llc0.get("holders").is_some(), "holders: {llc0}");
let cpu0 = &val["cpus"][0];
assert!(cpu0.get("cpu").is_some());
assert!(cpu0.get("numa_node").is_some());
let cache0 = &val["cache"][0];
assert!(cache0.get("cache_key").is_some(), "cache_key: {cache0}");
let run0 = &val["run_dirs"][0];
assert!(run0.get("run_key").is_some(), "run_key: {run0}");
assert!(run0.get("lockfile").is_some(), "lockfile: {run0}");
assert!(run0.get("holders").is_some(), "holders: {run0}");
}
#[test]
fn collect_locks_snapshot_empty_roots() {
use tempfile::TempDir;
let tmp_dir = TempDir::new().expect("tempdir tmp_root");
let cache_dir = TempDir::new().expect("tempdir cache_root");
let runs_dir = TempDir::new().expect("tempdir runs_root");
let snap = collect_locks_snapshot_from(
tmp_dir.path(),
Some(cache_dir.path()),
Some(runs_dir.path()),
)
.expect("collect must succeed on empty roots");
assert!(snap.llcs.is_empty(), "no ktstr-llc-*.lock → empty llcs");
assert!(snap.cpus.is_empty(), "no ktstr-cpu-*.lock → empty cpus");
assert!(snap.cache.is_empty(), "no .locks/ → empty cache");
assert!(
snap.run_dirs.is_empty(),
"no .locks/ under runs_root → empty run_dirs",
);
}
#[test]
fn collect_locks_snapshot_discovers_lockfiles() {
use tempfile::TempDir;
let tmp_dir = TempDir::new().expect("tempdir");
let path = tmp_dir.path();
std::fs::write(path.join("ktstr-llc-5.lock"), b"").expect("plant llc-5");
std::fs::write(path.join("ktstr-llc-2.lock"), b"").expect("plant llc-2");
std::fs::write(path.join("ktstr-cpu-7.lock"), b"").expect("plant cpu-7");
std::fs::write(path.join("ktstr-llc-oops.lock"), b"").expect("plant junk");
let snap = collect_locks_snapshot_from(path, None, None).expect("collect must succeed");
assert_eq!(snap.llcs.len(), 2);
assert_eq!(snap.llcs[0].llc_idx, 2, "sort ascending: llc 2 first");
assert_eq!(snap.llcs[1].llc_idx, 5, "sort ascending: llc 5 second");
assert_eq!(snap.cpus.len(), 1);
assert_eq!(snap.cpus[0].cpu, 7);
assert!(snap.cache.is_empty());
assert!(snap.run_dirs.is_empty());
}
#[test]
fn collect_locks_snapshot_discovers_run_dir_lockfiles() {
use tempfile::TempDir;
let runs_dir = TempDir::new().expect("tempdir runs_root");
let locks_dir = runs_dir.path().join(crate::flock::LOCK_DIR_NAME);
std::fs::create_dir_all(&locks_dir).expect("mkdir .locks/");
std::fs::write(locks_dir.join("7.0-def5678.lock"), b"").expect("plant 7.0");
std::fs::write(locks_dir.join("6.14-abc1234.lock"), b"").expect("plant 6.14");
let tmp_dir = TempDir::new().expect("tempdir tmp_root");
let snap = collect_locks_snapshot_from(tmp_dir.path(), None, Some(runs_dir.path()))
.expect("collect must succeed");
assert_eq!(snap.run_dirs.len(), 2);
assert_eq!(
snap.run_dirs[0].run_key, "6.14-abc1234",
"sort ascending: 6.14 lexically before 7.0",
);
assert_eq!(snap.run_dirs[1].run_key, "7.0-def5678");
}
fn bypass_env_lock() -> std::sync::MutexGuard<'static, ()> {
use std::sync::{Mutex, OnceLock};
static ENV_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
ENV_LOCK
.get_or_init(|| Mutex::new(()))
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
struct BypassGuard;
impl BypassGuard {
fn set(value: &str) -> Self {
unsafe {
std::env::set_var("KTSTR_BYPASS_LLC_LOCKS", value);
}
BypassGuard
}
fn remove() -> Self {
unsafe {
std::env::remove_var("KTSTR_BYPASS_LLC_LOCKS");
}
BypassGuard
}
}
impl Drop for BypassGuard {
fn drop(&mut self) {
unsafe {
std::env::remove_var("KTSTR_BYPASS_LLC_LOCKS");
}
}
}
#[test]
fn acquire_build_reservation_bypass_returns_no_reservation() {
let _lock = bypass_env_lock();
let _env = BypassGuard::set("1");
let r = acquire_build_reservation("test", None).expect("bypass + no cap must succeed");
assert!(r.plan.is_none(), "bypass must produce no LLC plan");
assert!(
r._sandbox.is_none(),
"bypass must produce no cgroup sandbox",
);
assert!(
r.make_jobs.is_none(),
"bypass must fall back to nproc (None signals to caller)",
);
}
#[test]
fn acquire_build_reservation_bypass_with_cap_errors() {
let _lock = bypass_env_lock();
let _env = BypassGuard::set("1");
let cap = crate::vmm::host_topology::CpuCap::new(2).expect("cap=2 valid");
let err =
acquire_build_reservation("test", Some(cap)).expect_err("bypass + cap must error");
let msg = format!("{err:#}");
assert!(
msg.contains("resource contract"),
"err must name the resource contract: {msg}",
);
}
#[test]
fn acquire_build_reservation_plan_and_make_jobs_consistent() {
let _lock = bypass_env_lock();
let _env = BypassGuard::remove();
match acquire_build_reservation("test", None) {
Ok(r) => {
assert_eq!(
r.plan.is_some(),
r.make_jobs.is_some(),
"plan and make_jobs must agree on reservation presence",
);
if let (Some(p), Some(jobs)) = (r.plan.as_ref(), r.make_jobs) {
assert_eq!(
jobs,
crate::vmm::host_topology::make_jobs_for_plan(p),
"make_jobs must equal make_jobs_for_plan(&plan)",
);
}
assert_eq!(
r.plan.is_some(),
r._sandbox.is_some(),
"sandbox and plan must agree on reservation presence",
);
}
Err(e) => {
eprintln!("acquire_build_reservation unavailable on this host: {e:#}");
}
}
}
#[test]
fn resolve_cached_kernel_surfaces_inverted_range_diagnostic() {
let id = crate::kernel_path::KernelId::Range {
start: "6.16".to_string(),
end: "6.12".to_string(),
};
let err = resolve_cached_kernel(&id, "ktstr-test").expect_err("inverted range must error");
let msg = format!("{err:#}");
assert!(
msg.contains("inverted kernel range"),
"validate() diagnostic must surface ahead of the generic \
'not yet supported' bail; got: {msg}",
);
assert!(
msg.contains("6.12..6.16"),
"swap suggestion must appear in the error; got: {msg}",
);
assert!(
!msg.contains("not yet supported in this context"),
"validate() must short-circuit before the generic bail; got: {msg}",
);
}
#[test]
fn resolve_kernel_image_surfaces_inverted_range_diagnostic() {
let policy = KernelResolvePolicy {
cli_label: "ktstr-test",
accept_raw_image: false,
};
let err = resolve_kernel_image(Some("6.16..6.12"), &policy)
.expect_err("inverted range must error");
let msg = format!("{err:#}");
assert!(
msg.contains("inverted kernel range"),
"validate() diagnostic must surface ahead of the generic \
'not yet supported' bail; got: {msg}",
);
assert!(
msg.contains("6.12..6.16"),
"swap suggestion must appear in the error; got: {msg}",
);
assert!(
!msg.contains("not yet supported in this context"),
"validate() must short-circuit before the generic bail; got: {msg}",
);
}
fn release(moniker: &str, version: &str) -> crate::fetch::Release {
crate::fetch::Release {
moniker: moniker.to_string(),
version: version.to_string(),
}
}
#[test]
fn filter_and_sort_range_basic() {
use crate::kernel_path::decompose_version_for_compare;
let releases = vec![
release("mainline", "6.18-rc2"),
release("stable", "6.16.5"),
release("longterm", "6.12.40"),
release("linux-next", "6.18-rc2-next-20260420"),
release("longterm", "6.6.99"),
release("stable", "6.14.10"),
release("stable", "6.10.0"),
];
let start_key = decompose_version_for_compare("6.12").unwrap();
let end_key = decompose_version_for_compare("6.16.5").unwrap();
let out = filter_and_sort_range(&releases, start_key, end_key);
assert_eq!(
out,
vec![
"6.12.40".to_string(),
"6.14.10".to_string(),
"6.16.5".to_string(),
],
"stable+longterm only, ascending, [start, end] inclusive",
);
}
#[test]
fn filter_and_sort_range_endpoints_absent_from_releases() {
use crate::kernel_path::decompose_version_for_compare;
let releases = vec![
release("stable", "6.12.5"),
release("stable", "6.14.2"),
release("stable", "6.15.0"),
];
let start_key = decompose_version_for_compare("6.10").unwrap();
let end_key = decompose_version_for_compare("6.16").unwrap();
let out = filter_and_sort_range(&releases, start_key, end_key);
assert_eq!(
out,
vec![
"6.12.5".to_string(),
"6.14.2".to_string(),
"6.15.0".to_string(),
],
);
}
#[test]
fn filter_and_sort_range_inclusive_both_endpoints() {
use crate::kernel_path::decompose_version_for_compare;
let releases = vec![
release("stable", "6.12.5"),
release("stable", "6.13.0"),
release("stable", "6.14.2"),
];
let start_key = decompose_version_for_compare("6.12.5").unwrap();
let end_key = decompose_version_for_compare("6.14.2").unwrap();
let out = filter_and_sort_range(&releases, start_key, end_key);
assert_eq!(
out,
vec![
"6.12.5".to_string(),
"6.13.0".to_string(),
"6.14.2".to_string(),
],
);
}
#[test]
fn filter_and_sort_range_rc_under_stable_moniker_orders_after_release() {
use crate::kernel_path::decompose_version_for_compare;
let releases = vec![
release("stable", "6.14.0-rc3"),
release("stable", "6.14.0"),
release("stable", "6.13.0"),
];
let start_key = decompose_version_for_compare("6.13").unwrap();
let end_key = decompose_version_for_compare("6.15").unwrap();
let out = filter_and_sort_range(&releases, start_key, end_key);
assert_eq!(
out,
vec![
"6.13.0".to_string(),
"6.14.0-rc3".to_string(),
"6.14.0".to_string(),
],
);
}
#[test]
fn filter_and_sort_range_empty_when_no_overlap() {
use crate::kernel_path::decompose_version_for_compare;
let releases = vec![release("stable", "5.10.0"), release("stable", "5.15.0")];
let start_key = decompose_version_for_compare("6.10").unwrap();
let end_key = decompose_version_for_compare("6.16").unwrap();
let out = filter_and_sort_range(&releases, start_key, end_key);
assert!(out.is_empty(), "no overlap → empty result, got {out:?}");
}
#[test]
fn filter_and_sort_range_drops_non_stable_monikers() {
use crate::kernel_path::decompose_version_for_compare;
let releases = vec![
release("mainline", "6.14.0"),
release("linux-next", "6.14.0-next-20260420"),
release("stable", "6.14.5"),
];
let start_key = decompose_version_for_compare("6.14").unwrap();
let end_key = decompose_version_for_compare("6.15").unwrap();
let out = filter_and_sort_range(&releases, start_key, end_key);
assert_eq!(
out,
vec!["6.14.5".to_string()],
"only stable/longterm survive the filter"
);
}
#[test]
fn filter_and_sort_range_drops_unparseable_versions() {
use crate::kernel_path::decompose_version_for_compare;
let releases = vec![
release("stable", "6.14.0"),
release("stable", "embargoed-cve-tag"),
release("stable", "6.14.5"),
];
let start_key = decompose_version_for_compare("6.14").unwrap();
let end_key = decompose_version_for_compare("6.15").unwrap();
let out = filter_and_sort_range(&releases, start_key, end_key);
assert_eq!(out, vec!["6.14.0".to_string(), "6.14.5".to_string()],);
}
#[test]
fn expand_kernel_range_rejects_unparseable_start() {
let err = expand_kernel_range("garbage", "6.14", "ktstr-test")
.expect_err("unparseable start must error");
let msg = format!("{err:#}");
assert!(
msg.contains("kernel range start `garbage`"),
"error must cite the bad endpoint, got: {msg}"
);
}
#[test]
fn expand_kernel_range_rejects_unparseable_end() {
let err = expand_kernel_range("6.10", "garbage", "ktstr-test")
.expect_err("unparseable end must error");
let msg = format!("{err:#}");
assert!(
msg.contains("kernel range end `garbage`"),
"error must cite the bad endpoint, got: {msg}"
);
}
#[test]
fn kernel_list_range_preview_rejects_non_range_spec() {
let err = run_kernel_list_range(false, "6.14.2")
.expect_err("bare version must not parse as a Range");
let msg = format!("{err:#}");
assert!(
msg.contains("does not parse as a `START..END` range"),
"error must name the expected range shape, got: {msg}"
);
assert!(
msg.contains("`6.14.2`"),
"error must cite the bad input verbatim, got: {msg}"
);
}
#[test]
fn kernel_list_range_preview_rejects_inverted_range() {
let err = run_kernel_list_range(false, "6.16..6.12")
.expect_err("inverted range must not be accepted");
let msg = format!("{err:#}");
assert!(
msg.contains("kernel list --range 6.16..6.12"),
"error must cite the operator-supplied range, got: {msg}"
);
}
fn init_repo_with_commit_for_resolve_test(dir: &std::path::Path) {
use std::process::Command;
let run = |args: &[&str]| {
let out = Command::new("git")
.args(args)
.current_dir(dir)
.env("GIT_CONFIG_GLOBAL", "/dev/null")
.env("GIT_CONFIG_SYSTEM", "/dev/null")
.env("GIT_AUTHOR_NAME", "ktstr-test")
.env("GIT_AUTHOR_EMAIL", "ktstr-test@localhost")
.env("GIT_COMMITTER_NAME", "ktstr-test")
.env("GIT_COMMITTER_EMAIL", "ktstr-test@localhost")
.output()
.expect("spawn git");
assert!(
out.status.success(),
"git {:?} failed: {}",
args,
String::from_utf8_lossy(&out.stderr)
);
};
run(&["init", "-q", "-b", "main"]);
std::fs::write(dir.join("Makefile"), "# kernel makefile fixture\n").unwrap();
std::fs::write(dir.join("Kconfig"), "# kernel kconfig fixture\n").unwrap();
std::fs::write(dir.join("README"), "fixture\n").unwrap();
run(&["add", "Makefile", "Kconfig", "README"]);
run(&[
"-c",
"commit.gpgsign=false",
"commit",
"-q",
"-m",
"initial",
]);
}
fn populate_cache_entry_for_resolve_test(
cache_root: &std::path::Path,
cache_key: &str,
) -> std::path::PathBuf {
let cache = crate::cache::CacheDir::with_root(cache_root.to_path_buf());
let (arch, image_name) = crate::fetch::arch_info();
let staging = tempfile::TempDir::new().expect("staging tempdir");
let fake_image = staging.path().join(image_name);
std::fs::write(&fake_image, b"fake kernel image bytes").expect("write fake image");
let metadata = crate::cache::KernelMetadata::new(
crate::cache::KernelSource::Local {
source_tree_path: None,
git_hash: None,
},
arch.to_string(),
image_name.to_string(),
"2026-04-12T10:00:00Z".to_string(),
);
let artifacts = crate::cache::CacheArtifacts::new(&fake_image);
let entry = cache
.store(cache_key, &artifacts, &metadata)
.expect("pre-populate cache entry");
entry.path
}
#[test]
fn resolve_kernel_dir_to_entry_clean_tree_cache_hit() {
if std::process::Command::new("git")
.arg("--version")
.output()
.is_err()
{
skip!("git CLI unavailable");
}
let _lock = crate::test_support::test_helpers::lock_env();
let cache_tmp = tempfile::TempDir::new().expect("cache tempdir");
let _cache_env = crate::test_support::test_helpers::EnvVarGuard::set(
"KTSTR_CACHE_DIR",
cache_tmp.path(),
);
let src_tmp = tempfile::TempDir::new().expect("src tempdir");
init_repo_with_commit_for_resolve_test(src_tmp.path());
let acquired =
crate::fetch::local_source(src_tmp.path()).expect("local_source must succeed");
assert!(!acquired.is_dirty, "fixture must be clean before lookup");
let cache_key = acquired.cache_key.clone();
let entry_path = populate_cache_entry_for_resolve_test(cache_tmp.path(), &cache_key);
let outcome = resolve_kernel_dir_to_entry(src_tmp.path(), "test", None)
.expect("resolve must succeed on cache hit");
assert_eq!(
outcome.dir, entry_path,
"cache-hit path must return the cache entry directory, NOT the source tree"
);
let hit = outcome
.cache_hit
.expect("cache hit must produce KernelDirCacheHit");
assert_eq!(
hit.cache_key, cache_key,
"cache hit must report the resolved key"
);
assert_eq!(
hit.built_at, "2026-04-12T10:00:00Z",
"cache hit must surface the persisted built_at timestamp",
);
assert!(
!outcome.is_dirty,
"cache-hit gate requires a clean tree; outcome.is_dirty must be false",
);
}
#[test]
fn resolve_kernel_dir_to_entry_dirty_tree_skips_cache_lookup() {
if std::process::Command::new("git")
.arg("--version")
.output()
.is_err()
{
skip!("git CLI unavailable");
}
if std::process::Command::new("make")
.arg("--version")
.output()
.is_err()
{
skip!("make not in PATH");
}
let _lock = crate::test_support::test_helpers::lock_env();
let cache_tmp = tempfile::TempDir::new().expect("cache tempdir");
let _cache_env = crate::test_support::test_helpers::EnvVarGuard::set(
"KTSTR_CACHE_DIR",
cache_tmp.path(),
);
let _bypass_env =
crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_BYPASS_LLC_LOCKS", "1");
let src_tmp = tempfile::TempDir::new().expect("src tempdir");
init_repo_with_commit_for_resolve_test(src_tmp.path());
std::fs::write(src_tmp.path().join("README"), "modified\n").expect("dirty README");
let dirty_acquired = crate::fetch::local_source(src_tmp.path())
.expect("local_source on dirty tree must succeed");
assert!(
dirty_acquired.is_dirty,
"post-mutation tree must be dirty for the test to be meaningful"
);
populate_cache_entry_for_resolve_test(cache_tmp.path(), &dirty_acquired.cache_key);
let result = resolve_kernel_dir_to_entry(src_tmp.path(), "test", None);
match result {
Ok(outcome) => panic!(
"dirty tree must skip the cache lookup, but resolve returned \
Ok with dir={:?}, cache_hit={:?}, is_dirty={}",
outcome.dir, outcome.cache_hit, outcome.is_dirty,
),
Err(_) => {
let entry_dir = cache_tmp.path().join(&dirty_acquired.cache_key);
assert!(
entry_dir.is_dir(),
"pre-populated entry must still be present after the \
dirty resolve; the gate proved short-circuit by NOT \
returning this directory as the outcome.dir",
);
}
}
}
#[test]
fn resolve_kernel_dir_to_entry_clean_tree_cache_miss_attempts_build() {
if std::process::Command::new("git")
.arg("--version")
.output()
.is_err()
{
skip!("git CLI unavailable");
}
if std::process::Command::new("make")
.arg("--version")
.output()
.is_err()
{
skip!("make not in PATH");
}
let _lock = crate::test_support::test_helpers::lock_env();
let cache_tmp = tempfile::TempDir::new().expect("cache tempdir");
let _cache_env = crate::test_support::test_helpers::EnvVarGuard::set(
"KTSTR_CACHE_DIR",
cache_tmp.path(),
);
let _bypass_env =
crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_BYPASS_LLC_LOCKS", "1");
let src_tmp = tempfile::TempDir::new().expect("src tempdir");
init_repo_with_commit_for_resolve_test(src_tmp.path());
let acquired =
crate::fetch::local_source(src_tmp.path()).expect("local_source must succeed");
assert!(!acquired.is_dirty, "fixture must be clean before resolve");
let result = resolve_kernel_dir_to_entry(src_tmp.path(), "test", None);
assert!(
result.is_err(),
"cache miss without a real kernel toolchain must surface the build failure, \
got Ok({:?})",
result.as_ref().ok().map(|o| &o.dir),
);
}
}