// Use jemalloc as the process-wide allocator for this binary.
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
use std::os::unix::process::CommandExt;
use std::path::{Path, PathBuf};
use std::process::Command;
use clap::{ArgAction, CommandFactory, Parser, Subcommand};
use ktstr::cache::{CacheDir, CacheEntry};
use ktstr::cli;
use ktstr::cli::KernelCommand;
use ktstr::cli::{KERNEL_HELP_NO_RAW, KERNEL_HELP_RAW_OK};
use ktstr::fetch;
// Top-level parser for invocation as a cargo subcommand: argv arrives as
// `cargo ktstr ...`, so the bin name is "cargo" and `ktstr` is modeled as a
// subcommand. Plain `//` comments are used on all clap types in this file so
// the generated --help text is unchanged.
#[derive(Parser)]
#[command(name = "cargo-ktstr", bin_name = "cargo")]
struct Cargo {
    #[command(subcommand)]
    command: CargoSub,
}
// Single wrapper level consumed when cargo forwards `ktstr` as argv[1].
#[derive(Subcommand)]
enum CargoSub {
    Ktstr(Ktstr),
}
// The `ktstr` level; all real verbs live in `KtstrCommand`.
#[derive(Parser)]
struct Ktstr {
    #[command(subcommand)]
    command: KtstrCommand,
}
// All `cargo ktstr <verb>` subcommands. Comments are `//` (never `///`) so
// clap's generated help output is byte-identical to before.
#[allow(clippy::large_enum_variant)]
#[derive(Subcommand)]
enum KtstrCommand {
    // Run the test suite via `cargo nextest run` (aliased as `nextest`).
    #[command(visible_alias = "nextest")]
    Test {
        // Repeatable kernel specs; raw image paths are not accepted here.
        #[arg(long, action = ArgAction::Append, help = KERNEL_HELP_NO_RAW)]
        kernel: Vec<String>,
        #[arg(long)]
        no_perf_mode: bool,
        #[arg(long)]
        release: bool,
        // Everything after the flags is forwarded verbatim to the subcommand.
        #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
        args: Vec<String>,
    },
    // Run tests under `cargo llvm-cov nextest` for coverage collection.
    Coverage {
        #[arg(long, action = ArgAction::Append, help = KERNEL_HELP_NO_RAW)]
        kernel: Vec<String>,
        #[arg(long)]
        no_perf_mode: bool,
        #[arg(long)]
        release: bool,
        #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
        args: Vec<String>,
    },
    // Invoke `cargo llvm-cov` directly (no nextest wrapper, no --release).
    LlvmCov {
        #[arg(long, action = ArgAction::Append, help = KERNEL_HELP_NO_RAW)]
        kernel: Vec<String>,
        #[arg(long)]
        no_perf_mode: bool,
        #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
        args: Vec<String>,
    },
    // Stats inspection/comparison; bare `stats` prints the default report.
    Stats {
        #[command(subcommand)]
        command: Option<StatsCommand>,
    },
    // Kernel cache management; verbs are defined in `ktstr::cli`.
    Kernel {
        #[command(subcommand)]
        command: KernelCommand,
    },
    // Model artifact management (fetch/status/clean).
    Model {
        #[command(subcommand)]
        command: ModelCommand,
    },
    // Run the scheduler verifier against one or more kernels.
    Verifier {
        // Cargo package to build as the scheduler, or an existing binary;
        // clap enforces that at most one of the two is given.
        #[arg(long)]
        scheduler: Option<String>,
        #[arg(long, conflicts_with = "scheduler")]
        scheduler_bin: Option<PathBuf>,
        #[arg(long, action = ArgAction::Append, help = KERNEL_HELP_NO_RAW)]
        kernel: Vec<String>,
        #[arg(long)]
        raw: bool,
        #[arg(long)]
        all_profiles: bool,
        // Comma-separated profile names to restrict the run to.
        #[arg(long, value_delimiter = ',')]
        profiles: Vec<String>,
    },
    // Emit shell completions for the given shell.
    Completions {
        shell: clap_complete::Shell,
        #[arg(long, default_value = "cargo")]
        binary: String,
    },
    ShowHost,
    ShowThresholds {
        test: String,
    },
    Cleanup {
        #[arg(long)]
        parent_cgroup: Option<String>,
    },
    // Show lock state, optionally refreshing on an interval.
    Locks {
        #[arg(long)]
        json: bool,
        // Accepts humantime syntax, e.g. `--watch 2s`.
        #[arg(long, value_parser = humantime::parse_duration)]
        watch: Option<std::time::Duration>,
    },
    // Boot an interactive guest shell on the chosen kernel/topology.
    Shell {
        // Raw kernel images are accepted here (unlike test/coverage).
        #[arg(long, help = KERNEL_HELP_RAW_OK)]
        kernel: Option<String>,
        // "numa_nodes,llcs,cores,threads" — parsed by parse_topology_string.
        #[arg(long, default_value = "1,1,1,1")]
        topology: String,
        #[arg(short = 'i', long = "include-files", action = ArgAction::Append)]
        include_files: Vec<PathBuf>,
        // Guest memory in MiB; clap rejects values below 128.
        #[arg(long = "memory-mb", value_parser = clap::value_parser!(u32).range(128..))]
        memory_mb: Option<u32>,
        #[arg(long)]
        dmesg: bool,
        // Run a single command instead of an interactive shell.
        #[arg(long)]
        exec: Option<String>,
        #[arg(long)]
        no_perf_mode: bool,
        // Only meaningful together with --no-perf-mode (clap-enforced).
        #[arg(long, requires = "no_perf_mode", help = ktstr::cli::CPU_CAP_HELP)]
        cpu_cap: Option<usize>,
    },
}
// `cargo ktstr model <verb>`: manage the model artifact cache.
#[derive(Subcommand)]
enum ModelCommand {
    Fetch,
    Status,
    Clean,
}
// `cargo ktstr stats <verb>`. Comments are `//` (never `///`) so clap help
// output is unchanged.
#[allow(clippy::large_enum_variant)]
#[derive(Subcommand)]
enum StatsCommand {
    List,
    ListMetrics {
        #[arg(long)]
        json: bool,
    },
    ListValues {
        #[arg(long)]
        json: bool,
        #[arg(long)]
        dir: Option<std::path::PathBuf>,
    },
    // Show the recorded host info for a single run.
    ShowHost {
        #[arg(long)]
        run: String,
        #[arg(long)]
        dir: Option<std::path::PathBuf>,
    },
    ExplainSidecar {
        #[arg(long)]
        run: String,
        #[arg(long)]
        dir: Option<std::path::PathBuf>,
        #[arg(long)]
        json: bool,
    },
    // Compare two partitions (A vs B) of recorded runs. Every filter
    // dimension has a shared form plus `a-`/`b-` side-specific overrides;
    // a side's values win over the shared ones (see BuildCompareFilters).
    Compare {
        // Metric filter expression.
        #[arg(short = 'E', long)]
        filter: Option<String>,
        // Uniform threshold, mutually exclusive with a policy file.
        #[arg(long, conflicts_with = "policy")]
        threshold: Option<f64>,
        #[arg(long, conflicts_with = "threshold")]
        policy: Option<std::path::PathBuf>,
        #[arg(long)]
        dir: Option<std::path::PathBuf>,
        // --- shared filters (apply to both sides unless overridden) ---
        #[arg(long, action = ArgAction::Append)]
        kernel: Vec<String>,
        #[arg(long = "project-commit", action = ArgAction::Append)]
        project_commit: Vec<String>,
        #[arg(long, action = ArgAction::Append)]
        kernel_commit: Vec<String>,
        #[arg(long, action = ArgAction::Append)]
        scheduler: Vec<String>,
        #[arg(long, action = ArgAction::Append)]
        topology: Vec<String>,
        #[arg(long = "work-type", action = ArgAction::Append)]
        work_type: Vec<String>,
        #[arg(long = "run-source", action = ArgAction::Append)]
        run_source: Vec<String>,
        #[arg(long = "flag")]
        flags: Vec<String>,
        // --- side A overrides ---
        #[arg(long = "a-kernel", action = ArgAction::Append)]
        a_kernel: Vec<String>,
        #[arg(long = "a-project-commit", action = ArgAction::Append)]
        a_project_commit: Vec<String>,
        #[arg(long = "a-kernel-commit", action = ArgAction::Append)]
        a_kernel_commit: Vec<String>,
        #[arg(long = "a-run-source", action = ArgAction::Append)]
        a_run_source: Vec<String>,
        #[arg(long = "a-scheduler", action = ArgAction::Append)]
        a_scheduler: Vec<String>,
        #[arg(long = "a-topology", action = ArgAction::Append)]
        a_topology: Vec<String>,
        #[arg(long = "a-work-type", action = ArgAction::Append)]
        a_work_type: Vec<String>,
        #[arg(long = "a-flag")]
        a_flags: Vec<String>,
        // --- side B overrides ---
        #[arg(long = "b-kernel", action = ArgAction::Append)]
        b_kernel: Vec<String>,
        #[arg(long = "b-project-commit", action = ArgAction::Append)]
        b_project_commit: Vec<String>,
        #[arg(long = "b-kernel-commit", action = ArgAction::Append)]
        b_kernel_commit: Vec<String>,
        #[arg(long = "b-run-source", action = ArgAction::Append)]
        b_run_source: Vec<String>,
        #[arg(long = "b-scheduler", action = ArgAction::Append)]
        b_scheduler: Vec<String>,
        #[arg(long = "b-topology", action = ArgAction::Append)]
        b_topology: Vec<String>,
        #[arg(long = "b-work-type", action = ArgAction::Append)]
        b_work_type: Vec<String>,
        #[arg(long = "b-flag")]
        b_flags: Vec<String>,
        // NOTE(review): forwarded to cli::compare_partitions — presumably
        // disables averaging across runs; confirm against that function.
        #[arg(long = "no-average")]
        no_average: bool,
    },
}
// Resolve a --kernel path spec to its cache entry, reporting any cache hit
// on stderr. Returns the resolved kernel dir plus a dirty-tree flag.
fn resolve_path_kernel(p: &Path, raw_input: &str) -> Result<(PathBuf, bool), String> {
    let outcome = match cli::resolve_kernel_dir_to_entry(p, "cargo ktstr", None) {
        Ok(outcome) => outcome,
        Err(e) => {
            // Error text carries the raw user input and the standard hint.
            return Err(format!(
                "--kernel {raw_input}: {e:#}. {hint}",
                hint = ktstr::KTSTR_KERNEL_HINT,
            ));
        }
    };
    if let Some(hit) = outcome.cache_hit {
        let key = hit.cache_key;
        let age = format_built_age(&hit.built_at);
        eprintln!("cargo ktstr: cache hit for {raw_input} ({key}{age})");
    }
    Ok((outcome.dir, outcome.is_dirty))
}
// Render ", built <duration> ago" from an RFC 3339 timestamp. Any parse
// failure or a timestamp in the future yields an empty suffix instead of
// an error.
fn format_built_age(built_at: &str) -> String {
    match humantime::parse_rfc3339(built_at) {
        Ok(parsed) => match std::time::SystemTime::now().duration_since(parsed) {
            Ok(elapsed) => {
                // Truncate to whole seconds so the rendered duration is short.
                let whole_secs = std::time::Duration::from_secs(elapsed.as_secs());
                format!(", built {} ago", humantime::format_duration(whole_secs))
            }
            Err(_) => String::new(),
        },
        Err(_) => String::new(),
    }
}
// Prefer the canonical form of a cache directory; if canonicalization
// fails (e.g. the path does not exist), fall back to the path as given.
fn canonicalize_cache_dir(cache_dir: PathBuf) -> PathBuf {
    match std::fs::canonicalize(&cache_dir) {
        Ok(canonical) => canonical,
        Err(_) => cache_dir,
    }
}
// Resolve a single parsed KernelId to a (label, kernel-dir) pair.
//
// Range is deliberately rejected here: callers must expand it first (see
// `resolve_kernel_set`) and call this once per expanded version.
fn resolve_one(id: ktstr::kernel_path::KernelId) -> Result<(String, PathBuf), String> {
    use ktstr::kernel_path::KernelId;
    match id {
        KernelId::Path(p) => {
            let raw_input = p.display().to_string();
            // Canonicalize only to derive a stable label; resolution itself
            // still uses the path as supplied.
            let canon_input = std::fs::canonicalize(&p).map_err(|e| {
                format!(
                    "--kernel {}: path does not exist or cannot be \
                     canonicalized ({e:#}). {hint}",
                    p.display(),
                    hint = ktstr::KTSTR_KERNEL_HINT,
                )
            })?;
            let base_label = path_kernel_label(&canon_input);
            let (dir, is_dirty) = resolve_path_kernel(&p, &raw_input)?;
            // Dirty working trees get a "_dirty" label suffix.
            let label = decorate_path_label_for_dirty(&base_label, is_dirty);
            Ok((label, dir))
        }
        KernelId::Version(ref ver) => {
            let cache_dir = ktstr::cli::resolve_cached_kernel(&id, "cargo ktstr")
                .map_err(|e| format!("{e:#}"))?;
            let dir = canonicalize_cache_dir(cache_dir);
            Ok((ver.clone(), dir))
        }
        KernelId::CacheKey(ref key) => {
            let cache_dir = ktstr::cli::resolve_cached_kernel(&id, "cargo ktstr")
                .map_err(|e| format!("{e:#}"))?;
            let dir = canonicalize_cache_dir(cache_dir);
            // Strip the internal key down to a human-facing version label.
            let label = cache_key_to_version_label(key).to_string();
            Ok((label, dir))
        }
        KernelId::Git {
            ref url,
            ref git_ref,
        } => {
            let cache_dir = ktstr::cli::resolve_git_kernel(url, git_ref, "cargo ktstr")
                .map_err(|e| format!("resolve git+{url}#{git_ref}: {e:#}"))?;
            let dir = canonicalize_cache_dir(cache_dir);
            let label = git_kernel_label(url, git_ref);
            Ok((label, dir))
        }
        KernelId::Range { start, end } => {
            Err(format!(
                "internal: resolve_one called with Range {start}..{end}; \
                 caller must expand Range via `expand_kernel_range` and \
                 call `resolve_one` per version"
            ))
        }
    }
}
// Resolve every --kernel spec (in parallel, bounded by the configured
// kernel parallelism) into (label, dir) pairs. Empty/whitespace specs are
// skipped, Range specs are expanded into individual versions, and the
// result is de-duplicated and checked for sanitized-label collisions.
fn resolve_kernel_set(specs: &[String]) -> Result<Vec<(String, PathBuf)>, String> {
    use ktstr::kernel_path::KernelId;
    use rayon::iter::{IntoParallelIterator, ParallelIterator};
    // Catch label collisions cheaply before any download/build work starts.
    preflight_collision_check(specs)?;
    let max_threads = ktstr::cli::resolve_kernel_parallelism();
    // If the bounded pool can't be built, fall back to resolving on the
    // caller's thread (rayon's global pool) below.
    let bounded_pool = rayon::ThreadPoolBuilder::new()
        .num_threads(max_threads)
        .build()
        .ok();
    // Emit a progress line per successfully resolved kernel.
    let resolve_one_with_progress = |id: KernelId| -> Result<(String, PathBuf), String> {
        let result = resolve_one(id);
        if let Ok((label, _)) = &result {
            eprintln!("cargo ktstr: resolved kernel {label:?}");
        }
        result
    };
    let resolve_in_pool = || -> Result<Vec<(String, PathBuf)>, String> {
        specs
            .into_par_iter()
            .filter_map(|raw| {
                let trimmed = raw.trim();
                if trimmed.is_empty() {
                    None
                } else {
                    Some(trimmed.to_string())
                }
            })
            .flat_map_iter(|trimmed| {
                let id = KernelId::parse(&trimmed);
                if let Err(e) = id.validate() {
                    return vec![Err(format!("--kernel {id}: {e}"))].into_iter();
                }
                match id {
                    // A Range contributes one resolution per expanded version.
                    KernelId::Range { start, end } => {
                        match ktstr::cli::expand_kernel_range(&start, &end, "cargo ktstr") {
                            Ok(versions) => versions
                                .into_iter()
                                .map(|ver| {
                                    resolve_one_with_progress(KernelId::Version(ver.clone()))
                                        .map_err(|e| format!("resolve kernel {ver}: {e}"))
                                })
                                .collect::<Vec<_>>()
                                .into_iter(),
                            Err(e) => vec![Err(format!("{e:#}"))].into_iter(),
                        }
                    }
                    other => vec![resolve_one_with_progress(other)].into_iter(),
                }
            })
            // Short-circuits on the first resolution error.
            .collect::<Result<Vec<_>, _>>()
    };
    let resolved: Vec<(String, PathBuf)> = match bounded_pool {
        Some(pool) => pool.install(resolve_in_pool)?,
        None => resolve_in_pool()?,
    };
    let resolved = dedupe_resolved(resolved);
    detect_label_collisions(&resolved)?;
    Ok(resolved)
}
// Pre-resolution collision check over the labels that can be computed
// without touching disk or network (Version / CacheKey / Git). Path and
// Range labels are only known post-resolve and are checked later by
// `detect_label_collisions`.
fn preflight_collision_check(specs: &[String]) -> Result<(), String> {
    use ktstr::kernel_path::KernelId;
    // sanitized label -> original label that produced it.
    let mut preflight: std::collections::HashMap<String, String> = std::collections::HashMap::new();
    for raw in specs {
        let trimmed = raw.trim();
        if trimmed.is_empty() {
            continue;
        }
        let id = KernelId::parse(trimmed);
        if let Err(e) = id.validate() {
            return Err(format!("--kernel {id}: {e}"));
        }
        let label: Option<String> = match &id {
            KernelId::Version(v) => Some(v.clone()),
            KernelId::CacheKey(k) => Some(cache_key_to_version_label(k).to_string()),
            KernelId::Git { url, git_ref } => Some(git_kernel_label(url, git_ref)),
            KernelId::Path(_) | KernelId::Range { .. } => None,
        };
        if let Some(label) = label {
            let sanitized = ktstr::test_support::sanitize_kernel_label(&label);
            // Identical labels are fine (deduped later); only *different*
            // labels sharing a sanitized form collide.
            if let Some(prior) = preflight.insert(sanitized.clone(), label.clone())
                && prior != label
            {
                return Err(format!(
                    "--kernel: pre-flight check found collision before any \
                     download or build started — labels {prior:?} and {label:?} \
                     both sanitize to {sanitized:?}, which the nextest \
                     test-name suffix cannot disambiguate. Spell each \
                     --kernel value distinctly so its sanitized form is \
                     unique. (Path and Range specs are checked post-resolve.)"
                ));
            }
        }
    }
    Ok(())
}
// Drop exact (label, dir) duplicates while keeping first-seen order.
fn dedupe_resolved(mut resolved: Vec<(String, PathBuf)>) -> Vec<(String, PathBuf)> {
    let mut seen: std::collections::HashSet<(String, PathBuf)> =
        std::collections::HashSet::with_capacity(resolved.len());
    // retain keeps the first occurrence (insert returns true once per key)
    // and preserves relative order.
    resolved.retain(|entry| seen.insert(entry.clone()));
    resolved
}
// Reject the resolved set when two different labels sanitize to the same
// string — the nextest test-name suffix could not tell them apart.
fn detect_label_collisions(resolved: &[(String, PathBuf)]) -> Result<(), String> {
    let mut by_sanitized: std::collections::HashMap<String, &str> =
        std::collections::HashMap::with_capacity(resolved.len());
    for (label, _) in resolved {
        let sanitized = ktstr::test_support::sanitize_kernel_label(label);
        let Some(prior) = by_sanitized.insert(sanitized.clone(), label.as_str()) else {
            continue;
        };
        // The set is already deduped, so any repeat sanitized form means
        // two distinct labels collided.
        return Err(format!(
            "--kernel: labels {prior:?} and {label:?} both sanitize to {sanitized:?} — \
             the nextest test-name suffix cannot disambiguate them. \
             Spell each --kernel value distinctly so its sanitized form is unique."
        ));
    }
    Ok(())
}
// Derive a stable label for a path-spec kernel: the directory basename
// plus a 24-bit CRC of the full path to disambiguate same-named dirs.
fn path_kernel_label(dir: &Path) -> String {
    let basename = dir
        .file_name()
        .and_then(|name| name.to_str())
        .unwrap_or("kernel");
    let full_path = dir.display().to_string();
    let hash24 = crc32fast::hash(full_path.as_bytes()) & 0x00ff_ffff;
    format!("path_{basename}_{hash24:06x}")
}
// Append "_dirty" to a path-kernel label when the source tree was dirty,
// so runs against uncommitted changes are attributable.
fn decorate_path_label_for_dirty(base_label: &str, is_dirty: bool) -> String {
    if !is_dirty {
        return base_label.to_string();
    }
    format!("{base_label}_dirty")
}
// Map an internal cache key back to a human-facing version label.
//
// "local" and "local-<discriminator>-..." keys become "local" /
// "local_<first-6-chars>" ("unknown" is kept whole); tarball/git keys keep
// everything before the tag; anything else passes through unchanged.
fn cache_key_to_version_label(key: &str) -> std::borrow::Cow<'_, str> {
    use std::borrow::Cow;
    if key == "local" {
        return Cow::Borrowed("local");
    }
    if let Some(rest) = key.strip_prefix("local-") {
        let discriminator = rest.split('-').next().unwrap_or("");
        if discriminator.is_empty() {
            return Cow::Borrowed("local");
        }
        let suffix: String = match discriminator {
            "unknown" => "unknown".to_string(),
            other => other.chars().take(6).collect(),
        };
        return Cow::Owned(format!("local_{suffix}"));
    }
    // Tag order matters: "-tarball-" is searched before "-git-", matching
    // the original precedence when both substrings appear in one key.
    for tag in ["-tarball-", "-git-"] {
        if let Some(prefix_end) = key.find(tag) {
            return Cow::Borrowed(&key[..prefix_end]);
        }
    }
    Cow::Borrowed(key)
}
// Build "git_<owner>_<repo>_<ref>" from a git URL (owner omitted when the
// URL carries no owner segment).
fn git_kernel_label(url: &str, git_ref: &str) -> String {
    // Drop "scheme://" if present.
    let after_scheme = match url.split_once("://") {
        Some((_, rest)) => rest,
        None => url,
    };
    // Drop the host (everything up to the first '/').
    let path = match after_scheme.split_once('/') {
        Some((_, rest)) => rest,
        None => after_scheme,
    };
    let trimmed = path.trim_start_matches('/').trim_end_matches('/');
    let trimmed = trimmed.strip_suffix(".git").unwrap_or(trimmed);
    // Last segment is the repo; second-to-last (if any) is the owner.
    let mut segments: Vec<&str> = trimmed.split('/').filter(|s| !s.is_empty()).collect();
    let repo = segments.pop().unwrap_or("repo");
    let owner = segments.pop().unwrap_or("");
    match owner {
        "" => format!("git_{repo}_{git_ref}"),
        _ => format!("git_{owner}_{repo}_{git_ref}"),
    }
}
// Encode (label, dir) pairs as "label=dir;label=dir;..." for the
// KTSTR_KERNEL_LIST env var. Labels are validated first (for both `;` and
// `=`), then paths (for `;`), so label problems are always reported before
// path problems — matching the original error precedence.
fn encode_kernel_list(resolved: &[(String, PathBuf)]) -> Result<String, String> {
    for (label, _) in resolved {
        if label.contains(';') {
            return Err(format!(
                "kernel label {label:?} contains a `;`; \
                 KTSTR_KERNEL_LIST uses `;` as the entry separator. \
                 The label-emission path must produce `;`-free identifiers — \
                 if a producer is emitting this label, fix the producer to \
                 sanitize/strip `;` from its output."
            ));
        }
        if label.contains('=') {
            return Err(format!(
                "kernel label {label:?} contains a `=`; \
                 KTSTR_KERNEL_LIST uses `=` to separate label from path within an entry. \
                 The label-emission path must produce `=`-free identifiers — \
                 if a producer is emitting this label, fix the producer to \
                 sanitize/strip `=` from its output."
            ));
        }
    }
    for (label, dir) in resolved {
        let path = dir.display().to_string();
        if path.contains(';') {
            return Err(format!(
                "kernel directory path for {label:?} contains a `;` ({path:?}); \
                 KTSTR_KERNEL_LIST uses `;` as the entry separator and cannot encode \
                 such paths. Move or symlink the kernel cache to a path without `;`."
            ));
        }
    }
    // All entries are encodable; join them with the separator.
    let encoded = resolved
        .iter()
        .map(|(label, dir)| format!("{label}={}", dir.display()))
        .collect::<Vec<_>>()
        .join(";");
    Ok(encoded)
}
// Decide whether to inject an LLVM_PROFILE_FILE pattern: only for the
// plain `nextest run` argv, and only when the caller has not already set
// the variable themselves.
fn profraw_inject_for(
    sub_argv: &[&str],
    existing_env: Option<std::ffi::OsString>,
) -> Option<PathBuf> {
    let is_plain_test = sub_argv == TEST_SUB_ARGV;
    if !is_plain_test || existing_env.is_some() {
        return None;
    }
    // NOTE(review): %p / %m look like LLVM profile-pattern placeholders
    // (pid / binary signature) — confirm against llvm profiling docs.
    let pattern = ktstr::test_support::profraw_target_dir().join("default-%p-%m.profraw");
    Some(pattern)
}
// Build and exec the wrapped cargo subcommand. On success this never
// returns — the current process image is replaced via `exec`. Kernel specs
// are resolved here and exported through env vars for the test harness.
fn run_cargo_sub(
    sub_argv: &[&str],
    label: &str,
    kernel: Vec<String>,
    no_perf_mode: bool,
    release: bool,
    args: Vec<String>,
) -> Result<(), String> {
    let mut cmd = Command::new("cargo");
    cmd.args(sub_argv);
    if release {
        cmd.args(["--cargo-profile", "release"]);
    }
    cmd.args(&args);
    if no_perf_mode {
        cmd.env("KTSTR_NO_PERF_MODE", "1");
    }
    // Inject a profraw pattern only for plain `nextest run` and only when
    // the caller hasn't already set LLVM_PROFILE_FILE.
    if let Some(pat) = profraw_inject_for(sub_argv, std::env::var_os("LLVM_PROFILE_FILE")) {
        cmd.env("LLVM_PROFILE_FILE", pat);
    }
    if !kernel.is_empty() {
        let resolved = resolve_kernel_set(&kernel)?;
        if resolved.is_empty() {
            return Err(
                "--kernel: every supplied value parsed to empty / whitespace; \
                 omit the flag for auto-discovery, or supply a kernel \
                 identifier"
                    .to_string(),
            );
        }
        // The first kernel always goes in the single-kernel env var; the
        // full list is only exported when there is more than one.
        let first_dir = &resolved[0].1;
        eprintln!("cargo ktstr: using kernel {}", first_dir.display());
        cmd.env(ktstr::KTSTR_KERNEL_ENV, first_dir);
        if resolved.len() > 1 {
            let encoded = encode_kernel_list(&resolved)?;
            eprintln!(
                "cargo ktstr: fanning gauntlet across {n} kernels",
                n = resolved.len(),
            );
            cmd.env(ktstr::KTSTR_KERNEL_LIST_ENV, encoded);
        }
    }
    eprintln!("cargo ktstr: running {label}");
    // `exec` only returns on failure.
    let err = cmd.exec();
    Err(format!("exec cargo {}: {err}", sub_argv.join(" ")))
}
// Cargo argv prefixes for each wrapped subcommand.
const TEST_SUB_ARGV: &[&str] = &["nextest", "run"];
const COVERAGE_SUB_ARGV: &[&str] = &["llvm-cov", "nextest"];
const LLVM_COV_SUB_ARGV: &[&str] = &["llvm-cov"];
// `cargo ktstr test` — exec `cargo nextest run` with kernel env wired up.
fn run_test(
    kernel: Vec<String>,
    no_perf_mode: bool,
    release: bool,
    args: Vec<String>,
) -> Result<(), String> {
    run_cargo_sub(TEST_SUB_ARGV, "tests", kernel, no_perf_mode, release, args)
}
// `cargo ktstr coverage` — exec `cargo llvm-cov nextest`.
fn run_coverage(
    kernel: Vec<String>,
    no_perf_mode: bool,
    release: bool,
    args: Vec<String>,
) -> Result<(), String> {
    run_cargo_sub(
        COVERAGE_SUB_ARGV,
        "coverage",
        kernel,
        no_perf_mode,
        release,
        args,
    )
}
// `cargo ktstr llvm-cov` — exec `cargo llvm-cov` directly. This verb has
// no --release flag, so `release` is hard-wired to false.
fn run_llvm_cov(kernel: Vec<String>, no_perf_mode: bool, args: Vec<String>) -> Result<(), String> {
    run_cargo_sub(
        LLVM_COV_SUB_ARGV,
        "llvm-cov",
        kernel,
        no_perf_mode,
        false,
        args,
    )
}
// Dispatch for `cargo ktstr stats`; `None` prints the default report.
fn run_stats(command: &Option<StatsCommand>) -> Result<(), String> {
    match command {
        None => {
            // `print_stats_report` returning None means nothing to show.
            if let Some(output) = cli::print_stats_report() {
                print!("{output}");
            }
            Ok(())
        }
        Some(StatsCommand::List) => cli::list_runs().map_err(|e| format!("{e:#}")),
        Some(StatsCommand::ListMetrics { json }) => match cli::list_metrics(*json) {
            Ok(s) => {
                print!("{s}");
                Ok(())
            }
            Err(e) => Err(format!("{e:#}")),
        },
        Some(StatsCommand::ListValues { json, dir }) => {
            match cli::list_values(*json, dir.as_deref()) {
                Ok(s) => {
                    print!("{s}");
                    Ok(())
                }
                Err(e) => Err(format!("{e:#}")),
            }
        }
        Some(StatsCommand::ShowHost { run, dir }) => {
            match cli::show_run_host(run, dir.as_deref()) {
                Ok(s) => {
                    print!("{s}");
                    Ok(())
                }
                Err(e) => Err(format!("{e:#}")),
            }
        }
        Some(StatsCommand::ExplainSidecar { run, dir, json }) => {
            match cli::explain_sidecar(run, dir.as_deref(), *json) {
                Ok(s) => {
                    print!("{s}");
                    Ok(())
                }
                Err(e) => Err(format!("{e:#}")),
            }
        }
        Some(StatsCommand::Compare {
            filter,
            threshold,
            policy,
            dir,
            kernel,
            project_commit,
            kernel_commit,
            run_source,
            scheduler,
            topology,
            work_type,
            flags,
            a_kernel,
            a_project_commit,
            a_kernel_commit,
            a_run_source,
            a_scheduler,
            a_topology,
            a_work_type,
            a_flags,
            b_kernel,
            b_project_commit,
            b_kernel_commit,
            b_run_source,
            b_scheduler,
            b_topology,
            b_work_type,
            b_flags,
            no_average,
        }) => {
            // --threshold and --policy are mutually exclusive (enforced by
            // clap's `conflicts_with`); neither given means default policy.
            let resolved_policy = match (threshold, policy.as_ref()) {
                (Some(t), None) => {
                    let p = ktstr::cli::ComparisonPolicy::uniform(*t);
                    p.validate().map_err(|e| format!("{e:#}"))?;
                    p
                }
                (None, Some(path)) => {
                    ktstr::cli::ComparisonPolicy::load_json(path).map_err(|e| format!("{e:#}"))?
                }
                (None, None) => ktstr::cli::ComparisonPolicy::default(),
                (Some(_), Some(_)) => {
                    unreachable!(
                        "clap `conflicts_with` on --threshold / --policy \
                         must enforce mutual exclusion at parse time",
                    );
                }
            };
            // Repos used to expand commit revspecs into short hashes: the
            // project repo is discovered from the CWD; the kernel repo from
            // the KTSTR kernel env (recovering the local source tree when
            // the env points into the cache). Either may be absent.
            let project_repo = std::env::current_dir()
                .ok()
                .and_then(|cwd| gix::discover(cwd).ok());
            let kernel_repo = ktstr::ktstr_kernel_env()
                .map(std::path::PathBuf::from)
                .and_then(|p| {
                    let target = ktstr::cache::recover_local_source_tree(&p).unwrap_or(p);
                    gix::open(target).ok()
                });
            let project_commit =
                resolve_commit_specs(project_repo.as_ref(), project_commit, "project-commit");
            let kernel_commit =
                resolve_commit_specs(kernel_repo.as_ref(), kernel_commit, "kernel-commit");
            let a_project_commit =
                resolve_commit_specs(project_repo.as_ref(), a_project_commit, "a-project-commit");
            let a_kernel_commit =
                resolve_commit_specs(kernel_repo.as_ref(), a_kernel_commit, "a-kernel-commit");
            let b_project_commit =
                resolve_commit_specs(project_repo.as_ref(), b_project_commit, "b-project-commit");
            let b_kernel_commit =
                resolve_commit_specs(kernel_repo.as_ref(), b_kernel_commit, "b-kernel-commit");
            let build = BuildCompareFilters {
                shared_kernel: kernel.clone(),
                shared_project_commit: project_commit,
                shared_kernel_commit: kernel_commit,
                shared_run_source: run_source.clone(),
                shared_scheduler: scheduler.clone(),
                shared_topology: topology.clone(),
                shared_work_type: work_type.clone(),
                shared_flags: flags.clone(),
                a_kernel: a_kernel.clone(),
                a_project_commit,
                a_kernel_commit,
                a_run_source: a_run_source.clone(),
                a_scheduler: a_scheduler.clone(),
                a_topology: a_topology.clone(),
                a_work_type: a_work_type.clone(),
                a_flags: a_flags.clone(),
                b_kernel: b_kernel.clone(),
                b_project_commit,
                b_kernel_commit,
                b_run_source: b_run_source.clone(),
                b_scheduler: b_scheduler.clone(),
                b_topology: b_topology.clone(),
                b_work_type: b_work_type.clone(),
                b_flags: b_flags.clone(),
            };
            let (filter_a, filter_b) = build.build();
            // A non-zero comparison result becomes the process exit code
            // rather than an error message.
            let exit = cli::compare_partitions(
                &filter_a,
                &filter_b,
                filter.as_deref(),
                &resolved_policy,
                dir.as_deref(),
                *no_average,
            )
            .map_err(|e| format!("{e:#}"))?;
            if exit != 0 {
                std::process::exit(exit);
            }
            Ok(())
        }
    }
}
// True when the input reads as a git hash, optionally "-dirty"-suffixed:
// 7..=40 hex characters (short-hash through full SHA-1 length).
fn looks_like_literal_hash(input: &str) -> bool {
    let core = input.strip_suffix("-dirty").unwrap_or(input);
    matches!(core.len(), 7..=40) && core.chars().all(|c| c.is_ascii_hexdigit())
}
// Expand user-supplied commit specs into short (7-char) hashes against the
// given repo. With no repo, specs pass through untouched. Specs that fail
// to resolve are kept as literal filters (with a stderr note unless they
// already look like a hash); a resolved HEAD gets a "-dirty" suffix when
// the work tree is dirty.
fn resolve_commit_specs(
    repo: Option<&gix::Repository>,
    raw: &[String],
    flag_name: &str,
) -> Vec<String> {
    let Some(repo) = repo else {
        return raw.to_vec();
    };
    let head_oid: Option<gix::ObjectId> = repo.head_id().ok().map(|id| id.detach());
    // Dirty state is only checked when HEAD resolved at all.
    let head_dirty: bool = head_oid
        .as_ref()
        .and_then(|_| ktstr::test_support::repo_is_dirty(repo))
        .unwrap_or(false);
    let format_short = |id: gix::ObjectId| -> String {
        let short = id.to_hex_with_len(7).to_string();
        if head_dirty && head_oid == Some(id) {
            format!("{short}-dirty")
        } else {
            short
        }
    };
    let mut out: Vec<String> = Vec::with_capacity(raw.len());
    for input in raw {
        match repo.rev_parse(input.as_str()) {
            Ok(spec) => match spec.detach() {
                gix::revision::plumbing::Spec::Include(id)
                | gix::revision::plumbing::Spec::ExcludeParents(id) => {
                    out.push(format_short(id));
                }
                // `from..to` expands to commits reachable from `to` minus
                // those reachable from `from`. Note: expanded range commits
                // do not get the "-dirty" decoration.
                gix::revision::plumbing::Spec::Range { from, to } => {
                    match repo.rev_walk([to]).with_hidden([from]).all() {
                        Ok(walk) => {
                            for info in walk.flatten() {
                                out.push(info.id.to_hex_with_len(7).to_string());
                            }
                        }
                        Err(err) => {
                            eprintln!(
                                "cargo ktstr: --{flag_name} range '{input}' could \
                                 not be expanded: {err}; using as literal filter",
                            );
                            out.push(input.clone());
                        }
                    }
                }
                gix::revision::plumbing::Spec::Exclude(_)
                | gix::revision::plumbing::Spec::Merge { .. }
                | gix::revision::plumbing::Spec::IncludeOnlyParents(_) => {
                    eprintln!(
                        "cargo ktstr: --{flag_name} '{input}' uses an unsupported \
                         revspec form (Exclude/Merge/IncludeOnlyParents); using \
                         as literal filter",
                    );
                    out.push(input.clone());
                }
            },
            Err(_) => {
                // Unresolvable spec: keep it as a literal filter, staying
                // quiet when it already looks like a hash.
                if !looks_like_literal_hash(input) {
                    eprintln!(
                        "cargo ktstr: --{flag_name} '{input}' did not resolve as \
                         a revspec; using as literal filter",
                    );
                }
                out.push(input.clone());
            }
        }
    }
    out
}
// Raw filter inputs for `stats compare`: one shared set applied to both
// sides plus per-side (A/B) overrides. `build` turns these into the two
// RowFilters, preferring a side's values over the shared ones per field.
#[derive(Debug, Clone, Default)]
struct BuildCompareFilters {
    // Shared filters (used by a side when its override list is empty).
    shared_kernel: Vec<String>,
    shared_project_commit: Vec<String>,
    shared_kernel_commit: Vec<String>,
    shared_run_source: Vec<String>,
    shared_scheduler: Vec<String>,
    shared_topology: Vec<String>,
    shared_work_type: Vec<String>,
    shared_flags: Vec<String>,
    // Side A overrides.
    a_kernel: Vec<String>,
    a_project_commit: Vec<String>,
    a_kernel_commit: Vec<String>,
    a_run_source: Vec<String>,
    a_scheduler: Vec<String>,
    a_topology: Vec<String>,
    a_work_type: Vec<String>,
    a_flags: Vec<String>,
    // Side B overrides.
    b_kernel: Vec<String>,
    b_project_commit: Vec<String>,
    b_kernel_commit: Vec<String>,
    b_run_source: Vec<String>,
    b_scheduler: Vec<String>,
    b_topology: Vec<String>,
    b_work_type: Vec<String>,
    b_flags: Vec<String>,
}
impl BuildCompareFilters {
    // Materialize the A-side and B-side row filters. For each dimension a
    // side's own values win when non-empty; otherwise the shared values
    // are used.
    fn build(&self) -> (ktstr::cli::RowFilter, ktstr::cli::RowFilter) {
        // Override rule shared by every field below.
        fn side_or_shared(side: &[String], shared: &[String]) -> Vec<String> {
            if side.is_empty() {
                shared.to_vec()
            } else {
                side.to_vec()
            }
        }
        let filter_a = ktstr::cli::RowFilter {
            kernels: side_or_shared(&self.a_kernel, &self.shared_kernel),
            project_commits: side_or_shared(&self.a_project_commit, &self.shared_project_commit),
            kernel_commits: side_or_shared(&self.a_kernel_commit, &self.shared_kernel_commit),
            run_sources: side_or_shared(&self.a_run_source, &self.shared_run_source),
            schedulers: side_or_shared(&self.a_scheduler, &self.shared_scheduler),
            topologies: side_or_shared(&self.a_topology, &self.shared_topology),
            work_types: side_or_shared(&self.a_work_type, &self.shared_work_type),
            flags: side_or_shared(&self.a_flags, &self.shared_flags),
        };
        let filter_b = ktstr::cli::RowFilter {
            kernels: side_or_shared(&self.b_kernel, &self.shared_kernel),
            project_commits: side_or_shared(&self.b_project_commit, &self.shared_project_commit),
            kernel_commits: side_or_shared(&self.b_kernel_commit, &self.shared_kernel_commit),
            run_sources: side_or_shared(&self.b_run_source, &self.shared_run_source),
            schedulers: side_or_shared(&self.b_scheduler, &self.shared_scheduler),
            topologies: side_or_shared(&self.b_topology, &self.shared_topology),
            work_types: side_or_shared(&self.b_work_type, &self.shared_work_type),
            flags: side_or_shared(&self.b_flags, &self.shared_flags),
        };
        (filter_a, filter_b)
    }
}
// `kernel build` entry point. A bare --kernel version (no --source/--git)
// may be a Range (`start..end`), which is expanded and built one version
// at a time; every other spec shape delegates to `kernel_build_one`.
fn kernel_build(
    version: Option<String>,
    source: Option<PathBuf>,
    git: Option<String>,
    git_ref: Option<String>,
    force: bool,
    clean: bool,
    cpu_cap: Option<usize>,
) -> Result<(), String> {
    if source.is_none()
        && git.is_none()
        && let Some(ref v) = version
    {
        use ktstr::kernel_path::KernelId;
        let id = KernelId::parse(v);
        id.validate().map_err(|e| format!("--kernel {id}: {e}"))?;
        if let KernelId::Range { start, end } = id {
            let versions = ktstr::cli::expand_kernel_range(&start, &end, "cargo ktstr")
                .map_err(|e| format!("{e:#}"))?;
            let total = versions.len();
            // Keep going past per-version failures; summarize at the end.
            let mut failures: Vec<(String, String)> = Vec::new();
            for (i, ver) in versions.iter().enumerate() {
                eprintln!("cargo ktstr: [{}/{total}] kernel build {ver}", i + 1);
                if let Err(e) =
                    kernel_build_one(Some(ver.clone()), None, None, None, force, clean, cpu_cap)
                {
                    eprintln!("cargo ktstr: {ver}: {e}");
                    failures.push((ver.clone(), e));
                }
            }
            if failures.is_empty() {
                Ok(())
            } else {
                Err(format!(
                    "kernel build range {start}..{end}: {failed}/{total} \
                     version(s) failed: {names}",
                    start = start,
                    end = end,
                    failed = failures.len(),
                    names = failures
                        .iter()
                        .map(|(v, _)| v.as_str())
                        .collect::<Vec<_>>()
                        .join(", "),
                ))
            }
        } else {
            kernel_build_one(version, source, git, git_ref, force, clean, cpu_cap)
        }
    } else {
        kernel_build_one(version, source, git, git_ref, force, clean, cpu_cap)
    }
}
// Build (or find cached) one kernel from a version spec, a local source
// tree, or a git URL. Returns early when a matching cache entry exists
// and --force is not set; dirty local trees never match the cache.
fn kernel_build_one(
    version: Option<String>,
    source: Option<PathBuf>,
    git: Option<String>,
    git_ref: Option<String>,
    force: bool,
    clean: bool,
    cpu_cap: Option<usize>,
) -> Result<(), String> {
    // --cpu-cap is a resource contract; it is meaningless when the
    // LLC-lock bypass is active, so reject the combination up front.
    if cpu_cap.is_some()
        && std::env::var("KTSTR_BYPASS_LLC_LOCKS")
            .ok()
            .is_some_and(|v| !v.is_empty())
    {
        return Err(
            "--cpu-cap conflicts with KTSTR_BYPASS_LLC_LOCKS=1; unset one of them. \
             --cpu-cap is a resource contract; bypass disables the contract entirely."
                .to_string(),
        );
    }
    let resolved_cap = cli::CpuCap::resolve(cpu_cap).map_err(|e| format!("{e:#}"))?;
    let cache = CacheDir::new().map_err(|e| format!("open cache: {e:#}"))?;
    let tmp_dir = tempfile::TempDir::new().map_err(|e| format!("create temp dir: {e:#}"))?;
    let client = fetch::shared_client();
    // Acquire the source: local tree, git clone, or (default) a release
    // tarball for an explicit / prefix-matched / latest-stable version.
    let acquired = if let Some(ref src_path) = source {
        fetch::local_source(src_path).map_err(|e| format!("{e:#}"))?
    } else if let Some(ref url) = git {
        let ref_name = git_ref.as_deref().expect("clap requires --ref with --git");
        fetch::git_clone(url, ref_name, tmp_dir.path(), "cargo ktstr")
            .map_err(|e| format!("{e:#}"))?
    } else {
        let ver = match version {
            // "6.9"-style prefixes resolve to the newest matching release.
            Some(v) if fetch::is_major_minor_prefix(&v) => {
                fetch::fetch_version_for_prefix(client, &v, "cargo ktstr")
                    .map_err(|e| format!("{e:#}"))?
            }
            Some(v) => v,
            None => fetch::fetch_latest_stable_version(client, "cargo ktstr")
                .map_err(|e| format!("{e:#}"))?,
        };
        let (arch, _) = fetch::arch_info();
        let cache_key = format!("{ver}-tarball-{arch}-kc{}", ktstr::cache_key_suffix());
        // Tarball keys are known before download, so the cache can be
        // consulted before fetching anything.
        if !force && let Some(entry) = cache_lookup(&cache, &cache_key) {
            eprintln!("cargo ktstr: cached kernel found: {}", entry.path.display());
            eprintln!("cargo ktstr: use --force to rebuild");
            return Ok(());
        }
        let sp = cli::Spinner::start("Downloading kernel...");
        let result = fetch::download_tarball(client, &ver, tmp_dir.path(), "cargo ktstr");
        drop(sp);
        result.map_err(|e| format!("{e:#}"))?
    };
    // Source/git cache keys are only known after acquisition; a dirty
    // local tree is always rebuilt.
    if !force
        && (source.is_some() || git.is_some())
        && !acquired.is_dirty
        && let Some(entry) = cache_lookup(&cache, &acquired.cache_key)
    {
        eprintln!("cargo ktstr: cached kernel found: {}", entry.path.display());
        eprintln!("cargo ktstr: use --force to rebuild");
        return Ok(());
    }
    if force {
        // With --force, verify the entry's exclusive lock can be taken
        // before rebuilding (the guard is dropped at the end of this block).
        let _force_check = cache
            .try_acquire_exclusive_lock(&acquired.cache_key)
            .map_err(|e| format!("{e:#}"))?;
    }
    cli::kernel_build_pipeline(
        &acquired,
        &cache,
        "cargo ktstr",
        clean,
        source.is_some(),
        resolved_cap,
    )
    .map_err(|e| format!("{e:#}"))?;
    Ok(())
}
// Thin wrapper fixing the CLI label for cache lookups.
fn cache_lookup(cache: &CacheDir, cache_key: &str) -> Option<CacheEntry> {
    cli::cache_lookup(cache, cache_key, "cargo ktstr")
}
// Kernel resolution policy for subcommands that can boot a raw kernel
// image directly (accept_raw_image = true).
const KERNEL_POLICY: ktstr::cli::KernelResolvePolicy<'static> = ktstr::cli::KernelResolvePolicy {
    accept_raw_image: true,
    cli_label: "cargo ktstr",
};
// Resolve an optional --kernel spec to a bootable image path using
// KERNEL_POLICY (raw images accepted; None triggers auto-discovery).
fn resolve_kernel_image(kernel: Option<&str>) -> Result<PathBuf, String> {
    ktstr::cli::resolve_kernel_image(kernel, &KERNEL_POLICY).map_err(|e| format!("{e:#}"))
}
// Boot an interactive guest shell (or run --exec) on the resolved kernel.
// `std::env::set_var` is unsafe here because the process environment is
// global, non-thread-safe state.
#[allow(clippy::too_many_arguments)]
fn run_shell(
    kernel: Option<String>,
    topology: String,
    include_files: Vec<PathBuf>,
    memory_mb: Option<u32>,
    dmesg: bool,
    exec: Option<String>,
    no_perf_mode: bool,
    cpu_cap: Option<usize>,
) -> Result<(), String> {
    if no_perf_mode {
        unsafe { std::env::set_var("KTSTR_NO_PERF_MODE", "1") };
    }
    if let Some(cap) = cpu_cap {
        // Same contract check as `kernel build`: --cpu-cap and the LLC-lock
        // bypass are mutually exclusive.
        if std::env::var("KTSTR_BYPASS_LLC_LOCKS")
            .ok()
            .is_some_and(|v| !v.is_empty())
        {
            return Err(
                "--cpu-cap conflicts with KTSTR_BYPASS_LLC_LOCKS=1; unset one of them. \
                 --cpu-cap is a resource contract; bypass disables the contract entirely."
                    .to_string(),
            );
        }
        // Validate the cap value before exporting it for the child env.
        cli::CpuCap::new(cap).map_err(|e| format!("{e:#}"))?;
        unsafe { std::env::set_var("KTSTR_CPU_CAP", cap.to_string()) };
    }
    cli::check_kvm().map_err(|e| format!("{e:#}"))?;
    let kernel_path = resolve_kernel_image(kernel.as_deref())?;
    // Topology string is "numa_nodes,llcs,cores,threads".
    let (numa_nodes, llcs, cores, threads) =
        cli::parse_topology_string(&topology).map_err(|e| format!("{e:#}"))?;
    let resolved_includes =
        cli::resolve_include_files(&include_files).map_err(|e| format!("{e:#}"))?;
    // Borrowed (name, path) view expected by ktstr::run_shell.
    let include_refs: Vec<(&str, &Path)> = resolved_includes
        .iter()
        .map(|(a, p)| (a.as_str(), p.as_path()))
        .collect();
    ktstr::run_shell(
        kernel_path,
        numa_nodes,
        llcs,
        cores,
        threads,
        &include_refs,
        memory_mb,
        dmesg,
        exec.as_deref(),
    )
    .map_err(|e| format!("{e:#}"))
}
// Ask a scheduler binary to enumerate its flags via --ktstr-list-flags.
// A scheduler that exits non-zero or prints nothing is treated as
// declaring no flags; only failing to launch the binary, or unparsable
// JSON on stdout, is an error.
fn query_scheduler_flags(
    sched_bin: &Path,
) -> Result<Vec<ktstr::scenario::flags::FlagDeclJson>, String> {
    let output = Command::new(sched_bin)
        .arg("--ktstr-list-flags")
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        .output()
        .map_err(|e| format!("run scheduler --ktstr-list-flags: {e:#}"))?;
    let stdout = String::from_utf8_lossy(&output.stdout);
    let body = stdout.trim();
    if !output.status.success() || body.is_empty() {
        return Ok(Vec::new());
    }
    serde_json::from_str(body).map_err(|e| format!("parse --ktstr-list-flags output: {e:#}"))
}
// Compute the named flag profiles to run. Profiles come from
// `compute_flag_profiles` over all declared flags (with each flag's
// `requires` list supplied via a lookup closure); each profile is named by
// joining its active flags with `+`, or "default" for the empty set.
// Schedulers with more than 31 flags are refused (2^n blowup) — an error
// is printed and no profiles are returned.
fn generate_flag_profiles(
    flags: &[ktstr::scenario::flags::FlagDeclJson],
) -> Vec<(String, Vec<String>)> {
    let n = flags.len();
    if n > 31 {
        eprintln!(
            "cargo ktstr: error: scheduler has {n} flags, power set too large (2^{n}). \
             Use --profiles to select specific profiles."
        );
        return Vec::new();
    }
    let all: Vec<String> = flags.iter().map(|f| f.name.clone()).collect();
    // `requires` lookup by flag name; unknown names yield an empty list.
    let requires_fn = |name: &String| -> Vec<String> {
        flags
            .iter()
            .find(|f| f.name == *name)
            .map(|f| f.requires.clone())
            .unwrap_or_default()
    };
    ktstr::scenario::compute_flag_profiles(&all, requires_fn, &[], &[])
        .into_iter()
        .map(|flag_names| {
            let name = if flag_names.is_empty() {
                "default".to_string()
            } else {
                flag_names.join("+")
            };
            (name, flag_names)
        })
        .collect()
}
// Translate active flag names into the concrete scheduler argv pieces
// declared for each flag; an unknown name aborts with the known flag set
// listed in the error.
fn profile_sched_args(
    active_flags: &[String],
    all_flags: &[ktstr::scenario::flags::FlagDeclJson],
) -> Result<Vec<String>, String> {
    let mut args: Vec<String> = Vec::new();
    for flag_name in active_flags {
        let Some(decl) = all_flags.iter().find(|f| f.name == *flag_name) else {
            let known: Vec<&str> = all_flags.iter().map(|f| f.name.as_str()).collect();
            return Err(format!(
                "unknown flag {flag_name:?} (known: {})",
                known.join(", ")
            ));
        };
        args.extend(decl.args.iter().cloned());
    }
    Ok(args)
}
/// Entry point for `cargo ktstr verifier`.
///
/// Resolves the scheduler binary (built from the `--scheduler` package
/// or taken as-is from `--scheduler-bin`), resolves the kernel image
/// set from `--kernel` (auto-discovering one when the flag is absent),
/// builds the `ktstr` helper binary, then collects and prints verifier
/// stats for each kernel — for the default profile, or per flag
/// profile when `--all-profiles` / `--profiles` is given.
///
/// Returns a human-readable error string; the caller prints it and
/// exits non-zero.
fn run_verifier(
    scheduler: Option<String>,
    scheduler_bin: Option<PathBuf>,
    kernel: Vec<String>,
    raw: bool,
    all_profiles: bool,
    profiles_filter: Vec<String>,
) -> Result<(), String> {
    // Verifier runs need KVM; fail fast before any build work.
    cli::check_kvm().map_err(|e| format!("{e:#}"))?;
    let sched_bin = match (scheduler, scheduler_bin) {
        (Some(package), None) => {
            ktstr::build_and_find_binary(&package).map_err(|e| format!("build scheduler: {e:#}"))?
        }
        (None, Some(path)) => {
            if !path.exists() {
                return Err(format!("scheduler binary not found: {}", path.display()));
            }
            path
        }
        (None, None) => {
            return Err("either --scheduler or --scheduler-bin is required".to_string());
        }
        // clap's `conflicts_with = "scheduler"` on --scheduler-bin rules
        // this combination out before we ever get here.
        (Some(_), Some(_)) => unreachable!(),
    };
    // One (label, image path) pair per kernel; the label is echoed in
    // progress output and section headers below.
    let kernel_paths: Vec<(String, PathBuf)> = if kernel.is_empty() {
        let path = resolve_kernel_image(None)?;
        vec![("auto".to_string(), path)]
    } else {
        let resolved = resolve_kernel_set(&kernel)?;
        if resolved.is_empty() {
            return Err(
                "--kernel: every supplied value parsed to empty / whitespace; \
                omit the flag for auto-discovery, or supply a kernel \
                identifier"
                    .to_string(),
            );
        }
        let mut out: Vec<(String, PathBuf)> = Vec::with_capacity(resolved.len());
        for (label, dir) in resolved {
            let image = ktstr::kernel_path::find_image_in_dir(&dir).ok_or_else(|| {
                format!(
                    "no kernel image found in {} (resolved from --kernel {label})",
                    dir.display()
                )
            })?;
            out.push((label, image));
        }
        out
    };
    let ktstr_bin =
        ktstr::build_and_find_binary("ktstr").map_err(|e| format!("build ktstr: {e:#}"))?;
    let multi_kernel = kernel_paths.len() > 1;
    for (i, (label, kernel_path)) in kernel_paths.iter().enumerate() {
        if multi_kernel {
            // Progress goes to stderr; the section header goes to stdout
            // so it stays with the verifier output when piped.
            eprintln!(
                "cargo ktstr: [kernel {}/{}] {label}",
                i + 1,
                kernel_paths.len(),
            );
            println!("\n=== kernel: {label} ===");
        }
        // Profile mode handles its own collection/printing per profile.
        if all_profiles || !profiles_filter.is_empty() {
            run_verifier_all_profiles(&sched_bin, &ktstr_bin, kernel_path, raw, &profiles_filter)?;
            continue;
        }
        // Default path: single run with no extra scheduler args.
        eprintln!("cargo ktstr: collecting verifier stats");
        let result =
            ktstr::verifier::collect_verifier_output(&sched_bin, &ktstr_bin, kernel_path, &[])
                .map_err(|e| format!("collect verifier output: {e:#}"))?;
        let output = ktstr::verifier::format_verifier_output("verifier", &result, raw);
        print!("{output}");
    }
    Ok(())
}
/// Runs the verifier against `kernel_path` once per scheduler flag
/// profile, printing each profile's output and — when more than one
/// profile actually ran — a cross-profile summary table.
///
/// Profiles come from the scheduler's `--ktstr-list-flags` probe; a
/// scheduler without that probe (or with zero flags) falls back to a
/// single "default" run. A non-empty `profiles_filter` restricts the
/// generated set to the named profiles.
fn run_verifier_all_profiles(
    sched_bin: &Path,
    ktstr_bin: &Path,
    kernel_path: &Path,
    raw: bool,
    profiles_filter: &[String],
) -> Result<(), String> {
    let flags = query_scheduler_flags(sched_bin)?;
    if flags.is_empty() {
        // No probe support: degrade gracefully to one default run.
        eprintln!(
            "cargo ktstr: scheduler does not support --ktstr-list-flags, \
             running with default profile only"
        );
        let result =
            ktstr::verifier::collect_verifier_output(sched_bin, ktstr_bin, kernel_path, &[])
                .map_err(|e| format!("collect verifier output: {e:#}"))?;
        let output = ktstr::verifier::format_verifier_output("default", &result, raw);
        print!("{output}");
        return Ok(());
    }
    let all_profiles = generate_flag_profiles(&flags);
    let profiles: Vec<&(String, Vec<String>)> = if profiles_filter.is_empty() {
        all_profiles.iter().collect()
    } else {
        let filtered: Vec<_> = all_profiles
            .iter()
            .filter(|(name, _)| profiles_filter.iter().any(|f| f == name))
            .collect();
        if filtered.is_empty() {
            return Err(format!(
                "no matching profiles found. Available: {}",
                all_profiles
                    .iter()
                    .map(|(n, _)| n.as_str())
                    .collect::<Vec<_>>()
                    .join(", ")
            ));
        }
        filtered
    };
    let total = profiles.len();
    // Zero profiles despite non-empty flags: either generate_flag_profiles
    // hit its 31-flag power-set cap (it prints an error and returns empty),
    // or profile generation itself produced nothing. Distinguish the two
    // in the error text.
    if total == 0 {
        return Err(if flags.len() > 31 {
            format!(
                "no profiles to verify: power-set generation is capped at \
                31 flags (found {}); use --profiles to select a subset",
                flags.len(),
            )
        } else {
            format!(
                "no profiles to verify: {} flag(s) advertised but profile \
                generation produced 0 profiles — check `requires` \
                dependencies and exclusions for cycles or conflicts",
                flags.len(),
            )
        });
    }
    if total > 32 {
        eprintln!(
            "cargo ktstr: warning: {total} profiles to verify (>32). \
             Use --profiles to select a subset."
        );
    }
    eprintln!(
        "cargo ktstr: verifying {total} profile{}",
        if total == 1 { "" } else { "s" }
    );
    // Per-profile (name, per-program verified-instruction counts) for
    // the summary table printed after all runs.
    let mut summary: Vec<(String, Vec<(String, u32)>)> = Vec::new();
    for (i, (profile_name, active_flags)) in profiles.iter().enumerate() {
        eprintln!(
            "cargo ktstr: [{}/{}] profile: {}",
            i + 1,
            total,
            profile_name
        );
        let extra_args = profile_sched_args(active_flags, &flags)
            .map_err(|e| format!("profile {profile_name}: {e}"))?;
        let result = ktstr::verifier::collect_verifier_output(
            sched_bin,
            ktstr_bin,
            kernel_path,
            &extra_args,
        )
        .map_err(|e| format!("profile {profile_name}: {e:#}"))?;
        let output = ktstr::verifier::format_verifier_output(profile_name, &result, raw);
        print!("{output}");
        let prog_stats: Vec<(String, u32)> = result
            .stats
            .iter()
            .map(|ps| (ps.name.clone(), ps.verified_insns))
            .collect();
        summary.push((profile_name.clone(), prog_stats));
    }
    // A single-profile run gets no comparison table.
    if summary.len() > 1 {
        print_profile_summary(&summary);
    }
    Ok(())
}
/// Prints a program-by-profile table of verified instruction counts.
///
/// Rows are programs (first-seen order across profiles), columns are
/// profiles in `summary` order; a program missing from a profile shows
/// as 0.
fn print_profile_summary(summary: &[(String, Vec<(String, u32)>)]) {
    // Gather program names, deduplicated, preserving first-seen order.
    let mut prog_names: Vec<String> = Vec::new();
    for (_, progs) in summary {
        for (name, _) in progs {
            if !prog_names.iter().any(|n| n == name) {
                prog_names.push(name.clone());
            }
        }
    }
    println!("\n--- profile summary ---");
    let mut table = ktstr::cli::new_table();
    let mut header: Vec<&str> = vec!["program"];
    for (profile, _) in summary {
        header.push(profile.as_str());
    }
    table.set_header(header);
    for prog in &prog_names {
        let mut row: Vec<String> = vec![prog.clone()];
        for (_, progs) in summary {
            let insns = progs
                .iter()
                .find_map(|(n, v)| (n == prog).then_some(*v))
                .unwrap_or(0);
            row.push(insns.to_string());
        }
        table.add_row(row);
    }
    println!("{table}");
}
/// Writes a shell-completion script for the full CLI to stdout.
fn run_completions(shell: clap_complete::Shell, binary: &str) {
    let mut command = Cargo::command();
    let mut out = std::io::stdout();
    clap_complete::generate(shell, &mut command, binary, &mut out);
}
/// Downloads (or reuses) the default model artifact and reports where
/// it landed.
fn run_model_fetch() -> Result<(), String> {
    let spec = ktstr::test_support::DEFAULT_MODEL;
    let path = ktstr::test_support::ensure(&spec)
        .map_err(|e| format!("fetch model '{}': {e:#}", spec.file_name))?;
    println!(
        "ktstr: model '{}' ready at {}",
        spec.file_name,
        path.display()
    );
    Ok(())
}
/// Prints the cache status of the default model artifact: its path,
/// whether a cached copy exists, and whether that copy passed the
/// SHA-256 check, plus a remediation hint for any non-good verdict.
fn run_model_status() -> Result<(), String> {
    use ktstr::test_support::ShaVerdict;
    let spec = ktstr::test_support::DEFAULT_MODEL;
    let status = ktstr::test_support::status(&spec).map_err(|e| format!("{e:#}"))?;
    println!("model: {}", status.spec.file_name);
    println!("path: {}", status.path.display());
    println!("cached: {}", status.sha_verdict.is_cached());
    println!("checked: {}", status.sha_verdict.is_match());
    const RE_FETCH_TAIL: &str = "re-fetch to replace it";
    match &status.sha_verdict {
        ShaVerdict::Matches => {}
        ShaVerdict::NotCached => {
            println!(
                "(no cached copy — run `cargo ktstr model fetch` to download {} MiB)",
                status.spec.size_bytes / 1024 / 1024,
            );
        }
        ShaVerdict::CheckFailed(err) => {
            // Collapse multi-line errors so the note stays on one line.
            let flattened = err.replace('\n', "; ");
            println!(
                "(cached file could not be checked: {flattened}; \
                 inspect the cache entry or {RE_FETCH_TAIL})"
            );
        }
        ShaVerdict::Mismatches => {
            println!("(cached file failed SHA-256 check; {RE_FETCH_TAIL})");
        }
    }
    Ok(())
}
/// Removes the cached model artifact (and its sidecar, if present) and
/// reports how much space each removal freed.
fn run_model_clean() -> Result<(), String> {
    let spec = ktstr::test_support::DEFAULT_MODEL;
    let report =
        ktstr::test_support::clean(&spec).map_err(|e| format!("clean model cache: {e:#}"))?;
    if report.is_empty() {
        println!(
            "no cached model found at {}",
            report.artifact_path.display()
        );
        return Ok(());
    }
    // Report the artifact first, then the sidecar — matching removal order.
    for (freed, path) in [
        (report.artifact_freed_bytes, &report.artifact_path),
        (report.sidecar_freed_bytes, &report.sidecar_path),
    ] {
        if let Some(bytes) = freed {
            println!(
                "removed {} ({})",
                path.display(),
                indicatif::HumanBytes(bytes),
            );
        }
    }
    println!(
        "freed {} total",
        indicatif::HumanBytes(report.total_freed_bytes()),
    );
    Ok(())
}
/// Binary entry point for the `cargo-ktstr` cargo subcommand.
///
/// Sets up signal/log plumbing, parses the CLI, dispatches to the
/// per-subcommand runner, and maps any `Err(String)` to a uniform
/// `error: …` line on stderr plus exit code 1.
fn main() {
    // Restore default SIGPIPE disposition so piping stdout into e.g.
    // `head` terminates the process quietly instead of erroring on
    // broken-pipe writes.
    ktstr::cli::restore_sigpipe_default();
    // Logging goes to stderr; the env filter (e.g. RUST_LOG) overrides
    // the "warn" default when set.
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
        )
        .with_writer(std::io::stderr)
        .init();
    // Irrefutable destructure: Ktstr is the only CargoSub variant
    // (cargo subcommand convention — invoked as `cargo ktstr …`).
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::parse();
    // Every arm yields Result<(), String>; errors are rendered once below.
    let result = match ktstr.command {
        KtstrCommand::Test {
            kernel,
            no_perf_mode,
            release,
            args,
        } => run_test(kernel, no_perf_mode, release, args),
        KtstrCommand::Coverage {
            kernel,
            no_perf_mode,
            release,
            args,
        } => run_coverage(kernel, no_perf_mode, release, args),
        KtstrCommand::LlvmCov {
            kernel,
            no_perf_mode,
            args,
        } => run_llvm_cov(kernel, no_perf_mode, args),
        KtstrCommand::Stats { ref command } => run_stats(command),
        KtstrCommand::Kernel { command } => match command {
            KernelCommand::List { json, range } => match range {
                Some(r) => cli::kernel_list_range_preview(json, &r).map_err(|e| format!("{e:#}")),
                None => cli::kernel_list(json).map_err(|e| format!("{e:#}")),
            },
            KernelCommand::Build {
                version,
                source,
                git,
                git_ref,
                force,
                clean,
                cpu_cap,
            } => kernel_build(version, source, git, git_ref, force, clean, cpu_cap),
            KernelCommand::Clean {
                keep,
                force,
                corrupt_only,
            } => cli::kernel_clean(keep, force, corrupt_only).map_err(|e| format!("{e:#}")),
        },
        KtstrCommand::Model { command } => match command {
            ModelCommand::Fetch => run_model_fetch(),
            ModelCommand::Status => run_model_status(),
            ModelCommand::Clean => run_model_clean(),
        },
        KtstrCommand::Verifier {
            scheduler,
            scheduler_bin,
            kernel,
            raw,
            all_profiles,
            profiles,
        } => run_verifier(
            scheduler,
            scheduler_bin,
            kernel,
            raw,
            all_profiles,
            profiles,
        ),
        KtstrCommand::Completions { shell, binary } => {
            run_completions(shell, &binary);
            Ok(())
        }
        KtstrCommand::ShowHost => {
            print!("{}", cli::show_host());
            Ok(())
        }
        KtstrCommand::ShowThresholds { test } => match cli::show_thresholds(&test) {
            Ok(s) => {
                print!("{s}");
                Ok(())
            }
            Err(e) => Err(format!("{e:#}")),
        },
        KtstrCommand::Cleanup { parent_cgroup } => {
            cli::cleanup(parent_cgroup).map_err(|e| format!("{e:#}"))
        }
        KtstrCommand::Locks { json, watch } => {
            cli::list_locks(json, watch).map_err(|e| format!("{e:#}"))
        }
        KtstrCommand::Shell {
            kernel,
            topology,
            include_files,
            memory_mb,
            dmesg,
            exec,
            no_perf_mode,
            cpu_cap,
        } => run_shell(
            kernel,
            topology,
            include_files,
            memory_mb,
            dmesg,
            exec,
            no_perf_mode,
            cpu_cap,
        ),
    };
    if let Err(e) = result {
        eprintln!("error: {e:#}");
        std::process::exit(1);
    }
}
#[cfg(test)]
mod tests {
use super::*;
use clap::CommandFactory;
use ktstr::cache::KernelMetadata;
#[test]
fn cli_debug_assert() {
Cargo::command().debug_assert();
}
#[test]
fn parse_test_minimal() {
let m = Cargo::try_parse_from(["cargo", "ktstr", "test"]);
assert!(m.is_ok(), "{}", m.err().unwrap());
}
#[test]
fn parse_test_with_kernel() {
let m = Cargo::try_parse_from(["cargo", "ktstr", "test", "--kernel", "6.14.2"]);
assert!(m.is_ok(), "{}", m.err().unwrap());
}
#[test]
fn parse_test_with_release_flag() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "test", "--release"])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Test { release, .. } => {
assert!(release, "`--release` must set `release=true`");
}
_ => panic!("expected Test"),
}
}
#[test]
fn parse_test_with_passthrough_args() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"test",
"--",
"-p",
"ktstr",
"--no-capture",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Test { args, .. } => {
assert_eq!(args, vec!["-p", "ktstr", "--no-capture"]);
}
_ => panic!("expected Test"),
}
}
#[test]
fn parse_nextest_alias_dispatches_to_test() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "nextest"]).unwrap_or_else(|e| panic!("{e}"));
assert!(
matches!(k.command, KtstrCommand::Test { .. }),
"`nextest` alias must dispatch to the Test variant",
);
}
#[test]
fn parse_nextest_alias_with_passthrough_args() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"nextest",
"--",
"-p",
"ktstr",
"--no-capture",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Test { args, .. } => {
assert_eq!(args, vec!["-p", "ktstr", "--no-capture"]);
}
_ => panic!("expected Test (via `nextest` alias)"),
}
}
#[test]
fn parse_nextest_alias_with_kernel_and_no_perf_mode() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"nextest",
"--kernel",
"6.14.2",
"--no-perf-mode",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Test {
kernel,
no_perf_mode,
release,
args,
} => {
assert_eq!(kernel, vec!["6.14.2".to_string()]);
assert!(no_perf_mode);
assert!(!release, "bare invocation must default --release to false");
assert!(args.is_empty());
}
_ => panic!("expected Test (via `nextest` alias)"),
}
}
#[test]
fn parse_coverage_minimal() {
let m = Cargo::try_parse_from(["cargo", "ktstr", "coverage"]);
assert!(m.is_ok(), "{}", m.err().unwrap());
}
#[test]
fn parse_coverage_with_kernel() {
let m = Cargo::try_parse_from(["cargo", "ktstr", "coverage", "--kernel", "6.14.2"]);
assert!(m.is_ok(), "{}", m.err().unwrap());
}
#[test]
fn parse_coverage_with_release_flag() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "coverage", "--release"])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Coverage { release, .. } => {
assert!(release, "`--release` must set `release=true`");
}
_ => panic!("expected Coverage"),
}
}
#[test]
fn parse_coverage_with_passthrough_args() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"coverage",
"--",
"--workspace",
"--lcov",
"--output-path",
"lcov.info",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Coverage { args, .. } => {
assert_eq!(
args,
vec!["--workspace", "--lcov", "--output-path", "lcov.info"]
);
}
_ => panic!("expected Coverage"),
}
}
#[test]
fn parse_coverage_with_kernel_and_no_perf_mode() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"coverage",
"--kernel",
"6.14.2",
"--no-perf-mode",
"--",
"--workspace",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Coverage {
kernel,
no_perf_mode,
release,
args,
} => {
assert_eq!(kernel, vec!["6.14.2".to_string()]);
assert!(no_perf_mode);
assert!(!release, "bare invocation must default --release to false");
assert_eq!(args, vec!["--workspace"]);
}
_ => panic!("expected Coverage"),
}
}
#[test]
fn parse_llvm_cov_minimal() {
let m = Cargo::try_parse_from(["cargo", "ktstr", "llvm-cov"]);
assert!(m.is_ok(), "{}", m.err().unwrap());
}
#[test]
fn parse_llvm_cov_with_kernel() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "llvm-cov", "--kernel", "6.14.2"])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::LlvmCov { kernel, .. } => {
assert_eq!(kernel, vec!["6.14.2".to_string()]);
}
_ => panic!("expected LlvmCov"),
}
}
#[test]
fn parse_llvm_cov_with_passthrough_args() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"llvm-cov",
"--",
"report",
"--lcov",
"--output-path",
"lcov.info",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::LlvmCov { args, .. } => {
assert_eq!(args, vec!["report", "--lcov", "--output-path", "lcov.info"]);
}
_ => panic!("expected LlvmCov"),
}
}
#[test]
fn parse_llvm_cov_with_kernel_and_no_perf_mode() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"llvm-cov",
"--kernel",
"6.14.2",
"--no-perf-mode",
"--",
"report",
"--lcov",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::LlvmCov {
kernel,
no_perf_mode,
args,
} => {
assert_eq!(kernel, vec!["6.14.2".to_string()]);
assert!(no_perf_mode);
assert_eq!(args, vec!["report", "--lcov"]);
}
_ => panic!("expected LlvmCov"),
}
}
#[test]
fn parse_llvm_cov_underscore_rejected() {
let rejected = Cargo::try_parse_from(["cargo", "ktstr", "llvm_cov"]);
assert!(
rejected.is_err(),
"`llvm_cov` (underscore) must be rejected — the \
canonical name is `llvm-cov` (kebab-case)",
);
}
#[test]
fn parse_shell_minimal() {
let m = Cargo::try_parse_from(["cargo", "ktstr", "shell"]);
assert!(m.is_ok(), "{}", m.err().unwrap());
}
#[test]
fn parse_shell_with_topology() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "shell", "--topology", "1,2,4,1"])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Shell { topology, .. } => {
assert_eq!(topology, "1,2,4,1");
}
_ => panic!("expected Shell"),
}
}
#[test]
fn parse_shell_default_topology() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "shell"]).unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Shell { topology, .. } => {
assert_eq!(topology, "1,1,1,1");
}
_ => panic!("expected Shell"),
}
}
#[test]
fn parse_shell_include_files() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "shell", "-i", "/tmp/a", "-i", "/tmp/b"])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Shell { include_files, .. } => {
assert_eq!(include_files.len(), 2);
}
_ => panic!("expected Shell"),
}
}
#[test]
fn parse_stats_bare() {
let m = Cargo::try_parse_from(["cargo", "ktstr", "stats"]);
assert!(m.is_ok(), "{}", m.err().unwrap());
}
#[test]
fn parse_stats_list() {
let m = Cargo::try_parse_from(["cargo", "ktstr", "stats", "list"]);
assert!(m.is_ok(), "{}", m.err().unwrap());
}
#[test]
fn parse_stats_list_metrics_bare() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "stats", "list-metrics"])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::ListMetrics { json }),
..
} => {
assert!(
!json,
"bare `list-metrics` must default to text mode (json=false)",
);
}
_ => panic!("expected Stats ListMetrics"),
}
}
#[test]
fn parse_stats_list_metrics_json() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "stats", "list-metrics", "--json"])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::ListMetrics { json }),
..
} => {
assert!(json, "--json must set the flag true");
}
_ => panic!("expected Stats ListMetrics"),
}
}
#[test]
fn parse_stats_list_metrics_rejects_positional() {
let rejected =
Cargo::try_parse_from(["cargo", "ktstr", "stats", "list-metrics", "worst_spread"]);
assert!(
rejected.is_err(),
"list-metrics must reject positional arguments",
);
}
#[test]
fn parse_stats_list_values_bare() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "stats", "list-values"])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::ListValues { json, dir }),
..
} => {
assert!(!json, "bare `list-values` must default to text mode");
assert!(
dir.is_none(),
"bare `list-values` must default to no --dir override"
);
}
_ => panic!("expected Stats ListValues"),
}
}
#[test]
fn parse_stats_list_values_json() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from(["cargo", "ktstr", "stats", "list-values", "--json"])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::ListValues { json, .. }),
..
} => {
assert!(json, "--json must set the flag true");
}
_ => panic!("expected Stats ListValues"),
}
}
#[test]
fn parse_stats_list_values_with_dir() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"list-values",
"--dir",
"/tmp/archived-runs",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::ListValues { dir, json }),
..
} => {
assert_eq!(
dir.as_deref(),
Some(std::path::Path::new("/tmp/archived-runs")),
"--dir must round-trip to Some(PathBuf)",
);
assert!(!json, "bare --dir must not spuriously set --json");
}
_ => panic!("expected Stats ListValues"),
}
}
#[test]
fn parse_stats_list_values_rejects_positional() {
let rejected = Cargo::try_parse_from(["cargo", "ktstr", "stats", "list-values", "kernel"]);
assert!(
rejected.is_err(),
"list-values must reject positional arguments",
);
}
#[test]
fn parse_stats_compare() {
let m = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
]);
assert!(m.is_ok(), "{}", m.err().unwrap());
}
#[test]
fn parse_stats_compare_with_filter() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
"-E",
"cgroup_steady",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command:
Some(StatsCommand::Compare {
filter,
threshold,
policy,
dir,
a_kernel,
b_kernel,
..
}),
..
} => {
assert_eq!(a_kernel, vec!["6.14"]);
assert_eq!(b_kernel, vec!["6.15"]);
assert_eq!(filter.as_deref(), Some("cgroup_steady"));
assert!(threshold.is_none());
assert!(policy.is_none());
assert!(dir.is_none());
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_threshold() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
"--threshold",
"5.0",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command:
Some(StatsCommand::Compare {
threshold, filter, ..
}),
..
} => {
assert_eq!(threshold, Some(5.0));
assert!(filter.is_none());
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_dir() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
"--dir",
"/tmp/archived-runs",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command:
Some(StatsCommand::Compare {
filter,
threshold,
policy,
dir,
..
}),
..
} => {
assert_eq!(
dir.as_deref(),
Some(std::path::Path::new("/tmp/archived-runs")),
"--dir must round-trip to Some(PathBuf); \
parse-scope only — resolver coverage lives \
with compare_partitions' own tests",
);
assert!(
filter.is_none(),
"bare --dir must not spuriously populate filter",
);
assert!(
threshold.is_none(),
"bare --dir must not spuriously populate threshold",
);
assert!(
policy.is_none(),
"bare --dir must not spuriously populate policy",
);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_policy() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
"--policy",
"/tmp/policy.json",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command:
Some(StatsCommand::Compare {
threshold, policy, ..
}),
..
} => {
assert_eq!(
policy.as_deref(),
Some(std::path::Path::new("/tmp/policy.json")),
"--policy must round-trip to Some(PathBuf); got {policy:?}",
);
assert!(
threshold.is_none(),
"bare --policy must not populate --threshold",
);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_rejects_both_threshold_and_policy() {
let result = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
"--threshold",
"5.0",
"--policy",
"/tmp/policy.json",
]);
let err = match result {
Err(e) => e,
Ok(_) => panic!(
"clap conflicts_with must reject both --threshold \
and --policy being set together"
),
};
let rendered = err.to_string();
assert!(
rendered
.to_ascii_lowercase()
.contains("cannot be used with")
|| rendered.to_ascii_lowercase().contains("conflict"),
"clap error must surface the conflict between \
--threshold and --policy; got: {rendered}",
);
}
#[test]
fn parse_stats_compare_no_average_default_false() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::Compare { no_average, .. }),
..
} => {
assert!(
!no_average,
"bare compare must default --no-average to false so \
averaging-on remains the default — operators get \
trial-set folding without an explicit flag.",
);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_no_average() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
"--no-average",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command:
Some(StatsCommand::Compare {
no_average,
threshold,
policy,
dir,
..
}),
..
} => {
assert!(no_average, "--no-average must lift the flag to true");
assert!(
threshold.is_none(),
"bare --no-average must not spuriously populate --threshold",
);
assert!(
policy.is_none(),
"bare --no-average must not spuriously populate --policy",
);
assert!(
dir.is_none(),
"bare --no-average must not spuriously populate --dir",
);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_project_commit_single() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--project-commit",
"abc1234",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command:
Some(StatsCommand::Compare {
project_commit,
a_project_commit,
b_project_commit,
..
}),
..
} => {
assert_eq!(project_commit, vec!["abc1234"]);
assert!(
a_project_commit.is_empty(),
"shared --project-commit must not populate --a-project-commit",
);
assert!(
b_project_commit.is_empty(),
"shared --project-commit must not populate --b-project-commit",
);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_project_commit_repeatable() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--project-commit",
"a",
"--project-commit",
"b",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::Compare { project_commit, .. }),
..
} => {
assert_eq!(project_commit, vec!["a", "b"]);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_kernel_commit_single() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--kernel-commit",
"abc1234",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command:
Some(StatsCommand::Compare {
kernel_commit,
a_kernel_commit,
b_kernel_commit,
..
}),
..
} => {
assert_eq!(kernel_commit, vec!["abc1234"]);
assert!(
a_kernel_commit.is_empty(),
"shared --kernel-commit must not populate --a-kernel-commit",
);
assert!(
b_kernel_commit.is_empty(),
"shared --kernel-commit must not populate --b-kernel-commit",
);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_kernel_commit_repeatable() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--kernel-commit",
"a",
"--kernel-commit",
"b",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::Compare { kernel_commit, .. }),
..
} => {
assert_eq!(kernel_commit, vec!["a", "b"]);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_scheduler_repeatable() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--scheduler",
"scx_alpha",
"--scheduler",
"scx_beta",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::Compare { scheduler, .. }),
..
} => {
assert_eq!(scheduler, vec!["scx_alpha", "scx_beta"]);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_topology_repeatable() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--topology",
"1n2l4c2t",
"--topology",
"1n4l2c1t",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::Compare { topology, .. }),
..
} => {
assert_eq!(topology, vec!["1n2l4c2t", "1n4l2c1t"]);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_work_type_repeatable() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--work-type",
"CpuSpin",
"--work-type",
"PageFaultChurn",
"--a-kernel",
"6.14",
"--b-kernel",
"6.15",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command: Some(StatsCommand::Compare { work_type, .. }),
..
} => {
assert_eq!(work_type, vec!["CpuSpin", "PageFaultChurn"]);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn parse_stats_compare_with_per_side_kernel_commit() {
let Cargo {
command: CargoSub::Ktstr(k),
} = Cargo::try_parse_from([
"cargo",
"ktstr",
"stats",
"compare",
"--a-kernel-commit",
"abc1234",
"--b-kernel-commit",
"def5678",
])
.unwrap_or_else(|e| panic!("{e}"));
match k.command {
KtstrCommand::Stats {
command:
Some(StatsCommand::Compare {
kernel_commit,
a_kernel_commit,
b_kernel_commit,
..
}),
..
} => {
assert!(
kernel_commit.is_empty(),
"per-side --a-kernel-commit / --b-kernel-commit must not \
populate the shared --kernel-commit vec",
);
assert_eq!(a_kernel_commit, vec!["abc1234"]);
assert_eq!(b_kernel_commit, vec!["def5678"]);
}
_ => panic!("expected Stats Compare"),
}
}
#[test]
fn build_compare_filters_empty_yields_default_default() {
let b = BuildCompareFilters::default();
let (fa, fb) = b.build();
assert!(fa.kernels.is_empty());
assert!(fa.project_commits.is_empty());
assert!(fa.kernel_commits.is_empty());
assert!(fa.run_sources.is_empty());
assert!(fa.schedulers.is_empty());
assert!(fa.topologies.is_empty());
assert!(fa.work_types.is_empty());
assert!(fa.flags.is_empty());
assert_eq!(fa.kernels, fb.kernels);
assert_eq!(fa.project_commits, fb.project_commits);
assert_eq!(fa.kernel_commits, fb.kernel_commits);
assert_eq!(fa.run_sources, fb.run_sources);
assert_eq!(fa.schedulers, fb.schedulers);
assert_eq!(fa.topologies, fb.topologies);
assert_eq!(fa.work_types, fb.work_types);
}
#[test]
fn build_compare_filters_per_side_kernel_commit_overrides_shared() {
    // A per-side --a-kernel-commit wins over the shared list on side A only;
    // side B keeps the shared default.
    let builder = BuildCompareFilters {
        shared_kernel_commit: vec![String::from("abcdef1"), String::from("fedcba2")],
        a_kernel_commit: vec![String::from("111aaaa")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(
        side_a.kernel_commits,
        vec!["111aaaa"],
        "A overrides shared kernel-commit",
    );
    assert_eq!(
        side_b.kernel_commits,
        vec!["abcdef1", "fedcba2"],
        "B retains shared kernel-commit default",
    );
}
#[test]
fn build_compare_filters_disjoint_per_side_kernel_commit_slices() {
    // Disjoint per-side commits pin each side independently and must surface
    // as exactly one KernelCommit slicing dimension.
    let builder = BuildCompareFilters {
        a_kernel_commit: vec![String::from("abcdef1")],
        b_kernel_commit: vec![String::from("fedcba2")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(side_a.kernel_commits, vec!["abcdef1"]);
    assert_eq!(side_b.kernel_commits, vec!["fedcba2"]);
    let dims = ktstr::cli::derive_slicing_dims(&side_a, &side_b);
    assert_eq!(
        dims,
        vec![ktstr::cli::Dimension::KernelCommit],
        "differing per-side kernel-commit must derive as a single \
         KernelCommit slicing dim",
    );
}
#[test]
fn build_compare_filters_shared_kernel_pins_both_sides() {
    // A bare shared --kernel pins the same kernel on A and B.
    let builder = BuildCompareFilters {
        shared_kernel: vec![String::from("6.14")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(side_a.kernels, vec!["6.14"]);
    assert_eq!(side_b.kernels, vec!["6.14"]);
}
#[test]
fn build_compare_filters_per_side_overrides_shared_for_that_side_only() {
    // --a-kernel replaces the shared list for A; B keeps the shared default.
    let builder = BuildCompareFilters {
        shared_kernel: vec![String::from("6.14"), String::from("6.15")],
        a_kernel: vec![String::from("6.13")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(side_a.kernels, vec!["6.13"], "A overrides shared");
    assert_eq!(side_b.kernels, vec!["6.14", "6.15"], "B retains shared default",);
}
#[test]
fn build_compare_filters_disjoint_per_side_kernel_yields_two_filters() {
    // Purely per-side kernels produce two independent filters.
    let builder = BuildCompareFilters {
        a_kernel: vec![String::from("6.14")],
        b_kernel: vec![String::from("6.15")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(side_a.kernels, vec!["6.14"]);
    assert_eq!(side_b.kernels, vec!["6.15"]);
}
#[test]
fn build_compare_filters_per_side_scheduler_overrides_shared() {
    // --a-scheduler replaces the shared scheduler on side A only.
    let builder = BuildCompareFilters {
        shared_scheduler: vec![String::from("scx_default")],
        a_scheduler: vec![String::from("scx_alpha")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(
        side_a.schedulers,
        vec![String::from("scx_alpha")],
        "A overrides shared scheduler",
    );
    assert_eq!(
        side_b.schedulers,
        vec![String::from("scx_default")],
        "B retains shared scheduler when only --a-scheduler overrides",
    );
}
#[test]
fn build_compare_filters_shared_pin_plus_per_side_slice() {
    // Shared kernel pins both sides while per-side schedulers slice; the
    // derived slicing dimension must be Scheduler alone.
    let builder = BuildCompareFilters {
        shared_kernel: vec![String::from("6.14")],
        a_scheduler: vec![String::from("scx_alpha")],
        b_scheduler: vec![String::from("scx_beta")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(side_a.kernels, vec!["6.14"]);
    assert_eq!(side_b.kernels, vec!["6.14"]);
    assert_eq!(side_a.schedulers, vec![String::from("scx_alpha")]);
    assert_eq!(side_b.schedulers, vec![String::from("scx_beta")]);
    let dims = ktstr::cli::derive_slicing_dims(&side_a, &side_b);
    assert_eq!(dims, vec![ktstr::cli::Dimension::Scheduler]);
}
#[test]
fn build_compare_filters_per_side_flag_overrides_shared() {
    // Per-side flags replace (not merge with) the shared flag list.
    let builder = BuildCompareFilters {
        shared_flags: vec![String::from("llc")],
        a_flags: vec![String::from("steal"), String::from("borrow")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(side_a.flags, vec!["steal", "borrow"]);
    assert_eq!(side_b.flags, vec!["llc"]);
}
#[test]
fn build_compare_filters_empty_run_sources_field_equal_on_both_sides() {
    // Defaults leave run_sources empty and equal on both sides.
    let (side_a, side_b) = BuildCompareFilters::default().build();
    assert!(
        side_a.run_sources.is_empty(),
        "empty BuildCompareFilters must produce A-side filter with empty run_sources",
    );
    assert!(
        side_b.run_sources.is_empty(),
        "empty BuildCompareFilters must produce B-side filter with empty run_sources",
    );
    assert_eq!(
        side_a.run_sources, side_b.run_sources,
        "both sides must agree on empty run_sources",
    );
}
#[test]
fn build_compare_filters_disjoint_per_side_source_yields_two_filters() {
    // Disjoint per-side run sources land on their own side only.
    let builder = BuildCompareFilters {
        a_run_source: vec![String::from("ci")],
        b_run_source: vec![String::from("local")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(side_a.run_sources, vec![String::from("ci")]);
    assert_eq!(side_b.run_sources, vec![String::from("local")]);
}
#[test]
fn build_compare_filters_shared_source_pins_both_sides() {
    // A shared run source pins A and B identically.
    let builder = BuildCompareFilters {
        shared_run_source: vec![String::from("ci")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(side_a.run_sources, vec![String::from("ci")]);
    assert_eq!(side_b.run_sources, vec![String::from("ci")]);
}
#[test]
fn build_compare_filters_per_side_source_overrides_shared_for_that_side_only() {
    // --a-run-source overrides the shared list on A; B keeps the default.
    let builder = BuildCompareFilters {
        shared_run_source: vec![String::from("local"), String::from("archive")],
        a_run_source: vec![String::from("ci")],
        ..BuildCompareFilters::default()
    };
    let (side_a, side_b) = builder.build();
    assert_eq!(side_a.run_sources, vec![String::from("ci")], "A overrides shared");
    assert_eq!(
        side_b.run_sources,
        vec![String::from("local"), String::from("archive")],
        "B retains shared default",
    );
}
#[test]
fn parse_stats_show_host_with_run() {
    // `stats show-host --run <id>` parses with run set and dir unset.
    let argv = ["cargo", "ktstr", "stats", "show-host", "--run", "my-run-id"];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Stats {
            command: Some(StatsCommand::ShowHost { run, dir }),
            ..
        } => {
            assert_eq!(run, "my-run-id");
            assert!(dir.is_none(), "bare --run must not populate --dir");
        }
        _ => panic!("expected Stats ShowHost"),
    }
}
#[test]
fn parse_stats_show_host_with_dir() {
    // `--dir` must round-trip alongside `--run` as a path.
    let argv = [
        "cargo",
        "ktstr",
        "stats",
        "show-host",
        "--run",
        "archive-2024-01-15",
        "--dir",
        "/tmp/archived-runs",
    ];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Stats {
            command: Some(StatsCommand::ShowHost { run, dir }),
            ..
        } => {
            assert_eq!(run, "archive-2024-01-15");
            assert_eq!(
                dir.as_deref(),
                Some(std::path::Path::new("/tmp/archived-runs")),
            );
        }
        _ => panic!("expected Stats ShowHost"),
    }
}
#[test]
fn parse_stats_show_host_missing_run_rejected() {
    // `--run` is mandatory for show-host.
    let rejected = Cargo::try_parse_from(["cargo", "ktstr", "stats", "show-host"]);
    assert!(rejected.is_err(), "stats show-host must require --run",);
}
#[test]
fn parse_stats_explain_sidecar_with_run() {
    // Bare --run: dir stays unset and output defaults to text (json=false).
    let argv = [
        "cargo",
        "ktstr",
        "stats",
        "explain-sidecar",
        "--run",
        "my-run-id",
    ];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Stats {
            command: Some(StatsCommand::ExplainSidecar { run, dir, json }),
            ..
        } => {
            assert_eq!(run, "my-run-id");
            assert!(dir.is_none(), "bare --run must not populate --dir");
            assert!(!json, "default output is text, not json");
        }
        _ => panic!("expected Stats ExplainSidecar"),
    }
}
#[test]
fn parse_stats_explain_sidecar_with_dir_and_json() {
    // --dir and --json must both round-trip through the parse.
    let argv = [
        "cargo",
        "ktstr",
        "stats",
        "explain-sidecar",
        "--run",
        "archive-2024-01-15",
        "--dir",
        "/tmp/archived-runs",
        "--json",
    ];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Stats {
            command: Some(StatsCommand::ExplainSidecar { run, dir, json }),
            ..
        } => {
            assert_eq!(run, "archive-2024-01-15");
            assert_eq!(
                dir.as_deref(),
                Some(std::path::Path::new("/tmp/archived-runs")),
            );
            assert!(json, "--json must toggle aggregate JSON output");
        }
        _ => panic!("expected Stats ExplainSidecar"),
    }
}
#[test]
fn parse_stats_explain_sidecar_missing_run_rejected() {
    // `--run` is mandatory for explain-sidecar.
    let rejected = Cargo::try_parse_from(["cargo", "ktstr", "stats", "explain-sidecar"]);
    assert!(
        rejected.is_err(),
        "stats explain-sidecar must require --run",
    );
}
#[test]
fn parse_kernel_list() {
    // Bare `kernel list` is valid.
    if let Err(e) = Cargo::try_parse_from(["cargo", "ktstr", "kernel", "list"]) {
        panic!("{}", e);
    }
}
#[test]
fn parse_kernel_list_json() {
    // `kernel list --json` is valid.
    if let Err(e) = Cargo::try_parse_from(["cargo", "ktstr", "kernel", "list", "--json"]) {
        panic!("{}", e);
    }
}
#[test]
fn parse_kernel_list_range() {
    // `--range` must round-trip verbatim and must not imply --json.
    let argv = ["cargo", "ktstr", "kernel", "list", "--range", "6.12..6.14"];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Kernel { command } => match command {
            KernelCommand::List { json, range } => {
                assert!(!json, "bare --range must not enable --json");
                assert_eq!(
                    range.as_deref(),
                    Some("6.12..6.14"),
                    "--range must round-trip the literal spec for \
                     dispatch to pass to `expand_kernel_range`",
                );
            }
            other => panic!("expected KernelCommand::List, got {other:?}"),
        },
        _ => panic!("expected Kernel"),
    }
}
#[test]
fn parse_kernel_list_range_with_json() {
    // --json must compose with --range.
    let argv = [
        "cargo",
        "ktstr",
        "kernel",
        "list",
        "--range",
        "6.12..6.14",
        "--json",
    ];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Kernel { command } => match command {
            KernelCommand::List { json, range } => {
                assert!(json, "--json must round-trip alongside --range");
                assert_eq!(range.as_deref(), Some("6.12..6.14"));
            }
            other => panic!("expected KernelCommand::List, got {other:?}"),
        },
        _ => panic!("expected Kernel"),
    }
}
#[test]
fn parse_stats_compare_with_run_source_single() {
    // A shared --run-source fills only the shared vec, never the per-side ones.
    let argv = [
        "cargo",
        "ktstr",
        "stats",
        "compare",
        "--a-kernel",
        "6.14",
        "--b-kernel",
        "6.15",
        "--run-source",
        "ci",
    ];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Stats {
            command:
                Some(StatsCommand::Compare {
                    run_source,
                    a_run_source,
                    b_run_source,
                    ..
                }),
            ..
        } => {
            assert_eq!(
                run_source,
                vec![String::from("ci")],
                "shared --run-source must populate the shared vec",
            );
            assert!(
                a_run_source.is_empty(),
                "shared --run-source must not populate --a-run-source",
            );
            assert!(
                b_run_source.is_empty(),
                "shared --run-source must not populate --b-run-source",
            );
        }
        _ => panic!("expected Stats Compare"),
    }
}
#[test]
fn parse_stats_compare_with_run_source_per_side() {
    // Per-side flags fill only their own vecs; the shared vec stays empty.
    let argv = [
        "cargo",
        "ktstr",
        "stats",
        "compare",
        "--a-run-source",
        "ci",
        "--b-run-source",
        "local",
    ];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Stats {
            command:
                Some(StatsCommand::Compare {
                    run_source,
                    a_run_source,
                    b_run_source,
                    ..
                }),
            ..
        } => {
            assert!(
                run_source.is_empty(),
                "per-side flags must not populate the shared --run-source vec",
            );
            assert_eq!(a_run_source, vec![String::from("ci")]);
            assert_eq!(b_run_source, vec![String::from("local")]);
        }
        _ => panic!("expected Stats Compare"),
    }
}
#[test]
fn parse_kernel_build_version() {
    // A bare version positional is a valid build target.
    if let Err(e) = Cargo::try_parse_from(["cargo", "ktstr", "kernel", "build", "6.14.2"]) {
        panic!("{}", e);
    }
}
#[test]
fn parse_kernel_build_source() {
    // `--source <tree>` alone is a valid build target.
    if let Err(e) =
        Cargo::try_parse_from(["cargo", "ktstr", "kernel", "build", "--source", "../linux"])
    {
        panic!("{}", e);
    }
}
#[test]
fn parse_kernel_build_source_conflicts_with_version() {
    // `--source` and a version positional are mutually exclusive.
    let parsed = Cargo::try_parse_from([
        "cargo", "ktstr", "kernel", "build", "--source", "../linux", "6.14.2",
    ]);
    assert!(parsed.is_err());
}
#[test]
fn parse_kernel_build_git_requires_ref() {
    // `--git` without `--ref` must be rejected.
    let parsed = Cargo::try_parse_from([
        "cargo",
        "ktstr",
        "kernel",
        "build",
        "--git",
        "https://example.com/linux.git",
    ]);
    assert!(parsed.is_err());
}
#[test]
fn parse_kernel_build_git_with_ref() {
    // `--git` plus `--ref` is a complete git build spec.
    if let Err(e) = Cargo::try_parse_from([
        "cargo",
        "ktstr",
        "kernel",
        "build",
        "--git",
        "https://example.com/linux.git",
        "--ref",
        "v6.14",
    ]) {
        panic!("{}", e);
    }
}
#[test]
fn parse_kernel_build_git_conflicts_with_source() {
    // git and local-source build specs are mutually exclusive.
    let parsed = Cargo::try_parse_from([
        "cargo",
        "ktstr",
        "kernel",
        "build",
        "--git",
        "https://example.com/linux.git",
        "--ref",
        "v6.14",
        "--source",
        "../linux",
    ]);
    assert!(parsed.is_err());
}
#[test]
fn parse_kernel_clean() {
    // Bare `kernel clean` is valid.
    if let Err(e) = Cargo::try_parse_from(["cargo", "ktstr", "kernel", "clean"]) {
        panic!("{}", e);
    }
}
#[test]
fn parse_kernel_clean_keep() {
    // `--keep N` must surface as Some(N).
    let argv = ["cargo", "ktstr", "kernel", "clean", "--keep", "3"];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Kernel {
            command: KernelCommand::Clean { keep, .. },
        } => {
            assert_eq!(keep, Some(3));
        }
        _ => panic!("expected Kernel Clean"),
    }
}
#[test]
fn parse_verifier_with_scheduler() {
    // `--scheduler <name>` selects a known scheduler by name.
    if let Err(e) =
        Cargo::try_parse_from(["cargo", "ktstr", "verifier", "--scheduler", "scx_rustland"])
    {
        panic!("{}", e);
    }
}
#[test]
fn parse_verifier_with_scheduler_bin() {
    // `--scheduler-bin <path>` is the raw-binary alternative.
    if let Err(e) = Cargo::try_parse_from([
        "cargo",
        "ktstr",
        "verifier",
        "--scheduler-bin",
        "/tmp/sched",
    ]) {
        panic!("{}", e);
    }
}
#[test]
fn parse_verifier_scheduler_conflicts_with_scheduler_bin() {
    // The two scheduler selectors are mutually exclusive.
    let parsed = Cargo::try_parse_from([
        "cargo",
        "ktstr",
        "verifier",
        "--scheduler",
        "scx_rustland",
        "--scheduler-bin",
        "/tmp/sched",
    ]);
    assert!(parsed.is_err());
}
#[test]
fn parse_verifier_all_profiles() {
    // `--all-profiles` composes with `--scheduler`.
    if let Err(e) = Cargo::try_parse_from([
        "cargo",
        "ktstr",
        "verifier",
        "--scheduler",
        "scx_rustland",
        "--all-profiles",
    ]) {
        panic!("{}", e);
    }
}
#[test]
fn parse_verifier_profiles_filter() {
    // `--profiles` is comma-delimited into individual profile names.
    let argv = [
        "cargo",
        "ktstr",
        "verifier",
        "--scheduler",
        "scx_rustland",
        "--profiles",
        "default,llc,llc+steal",
    ];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Verifier { profiles, .. } => {
            assert_eq!(profiles, vec!["default", "llc", "llc+steal"]);
        }
        _ => panic!("expected Verifier"),
    }
}
#[test]
fn parse_completions_bash() {
    // A known shell name parses.
    if let Err(e) = Cargo::try_parse_from(["cargo", "ktstr", "completions", "bash"]) {
        panic!("{}", e);
    }
}
#[test]
fn parse_completions_invalid_shell() {
    // An unknown shell name is rejected.
    assert!(Cargo::try_parse_from(["cargo", "ktstr", "completions", "noshell"]).is_err());
}
#[test]
fn parse_missing_subcommand() {
    // `cargo ktstr` with no subcommand is an error.
    assert!(Cargo::try_parse_from(["cargo", "ktstr"]).is_err());
}
#[test]
fn parse_unknown_subcommand() {
    // An unrecognized subcommand is rejected.
    assert!(Cargo::try_parse_from(["cargo", "ktstr", "nonexistent"]).is_err());
}
#[test]
fn topology_valid() {
    // A well-formed topology spec has four comma-separated u32 components.
    let components: Vec<&str> = "1,2,4,1".split(',').collect();
    assert_eq!(components.len(), 4);
    for component in &components {
        assert!(component.parse::<u32>().is_ok());
    }
}
#[test]
fn topology_invalid_one_component() {
    // A single token without commas cannot be a 4-component spec.
    let components: Vec<&str> = "abc".split(',').collect();
    assert_ne!(components.len(), 4);
}
#[test]
fn topology_invalid_non_numeric() {
    // Four components, but non-numeric ones must fail to parse.
    let components: Vec<&str> = "a,b,c,d".split(',').collect();
    assert_eq!(components.len(), 4);
    assert!(components[0].parse::<u32>().is_err());
}
#[test]
fn topology_invalid_three_components() {
    // Three components are too few.
    let components: Vec<&str> = "1,2,1".split(',').collect();
    assert_ne!(components.len(), 4);
}
#[test]
fn topology_invalid_zero_component() {
    // Zero parses fine as a u32 — rejecting it is the validator's job,
    // not the parse's; this pins that split/parse alone lets 0 through.
    let components: Vec<&str> = "0,1,1,1".split(',').collect();
    assert_eq!(components.len(), 4);
    let first: u32 = components[0].parse().unwrap();
    assert_eq!(first, 0);
}
#[test]
fn completions_bash_non_empty() {
    // Bash completion generation must emit something.
    let mut generated = Vec::new();
    let mut cmd = Cargo::command();
    clap_complete::generate(clap_complete::Shell::Bash, &mut cmd, "cargo", &mut generated);
    assert!(!generated.is_empty());
}
#[test]
fn completions_zsh_contains_subcommands() {
    // Every user-visible subcommand (and the nextest alias of `test`) must
    // appear in the zsh describe-list.
    let mut generated = Vec::new();
    let mut cmd = Cargo::command();
    clap_complete::generate(clap_complete::Shell::Zsh, &mut cmd, "cargo", &mut generated);
    let output = String::from_utf8(generated).expect("completions should be valid UTF-8");
    let expectations = [
        ("'test:", "zsh completions missing 'test:' describe-list entry"),
        ("'coverage:", "zsh completions missing 'coverage:' describe-list entry"),
        ("'shell:", "zsh completions missing 'shell:' describe-list entry"),
        ("'kernel:", "zsh completions missing 'kernel:' describe-list entry"),
        (
            "'nextest:",
            "zsh completions missing 'nextest:' describe-list entry (visible alias of `test`)",
        ),
        ("'llvm-cov:", "zsh completions missing 'llvm-cov:' describe-list entry"),
    ];
    for (needle, message) in expectations {
        assert!(output.contains(needle), "{}", message);
    }
}
#[test]
fn cargo_sub_argv_constants_are_pinned() {
    // These sub-argv constants are part of the dispatch contract; pin them.
    assert_eq!(TEST_SUB_ARGV, &["nextest", "run"]);
    assert_eq!(COVERAGE_SUB_ARGV, &["llvm-cov", "nextest"]);
    assert_eq!(LLVM_COV_SUB_ARGV, &["llvm-cov"]);
}
#[test]
fn profraw_inject_for_test_path_returns_pattern() {
    // The nextest path must inject an absolute profraw pattern, never a bare
    // filename that would drop files into the cwd.
    let injected = profraw_inject_for(TEST_SUB_ARGV, None)
        .expect("test path without LLVM_PROFILE_FILE must inject");
    assert!(
        injected.ends_with("default-%p-%m.profraw"),
        "injected pattern must end with default-%%p-%%m.profraw, got {}",
        injected.display(),
    );
    assert_ne!(
        injected.as_os_str(),
        "default-%p-%m.profraw",
        "pattern must be absolute (carry a target dir prefix), \
         not bare so the LLVM runtime never falls back to cwd",
    );
}
#[test]
fn profraw_inject_for_coverage_path_skips() {
    assert!(
        profraw_inject_for(COVERAGE_SUB_ARGV, None).is_none(),
        "coverage path must not inject — cargo-llvm-cov owns LLVM_PROFILE_FILE",
    );
}
#[test]
fn profraw_inject_for_llvm_cov_path_skips() {
    assert!(
        profraw_inject_for(LLVM_COV_SUB_ARGV, None).is_none(),
        "llvm-cov passthrough path must not inject — user owns env decisions",
    );
}
#[test]
fn profraw_inject_for_respects_operator_override() {
    // A pre-set LLVM_PROFILE_FILE belongs to the operator; never clobber it.
    let preset = std::ffi::OsString::from("/tmp/operator-pinned-%p.profraw");
    assert!(
        profraw_inject_for(TEST_SUB_ARGV, Some(preset)).is_none(),
        "an operator-set LLVM_PROFILE_FILE must not be overridden",
    );
}
#[test]
fn generate_flag_profiles_empty() {
    // No flag declarations yields exactly the bare "default" profile.
    let generated = generate_flag_profiles(&[]);
    assert_eq!(generated.len(), 1);
    assert_eq!(generated[0].0, "default");
    assert!(generated[0].1.is_empty());
}
#[test]
fn generate_flag_profiles_single_flag() {
    // One flag yields "default" plus the single-flag profile.
    let decls = vec![ktstr::scenario::flags::FlagDeclJson {
        name: String::from("llc"),
        args: vec![String::from("--llc")],
        requires: vec![],
    }];
    let generated = generate_flag_profiles(&decls);
    assert_eq!(generated.len(), 2);
    assert_eq!(generated[0].0, "default");
    assert_eq!(generated[1].0, "llc");
}
#[test]
fn generate_flag_profiles_requires_constraint() {
    // A flag with a `requires` dependency only appears combined with it —
    // no bare "steal" profile.
    let decls = vec![
        ktstr::scenario::flags::FlagDeclJson {
            name: String::from("llc"),
            args: vec![String::from("--llc")],
            requires: vec![],
        },
        ktstr::scenario::flags::FlagDeclJson {
            name: String::from("steal"),
            args: vec![String::from("--steal")],
            requires: vec![String::from("llc")],
        },
    ];
    let generated = generate_flag_profiles(&decls);
    let names: Vec<&str> = generated.iter().map(|(name, _)| name.as_str()).collect();
    assert_eq!(generated.len(), 3);
    assert!(names.contains(&"default"));
    assert!(names.contains(&"llc"));
    assert!(names.contains(&"llc+steal"));
    assert!(!names.contains(&"steal"));
}
#[test]
fn profile_sched_args_collects_args() {
    // Active flags contribute their args in declaration order.
    let decls = vec![
        ktstr::scenario::flags::FlagDeclJson {
            name: String::from("llc"),
            args: vec![String::from("--llc")],
            requires: vec![],
        },
        ktstr::scenario::flags::FlagDeclJson {
            name: String::from("steal"),
            args: vec![String::from("--steal"), String::from("--aggressive")],
            requires: vec![],
        },
    ];
    let active = vec![String::from("llc"), String::from("steal")];
    let collected = profile_sched_args(&active, &decls).unwrap();
    assert_eq!(collected, vec!["--llc", "--steal", "--aggressive"]);
}
#[test]
fn profile_sched_args_empty() {
    // No active flags yields no scheduler args.
    let decls = vec![ktstr::scenario::flags::FlagDeclJson {
        name: String::from("llc"),
        args: vec![String::from("--llc")],
        requires: vec![],
    }];
    let active: Vec<String> = vec![];
    let collected = profile_sched_args(&active, &decls).unwrap();
    assert!(collected.is_empty());
}
#[test]
fn profile_sched_args_unknown_flag_errors() {
    // An unknown active flag must error, naming both the offender and the
    // known flags.
    let decls = vec![ktstr::scenario::flags::FlagDeclJson {
        name: String::from("llc"),
        args: vec![String::from("--llc")],
        requires: vec![],
    }];
    let active = vec![String::from("llc"), String::from("unknown_flag")];
    let err = profile_sched_args(&active, &decls).unwrap_err();
    assert!(
        err.contains("unknown_flag"),
        "error should cite flag: {err}"
    );
    assert!(err.contains("llc"), "error should list known flags: {err}");
}
// Canonical tarball metadata fixture used by the kconfig-status tests below.
fn test_metadata() -> KernelMetadata {
    let base = KernelMetadata::new(
        ktstr::cache::KernelSource::Tarball,
        String::from("x86_64"),
        String::from("bzImage"),
        String::from("2026-04-12T10:00:00Z"),
    );
    base.with_version(Some(String::from("6.14.2")))
}
// Writes a throwaway fake kernel image into a temp dir and stores it in
// `cache` under `key`, returning the resulting cache entry.
fn store_test_entry(cache: &CacheDir, key: &str, meta: &KernelMetadata) -> CacheEntry {
    let staging = tempfile::TempDir::new().unwrap();
    let image_path = staging.path().join(&meta.image_name);
    std::fs::write(&image_path, b"fake kernel").unwrap();
    let artifacts = ktstr::cache::CacheArtifacts::new(&image_path);
    cache.store(key, &artifacts, meta).unwrap()
}
#[test]
fn format_entry_row_no_version() {
    // A local-source entry stored without a version must still render a row,
    // with a placeholder in the version column.
    let tmp = tempfile::TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let meta = KernelMetadata::new(
        ktstr::cache::KernelSource::Local {
            source_tree_path: None,
            git_hash: None,
        },
        String::from("x86_64"),
        String::from("bzImage"),
        String::from("2026-04-12T10:00:00Z"),
    );
    let entry = store_test_entry(&cache, "local-key", &meta);
    let row = cli::format_entry_row(&entry, "hash", &[]);
    // NOTE(review): `contains("-")` is a weak check — the key ("local-key")
    // and the timestamp also contain dashes; consider pinning the exact
    // version column instead.
    assert!(row.contains("-"), "missing version should show dash");
}
#[test]
fn kconfig_status_reports_stale_on_hash_mismatch() {
    // A cached hash differing from the current one is Stale, carrying both.
    let tmp = tempfile::TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let meta = test_metadata().with_ktstr_kconfig_hash(Some(String::from("old")));
    let entry = store_test_entry(&cache, "stale", &meta);
    assert_eq!(
        entry.kconfig_status("new"),
        ktstr::cache::KconfigStatus::Stale {
            cached: String::from("old"),
            current: String::from("new"),
        }
    );
}
#[test]
fn kconfig_status_reports_matches_on_hash_equality() {
    // Identical cached and current hashes report Matches.
    let tmp = tempfile::TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let meta = test_metadata().with_ktstr_kconfig_hash(Some(String::from("same")));
    let entry = store_test_entry(&cache, "fresh", &meta);
    assert_eq!(
        entry.kconfig_status("same"),
        ktstr::cache::KconfigStatus::Matches
    );
}
#[test]
fn kconfig_status_reports_untracked_when_entry_has_no_hash() {
    // An entry stored without a kconfig hash can never be compared: Untracked.
    let tmp = tempfile::TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let meta = test_metadata();
    let entry = store_test_entry(&cache, "no-hash", &meta);
    assert_eq!(
        entry.kconfig_status("anything"),
        ktstr::cache::KconfigStatus::Untracked
    );
}
#[test]
fn kconfig_status_json_string_pins_all_three_variants() {
    // Pins each variant's Display string ("matches"/"stale"/"untracked").
    use ktstr::cache::KconfigStatus;
    let tmp = tempfile::TempDir::new().unwrap();
    let cache = CacheDir::with_root(tmp.path().join("cache"));
    let matches_meta = test_metadata().with_ktstr_kconfig_hash(Some(String::from("h")));
    let matches_entry = store_test_entry(&cache, "matches-key", &matches_meta);
    let matches_status = matches_entry.kconfig_status("h");
    assert!(
        matches!(matches_status, KconfigStatus::Matches),
        "hash equality must yield KconfigStatus::Matches"
    );
    assert_eq!(matches_status.to_string(), "matches");
    let stale_meta = test_metadata().with_ktstr_kconfig_hash(Some(String::from("old")));
    let stale_entry = store_test_entry(&cache, "stale-key", &stale_meta);
    let stale_status = stale_entry.kconfig_status("new");
    assert!(
        matches!(stale_status, KconfigStatus::Stale { .. }),
        "hash mismatch must yield KconfigStatus::Stale"
    );
    assert_eq!(stale_status.to_string(), "stale");
    let untracked_meta = test_metadata();
    let untracked_entry = store_test_entry(&cache, "untracked-key", &untracked_meta);
    let untracked_status = untracked_entry.kconfig_status("anything");
    assert!(
        matches!(untracked_status, KconfigStatus::Untracked),
        "entry without hash must yield KconfigStatus::Untracked"
    );
    assert_eq!(untracked_status.to_string(), "untracked");
}
#[test]
fn embedded_kconfig_hash_deterministic() {
    // Two invocations must agree.
    assert_eq!(cli::embedded_kconfig_hash(), cli::embedded_kconfig_hash());
}
#[test]
fn embedded_kconfig_hash_is_hex() {
    // The hash renders as 8 lowercase-hex CRC32 characters.
    let h = cli::embedded_kconfig_hash();
    assert_eq!(h.len(), 8, "CRC32 hex should be 8 chars");
    assert!(
        h.chars().all(|c| c.is_ascii_hexdigit()),
        "should be hex digits: {h}"
    );
}
#[test]
fn embedded_kconfig_hash_matches_manual_crc32() {
    // Pins the implementation to CRC32 of the embedded kconfig bytes.
    let expected = format!("{:08x}", crc32fast::hash(cli::EMBEDDED_KCONFIG.as_bytes()));
    assert_eq!(cli::embedded_kconfig_hash(), expected);
}
#[test]
fn parse_show_host_minimal() {
    // `show-host` parses bare and rejects any positional argument.
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(["cargo", "ktstr", "show-host"])
        .unwrap_or_else(|e| panic!("{e}"));
    assert!(matches!(ktstr.command, KtstrCommand::ShowHost));
    let rejected = Cargo::try_parse_from(["cargo", "ktstr", "show-host", "stray"]);
    assert!(
        rejected.is_err(),
        "show-host must reject positional arguments",
    );
}
#[test]
fn parse_show_thresholds_with_test_arg() {
    // The single positional names the test whose thresholds to show.
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(["cargo", "ktstr", "show-thresholds", "my_test_fn"])
        .unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::ShowThresholds { test } => {
            assert_eq!(test, "my_test_fn");
        }
        _ => panic!("expected ShowThresholds"),
    }
}
#[test]
fn parse_show_thresholds_without_arg_rejected() {
    let rejected = Cargo::try_parse_from(["cargo", "ktstr", "show-thresholds"]);
    assert!(
        rejected.is_err(),
        "show-thresholds requires a test-name argument",
    );
}
#[test]
fn parse_show_thresholds_extra_arg_rejected() {
    let rejected = Cargo::try_parse_from(["cargo", "ktstr", "show-thresholds", "a", "b"]);
    assert!(
        rejected.is_err(),
        "show-thresholds must accept exactly one positional arg",
    );
}
#[test]
fn show_host_helper_produces_non_empty_output() {
    // The report must be non-empty and carry the stable `kernel_release` row.
    let report = cli::show_host();
    assert!(
        !report.is_empty(),
        "show_host must return a non-empty report under normal Linux CI",
    );
    assert!(
        report.contains("kernel_release"),
        "show_host output must include the stable `kernel_release` row: {}",
        report,
    );
}
#[test]
fn show_thresholds_helper_unknown_test_returns_error() {
    // An unknown test name must keep the actionable diagnostic intact.
    let err = cli::show_thresholds("definitely_not_a_registered_test_xyz").unwrap_err();
    let rendered = format!("{err:#}");
    assert!(
        rendered.contains("no registered ktstr test named"),
        "error path must preserve the actionable diagnostic: {}",
        rendered,
    );
}
#[test]
fn parse_shell_cpu_cap_with_no_perf_mode_succeeds() {
    // `--cpu-cap` is only legal together with `--no-perf-mode`.
    let argv = ["cargo", "ktstr", "shell", "--cpu-cap", "4", "--no-perf-mode"];
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(argv).unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Shell {
            cpu_cap,
            no_perf_mode,
            ..
        } => {
            assert_eq!(cpu_cap, Some(4));
            assert!(no_perf_mode, "--no-perf-mode must be set");
        }
        _ => panic!("expected Shell"),
    }
}
#[test]
fn parse_shell_cpu_cap_without_no_perf_mode_fails() {
    // The clap error must mention the missing flag so the fix is obvious.
    let msg = match Cargo::try_parse_from(["cargo", "ktstr", "shell", "--cpu-cap", "4"]) {
        Err(e) => e.to_string(),
        Ok(_) => panic!("--cpu-cap without --no-perf-mode must fail the parse"),
    };
    let lowered = msg.to_ascii_lowercase();
    assert!(
        lowered.contains("no-perf-mode") || lowered.contains("no_perf_mode"),
        "clap error must name the missing --no-perf-mode flag, got: {msg}",
    );
}
#[test]
fn parse_shell_no_perf_mode_without_cpu_cap_succeeds() {
    // `--no-perf-mode` alone is fine; cpu_cap stays None.
    let Cargo {
        command: CargoSub::Ktstr(ktstr),
    } = Cargo::try_parse_from(["cargo", "ktstr", "shell", "--no-perf-mode"])
        .unwrap_or_else(|e| panic!("{e}"));
    match ktstr.command {
        KtstrCommand::Shell {
            cpu_cap,
            no_perf_mode,
            ..
        } => {
            assert_eq!(cpu_cap, None, "no --cpu-cap must produce None");
            assert!(no_perf_mode);
        }
        _ => panic!("expected Shell"),
    }
}
#[test]
fn cache_key_to_version_label_tarball() {
assert_eq!(
cache_key_to_version_label("6.14.2-tarball-x86_64-kcabc1234"),
"6.14.2",
);
}
#[test]
fn cache_key_to_version_label_rc_tarball() {
assert_eq!(
cache_key_to_version_label("6.15-rc3-tarball-x86_64-kcabc"),
"6.15-rc3",
);
}
#[test]
fn cache_key_to_version_label_git() {
assert_eq!(
cache_key_to_version_label("for-next-git-deadbee-x86_64-kcabc"),
"for-next",
);
}
#[test]
fn cache_key_to_version_label_local_emits_hash6_disambiguator() {
assert_eq!(
cache_key_to_version_label("local-deadbee-x86_64-kcabc"),
"local_deadbe",
"must emit `local_{{first 6 chars of discriminator}}` so \
distinct local trees do not collide on label",
);
}
#[test]
fn cache_key_to_version_label_local_distinct_hashes_render_distinct_labels() {
let a = cache_key_to_version_label("local-aaaaaa1-x86_64-kcabc");
let b = cache_key_to_version_label("local-bbbbbb2-x86_64-kcabc");
assert_ne!(
a, b,
"distinct local discriminators must render distinct labels"
);
assert_eq!(a, "local_aaaaaa");
assert_eq!(b, "local_bbbbbb");
}
#[test]
fn cache_key_to_version_label_local_unknown_renders_local_unknown() {
assert_eq!(
cache_key_to_version_label("local-unknown-x86_64-kcabc"),
"local_unknown",
);
}
#[test]
fn cache_key_to_version_label_local_bare_yields_bare_local() {
assert_eq!(cache_key_to_version_label("local"), "local");
}
#[test]
fn cache_key_to_version_label_unknown_tag_falls_through() {
assert_eq!(
cache_key_to_version_label("6.14.2-novel-tag-kcabc"),
"6.14.2-novel-tag-kcabc",
);
}
// The git_kernel_label tests pin the `git_<owner>_<repo>_<ref>` label shape
// for the URL forms users actually pass.
// Plain https GitHub URL.
#[test]
fn git_kernel_label_github_https() {
assert_eq!(
git_kernel_label("https://github.com/tj/sched_ext", "for-next"),
"git_tj_sched_ext_for-next",
);
}
// A trailing `.git` suffix is stripped from the repo segment.
#[test]
fn git_kernel_label_github_https_with_dot_git() {
assert_eq!(
git_kernel_label("https://github.com/tj/sched_ext.git", "for-next"),
"git_tj_sched_ext_for-next",
);
}
// Non-GitHub hosts work the same; tag refs pass through with their dots.
#[test]
fn git_kernel_label_gitlab_with_ref_tag() {
assert_eq!(
git_kernel_label("https://gitlab.com/foo/bar.git", "v6.14"),
"git_foo_bar_v6.14",
);
}
// file:// mirrors: the last two path segments become owner/repo.
#[test]
fn git_kernel_label_local_mirror_two_segment_path() {
assert_eq!(
git_kernel_label("file:///srv/linux.git", "v6.14"),
"git_srv_linux_v6.14",
);
}
// A single-segment path yields a label with no owner component.
#[test]
fn git_kernel_label_truly_single_segment_path() {
assert_eq!(
git_kernel_label("file://linux.git", "v6.14"),
"git_linux_v6.14",
);
}
// ssh:// URLs with a user@ host part resolve the same owner/repo.
#[test]
fn git_kernel_label_ssh_style_url() {
assert_eq!(
git_kernel_label("ssh://git@github.com/tj/sched_ext", "main"),
"git_tj_sched_ext_main",
);
}
#[test]
fn path_kernel_label_includes_basename_and_hash() {
    // Label shape: `path_<basename>_<6-hex-char hash>`.
    let tree = std::path::Path::new("/tmp/somewhere/linux");
    let label = path_kernel_label(tree);
    assert!(
        label.starts_with("path_linux_"),
        "label must start with `path_<basename>_`, got: {label}"
    );
    let suffix = label.strip_prefix("path_linux_").unwrap();
    assert_eq!(suffix.len(), 6, "hash suffix must be 6 chars: {label}");
    assert!(
        suffix.chars().all(|c| c.is_ascii_hexdigit()),
        "hash suffix must be hex: {label}"
    );
}
#[test]
fn path_kernel_label_distinguishes_paths_sharing_basename() {
    // The hash must key on the full path, not just the basename.
    let first = std::path::Path::new("/srv/a/linux");
    let second = std::path::Path::new("/srv/b/linux");
    assert_ne!(
        path_kernel_label(first),
        path_kernel_label(second),
        "distinct path parents must produce distinct labels",
    );
}
#[test]
fn decorate_path_label_for_dirty_clean_tree_passthrough() {
    // dirty=false is the identity transform.
    let base = "path_linux_a3b1c2";
    assert_eq!(
        decorate_path_label_for_dirty(base, false),
        base,
        "clean trees must not append a `_dirty` suffix",
    );
}
#[test]
fn decorate_path_label_for_dirty_dirty_tree_appends_suffix() {
    // dirty=true appends the literal `_dirty` marker.
    let base = "path_linux_a3b1c2";
    assert_eq!(
        decorate_path_label_for_dirty(base, true),
        "path_linux_a3b1c2_dirty",
        "dirty trees must append `_dirty` to the base label",
    );
}
#[test]
fn decorate_path_label_for_dirty_survives_sanitize() {
    // sanitize_kernel_label must keep the `_dirty` marker verbatim.
    let decorated = decorate_path_label_for_dirty("path_linux_a3b1c2", true);
    let sanitized = ktstr::test_support::sanitize_kernel_label(&decorated);
    assert_eq!(
        sanitized, "kernel_path_linux_a3b1c2_dirty",
        "`_dirty` must survive sanitize verbatim so the test report \
         distinguishes dirty runs from clean runs in the nextest suffix",
    );
}
#[test]
fn decorate_path_label_for_dirty_clean_dirty_sanitize_to_distinct_ids() {
    // Clean and dirty labels must not collapse after sanitizing.
    let base = "path_linux_a3b1c2";
    let clean =
        ktstr::test_support::sanitize_kernel_label(&decorate_path_label_for_dirty(base, false));
    let dirty =
        ktstr::test_support::sanitize_kernel_label(&decorate_path_label_for_dirty(base, true));
    assert_ne!(
        clean, dirty,
        "clean ({clean:?}) and dirty ({dirty:?}) sanitized labels must \
         produce distinct nextest identifiers so test reports do not \
         collapse non-reproducible runs into the cache-stored row",
    );
}
#[test]
fn format_built_age_unparseable_returns_empty_string() {
    // Unparseable inputs (note the last one carries no timezone) render "".
    for bad in ["not-a-timestamp", "", "2026-01-02T03:04:05"] {
        assert_eq!(format_built_age(bad), "");
    }
}
#[test]
fn format_built_age_future_timestamp_returns_empty_string() {
    // A timestamp in the future has no sensible age; render nothing.
    assert_eq!(format_built_age("9999-12-31T23:59:59Z"), "");
}
#[test]
fn format_built_age_past_timestamp_includes_leading_comma_and_seconds() {
    // Build a timestamp one hour in the past and check the splice-ready
    // `, built … ago` envelope.
    let secs_now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let past = std::time::UNIX_EPOCH
        + std::time::Duration::from_secs(secs_now.saturating_sub(3600));
    let stamp = humantime::format_rfc3339(past).to_string();
    let age = format_built_age(&stamp);
    assert!(
        age.starts_with(", built "),
        "age suffix must start with the splice prefix `, built `, got {age:?}",
    );
    assert!(
        age.ends_with(" ago"),
        "age suffix must end with the relative-past keyword ` ago`, got {age:?}",
    );
}
#[test]
fn resolve_path_kernel_nonexistent_returns_actionable_error() {
    // The error must echo the raw user input and end with the kernel hint.
    let raw = "/this/path/should/not/exist/under/test";
    let err = resolve_path_kernel(std::path::Path::new(raw), raw)
        .expect_err("nonexistent path must surface as Err");
    assert!(
        err.contains(&format!("--kernel {raw}")),
        "error must lead with `--kernel {{raw_input}}:` so a typo \
         names the exact string the user passed. got: {err}",
    );
    assert!(
        err.contains(ktstr::KTSTR_KERNEL_HINT),
        "error must end with KTSTR_KERNEL_HINT so the user sees \
         the supported `--kernel` shapes. got: {err}",
    );
}
#[test]
fn resolve_path_kernel_empty_tempdir_returns_not_a_source_tree_error() {
    // An existing directory that is not a kernel tree must get the
    // `not a kernel source tree` diagnostic plus the hint.
    let tmp = tempfile::TempDir::new().expect("tempdir");
    let raw = tmp.path().display().to_string();
    let err = resolve_path_kernel(tmp.path(), &raw)
        .expect_err("empty tempdir must surface as Err");
    assert!(
        err.contains(&format!("--kernel {raw}")),
        "error must lead with `--kernel {{raw_input}}:`. got: {err}",
    );
    assert!(
        err.contains("not a kernel source tree"),
        "error must include the `not a kernel source tree` phrase \
         from `acquire_local_source_tree`'s diagnostic. got: {err}",
    );
    assert!(
        err.contains(ktstr::KTSTR_KERNEL_HINT),
        "error must end with KTSTR_KERNEL_HINT. got: {err}",
    );
}
// --- encode_kernel_list wire format ---------------------------------
// The encoder serializes (label, path) pairs as `label=path` entries
// joined by `;`. The tests below pin the happy-path shape and the
// rejection of entries containing either separator character.

// An empty resolved list must encode to the empty string — no stray
// separator.
#[test]
fn encode_kernel_list_empty_input_returns_empty_string() {
    let encoded = encode_kernel_list(&[]).expect("empty input must succeed");
    assert!(
        encoded.is_empty(),
        "empty resolved list must encode to empty string, got {encoded:?}",
    );
}

// One entry encodes as bare `label=path` with no trailing separator.
#[test]
fn encode_kernel_list_single_entry_has_no_separator() {
    let resolved = vec![("6.14.2".to_string(), PathBuf::from("/cache/foo"))];
    let encoded = encode_kernel_list(&resolved).expect("single entry must succeed");
    assert_eq!(
        encoded, "6.14.2=/cache/foo",
        "single-entry encoding must be `label=path` with no trailing separator",
    );
}

// Two entries are joined with a single `;`.
#[test]
fn encode_kernel_list_two_entries_uses_semicolon_separator() {
    let resolved = vec![
        ("6.14.2".to_string(), PathBuf::from("/cache/a")),
        ("6.15.0".to_string(), PathBuf::from("/cache/b")),
    ];
    let encoded = encode_kernel_list(&resolved).expect("two entries must succeed");
    assert_eq!(
        encoded, "6.14.2=/cache/a;6.15.0=/cache/b",
        "two-entry encoding must be `label=path;label=path`",
    );
}

// Input order is preserved verbatim — the encoder must not sort.
#[test]
fn encode_kernel_list_three_entries_preserves_order() {
    let resolved = vec![
        ("z-late".to_string(), PathBuf::from("/cache/z")),
        ("a-early".to_string(), PathBuf::from("/cache/a")),
        ("m-mid".to_string(), PathBuf::from("/cache/m")),
    ];
    let encoded = encode_kernel_list(&resolved).expect("three entries must succeed");
    assert_eq!(
        encoded, "z-late=/cache/z;a-early=/cache/a;m-mid=/cache/m",
        "encoder must preserve input order; sorting would change test-name suffix order",
    );
}

// A `;` inside a path would corrupt the wire format, so the encoder
// must reject it; the diagnostic names the separator, the label, and
// the offending path.
#[test]
fn encode_kernel_list_rejects_semicolon_in_path() {
    let resolved = vec![("6.14.2".to_string(), PathBuf::from("/cache/has;semicolon"))];
    let err = encode_kernel_list(&resolved)
        .expect_err("path containing `;` must be rejected by encoder");
    assert!(
        err.contains("`;`"),
        "error must reference the offending separator: {err}",
    );
    assert!(
        err.contains("6.14.2"),
        "error must name the offending label so the operator can locate the entry: {err}",
    );
    assert!(
        err.contains("/cache/has;semicolon"),
        "error must include the offending path: {err}",
    );
}

// A `;` inside a label is likewise rejected, and the diagnostic must
// classify the violation as a *label* problem (not a path problem).
#[test]
fn encode_kernel_list_rejects_semicolon_in_label() {
    let resolved = vec![("evil;label".to_string(), PathBuf::from("/cache/clean"))];
    let err = encode_kernel_list(&resolved)
        .expect_err("label containing `;` must be rejected by encoder");
    assert!(
        err.contains("`;`"),
        "error must reference the offending separator: {err}",
    );
    assert!(
        err.contains("evil;label"),
        "error must name the offending label so the operator \
         can locate the producer that emitted it: {err}",
    );
    assert!(
        err.contains("kernel label"),
        "error must classify the violation as a label problem (not \
         a path problem) so an operator reading the diagnostic \
         knows which side of the wire format is at fault: {err}",
    );
}

// `=` is the other reserved separator; a label containing it is
// rejected with the same label-problem classification.
#[test]
fn encode_kernel_list_rejects_equals_in_label() {
    let resolved = vec![("evil=label".to_string(), PathBuf::from("/cache/clean"))];
    let err = encode_kernel_list(&resolved)
        .expect_err("label containing `=` must be rejected by encoder");
    assert!(
        err.contains("`=`"),
        "error must reference the offending separator: {err}",
    );
    assert!(
        err.contains("evil=label"),
        "error must name the offending label so the operator \
         can locate the producer that emitted it: {err}",
    );
    assert!(
        err.contains("kernel label"),
        "error must classify the violation as a label problem: {err}",
    );
}

// A violation in the *first* entry aborts the encode even when later
// entries are clean.
#[test]
fn encode_kernel_list_first_entry_with_semicolon_rejected_before_emit() {
    let resolved = vec![
        ("first".to_string(), PathBuf::from("/cache/has;semicolon")),
        ("second".to_string(), PathBuf::from("/cache/clean")),
    ];
    let err = encode_kernel_list(&resolved)
        .expect_err("path containing `;` must be rejected even when other entries are clean");
    assert!(err.contains("first"));
}

// A violation in a *later* entry also aborts — the scan covers every
// entry, not just the head.
#[test]
fn encode_kernel_list_later_entry_with_semicolon_still_rejected() {
    let resolved = vec![
        ("first".to_string(), PathBuf::from("/cache/clean")),
        ("second".to_string(), PathBuf::from("/cache/has;semicolon")),
    ];
    let err = encode_kernel_list(&resolved)
        .expect_err("`;` anywhere in any path must abort the encode");
    assert!(err.contains("second"));
}
// --- detect_label_collisions -----------------------------------------
// Labels collide when they sanitize to the same identifier (e.g.
// `kernel_6_14_2`); the tests pin both the collision detection and the
// diagnostic's content.

// No entries → trivially no collisions.
#[test]
fn detect_label_collisions_empty_input_succeeds() {
    let resolved: Vec<(String, PathBuf)> = Vec::new();
    detect_label_collisions(&resolved).expect("empty input must succeed");
}

// Labels that sanitize to distinct identifiers must pass.
#[test]
fn detect_label_collisions_unique_labels_succeed() {
    let resolved = vec![
        ("6.14.2".to_string(), PathBuf::from("/cache/a")),
        ("6.15.0".to_string(), PathBuf::from("/cache/b")),
    ];
    detect_label_collisions(&resolved).expect("distinct sanitized identifiers must succeed");
}

// `.` and `-` sanitize to the same character, so `6.14.2` and `6-14-2`
// collide; the error must name both labels, the shared sanitized
// identifier, and the remediation hint.
#[test]
fn detect_label_collisions_period_vs_dash_collides() {
    let resolved = vec![
        ("6.14.2".to_string(), PathBuf::from("/cache/a")),
        ("6-14-2".to_string(), PathBuf::from("/cache/b")),
    ];
    let err = detect_label_collisions(&resolved)
        .expect_err("colliding sanitized identifiers must surface an error");
    assert!(
        err.contains("6.14.2"),
        "error must name first colliding label: {err}",
    );
    assert!(
        err.contains("6-14-2"),
        "error must name second colliding label: {err}",
    );
    assert!(
        err.contains("kernel_6_14_2"),
        "error must include the shared sanitized identifier: {err}",
    );
    assert!(
        err.contains("Spell each --kernel value distinctly"),
        "error must include the actionable remediation hint: {err}",
    );
}

// Sanitization is case-folding: `ABC` and `abc` both become
// `kernel_abc` and must collide.
#[test]
fn detect_label_collisions_uppercase_vs_lowercase_collides() {
    let resolved = vec![
        ("ABC".to_string(), PathBuf::from("/cache/a")),
        ("abc".to_string(), PathBuf::from("/cache/b")),
    ];
    let err = detect_label_collisions(&resolved)
        .expect_err("uppercase vs lowercase labels must collide post-sanitize");
    assert!(err.contains("kernel_abc"));
}

// Byte-identical labels (with different paths) are also a collision.
#[test]
fn detect_label_collisions_identical_labels_collide() {
    let resolved = vec![
        ("6.14.2".to_string(), PathBuf::from("/cache/a")),
        ("6.14.2".to_string(), PathBuf::from("/cache/b")),
    ];
    let err = detect_label_collisions(&resolved)
        .expect_err("two identical labels must surface as a collision");
    assert!(err.contains("6.14.2"));
    assert!(err.contains("kernel_6_14_2"));
}

// Only the colliding pair appears in the diagnostic; an unrelated
// third label must be left out.
#[test]
fn detect_label_collisions_three_entries_two_collide_one_unique() {
    let resolved = vec![
        ("6.14.2".to_string(), PathBuf::from("/cache/a")),
        ("6-14-2".to_string(), PathBuf::from("/cache/b")),
        ("7.0.0".to_string(), PathBuf::from("/cache/c")),
    ];
    let err = detect_label_collisions(&resolved)
        .expect_err("collision in the first two entries must surface");
    assert!(err.contains("6.14.2"));
    assert!(err.contains("6-14-2"));
    assert!(
        !err.contains("7.0.0"),
        "non-conflicting label should not appear in the collision diagnostic: {err}",
    );
}

// A collision is detected even when the conflicting entries are not
// adjacent: the third entry collides with the first.
#[test]
fn detect_label_collisions_first_two_unique_third_collides_with_first() {
    let resolved = vec![
        ("6.14.2".to_string(), PathBuf::from("/cache/a")),
        ("7.0.0".to_string(), PathBuf::from("/cache/b")),
        ("6-14-2".to_string(), PathBuf::from("/cache/c")),
    ];
    let err = detect_label_collisions(&resolved)
        .expect_err("late-arriving collision against an earlier entry must surface");
    assert!(err.contains("6.14.2"), "earlier (prior) label must appear");
    assert!(err.contains("6-14-2"), "later label must appear");
    assert!(err.contains("kernel_6_14_2"));
}
// KERNEL_LIST_LONG_ABOUT is the `kernel list --help` long text; it
// doubles as the only discoverable schema documentation for scripted
// consumers, so it must mention every range-mode JSON key, the
// `--range` flag itself, and the fact that --range changes the output
// shape.
#[test]
fn kernel_list_long_about_exposes_range_mode_json_keys() {
    let about = ktstr::cli::KERNEL_LIST_LONG_ABOUT;
    // Every range-mode JSON field must be named in the help text.
    for range_field in ["range", "start", "end", "versions"] {
        assert!(
            about.contains(range_field),
            "KERNEL_LIST_LONG_ABOUT must mention range-mode JSON \
             field `{range_field}` so scripted consumers discover \
             the schema without `cargo doc`; got: {about:?}",
        );
    }
    assert!(
        about.contains("--range"),
        "KERNEL_LIST_LONG_ABOUT must reference the `--range` flag \
         so a `kernel list --help` reader sees the range-mode \
         entry point: got: {about:?}",
    );
    // Either phrasing is acceptable, as long as the shape switch is
    // called out.
    assert!(
        about.contains("range-preview") || about.contains("range mode"),
        "KERNEL_LIST_LONG_ABOUT must explain that --range switches \
         to a structurally-different output shape so scripted \
         consumers know to dispatch on the presence of the \
         `range` key: got: {about:?}",
    );
}
// --- preflight_collision_check ---------------------------------------
// The pre-flight pass checks raw `--kernel` specs for sanitized-label
// collisions *before* any expensive resolution work. Specs whose label
// is only known post-resolve (paths, ranges, blanks) are skipped here.

// No specs → trivially passes.
#[test]
fn preflight_collision_check_empty_input_succeeds() {
    preflight_collision_check(&[]).expect("empty input must succeed");
}

// Distinct version specs sanitize to distinct identifiers and pass.
#[test]
fn preflight_collision_check_unique_versions_succeed() {
    let specs = vec!["6.14.2".to_string(), "6.15.0".to_string()];
    preflight_collision_check(&specs)
        .expect("distinct sanitized identifiers must succeed at pre-flight");
}

// `.` vs `-` collide post-sanitize; the pre-flight diagnostic must name
// both labels, the shared identifier, and its own distinctive
// `pre-flight check found collision` phrasing (so it is not confused
// with the post-resolve diagnostic).
#[test]
fn preflight_collision_check_period_vs_dash_collides() {
    let specs = vec!["6.14.2".to_string(), "6-14-2".to_string()];
    let err = preflight_collision_check(&specs)
        .expect_err("colliding labels must surface a pre-flight error");
    assert!(err.contains("6.14.2"), "error must name first label: {err}");
    assert!(
        err.contains("6-14-2"),
        "error must name second label: {err}"
    );
    assert!(
        err.contains("kernel_6_14_2"),
        "error must include the shared sanitized identifier: {err}",
    );
    assert!(
        err.contains("pre-flight check found collision"),
        "error must be the pre-flight diagnostic, not the post-resolve one: {err}",
    );
}

// Byte-identical specs are NOT a pre-flight error — exact duplicates
// are collapsed later by dedupe_resolved after resolution.
#[test]
fn preflight_collision_check_identical_versions_succeed() {
    let specs = vec!["6.14.2".to_string(), "6.14.2".to_string()];
    preflight_collision_check(&specs)
        .expect("identical specs must NOT bail at pre-flight (handled by dedupe post-resolve)");
}

// Path specs and range specs get their labels during resolution, so
// pre-flight must skip them entirely.
#[test]
fn preflight_collision_check_skips_path_and_range_specs() {
    let specs = vec![
        "/tmp/kernel-a".to_string(),
        "/tmp/kernel-b".to_string(),
        "6.14.2..6.14.4".to_string(),
    ];
    preflight_collision_check(&specs).expect(
        "Path and Range specs must skip pre-flight — their labels are deferred to post-resolve",
    );
}

// Blank and whitespace-only specs are silently skipped rather than
// treated as colliding empty labels.
#[test]
fn preflight_collision_check_skips_empty_and_whitespace_specs() {
    let specs = vec!["".to_string(), " ".to_string(), "6.14.2".to_string()];
    preflight_collision_check(&specs)
        .expect("blank / whitespace-only specs must be silently skipped");
}

// An inverted range (start > end) must fail pre-flight validation with
// a diagnostic that either names the inversion or frames it as a
// `--kernel` problem.
#[test]
fn preflight_collision_check_inverted_range_fails_validation() {
    let specs = vec!["6.15..6.14".to_string()];
    let err = preflight_collision_check(&specs)
        .expect_err("inverted range must fail pre-flight validation");
    assert!(
        err.contains("inverted kernel range") || err.contains("--kernel"),
        "error must surface the inversion diagnostic with --kernel framing: {err}",
    );
}

// Git URL specs participate in pre-flight: two refs whose labels
// sanitize identically (`v6.14` vs `v6-14`) must collide.
#[test]
fn preflight_collision_check_git_url_collision() {
    let specs = vec![
        "git+ssh://host/foo/bar#v6.14".to_string(),
        "git+ssh://host/foo/bar#v6-14".to_string(),
    ];
    let err = preflight_collision_check(&specs)
        .expect_err("colliding git refs must surface a pre-flight error");
    assert!(err.contains("git_foo_bar_v6.14") || err.contains("git_foo_bar_v6-14"));
    assert!(err.contains("kernel_git_foo_bar_v6_14"));
}
// --- dedupe_resolved --------------------------------------------------
// Dedupe collapses *exactly identical* (label, path) tuples while
// preserving first-occurrence order; same-label/different-path pairs
// survive so detect_label_collisions can flag them downstream.

// Empty in → empty out.
#[test]
fn dedupe_resolved_empty_input_returns_empty() {
    let resolved: Vec<(String, PathBuf)> = Vec::new();
    let deduped = dedupe_resolved(resolved);
    assert!(deduped.is_empty());
}

// Fully unique input passes through unchanged.
#[test]
fn dedupe_resolved_unique_inputs_pass_through() {
    let resolved = vec![
        ("a".to_string(), PathBuf::from("/cache/a")),
        ("b".to_string(), PathBuf::from("/cache/b")),
        ("c".to_string(), PathBuf::from("/cache/c")),
    ];
    let deduped = dedupe_resolved(resolved.clone());
    assert_eq!(deduped, resolved);
}

// Two byte-identical tuples collapse into one, keeping the original
// label and path.
#[test]
fn dedupe_resolved_two_identical_tuples_collapse_to_one() {
    let resolved = vec![
        ("6.14.2".to_string(), PathBuf::from("/cache/v")),
        ("6.14.2".to_string(), PathBuf::from("/cache/v")),
    ];
    let deduped = dedupe_resolved(resolved);
    assert_eq!(
        deduped.len(),
        1,
        "identical tuples must collapse to one entry"
    );
    assert_eq!(deduped[0].0, "6.14.2");
    assert_eq!(deduped[0].1, PathBuf::from("/cache/v"));
}

// Same label + different paths is a genuine conflict, NOT a duplicate:
// both entries must survive so the collision check can report it.
#[test]
fn dedupe_resolved_same_label_different_paths_both_survive() {
    let resolved = vec![
        ("6.14.2".to_string(), PathBuf::from("/cache/a")),
        ("6.14.2".to_string(), PathBuf::from("/cache/b")),
    ];
    let deduped = dedupe_resolved(resolved);
    assert_eq!(
        deduped.len(),
        2,
        "same label + different paths must NOT dedupe — \
         this is a real cache-key collision that detect_label_collisions \
         must still catch downstream",
    );
}

// First occurrence wins; later duplicates are dropped without
// disturbing the order of the survivors.
#[test]
fn dedupe_resolved_preserves_input_order() {
    let resolved = vec![
        ("a".to_string(), PathBuf::from("/cache/a")),
        ("b".to_string(), PathBuf::from("/cache/b")),
        ("a".to_string(), PathBuf::from("/cache/a")),
        ("c".to_string(), PathBuf::from("/cache/c")),
    ];
    let deduped = dedupe_resolved(resolved);
    assert_eq!(
        deduped,
        vec![
            ("a".to_string(), PathBuf::from("/cache/a")),
            ("b".to_string(), PathBuf::from("/cache/b")),
            ("c".to_string(), PathBuf::from("/cache/c")),
        ],
    );
}

// Collapsing is not pairwise: any number of identical tuples reduces
// to a single entry.
#[test]
fn dedupe_resolved_three_identical_tuples_collapse_to_one() {
    let resolved = vec![
        ("v".to_string(), PathBuf::from("/cache/v")),
        ("v".to_string(), PathBuf::from("/cache/v")),
        ("v".to_string(), PathBuf::from("/cache/v")),
    ];
    let deduped = dedupe_resolved(resolved);
    assert_eq!(deduped.len(), 1);
}
/// Create a fresh git repository at `dir` containing a linear chain of
/// `n` commits and return their object ids, oldest first.
///
/// Each commit `i` carries a single-entry tree whose only file,
/// `file.txt`, contains `"v{i}\n"`; commit `i`'s sole parent is commit
/// `i-1` (the first commit has no parent). Committer and author fall
/// back to a synthetic "ktstr-test" identity so commits succeed on
/// machines with no global git config.
fn init_repo_with_chain(dir: &std::path::Path, n: usize) -> Vec<gix::ObjectId> {
    let mut repo = gix::init(dir).expect("gix::init");
    // Install a generic committer fallback; the return value (whether a
    // fallback was needed) is irrelevant here.
    let _ = repo
        .committer_or_set_generic_fallback()
        .expect("committer fallback");
    {
        // Author identity has no generic-fallback helper, so set the
        // gitoxide author fallbacks explicitly via an in-memory config
        // snippet appended to the repo's mutable config snapshot.
        use gix::config::tree::gitoxide;
        let mut cfg = gix::config::File::new(gix::config::file::Metadata::api());
        cfg.set_raw_value(&gitoxide::Author::NAME_FALLBACK, "ktstr-test")
            .expect("set author name fallback");
        cfg.set_raw_value(
            &gitoxide::Author::EMAIL_FALLBACK,
            "ktstr-test@example.invalid",
        )
        .expect("set author email fallback");
        let mut snap = repo.config_snapshot_mut();
        snap.append(cfg);
    }
    let mut chain: Vec<gix::ObjectId> = Vec::with_capacity(n);
    for i in 0..n {
        // Blob -> tree -> commit, all through the object database
        // directly (no worktree/index involvement).
        let blob_id: gix::ObjectId = repo
            .write_blob(format!("v{i}\n").as_bytes())
            .expect("write blob")
            .detach();
        let tree = gix::objs::Tree {
            entries: vec![gix::objs::tree::Entry {
                mode: gix::objs::tree::EntryKind::Blob.into(),
                filename: "file.txt".into(),
                oid: blob_id,
            }],
        };
        let tree_id: gix::ObjectId = repo.write_object(&tree).expect("write tree").detach();
        // Parent is the previous chain tip; empty for the root commit.
        let parents: Vec<gix::ObjectId> = chain.last().copied().into_iter().collect();
        let commit_id: gix::ObjectId = repo
            .commit("HEAD", format!("c{i}"), tree_id, parents)
            .expect("commit")
            .detach();
        chain.push(commit_id);
    }
    chain
}
// --- resolve_commit_specs (no repo / revspec resolution) --------------

// With no repository available, every input spec passes through
// verbatim — nothing can be resolved.
#[test]
fn resolve_commit_specs_no_repo_passes_through_literal() {
    let raw = vec![
        "abc1234".to_string(),
        "main".to_string(),
        "HEAD".to_string(),
    ];
    let out = resolve_commit_specs(None, &raw, "test");
    assert_eq!(out, raw, "no repo → every input lands as-is");
}

// `HEAD` resolves to the tip commit's 7-character short hex.
#[test]
fn resolve_commit_specs_head_resolves_to_short_hash() {
    let tmp = tempfile::TempDir::new().unwrap();
    let chain = init_repo_with_chain(tmp.path(), 3);
    let head = *chain.last().unwrap();
    let repo = gix::open(tmp.path()).expect("gix::open");
    let raw = vec!["HEAD".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec![head.to_hex_with_len(7).to_string()],
        "HEAD must resolve to the tip commit's 7-char short hex",
    );
}

// Ancestor navigation works: `HEAD~1` resolves to the parent commit.
#[test]
fn resolve_commit_specs_head_tilde_resolves_to_parent() {
    let tmp = tempfile::TempDir::new().unwrap();
    let chain = init_repo_with_chain(tmp.path(), 3);
    let head_tilde_1 = chain[1];
    let repo = gix::open(tmp.path()).expect("gix::open");
    let raw = vec!["HEAD~1".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec![head_tilde_1.to_hex_with_len(7).to_string()],
        "HEAD~1 must resolve to the parent commit's 7-char short hex",
    );
}

// A `a..b` range expands with git semantics: the `..` left side (c0) is
// excluded, everything reachable from HEAD but not c0 (c1, c2) is
// included.
#[test]
fn resolve_commit_specs_range_expands_inclusive_of_to() {
    let tmp = tempfile::TempDir::new().unwrap();
    let chain = init_repo_with_chain(tmp.path(), 3);
    let c0 = chain[0];
    let c1 = chain[1];
    let c2 = chain[2];
    let repo = gix::open(tmp.path()).expect("gix::open");
    let raw = vec![format!("{}..HEAD", c0.to_hex_with_len(40))];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert!(
        out.contains(&c1.to_hex_with_len(7).to_string()),
        "range result must include c1 (the parent of HEAD); got {out:?}",
    );
    assert!(
        out.contains(&c2.to_hex_with_len(7).to_string()),
        "range result must include c2 (HEAD); got {out:?}",
    );
    assert!(
        !out.contains(&c0.to_hex_with_len(7).to_string()),
        "range result must NOT include c0 (the hidden side); got {out:?}",
    );
    assert_eq!(
        out.len(),
        2,
        "range c0..HEAD over 3-commit chain must yield exactly 2 commits; got {out:?}",
    );
}

// Non-hex garbage that resolves to nothing passes through as a
// literal rather than erroring.
#[test]
fn resolve_commit_specs_unknown_hash_falls_through_to_literal() {
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_chain(tmp.path(), 1);
    let repo = gix::open(tmp.path()).expect("gix::open");
    let raw = vec!["zzzzzzz".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec!["zzzzzzz".to_string()],
        "non-hex input must pass through as literal",
    );
}

// An already-resolved `<hash>-dirty` spec is left untouched.
#[test]
fn resolve_commit_specs_dirty_suffix_falls_through_to_literal() {
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_chain(tmp.path(), 1);
    let repo = gix::open(tmp.path()).expect("gix::open");
    let raw = vec!["abc1234-dirty".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec!["abc1234-dirty".to_string()],
        "-dirty-suffixed input must pass through as literal",
    );
}

// No specs in → no specs out, even with a repo present.
#[test]
fn resolve_commit_specs_empty_input_yields_empty_output() {
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_chain(tmp.path(), 1);
    let repo = gix::open(tmp.path()).expect("gix::open");
    let out = resolve_commit_specs(Some(&repo), &[], "test");
    assert!(out.is_empty(), "empty input must yield empty output");
}

// Resolution is per-entry and order-preserving: a resolvable spec and a
// literal pass-through can coexist in one call.
#[test]
fn resolve_commit_specs_mixed_inputs_resolve_per_entry() {
    let tmp = tempfile::TempDir::new().unwrap();
    let chain = init_repo_with_chain(tmp.path(), 1);
    let head = chain[0];
    let repo = gix::open(tmp.path()).expect("gix::open");
    let raw = vec!["HEAD".to_string(), "abc1234-dirty".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec![
            head.to_hex_with_len(7).to_string(),
            "abc1234-dirty".to_string(),
        ],
        "HEAD resolves; -dirty input lands literal; order preserved",
    );
}
// --- resolve_commit_specs (dirty detection / refs / ranges) -----------

// `HEAD` in a repo with a modified worktree must resolve to
// `<short>-dirty`: the dirty marker applies only to the current tip.
#[test]
fn resolve_commit_specs_head_in_dirty_repo_appends_dirty_suffix() {
    let tmp = tempfile::TempDir::new().unwrap();
    let chain = init_repo_with_chain(tmp.path(), 1);
    let head = chain[0];
    let repo = gix::open(tmp.path()).expect("gix::open");
    // Materialize an index from HEAD's tree so status comparison has a
    // baseline (init_repo_with_chain writes objects only, no index).
    let head_tree = repo.head_tree().expect("head_tree").id;
    let mut idx = repo.index_from_tree(&head_tree).expect("index_from_tree");
    idx.write(gix::index::write::Options::default())
        .expect("write index");
    // NOTE(review): the first write is immediately overwritten by the
    // second; the second alone makes the worktree dirty. Presumably a
    // vestige — confirm before removing.
    std::fs::write(tmp.path().join("file.txt"), b"original\n").unwrap();
    std::fs::write(tmp.path().join("file.txt"), b"modified\n").unwrap();
    let raw = vec!["HEAD".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    let expected_short = head.to_hex_with_len(7).to_string();
    assert_eq!(
        out,
        vec![format!("{expected_short}-dirty")],
        "HEAD in a dirty repo must resolve to <short>-dirty",
    );
}

// Historical commits never get the -dirty marker: worktree state only
// describes HEAD, not its ancestors.
#[test]
fn resolve_commit_specs_non_head_does_not_get_dirty_suffix_in_dirty_repo() {
    let tmp = tempfile::TempDir::new().unwrap();
    let chain = init_repo_with_chain(tmp.path(), 2);
    let parent = chain[0];
    let repo = gix::open(tmp.path()).expect("gix::open");
    let head_tree = repo.head_tree().expect("head_tree").id;
    let mut idx = repo.index_from_tree(&head_tree).expect("index_from_tree");
    idx.write(gix::index::write::Options::default())
        .expect("write index");
    // NOTE(review): same double-write pattern as above — the second
    // write alone dirties the worktree; confirm the first is needed.
    std::fs::write(tmp.path().join("file.txt"), b"v1\n").unwrap();
    std::fs::write(tmp.path().join("file.txt"), b"modified\n").unwrap();
    let raw = vec!["HEAD~1".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec![parent.to_hex_with_len(7).to_string()],
        "HEAD~1 (historical commit) must NOT get -dirty suffix \
         even when worktree is dirty",
    );
}

// The `<oid>^!` exclude-parents revspec resolves to the same single
// commit as plain `<oid>`.
#[test]
fn resolve_commit_specs_exclude_parents_resolves_like_include() {
    let tmp = tempfile::TempDir::new().unwrap();
    let chain = init_repo_with_chain(tmp.path(), 2);
    let head = chain[1];
    let repo = gix::open(tmp.path()).expect("gix::open");
    let raw = vec![format!("{}^!", head.to_hex_with_len(40))];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec![head.to_hex_with_len(7).to_string()],
        "<oid>^! must resolve to the same 7-char short hex as <oid>",
    );
}

// A branch name resolves to the commit it points at.
#[test]
fn resolve_commit_specs_branch_name_resolves_to_tip() {
    let tmp = tempfile::TempDir::new().unwrap();
    let chain = init_repo_with_chain(tmp.path(), 2);
    let parent = chain[0];
    let repo = gix::open(tmp.path()).expect("gix::open");
    repo.reference(
        "refs/heads/feature",
        parent,
        gix::refs::transaction::PreviousValue::MustNotExist,
        "create feature branch for test",
    )
    .expect("create branch");
    let raw = vec!["feature".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec![parent.to_hex_with_len(7).to_string()],
        "branch name must resolve to its tip commit",
    );
}

// A (lightweight) tag name resolves to its target commit.
#[test]
fn resolve_commit_specs_tag_name_resolves_to_target() {
    let tmp = tempfile::TempDir::new().unwrap();
    let chain = init_repo_with_chain(tmp.path(), 2);
    let parent = chain[0];
    let repo = gix::open(tmp.path()).expect("gix::open");
    repo.reference(
        "refs/tags/v0",
        parent,
        gix::refs::transaction::PreviousValue::MustNotExist,
        "create v0 tag for test",
    )
    .expect("create tag");
    let raw = vec!["v0".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec![parent.to_hex_with_len(7).to_string()],
        "tag name must resolve to its target commit",
    );
}

// A degenerate range (`HEAD..HEAD`) expands to zero commits, not to a
// literal pass-through.
#[test]
fn resolve_commit_specs_empty_range_yields_no_entries() {
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_chain(tmp.path(), 2);
    let repo = gix::open(tmp.path()).expect("gix::open");
    let raw = vec!["HEAD..HEAD".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert!(
        out.is_empty(),
        "HEAD..HEAD must expand to zero commits; got {out:?}",
    );
}

// A syntactically valid hex prefix that matches no object in the repo
// falls through as a literal — same treatment as non-hex garbage.
#[test]
fn resolve_commit_specs_valid_hex_nonexistent_prefix_falls_through_to_literal() {
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_chain(tmp.path(), 1);
    let repo = gix::open(tmp.path()).expect("gix::open");
    let raw = vec!["deadbee".to_string()];
    let out = resolve_commit_specs(Some(&repo), &raw, "test");
    assert_eq!(
        out,
        vec!["deadbee".to_string()],
        "valid-hex non-existent prefix must pass through as literal",
    );
}
// Canonical commit-hash shapes — 7-char and 40-char lowercase hex, each
// with and without the `-dirty` suffix — must all classify as literal.
#[test]
fn looks_like_literal_hash_accepts_canonical_shapes() {
    let canonical = [
        "abc1234",
        "abcdef0123456789abcdef0123456789abcdef01",
        "abc1234-dirty",
        "abcdef0123456789abcdef0123456789abcdef01-dirty",
    ];
    for candidate in canonical {
        assert!(looks_like_literal_hash(candidate));
    }
}

// Revspec-shaped inputs (symbolic refs, ancestor/range operators),
// off-length hex, the empty string, and `-dirty` on a non-hash base
// must all be rejected.
#[test]
fn looks_like_literal_hash_rejects_revspec_shapes() {
    let revspec_like = [
        "HEAD",
        "main",
        "HEAD~1",
        "HEAD~3..HEAD",
        "HEAD^",
        "abc123",
        "abcdef0123456789abcdef0123456789abcdef0123",
        "",
        "abc-dirty",
    ];
    for candidate in revspec_like {
        assert!(!looks_like_literal_hash(candidate));
    }
}

// The classifier is case-insensitive: uppercase and mixed-case hex of
// valid shape (with or without `-dirty`) are still literal hashes.
#[test]
fn looks_like_literal_hash_accepts_uppercase_and_mixed_case() {
    let cased = [
        "ABC1234",
        "AbC1234",
        "ABC1234-dirty",
        "ABCDEF0123456789ABCDEF0123456789ABCDEF01",
    ];
    for candidate in cased {
        assert!(looks_like_literal_hash(candidate));
    }
}
}