use std::path::PathBuf;
use anyhow::Context;
use crate::assert::{AssertResult, ScenarioStats};
use crate::monitor::MonitorSummary;
use crate::test_support::PayloadMetrics;
use crate::timeline::StimulusEvent;
use crate::vmm;
use super::entry::KtstrTestEntry;
use super::timefmt::{generate_run_id, now_iso8601};
/// One test run's result record, written as a `*.ktstr.json` "sidecar" file
/// under the run directory and read back by the aggregation side
/// (`collect_sidecars` / `collect_pool`).
///
/// The serde derives make the field names the on-disk JSON schema; renaming
/// or removing a field breaks parsing of previously written sidecars.
/// Pre-1.0 policy is disposable-sidecar: stale files are regenerated, not
/// migrated (see `enriched_parse_error_message`).
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct SidecarResult {
// Test identity and configuration under test.
pub test_name: String,
pub topology: String,
pub scheduler: String,
// Short git hashes; `commit_with_dirty_suffix` may append "-dirty".
pub scheduler_commit: Option<String>,
pub project_commit: Option<String>,
// Optional payload name plus per-payload metrics emitted by the payload.
pub payload: Option<String>,
pub metrics: Vec<PayloadMetrics>,
// Outcome flags: a skip sidecar is written with passed=true, skipped=true.
pub passed: bool,
pub skipped: bool,
// Aggregated scenario statistics and optional runtime monitor summary.
pub stats: ScenarioStats,
pub monitor: Option<MonitorSummary>,
// Timeline of stimulus operations applied during the run.
pub stimulus_events: Vec<StimulusEvent>,
pub work_type: String,
// Stored in canonical order (see `canonicalize_active_flags`).
pub active_flags: Vec<String>,
pub verifier_stats: Vec<crate::monitor::bpf_prog::ProgVerifierStats>,
pub kvm_stats: Option<crate::vmm::KvmStatsTotals>,
// Scheduler fingerprint inputs (also folded into the variant hash).
pub sysctls: Vec<String>,
pub kargs: Vec<String>,
pub kernel_version: Option<String>,
pub kernel_commit: Option<String>,
// Produced by `now_iso8601()` / `generate_run_id()` on the write side.
pub timestamp: String,
pub run_id: String,
pub host: Option<crate::host_context::HostContext>,
pub cleanup_duration_ms: Option<u64>,
// One of the SIDECAR_RUN_SOURCE_* constants ("ci" / "local" / "archive").
pub run_source: Option<String>,
}
#[cfg(test)]
impl SidecarResult {
    /// Canonical unit-test fixture: every optional field `None`, every
    /// collection empty, `passed = true`, `skipped = false`.
    ///
    /// NOTE: `sidecar_variant_hash` of this fixture is pinned by a test
    /// (`test_fixture_variant_hash_is_stable`); changing any default here
    /// changes that hash.
    pub(crate) fn test_fixture() -> SidecarResult {
        SidecarResult {
            // Minimal non-empty identity fields.
            test_name: String::from("t"),
            topology: String::from("1n1l1c1t"),
            scheduler: String::from("eevdf"),
            work_type: String::from("CpuSpin"),
            // All optionals absent.
            scheduler_commit: None,
            project_commit: None,
            payload: None,
            monitor: None,
            kvm_stats: None,
            kernel_version: None,
            kernel_commit: None,
            host: None,
            cleanup_duration_ms: None,
            run_source: None,
            // Default pass/skip outcome.
            passed: true,
            skipped: false,
            stats: crate::assert::ScenarioStats::default(),
            // All collections empty.
            metrics: Vec::new(),
            stimulus_events: Vec::new(),
            active_flags: Vec::new(),
            verifier_stats: Vec::new(),
            sysctls: Vec::new(),
            kargs: Vec::new(),
            timestamp: String::new(),
            run_id: String::new(),
        }
    }
}
/// True when `path` names a sidecar file: a `.json` extension AND a
/// `.ktstr.` marker somewhere in the file name (e.g. `foo-<hash>.ktstr.json`).
pub(crate) fn is_sidecar_filename(path: &std::path::Path) -> bool {
    let json_ext = matches!(path.extension().and_then(|e| e.to_str()), Some("json"));
    let ktstr_marker = path
        .file_name()
        .and_then(|n| n.to_str())
        .map(|n| n.contains(".ktstr."))
        .unwrap_or(false);
    json_ext && ktstr_marker
}
/// Convenience wrapper around `collect_sidecars_with_errors` that keeps only
/// the successfully parsed sidecars and drops the error reports.
pub(crate) fn collect_sidecars(dir: &std::path::Path) -> Vec<SidecarResult> {
    let (parsed, _parse_errors, _io_errors) = collect_sidecars_with_errors(dir);
    parsed
}
/// A sidecar file that was read successfully but failed JSON/schema parsing.
pub(crate) struct SidecarParseError {
pub path: std::path::PathBuf,
// The raw serde_json error text.
pub raw_error: String,
// A friendlier explanation, when the failure matches a known schema change
// (see `enriched_parse_error_message`); `None` otherwise.
pub enriched_message: Option<String>,
}
/// A sidecar file that could not be read from disk at all.
pub(crate) struct SidecarIoError {
pub path: std::path::PathBuf,
// The raw std::io::Error text.
pub raw_error: String,
}
/// Test-only shim exposing the private `enriched_parse_error_message` so the
/// enrichment logic can be unit-tested without widening its visibility.
#[cfg(test)]
pub(crate) fn enriched_parse_error_message_for_test(
path: &std::path::Path,
raw_error: &str,
) -> Option<String> {
enriched_parse_error_message(path, raw_error)
}
/// Maps a known sidecar-parse failure to a friendlier explanation.
///
/// Currently recognizes only serde's "missing field `host`" error (the
/// `host` field was added after older sidecars were written) and explains
/// the disposable-sidecar policy. Returns `None` for any other error.
fn enriched_parse_error_message(path: &std::path::Path, raw_error: &str) -> Option<String> {
    let missing_host =
        raw_error.contains("missing field") && raw_error.contains("`host`");
    if !missing_host {
        return None;
    }
    Some(format!(
        "ktstr_test: skipping {}: {raw_error} — the `host` field \
         was added to SidecarResult; pre-1.0 policy is \
         disposable-sidecar: re-run the test to regenerate this \
         file under the current schema (no migration shim exists)",
        path.display(),
    ))
}
/// Scans `dir` (and exactly one level of subdirectories — no deeper
/// recursion) for `*.ktstr.json` sidecar files and parses each.
///
/// Returns `(parsed, parse_errors, io_errors)`. An unreadable `dir` itself
/// yields three empty vecs rather than an error. Every failure is also
/// reported on stderr as it is encountered.
pub(crate) fn collect_sidecars_with_errors(
dir: &std::path::Path,
) -> (
Vec<SidecarResult>,
Vec<SidecarParseError>,
Vec<SidecarIoError>,
) {
let mut sidecars = Vec::new();
let mut parse_errors: Vec<SidecarParseError> = Vec::new();
let mut io_errors: Vec<SidecarIoError> = Vec::new();
let entries = match std::fs::read_dir(dir) {
Ok(e) => e,
Err(_) => return (sidecars, parse_errors, io_errors),
};
let mut subdirs = Vec::new();
// Shared loader for both passes: silently skips non-sidecar names, then
// records the failure as IO (unreadable) or parse (bad JSON/schema).
let try_load = |path: &std::path::Path,
out: &mut Vec<SidecarResult>,
parse_errs: &mut Vec<SidecarParseError>,
io_errs: &mut Vec<SidecarIoError>| {
if !is_sidecar_filename(path) {
return;
}
let data = match std::fs::read_to_string(path) {
Ok(d) => d,
Err(e) => {
let raw = e.to_string();
eprintln!("ktstr_test: cannot read {}: {raw}", path.display());
io_errs.push(SidecarIoError {
path: path.to_path_buf(),
raw_error: raw,
});
return;
}
};
match serde_json::from_str::<SidecarResult>(&data) {
Ok(sc) => out.push(sc),
Err(e) => {
let raw = e.to_string();
// Known schema-change failures get a friendlier explanation
// on stderr; the raw error is kept either way.
let enriched = enriched_parse_error_message(path, &raw);
match &enriched {
Some(prose) => eprintln!("{prose}"),
None => eprintln!("ktstr_test: skipping {}: {raw}", path.display()),
}
parse_errs.push(SidecarParseError {
path: path.to_path_buf(),
raw_error: raw,
enriched_message: enriched,
});
}
}
};
// First pass: load files at the top level, remember subdirectories.
for entry in entries.flatten() {
let path = entry.path();
if path.is_dir() {
subdirs.push(path);
continue;
}
try_load(&path, &mut sidecars, &mut parse_errors, &mut io_errors);
}
// Second pass: one level down only; nested subdirectories are ignored.
for sub in subdirs {
if let Ok(entries) = std::fs::read_dir(&sub) {
for entry in entries.flatten() {
try_load(
&entry.path(),
&mut sidecars,
&mut parse_errors,
&mut io_errors,
);
}
}
}
(sidecars, parse_errors, io_errors)
}
/// Gathers sidecars from every immediate subdirectory of `root` (typically
/// one run directory per kernel+commit under `runs_root()`).
///
/// Files directly under `root` are ignored; an unreadable `root` yields an
/// empty pool.
pub fn collect_pool(root: &std::path::Path) -> Vec<SidecarResult> {
    let Ok(entries) = std::fs::read_dir(root) else {
        return Vec::new();
    };
    entries
        .flatten()
        .map(|entry| entry.path())
        .filter(|path| path.is_dir())
        .flat_map(|path| collect_sidecars(&path))
        .collect()
}
// Kernel BPF verifier complexity budget (verified instructions); the
// denominator for the "limit%" column in `format_verifier_stats`.
const VERIFIER_INSN_LIMIT: u32 = 1_000_000;
// Programs at or above this percentage of the limit are flagged.
const VERIFIER_WARN_PCT: f64 = 75.0;
/// Renders a BPF verifier-stats table for a run's sidecars.
///
/// Programs are deduplicated by name across all sidecars, keeping the worst
/// (maximum) verified-instruction count per program. Programs at or above
/// `VERIFIER_WARN_PCT` of the 1M-instruction limit are flagged inline and
/// summarized in a trailing WARNING section. Returns `""` when no sidecar
/// carried verifier stats.
pub(crate) fn format_verifier_stats(sidecars: &[SidecarResult]) -> String {
    use std::collections::BTreeMap;
    // BTreeMap keeps the table sorted by program name.
    let mut worst_by_prog: BTreeMap<&str, u32> = BTreeMap::new();
    for sc in sidecars {
        for info in &sc.verifier_stats {
            worst_by_prog
                .entry(&info.name)
                .and_modify(|worst| *worst = (*worst).max(info.verified_insns))
                .or_insert(info.verified_insns);
        }
    }
    if worst_by_prog.is_empty() {
        return String::new();
    }
    let mut out = String::from("\n=== BPF VERIFIER STATS ===\n\n");
    out.push_str(&format!(
        " {:<24} {:>12} {:>8}\n",
        "program", "verified", "limit%"
    ));
    out.push_str(&format!(" {:-<24} {:-<12} {:-<8}\n", "", "", ""));
    let mut warnings = Vec::new();
    let mut total: u64 = 0;
    for (&name, &insns) in &worst_by_prog {
        let pct = (insns as f64 / VERIFIER_INSN_LIMIT as f64) * 100.0;
        let near_limit = pct >= VERIFIER_WARN_PCT;
        let flag = if near_limit { " !" } else { "" };
        out.push_str(&format!(
            " {:<24} {:>12} {:>7.1}%{flag}\n",
            name, insns, pct,
        ));
        if near_limit {
            warnings.push(format!(
                " {name}: {pct:.1}% of 1M limit ({insns} verified insns)",
            ));
        }
        total += insns as u64;
    }
    out.push_str(&format!("\n total verified insns: {total}\n"));
    if !warnings.is_empty() {
        out.push_str("\nWARNING: programs near verifier complexity limit:\n");
        for warning in &warnings {
            out.push_str(warning);
            out.push('\n');
        }
    }
    out
}
/// Renders per-test BPF callback runtime profiles (call count, total ns,
/// average ns per program) from each sidecar's monitor `prog_stats_deltas`.
///
/// Sidecars without monitor data, or with an empty delta list, are skipped.
/// The section header is emitted lazily so the result is `""` when nothing
/// qualifies.
pub(crate) fn format_callback_profile(sidecars: &[SidecarResult]) -> String {
    let mut out = String::new();
    for sc in sidecars {
        let Some(deltas) = sc
            .monitor
            .as_ref()
            .and_then(|m| m.prog_stats_deltas.as_ref())
            .filter(|d| !d.is_empty())
        else {
            continue;
        };
        // Header goes in exactly once, before the first qualifying test.
        if out.is_empty() {
            out.push_str("\n=== BPF CALLBACK PROFILE ===\n");
        }
        out.push_str(&format!("\n {} ({}):\n", sc.test_name, sc.topology));
        out.push_str(&format!(
            " {:<24} {:>12} {:>14} {:>12}\n",
            "program", "cnt", "total_ns", "avg_ns"
        ));
        out.push_str(&format!(
            " {:-<24} {:-<12} {:-<14} {:-<12}\n",
            "", "", "", ""
        ));
        for delta in deltas {
            out.push_str(&format!(
                " {:<24} {:>12} {:>14} {:>12.0}\n",
                delta.name, delta.cnt, delta.nsecs, delta.nsecs_per_call,
            ));
        }
    }
    out
}
/// Renders a KVM stats summary averaged across all sidecars that carry
/// `kvm_stats`; returns `""` when none do or when no exits were recorded.
pub(crate) fn format_kvm_stats(sidecars: &[SidecarResult]) -> String {
let with_stats: Vec<&crate::vmm::KvmStatsTotals> = sidecars
.iter()
.filter_map(|sc| sc.kvm_stats.as_ref())
.collect();
if with_stats.is_empty() {
return String::new();
}
let n_vms = with_stats.len();
// Average-of-averages across VMs; integer division truncates, which is
// acceptable for this human-readable summary.
let vm_avg = |name: &str| -> u64 {
let sum: u64 = with_stats.iter().map(|d| d.avg(name)).sum();
sum / n_vms as u64
};
let exits = vm_avg("exits");
let halt = vm_avg("halt_exits");
let halt_wait_ns = vm_avg("halt_wait_ns");
let preempted = vm_avg("preemption_reported");
let signal = vm_avg("signal_exits");
let hypercalls = vm_avg("hypercalls");
// Poll success rate uses grand totals (not per-VM averages) so VMs with
// more polls weigh proportionally.
let total_poll_ok: u64 = with_stats
.iter()
.map(|d| d.sum("halt_successful_poll"))
.sum();
let total_poll_try: u64 = with_stats
.iter()
.map(|d| d.sum("halt_attempted_poll"))
.sum();
// No exits at all means there is nothing meaningful to report.
if exits == 0 {
return String::new();
}
let halt_wait_ms = halt_wait_ns as f64 / 1_000_000.0;
let poll_pct = if total_poll_try > 0 {
(total_poll_ok as f64 / total_poll_try as f64) * 100.0
} else {
0.0
};
let mut out = format!("\n=== KVM STATS (avg across {n_vms} VMs) ===\n\n");
out.push_str(&format!(
" exits/vcpu {:>7} halt/vcpu {:>5} halt_wait_ms {:>7.1}\n",
exits, halt, halt_wait_ms,
));
out.push_str(&format!(
" poll_ok% {:>6.1}% preempted/vcpu {:>4} signal/vcpu {:>7}\n",
poll_pct, preempted, signal,
));
// Hypercall line only when any occurred.
if hypercalls > 0 {
out.push_str(&format!(" hypercalls/vcpu {:>4}\n", hypercalls));
}
// Host preemption invalidates timing assumptions; call it out loudly
// with the total (not averaged) count.
if preempted > 0 {
let total: u64 = with_stats
.iter()
.map(|d| d.sum("preemption_reported"))
.sum();
out.push_str(&format!(
"\n WARNING: {total} host preemptions detected \
-- timing results may be unreliable\n",
));
}
out
}
/// Directory sidecars are written to: the `KTSTR_SIDECAR_DIR` env override
/// when set, otherwise the kernel+commit-derived default under `runs_root()`.
pub(crate) fn sidecar_dir() -> PathBuf {
sidecar_dir_override().unwrap_or_else(resolve_default_sidecar_dir)
}
/// Default run directory: `runs_root()/{kernel}-{commit}`, with "unknown"
/// substituted for either component that cannot be detected.
fn resolve_default_sidecar_dir() -> PathBuf {
let kernel = detect_kernel_version();
let commit = detect_project_commit();
// An unknown commit makes runs collide in the same "-unknown" directory;
// warn (once per process) so the user can set KTSTR_SIDECAR_DIR instead.
if commit.is_none() {
warn_unknown_project_commit_once();
}
runs_root().join(format_run_dirname(kernel.as_deref(), commit.as_deref()))
}
/// Builds the `{kernel}-{commit}` run-directory name, substituting
/// `"unknown"` for either missing component.
fn format_run_dirname(kernel: Option<&str>, commit: Option<&str>) -> String {
    format!(
        "{}-{}",
        kernel.unwrap_or("unknown"),
        commit.unwrap_or("unknown")
    )
}
/// Root of all run directories: `$CARGO_TARGET_DIR/ktstr` when the variable
/// is set and non-empty, otherwise the relative `target/ktstr`.
pub fn runs_root() -> PathBuf {
    let target = match std::env::var("CARGO_TARGET_DIR") {
        Ok(dir) if !dir.is_empty() => PathBuf::from(dir),
        _ => PathBuf::from("target"),
    };
    target.join("ktstr")
}
/// True when `entry` is a directory whose name does not start with `.` —
/// i.e. a run directory, as opposed to files or dot-prefixed internals
/// (such as the lock directory).
pub(crate) fn is_run_directory(entry: &std::fs::DirEntry) -> bool {
    let path = entry.path();
    if !path.is_dir() {
        return false;
    }
    // Compare the first raw byte so non-UTF-8 names are handled too; an
    // absent first byte counts as "not hidden", matching the original
    // `is_none_or(|b| b != b'.')` semantics.
    let first_byte = path
        .file_name()
        .and_then(|name| name.as_encoded_bytes().first().copied());
    first_byte != Some(b'.')
}
pub fn newest_run_dir() -> Option<PathBuf> {
let root = runs_root();
let entries = std::fs::read_dir(&root).ok()?;
entries
.filter_map(|e| e.ok())
.filter(is_run_directory)
.max_by_key(|e| e.metadata().and_then(|m| m.modified()).ok())
.map(|e| e.path())
}
/// Best-effort kernel version string for the kernel named by the ktstr
/// kernel environment variable; `None` when unset or undeterminable.
pub(crate) fn detect_kernel_version() -> Option<String> {
use crate::kernel_path::KernelId;
let raw = crate::ktstr_kernel_env()?;
match KernelId::parse(&raw) {
// Local build tree: prefer our own metadata.json, then fall back to
// the kernel build system's generated release file.
KernelId::Path(_) => {
let p = std::path::Path::new(&raw);
let meta_path = p.join("metadata.json");
if let Ok(data) = std::fs::read_to_string(&meta_path)
&& let Ok(meta) = serde_json::from_str::<crate::cache::KernelMetadata>(&data)
{
return meta.version;
}
let ver_path = p.join("include/config/kernel.release");
if let Ok(v) = std::fs::read_to_string(ver_path) {
let v = v.trim();
if !v.is_empty() {
return Some(v.to_string());
}
}
None
}
// An explicit version string is already the answer.
KernelId::Version(ver) => Some(ver),
// Cache key: resolve through the kernel cache's stored metadata.
KernelId::CacheKey(key) => {
let cache = crate::cache::CacheDir::new().ok()?;
let entry = cache.lookup(&key)?;
entry.metadata.version
}
// Ranges and git refs name many kernels; no single version applies.
KernelId::Range { .. } | KernelId::Git { .. } => None,
}
}
/// Short commit hash (possibly "-dirty" suffixed) of the git repo enclosing
/// the current working directory; `None` when not in a repo.
///
/// Cached process-wide via `OnceLock`, so the cwd is sampled only on the
/// first call — later cwd changes will not refresh the result.
pub(crate) fn detect_project_commit() -> Option<String> {
static PROJECT_COMMIT: std::sync::OnceLock<Option<String>> = std::sync::OnceLock::new();
PROJECT_COMMIT
.get_or_init(|| {
let cwd = std::env::current_dir().ok()?;
detect_commit_at(&cwd)
})
.clone()
}
/// Discovers the git repository enclosing `path` (walking upward) and
/// returns its HEAD commit with dirty suffix; `None` when no repo is found.
fn detect_commit_at(path: &std::path::Path) -> Option<String> {
let repo = gix::discover(path).ok()?;
commit_with_dirty_suffix(&repo)
}
/// 7-character short hash of `repo`'s HEAD, with "-dirty" appended when the
/// working state differs from HEAD; `None` when HEAD is unreadable.
///
/// A failed dirtiness probe is treated as clean (`unwrap_or(false)`).
fn commit_with_dirty_suffix(repo: &gix::Repository) -> Option<String> {
    let short_hash = repo.head_id().ok()?.to_hex_with_len(7).to_string();
    let suffix = if repo_is_dirty(repo).unwrap_or(false) {
        "-dirty"
    } else {
        ""
    };
    Some(format!("{short_hash}{suffix}"))
}
/// Whether `repo` has uncommitted changes, checking the HEAD-tree-vs-index
/// diff first and the index-vs-worktree diff only if the index was clean.
///
/// Returns `None` when the HEAD tree cannot be read. Any failure in the
/// underlying status machinery is treated as "clean" (best-effort).
#[doc(hidden)]
pub fn repo_is_dirty(repo: &gix::Repository) -> Option<bool> {
let head_tree_id = repo.head_tree().ok()?.id;
let mut index_dirty = false;
if let Ok(index) = repo.index() {
// The callback breaks at the first difference — we only need a
// boolean, not the full delta list.
let _ = repo.tree_index_status(
&head_tree_id,
&index,
None,
gix::status::tree_index::TrackRenames::Disabled,
|_, _, _| {
index_dirty = true;
Ok::<_, std::convert::Infallible>(std::ops::ControlFlow::Break(()))
},
);
}
// Skip the (more expensive) worktree scan if the index already proved
// the repo dirty.
let worktree_dirty = if index_dirty {
false
} else {
repo.status(gix::progress::Discard)
.ok()
.and_then(|s| {
s.index_worktree_rewrites(None)
.index_worktree_submodules(gix::status::Submodule::Given {
ignore: gix::submodule::config::Ignore::All,
check_dirty: false,
})
// NOTE(review): disabling the dirwalk appears to exclude
// untracked files from the dirtiness check — confirm this
// is the intended policy.
.index_worktree_options_mut(|opts| {
opts.dirwalk_options = None;
})
.into_index_worktree_iter(Vec::new())
.ok()
// Any yielded item means at least one change exists.
.map(|mut iter| iter.next().is_some())
})
.unwrap_or(false)
};
Some(index_dirty || worktree_dirty)
}
/// Short commit (possibly "-dirty") of the git repo at `kernel_dir`, or
/// `None` when the directory is not a repo / HEAD is unreadable.
///
/// Results are memoized process-wide, keyed by the canonicalized path
/// (falling back to the raw path when canonicalization fails). The mutex
/// guard is held across the probe so concurrent callers for the same dir
/// wait instead of duplicating work.
pub(crate) fn detect_kernel_commit(kernel_dir: &std::path::Path) -> Option<String> {
    use std::collections::{HashMap, hash_map::Entry};
    use std::path::PathBuf;
    use std::sync::{Mutex, OnceLock};
    static KERNEL_COMMIT_CACHE: OnceLock<Mutex<HashMap<PathBuf, Option<String>>>> = OnceLock::new();
    let key = kernel_dir
        .canonicalize()
        .unwrap_or_else(|_| kernel_dir.to_path_buf());
    let cache = KERNEL_COMMIT_CACHE.get_or_init(Default::default);
    // A poisoned lock still holds usable data; recover rather than panic.
    let mut map = cache.lock().unwrap_or_else(|e| e.into_inner());
    match map.entry(key) {
        Entry::Occupied(hit) => hit.get().clone(),
        Entry::Vacant(slot) => {
            let commit = gix::open(kernel_dir)
                .ok()
                .and_then(|repo| commit_with_dirty_suffix(&repo));
            slot.insert(commit.clone());
            commit
        }
    }
}
/// Env var that marks a CI run when set to any non-empty value.
pub const KTSTR_CI_ENV: &str = "KTSTR_CI";
/// `run_source` values recorded in sidecars.
pub const SIDECAR_RUN_SOURCE_CI: &str = "ci";
pub const SIDECAR_RUN_SOURCE_LOCAL: &str = "local";
pub const SIDECAR_RUN_SOURCE_ARCHIVE: &str = "archive";
/// Classifies the current process as a "ci" or "local" run based on
/// `KTSTR_CI`; always `Some` (empty env value counts as local). The
/// "archive" source is applied only after the fact, by
/// `apply_archive_source_override`.
pub(crate) fn detect_run_source() -> Option<String> {
    let in_ci = std::env::var(KTSTR_CI_ENV).is_ok_and(|v| !v.is_empty());
    let source = if in_ci {
        SIDECAR_RUN_SOURCE_CI
    } else {
        SIDECAR_RUN_SOURCE_LOCAL
    };
    Some(source.to_string())
}
/// Re-labels every sidecar in `pool` as coming from an archive, overwriting
/// whatever run source was recorded at write time.
pub(crate) fn apply_archive_source_override(pool: &mut [SidecarResult]) {
    for sidecar in pool.iter_mut() {
        sidecar.run_source = Some(SIDECAR_RUN_SOURCE_ARCHIVE.to_owned());
    }
}
/// Locates the kernel source tree for the kernel named by the ktstr kernel
/// environment variable, for use with `detect_kernel_commit`.
fn resolve_kernel_source_dir() -> Option<std::path::PathBuf> {
use crate::kernel_path::KernelId;
let raw = crate::ktstr_kernel_env()?;
let id = KernelId::parse(&raw);
match id {
// Explicit path: try to recover the original local source tree the
// build came from, else use the path itself.
KernelId::Path(_) => {
let p = std::path::Path::new(&raw);
crate::cache::recover_local_source_tree(p)
.or_else(|| Some(std::path::PathBuf::from(&raw)))
}
// Version / cache-key: resolve through the kernel cache.
KernelId::Version(_) | KernelId::CacheKey(_) => {
let cache = crate::cache::CacheDir::new().ok()?;
resolve_kernel_source_dir_with_cache(&id, &cache)
}
// Ranges and git refs name many kernels; no single tree applies.
KernelId::Range { .. } | KernelId::Git { .. } => None,
}
}
/// Cache-backed half of `resolve_kernel_source_dir`: finds a cached entry
/// for `id` whose metadata records a local source tree path.
fn resolve_kernel_source_dir_with_cache(
id: &crate::kernel_path::KernelId,
cache: &crate::cache::CacheDir,
) -> Option<std::path::PathBuf> {
use crate::kernel_path::KernelId;
match id {
KernelId::Version(ver) => {
// Fast path: direct lookup under the derived tarball cache key
// for this version/arch.
let arch = std::env::consts::ARCH;
let tarball_key = format!("{ver}-tarball-{arch}-kc{}", crate::cache_key_suffix());
if let Some(entry) = cache.lookup(&tarball_key)
&& let crate::cache::KernelSource::Local {
source_tree_path: Some(p),
..
} = &entry.metadata.source
{
return Some(p.clone());
}
// Slow path: scan every valid cache entry for a matching version
// with a recorded local source tree.
let entries = cache.list().ok()?;
for listed in entries {
let crate::cache::ListedEntry::Valid(entry) = listed else {
continue;
};
if entry.metadata.version.as_deref() != Some(ver.as_str()) {
continue;
}
if let crate::cache::KernelSource::Local {
source_tree_path: Some(p),
..
} = &entry.metadata.source
{
return Some(p.clone());
}
}
None
}
// Exact cache key: single lookup, local-source entries only.
KernelId::CacheKey(k) => {
let entry = cache.lookup(k)?;
match entry.metadata.source {
crate::cache::KernelSource::Local {
source_tree_path: Some(ref p),
..
} => Some(p.clone()),
_ => None,
}
}
_ => None,
}
}
/// Stable 64-bit identity of a test *variant* — topology, scheduler,
/// payload, work type, flags, sysctls, and kargs — used to disambiguate
/// sidecar filenames for the same test name.
///
/// Fixed SipHash-1-3 keys (0, 0) keep the value stable across processes;
/// a unit test pins the fixture's hash, so any change to the byte layout
/// below is a deliberate format change. Items are NUL-terminated and
/// sections separated by sentinel bytes (0xfc/0xfe/0xfd/0xff); sysctls and
/// kargs are sorted first so their input order doesn't matter.
pub(crate) fn sidecar_variant_hash(sidecar: &SidecarResult) -> u64 {
use siphasher::sip::SipHasher13;
use std::hash::Hasher;
let mut h = SipHasher13::new_with_keys(0, 0);
h.write(sidecar.topology.as_bytes());
h.write(&[0]);
h.write(sidecar.scheduler.as_bytes());
h.write(&[0]);
h.write(&[0xfc]);
// NOTE(review): payload None and Some("") feed identical bytes here
// (only the trailing NUL) and therefore collide — confirm that an empty
// payload name cannot occur, or that the collision is acceptable.
if let Some(name) = &sidecar.payload {
h.write(name.as_bytes());
}
h.write(&[0]);
h.write(sidecar.work_type.as_bytes());
h.write(&[0]);
h.write(&[0xfe]);
for f in &sidecar.active_flags {
h.write(f.as_bytes());
h.write(&[0]);
}
h.write(&[0xfd]);
// Sort so sysctl order doesn't change the hash.
let mut sorted_sysctls: Vec<&str> = sidecar.sysctls.iter().map(String::as_str).collect();
sorted_sysctls.sort_unstable();
for s in &sorted_sysctls {
h.write(s.as_bytes());
h.write(&[0]);
}
h.write(&[0xff]);
// Same for kernel args.
let mut sorted_kargs: Vec<&str> = sidecar.kargs.iter().map(String::as_str).collect();
sorted_kargs.sort_unstable();
for k in &sorted_kargs {
h.write(k.as_bytes());
h.write(&[0]);
}
h.finish()
}
/// The scheduler-identity slice of a sidecar: which scheduler ran, at which
/// commit, and with which sysctls / kernel args. Extracted from a test
/// entry by `scheduler_fingerprint` and copied verbatim into `SidecarResult`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct SchedulerFingerprint {
pub(crate) scheduler: String,
pub(crate) scheduler_commit: Option<String>,
// Rendered as "sysctl.{key}={value}" strings.
pub(crate) sysctls: Vec<String>,
pub(crate) kargs: Vec<String>,
}
/// Extracts the scheduler identity (name, optional binary commit, sysctls
/// rendered as `sysctl.{key}={value}`, and kernel args) from a test entry.
fn scheduler_fingerprint(entry: &KtstrTestEntry) -> SchedulerFingerprint {
    let sched = &entry.scheduler;
    SchedulerFingerprint {
        scheduler: sched.scheduler_name().to_string(),
        // Only schedulers backed by a binary carry a commit.
        scheduler_commit: sched
            .scheduler_binary()
            .and_then(|bin| bin.scheduler_commit())
            .map(|commit| commit.to_string()),
        sysctls: sched
            .sysctls()
            .iter()
            .map(|s| format!("sysctl.{}={}", s.key, s.value))
            .collect(),
        kargs: sched.kargs().iter().map(|karg| karg.to_string()).collect(),
    }
}
/// Serializes `sidecar` to pretty JSON and writes it as
/// `{test_name}-{variant_hash:016x}.ktstr.json` in the sidecar directory.
///
/// With the env override (`KTSTR_SIDECAR_DIR`) the directory is used as-is;
/// for the default per-run directory, an exclusive flock is taken and stale
/// sidecars from a previous run are cleared once per process. `label` only
/// flavors error-context messages.
fn serialize_and_write_sidecar(sidecar: &SidecarResult, label: &str) -> anyhow::Result<()> {
let (dir, do_pre_clear) = match sidecar_dir_override() {
Some(path) => (path, false),
None => (resolve_default_sidecar_dir(), true),
};
std::fs::create_dir_all(&dir)
.with_context(|| format!("create sidecar dir {}", dir.display()))?;
// Lock before clearing so a peer process can't race the pre-clear; the
// guard lives until this function returns (after the write).
let _run_dir_lock = if do_pre_clear {
Some(acquire_run_dir_flock(&dir)?)
} else {
None
};
if do_pre_clear {
pre_clear_run_dir_once(&dir);
}
// Variant hash in the filename keeps different parameterizations of the
// same test from overwriting each other.
let variant_hash = sidecar_variant_hash(sidecar);
let path = dir.join(format!(
"{}-{:016x}.ktstr.json",
sidecar.test_name, variant_hash
));
let json = serde_json::to_string_pretty(sidecar)
.with_context(|| format!("serialize {label} for '{}'", sidecar.test_name))?;
std::fs::write(&path, json).with_context(|| format!("write {label} {}", path.display()))?;
Ok(())
}
/// The `KTSTR_SIDECAR_DIR` override, when set to a non-empty value; an
/// empty or unset variable counts as "no override".
fn sidecar_dir_override() -> Option<PathBuf> {
    match std::env::var("KTSTR_SIDECAR_DIR") {
        Ok(dir) if !dir.is_empty() => Some(PathBuf::from(dir)),
        _ => None,
    }
}
/// Emits the unknown-project-commit warning to stderr at most once per
/// process (the static `OnceLock` is the gate).
fn warn_unknown_project_commit_once() {
static WARNED: std::sync::OnceLock<()> = std::sync::OnceLock::new();
let mut sink = std::io::stderr();
warn_unknown_project_commit_inner(&WARNED, &mut sink);
}
/// Testable core of `warn_unknown_project_commit_once`: writes the warning
/// to `sink` only if `gate` has not fired yet. Write failures are ignored
/// (best-effort warning).
fn warn_unknown_project_commit_inner(
    gate: &std::sync::OnceLock<()>,
    sink: &mut dyn std::io::Write,
) {
    use std::io::Write;
    // `set` fails exactly when the gate already fired — warn only once.
    if gate.set(()).is_err() {
        return;
    }
    let _ = writeln!(
        sink,
        "ktstr: WARNING: project commit unavailable (cwd not in a git \
         repo, or HEAD unreadable); runs at this kernel overwrite \
         each other in target/ktstr/{{kernel}}-unknown/. Set \
         KTSTR_SIDECAR_DIR=<unique-path> per run, or run from inside a \
         git repo with at least one commit."
    );
}
/// Deletes stale sidecar files from `dir` — at most once per (canonicalized)
/// directory per process, so a later test writing into the same run dir does
/// not wipe sidecars written earlier in this run.
///
/// Only top-level regular files matching the sidecar naming pattern are
/// removed; removal failures are ignored (best-effort).
fn pre_clear_run_dir_once(dir: &std::path::Path) {
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::{Mutex, OnceLock};
static PRE_CLEARED: OnceLock<Mutex<HashSet<PathBuf>>> = OnceLock::new();
let cache_key = dir.canonicalize().unwrap_or_else(|_| dir.to_path_buf());
let cache = PRE_CLEARED.get_or_init(|| Mutex::new(HashSet::new()));
// Poisoned lock still holds usable data; recover rather than panic.
let mut guard = cache.lock().unwrap_or_else(|e| e.into_inner());
// insert() returning false means this dir was already cleared.
if !guard.insert(cache_key) {
return;
}
if let Ok(entries) = std::fs::read_dir(dir) {
for entry in entries.flatten() {
let path = entry.path();
if path.is_file() && is_sidecar_filename(&path) {
let _ = std::fs::remove_file(&path);
}
}
}
// Guard intentionally held across the removal loop so concurrent callers
// for the same dir wait until clearing is complete.
drop(guard);
}
/// How long a writer waits for a peer process to release the run-dir lock.
const RUN_DIR_LOCK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);
/// Lock-file path for a run directory:
/// `{parent}/{LOCK_DIR_NAME}/{leaf}.lock`, kept *outside* the run dir so
/// pre-clearing cannot delete the lock itself. `None` when `dir` has no
/// parent or no final component.
fn run_dir_lock_path(dir: &std::path::Path) -> Option<PathBuf> {
    let mut lock_name = dir.file_name()?.to_os_string();
    lock_name.push(".lock");
    Some(dir.parent()?.join(crate::flock::LOCK_DIR_NAME).join(lock_name))
}
/// Acquires the exclusive run-dir flock with the default 30s timeout.
fn acquire_run_dir_flock(dir: &std::path::Path) -> anyhow::Result<std::os::fd::OwnedFd> {
acquire_run_dir_flock_with_timeout(dir, RUN_DIR_LOCK_TIMEOUT)
}
/// Takes an exclusive flock on `dir`'s lock file, waiting up to `timeout`.
///
/// The returned `OwnedFd` holds the lock; dropping it releases the lock.
/// Errors when the lock path cannot be derived or the lock is not obtained
/// within the timeout (the hint text explains the likely peer process).
fn acquire_run_dir_flock_with_timeout(
dir: &std::path::Path,
timeout: std::time::Duration,
) -> anyhow::Result<std::os::fd::OwnedFd> {
let lock_path = run_dir_lock_path(dir).ok_or_else(|| {
anyhow::anyhow!(
"cannot derive run-dir lock path from {} (no parent or no file_name component)",
dir.display(),
)
})?;
let context = format!("run-dir {}", dir.display());
crate::flock::acquire_flock_with_timeout(
&lock_path,
crate::flock::FlockMode::Exclusive,
timeout,
&context,
Some(
"A peer cargo ktstr test process is writing sidecars to the \
same {kernel}-{project_commit} directory; wait for it to \
finish or kill it, then retry.",
),
)
}
fn canonicalize_active_flags(flags: &[String]) -> Vec<String> {
let mut v: Vec<String> = flags.to_vec();
v.sort_by(|a, b| {
let ka = crate::scenario::flags::ALL
.iter()
.position(|x| *x == a.as_str())
.unwrap_or(usize::MAX);
let kb = crate::scenario::flags::ALL
.iter()
.position(|x| *x == b.as_str())
.unwrap_or(usize::MAX);
ka.cmp(&kb).then_with(|| a.as_str().cmp(b.as_str()))
});
v
}
/// Writes a sidecar for a test that was skipped before running: no metrics,
/// stats, or monitor data; `passed = true`, `skipped = true`, and the
/// sentinel work type "skipped".
///
/// Environment context (commits, kernel version, host, timestamp, run id,
/// run source) is still captured so skip records aggregate like real runs.
pub(crate) fn write_skip_sidecar(
entry: &KtstrTestEntry,
active_flags: &[String],
) -> anyhow::Result<()> {
let SchedulerFingerprint {
scheduler,
scheduler_commit,
sysctls,
kargs,
} = scheduler_fingerprint(entry);
let sidecar = SidecarResult {
test_name: entry.name.to_string(),
topology: entry.topology.to_string(),
scheduler,
scheduler_commit,
project_commit: detect_project_commit(),
payload: entry.payload.map(|p| p.name.to_string()),
metrics: Vec::new(),
// Skips count as passing; `skipped` carries the distinction.
passed: true,
skipped: true,
stats: Default::default(),
monitor: None,
stimulus_events: Vec::new(),
// Sentinel work type marking a skip record.
work_type: "skipped".to_string(),
active_flags: canonicalize_active_flags(active_flags),
verifier_stats: Vec::new(),
kvm_stats: None,
sysctls,
kargs,
kernel_version: detect_kernel_version(),
kernel_commit: resolve_kernel_source_dir().and_then(|d| detect_kernel_commit(&d)),
timestamp: now_iso8601(),
run_id: generate_run_id(),
host: Some(crate::host_context::collect_host_context()),
cleanup_duration_ms: None,
run_source: detect_run_source(),
};
serialize_and_write_sidecar(&sidecar, "skip sidecar")
}
/// Writes the sidecar for a completed test run, combining the test entry's
/// scheduler fingerprint, the VM result, assertion outcome, stimulus
/// timeline, and payload metrics with detected environment context.
pub(crate) fn write_sidecar(
entry: &KtstrTestEntry,
vm_result: &vmm::VmResult,
stimulus_events: &[StimulusEvent],
check_result: &AssertResult,
work_type: &str,
active_flags: &[String],
payload_metrics: &[PayloadMetrics],
) -> anyhow::Result<()> {
let SchedulerFingerprint {
scheduler,
scheduler_commit,
sysctls,
kargs,
} = scheduler_fingerprint(entry);
let sidecar = SidecarResult {
test_name: entry.name.to_string(),
topology: entry.topology.to_string(),
scheduler,
scheduler_commit,
project_commit: detect_project_commit(),
payload: entry.payload.map(|p| p.name.to_string()),
metrics: payload_metrics.to_vec(),
// Outcome comes straight from the assertion result.
passed: check_result.passed,
skipped: check_result.is_skipped(),
stats: check_result.stats.clone(),
monitor: vm_result.monitor.as_ref().map(|m| m.summary.clone()),
stimulus_events: stimulus_events.to_vec(),
work_type: work_type.to_string(),
active_flags: canonicalize_active_flags(active_flags),
verifier_stats: vm_result.verifier_stats.clone(),
kvm_stats: vm_result.kvm_stats.clone(),
sysctls,
kargs,
kernel_version: detect_kernel_version(),
kernel_commit: resolve_kernel_source_dir().and_then(|d| detect_kernel_commit(&d)),
timestamp: now_iso8601(),
run_id: generate_run_id(),
host: Some(crate::host_context::collect_host_context()),
// Duration → whole milliseconds; sub-ms cleanups record as 0.
cleanup_duration_ms: vm_result.cleanup_duration.map(|d| d.as_millis() as u64),
run_source: detect_run_source(),
};
serialize_and_write_sidecar(&sidecar, "sidecar")
}
#[cfg(test)]
mod tests {
use super::super::test_helpers::{EnvVarGuard, lock_env};
use super::*;
use crate::assert::{AssertResult, CgroupStats};
use crate::scenario::Ctx;
use anyhow::Result;
// Test helper: all paths in `dir` whose file name starts with `prefix` and
// ends with the sidecar suffix `.ktstr.json`. Panics if `dir` is unreadable.
fn find_sidecars_by_prefix(dir: &std::path::Path, prefix: &str) -> Vec<std::path::PathBuf> {
std::fs::read_dir(dir)
.expect("sidecar dir must exist for lookup")
.filter_map(|e| e.ok().map(|e| e.path()))
.filter(|p| {
p.file_name()
.and_then(|n| n.to_str())
.is_some_and(|n| n.starts_with(prefix) && n.ends_with(".ktstr.json"))
})
.collect()
}
// Test helper: like `find_sidecars_by_prefix` but asserts exactly one match
// (for single-variant tests) and returns it.
fn find_single_sidecar_by_prefix(dir: &std::path::Path, prefix: &str) -> std::path::PathBuf {
let paths = find_sidecars_by_prefix(dir, prefix);
assert_eq!(
paths.len(),
1,
"single-variant test must produce exactly one sidecar under \
prefix {prefix:?}; got {paths:?}",
);
paths
.into_iter()
.next()
.expect("length-1 vec yields Some on first next()")
}
// Only the exact `.ktstr.json` suffix may match: `.tmp`-suffixed partial
// writes, plain `.json`, and other extensions must be excluded.
#[test]
fn find_sidecars_by_prefix_filters_suffix() {
let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp = tmp_dir.path();
std::fs::write(tmp.join("foo-0001.ktstr.json"), b"{}").unwrap();
std::fs::write(tmp.join("foo-0002.ktstr.json.tmp"), b"{}").unwrap();
std::fs::write(tmp.join("foo-0003.json"), b"{}").unwrap();
std::fs::write(tmp.join("foo-0004.ktstr.txt"), b"{}").unwrap();
let paths = find_sidecars_by_prefix(tmp, "foo-");
assert_eq!(
paths.len(),
1,
"only the .ktstr.json file must match, got {paths:?}",
);
}
// Prefix matching is literal `starts_with`: "foo-" must not match "foobar-".
#[test]
fn find_sidecars_by_prefix_filters_prefix() {
let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp = tmp_dir.path();
std::fs::write(tmp.join("foo-0001.ktstr.json"), b"{}").unwrap();
std::fs::write(tmp.join("bar-0002.ktstr.json"), b"{}").unwrap();
std::fs::write(tmp.join("foobar-0003.ktstr.json"), b"{}").unwrap();
let paths = find_sidecars_by_prefix(tmp, "foo-");
assert_eq!(
paths.len(),
1,
"only files starting with 'foo-' must match (not 'foobar-'), got {paths:?}",
);
}
// No matches yields an empty Vec, not a panic.
#[test]
fn find_sidecars_by_prefix_empty_when_no_match() {
let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp = tmp_dir.path();
std::fs::write(tmp.join("bar-0001.ktstr.json"), b"{}").unwrap();
let paths = find_sidecars_by_prefix(tmp, "foo-");
assert!(
paths.is_empty(),
"no prefix match must yield empty Vec, got {paths:?}",
);
}
// The fixture must serialize and deserialize without error — a fixture that
// can't round-trip would poison every test built on it.
#[test]
fn test_fixture_round_trips_clean() {
let sc = SidecarResult::test_fixture();
let json = serde_json::to_string(&sc).expect("fixture must serialize");
let _loaded: SidecarResult =
serde_json::from_str(&json).expect("fixture JSON must parse back");
}
// Guard the fixture's outcome defaults.
#[test]
fn test_fixture_is_pass_not_skip() {
let sc = SidecarResult::test_fixture();
assert!(sc.passed, "fixture must default to passed=true");
assert!(!sc.skipped, "fixture must default to skipped=false");
}
// Guard the fixture's host default.
#[test]
fn test_fixture_host_is_none() {
let sc = SidecarResult::test_fixture();
assert!(sc.host.is_none(), "fixture must default to host=None");
}
// Guard the fixture's payload/metrics defaults.
#[test]
fn test_fixture_payload_and_metrics_empty() {
let sc = SidecarResult::test_fixture();
assert!(sc.payload.is_none(), "fixture must default to payload=None");
assert!(
sc.metrics.is_empty(),
"fixture must default to metrics=empty"
);
}
// Exhaustive check of every "empty/None" default on the fixture; catches a
// field being given an accidental non-trivial default.
#[test]
fn test_fixture_all_collections_empty_by_default() {
let sc = SidecarResult::test_fixture();
assert!(sc.metrics.is_empty(), "metrics must default empty");
assert!(
sc.active_flags.is_empty(),
"active_flags must default empty"
);
assert!(
sc.stimulus_events.is_empty(),
"stimulus_events must default empty"
);
assert!(
sc.verifier_stats.is_empty(),
"verifier_stats must default empty"
);
assert!(sc.sysctls.is_empty(), "sysctls must default empty");
assert!(sc.kargs.is_empty(), "kargs must default empty");
assert!(sc.payload.is_none(), "payload must default None");
assert!(sc.monitor.is_none(), "monitor must default None");
assert!(sc.kvm_stats.is_none(), "kvm_stats must default None");
assert!(
sc.kernel_version.is_none(),
"kernel_version must default None"
);
assert!(
sc.kernel_commit.is_none(),
"kernel_commit must default None"
);
assert!(sc.host.is_none(), "host must default None");
assert!(
sc.timestamp.is_empty(),
"timestamp must default empty String"
);
assert!(sc.run_id.is_empty(), "run_id must default empty String");
assert!(
sc.stats.cgroups.is_empty(),
"stats.cgroups must default empty (ScenarioStats::default)",
);
assert!(sc.passed, "passed must default true");
assert!(!sc.skipped, "skipped must default false");
}
// Pins the fixture's variant hash: both determinism (two fresh fixtures
// hash the same) and the exact value, so any change to the fixture or to
// `sidecar_variant_hash`'s byte layout is a deliberate, visible decision.
#[test]
fn test_fixture_variant_hash_is_stable() {
let a = sidecar_variant_hash(&SidecarResult::test_fixture());
let b = sidecar_variant_hash(&SidecarResult::test_fixture());
assert_eq!(a, b, "two fresh fixtures must hash identically");
assert_eq!(
a, 0x55f6b9881e152f8c,
"fixture hash drifted — update only if the fixture default \
change is intentional; verify every call site that passes \
the fixture straight into sidecar_variant_hash still expresses \
the intent it had before",
);
}
// Full serde round-trip of a richly populated SidecarResult. Each commit
// field carries a distinct token so a field-swap regression fails loudly.
// The exhaustive destructuring below means adding a SidecarResult field
// breaks this test until the new field is asserted — deliberately.
#[test]
fn sidecar_result_roundtrip() {
let sc = SidecarResult {
test_name: "my_test".to_string(),
topology: "1n2l4c2t".to_string(),
scheduler: "scx_mitosis".to_string(),
scheduler_commit: Some("abc123".to_string()),
project_commit: Some("def4567".to_string()),
payload: None,
metrics: vec![],
passed: true,
skipped: false,
stats: crate::assert::ScenarioStats {
cgroups: vec![CgroupStats {
num_workers: 4,
num_cpus: 2,
avg_off_cpu_pct: 50.0,
min_off_cpu_pct: 40.0,
max_off_cpu_pct: 60.0,
spread: 20.0,
max_gap_ms: 100,
max_gap_cpu: 1,
total_migrations: 5,
..Default::default()
}],
total_workers: 4,
total_cpus: 2,
total_migrations: 5,
worst_spread: 20.0,
worst_gap_ms: 100,
worst_gap_cpu: 1,
..Default::default()
},
monitor: Some(MonitorSummary {
prog_stats_deltas: None,
total_samples: 10,
max_imbalance_ratio: 1.5,
max_local_dsq_depth: 3,
stall_detected: false,
event_deltas: Some(crate::monitor::ScxEventDeltas {
total_fallback: 7,
fallback_rate: 0.5,
max_fallback_burst: 2,
total_dispatch_offline: 0,
total_dispatch_keep_last: 3,
keep_last_rate: 0.2,
total_enq_skip_exiting: 0,
total_enq_skip_migration_disabled: 0,
..Default::default()
}),
schedstat_deltas: None,
..Default::default()
}),
stimulus_events: vec![crate::timeline::StimulusEvent {
elapsed_ms: 500,
label: "StepStart[0]".to_string(),
op_kind: Some("SetCpuset".to_string()),
detail: Some("4 cpus".to_string()),
total_iterations: None,
}],
work_type: "CpuSpin".to_string(),
active_flags: Vec::new(),
verifier_stats: vec![],
kvm_stats: None,
sysctls: vec![],
kargs: vec![],
kernel_version: None,
kernel_commit: Some("kabcde7".to_string()),
timestamp: String::new(),
run_id: String::new(),
host: None,
cleanup_duration_ms: Some(123),
run_source: Some(SIDECAR_RUN_SOURCE_LOCAL.to_string()),
};
let json = serde_json::to_string_pretty(&sc).unwrap();
let loaded: SidecarResult = serde_json::from_str(&json).unwrap();
// Exhaustive destructure: no `..` so every field must be named here.
let SidecarResult {
test_name,
topology,
scheduler,
scheduler_commit,
project_commit,
payload,
metrics,
passed,
skipped,
stats,
monitor,
stimulus_events,
work_type,
active_flags,
verifier_stats,
kvm_stats,
sysctls,
kargs,
kernel_version,
kernel_commit,
timestamp,
run_id,
host,
cleanup_duration_ms,
run_source,
} = loaded;
assert_eq!(test_name, "my_test");
assert_eq!(topology, "1n2l4c2t");
assert_eq!(scheduler, "scx_mitosis");
assert_eq!(work_type, "CpuSpin");
assert_eq!(scheduler_commit.as_deref(), Some("abc123"));
assert_eq!(project_commit.as_deref(), Some("def4567"));
assert_eq!(
kernel_commit.as_deref(),
Some("kabcde7"),
"kernel_commit must round-trip the literal string \
populated on the write side, including the 7-char \
hex shape `detect_kernel_commit` produces. The \
fixture uses `kabcde7` (hex-only) to make accidental \
field-swap regressions with project_commit / \
scheduler_commit obvious — each commit field carries \
a distinct token.",
);
assert_eq!(payload, None, "fixture declared no payload");
assert_eq!(kvm_stats, None, "fixture declared no kvm_stats");
assert_eq!(kernel_version, None, "fixture declared no kernel_version");
assert_eq!(host, None, "fixture declared no host context");
assert_eq!(timestamp, "", "fixture used empty-string timestamp");
assert_eq!(run_id, "", "fixture used empty-string run_id");
assert!(passed);
assert!(!skipped, "fixture declared skipped=false");
assert!(metrics.is_empty(), "fixture declared empty metrics");
assert!(
active_flags.is_empty(),
"fixture declared empty active_flags",
);
assert!(
verifier_stats.is_empty(),
"fixture declared empty verifier_stats",
);
assert!(sysctls.is_empty(), "fixture declared empty sysctls");
assert!(kargs.is_empty(), "fixture declared empty kargs");
// Nested structures survive the round-trip too.
assert_eq!(stats.total_workers, 4);
assert_eq!(stats.cgroups.len(), 1);
assert_eq!(stats.cgroups[0].num_workers, 4);
assert_eq!(stats.worst_spread, 20.0);
let mon = monitor.unwrap();
assert_eq!(mon.total_samples, 10);
assert_eq!(mon.max_imbalance_ratio, 1.5);
assert_eq!(mon.max_local_dsq_depth, 3);
assert!(!mon.stall_detected);
let deltas = mon.event_deltas.unwrap();
assert_eq!(deltas.total_fallback, 7);
assert_eq!(deltas.total_dispatch_keep_last, 3);
assert_eq!(stimulus_events.len(), 1);
assert_eq!(stimulus_events[0].label, "StepStart[0]");
assert_eq!(
cleanup_duration_ms,
Some(123),
"cleanup_duration_ms round-tripped",
);
assert_eq!(
run_source.as_deref(),
Some(SIDECAR_RUN_SOURCE_LOCAL),
"run_source must round-trip the literal `local` populated on \
the write side, including the absent-vs-populated distinction",
);
}
// Exhaustive serialize→deserialize round-trip over a SidecarResult with
// EVERY field populated. Each commit-like field carries a distinct token
// (`deadbeef…` / `cafebab-dirty` / `kabcde7-dirty`) so a field-swap
// regression fails on a literal mismatch rather than passing by accident.
#[test]
fn sidecar_result_roundtrip_all_fields_round_trip() {
    use crate::assert::{CgroupStats, ScenarioStats};
    use crate::host_context::HostContext;
    use crate::monitor::MonitorSummary;
    use crate::monitor::bpf_prog::ProgVerifierStats;
    use crate::test_support::{Metric, MetricSource, MetricStream, PayloadMetrics, Polarity};
    use crate::timeline::StimulusEvent;
    // Fixture: all Options are Some, all Vecs are non-empty, and the
    // booleans use the non-default polarity (passed=false, skipped=true)
    // so a default-substitution bug cannot hide behind default values.
    let sc = SidecarResult {
        test_name: "audit".to_string(),
        topology: "8n8l16c2t".to_string(),
        scheduler: "scx_audit".to_string(),
        scheduler_commit: Some("deadbeef1234567890abcdef".to_string()),
        project_commit: Some("cafebab-dirty".to_string()),
        payload: Some("audit_payload".to_string()),
        metrics: vec![PayloadMetrics {
            payload_index: 0,
            metrics: vec![Metric {
                name: "audit_metric".to_string(),
                value: 42.0,
                polarity: Polarity::HigherBetter,
                unit: "audits".to_string(),
                source: MetricSource::Json,
                stream: MetricStream::Stdout,
            }],
            exit_code: 7,
        }],
        passed: false,
        skipped: true,
        stats: ScenarioStats {
            cgroups: vec![CgroupStats {
                num_workers: 3,
                ..Default::default()
            }],
            total_workers: 3,
            ..Default::default()
        },
        monitor: Some(MonitorSummary {
            total_samples: 17,
            ..Default::default()
        }),
        stimulus_events: vec![StimulusEvent {
            elapsed_ms: 123,
            label: "audit_event".to_string(),
            op_kind: None,
            detail: None,
            total_iterations: None,
        }],
        work_type: "AuditWork".to_string(),
        active_flags: vec!["flag_a".to_string(), "flag_b".to_string()],
        verifier_stats: vec![ProgVerifierStats {
            name: "audit_prog".to_string(),
            verified_insns: 999,
        }],
        kvm_stats: Some(crate::vmm::KvmStatsTotals::default()),
        sysctls: vec!["sysctl.kernel.audit_sysctl=1".to_string()],
        kargs: vec!["audit_karg".to_string()],
        kernel_version: Some("6.99.0".to_string()),
        kernel_commit: Some("kabcde7-dirty".to_string()),
        timestamp: "audit-timestamp".to_string(),
        run_id: "audit-run-id".to_string(),
        host: Some(HostContext {
            kernel_name: Some("AuditLinux".to_string()),
            ..Default::default()
        }),
        cleanup_duration_ms: Some(987),
        run_source: Some(SIDECAR_RUN_SOURCE_CI.to_string()),
    };
    // JSON round-trip under test.
    let json = serde_json::to_string(&sc).expect("serialize");
    let loaded: SidecarResult = serde_json::from_str(&json).expect("deserialize");
    // Field-by-field verification against the write-side literals.
    assert_eq!(loaded.test_name, "audit");
    assert_eq!(loaded.topology, "8n8l16c2t");
    assert_eq!(loaded.scheduler, "scx_audit");
    assert_eq!(
        loaded.scheduler_commit.as_deref(),
        Some("deadbeef1234567890abcdef"),
        "scheduler_commit must round-trip the literal string \
         populated on the write side — not collapse to None via \
         a missing serde attribute or default fallback",
    );
    assert_eq!(
        loaded.project_commit.as_deref(),
        Some("cafebab-dirty"),
        "project_commit must round-trip the literal string \
         populated on the write side, including the `-dirty` \
         suffix that `detect_project_commit` appends — a \
         regression that stripped the suffix or substituted \
         None for a populated value would surface here. \
         Fixture uses 7-char hex (`cafebab`) to match the \
         `oid::to_hex_with_len(7)` shape `detect_project_commit` \
         produces in production.",
    );
    assert_eq!(loaded.payload.as_deref(), Some("audit_payload"));
    // Nested metric payload survives with its exact values.
    assert_eq!(loaded.metrics.len(), 1);
    assert_eq!(loaded.metrics[0].exit_code, 7);
    assert_eq!(loaded.metrics[0].metrics.len(), 1);
    assert_eq!(loaded.metrics[0].metrics[0].name, "audit_metric");
    assert_eq!(loaded.metrics[0].metrics[0].value, 42.0);
    assert!(!loaded.passed, "passed must survive as false");
    assert!(loaded.skipped, "skipped must survive as true");
    assert_eq!(loaded.stats.total_workers, 3);
    assert_eq!(loaded.stats.cgroups.len(), 1);
    assert_eq!(loaded.stats.cgroups[0].num_workers, 3);
    let mon = loaded.monitor.expect("monitor round-trips");
    assert_eq!(mon.total_samples, 17);
    assert_eq!(loaded.stimulus_events.len(), 1);
    assert_eq!(loaded.stimulus_events[0].label, "audit_event");
    assert_eq!(loaded.stimulus_events[0].elapsed_ms, 123);
    assert_eq!(loaded.work_type, "AuditWork");
    assert_eq!(loaded.active_flags, vec!["flag_a", "flag_b"]);
    assert_eq!(loaded.verifier_stats.len(), 1);
    assert_eq!(loaded.verifier_stats[0].name, "audit_prog");
    assert_eq!(loaded.verifier_stats[0].verified_insns, 999);
    assert!(
        loaded.kvm_stats.is_some(),
        "kvm_stats must round-trip as Some"
    );
    assert_eq!(loaded.sysctls, vec!["sysctl.kernel.audit_sysctl=1"]);
    assert_eq!(loaded.kargs, vec!["audit_karg"]);
    assert_eq!(loaded.kernel_version.as_deref(), Some("6.99.0"));
    assert_eq!(
        loaded.kernel_commit.as_deref(),
        Some("kabcde7-dirty"),
        "kernel_commit must round-trip the literal string \
         populated on the write side, including the `-dirty` \
         suffix that `detect_kernel_commit` appends. Fixture \
         uses 7-char hex (`kabcde7`) to match the \
         `oid::to_hex_with_len(7)` shape `detect_kernel_commit` \
         produces in production. The leading `k` in the fixture \
         token makes a project_commit / kernel_commit field-swap \
         regression visible — each commit field carries a \
         distinct token in the audit fixture.",
    );
    assert_eq!(loaded.timestamp, "audit-timestamp");
    assert_eq!(loaded.run_id, "audit-run-id");
    let host = loaded.host.expect("host round-trips");
    assert_eq!(host.kernel_name.as_deref(), Some("AuditLinux"));
    assert_eq!(loaded.cleanup_duration_ms, Some(987));
    assert_eq!(
        loaded.run_source.as_deref(),
        Some(SIDECAR_RUN_SOURCE_CI),
        "run_source must round-trip the literal `ci` populated on \
         the write side. Audit fixture uses `ci` (vs `local` in \
         the sibling roundtrip) so a write-vs-read field-swap \
         regression that mapped one tag onto another would \
         surface in this audit pass even if the sibling test \
         did not detect it.",
    );
}
// monitor is None in the base fixture; it must serialize as an
// explicit JSON `null` (not be omitted) and deserialize back to None.
#[test]
fn sidecar_result_roundtrip_no_monitor() {
    let fixture = SidecarResult {
        test_name: "eevdf_test".to_string(),
        topology: "1n1l2c1t".to_string(),
        passed: false,
        ..SidecarResult::test_fixture()
    };
    let json = serde_json::to_string(&fixture).unwrap();
    let decoded: SidecarResult = serde_json::from_str(&json).unwrap();
    assert_eq!(decoded.test_name, "eevdf_test");
    assert!(!decoded.passed);
    assert!(decoded.monitor.is_none());
    assert!(decoded.stimulus_events.is_empty());
    // Inspect the raw JSON text for the explicit null.
    assert!(
        json.contains("\"monitor\":null"),
        "monitor=None must serialize as `\"monitor\":null`, not be omitted: {json}",
    );
}
// Contract: every non-Option field of SidecarResult is REQUIRED by
// serde. Removing any one of them from the serialized JSON must make
// deserialization fail with an error message naming the missing field.
#[test]
fn sidecar_result_missing_required_field_rejected_by_deserialize() {
    // Non-Option fields only; Option fields deserialize absent keys as
    // None and so cannot be "missing" in this sense.
    const REQUIRED_NON_OPTION_FIELDS: &[&str] = &[
        "test_name",
        "topology",
        "scheduler",
        "metrics",
        "passed",
        "skipped",
        "stats",
        "stimulus_events",
        "work_type",
        "active_flags",
        "verifier_stats",
        "sysctls",
        "kargs",
        "timestamp",
        "run_id",
    ];
    // Serialize the fixture into a mutable JSON object map so each
    // iteration can knock out exactly one field.
    let fixture = SidecarResult::test_fixture();
    let full = match serde_json::to_value(&fixture).unwrap() {
        serde_json::Value::Object(m) => m,
        other => panic!("expected object, got {other:?}"),
    };
    for field in REQUIRED_NON_OPTION_FIELDS {
        let mut obj = full.clone();
        // remove() returning None means the field list has drifted from
        // the struct — fail loudly rather than silently test nothing.
        assert!(
            obj.remove(*field).is_some(),
            "SidecarResult test fixture must emit `{field}` for its \
             rejection case to be meaningful — the required-fields \
             list has drifted from the struct definition",
        );
        let json = serde_json::Value::Object(obj).to_string();
        // Deserialization with the field removed must be an Err.
        let err = serde_json::from_str::<SidecarResult>(&json)
            .err()
            .unwrap_or_else(|| {
                panic!(
                    "deserialize must reject SidecarResult with `{field}` removed, \
                     but succeeded — a regression may have added \
                     `#[serde(default)]` to this field",
                )
            });
        // The error text must name the offending field for triage.
        let msg = format!("{err}");
        assert!(
            msg.contains(field),
            "missing-field error for `{field}` must name the field; got: {msg}",
        );
    }
}
// Pins the rename contract for the old `source` JSON key vs the new
// `run_source` field: the old key is NOT aliased — it is silently
// dropped — and when both keys are present only the new key is honored.
#[test]
fn sidecar_result_rename_contract_old_source_key_lands_run_source_none() {
    let fixture = SidecarResult::test_fixture();
    let full = match serde_json::to_value(&fixture).unwrap() {
        serde_json::Value::Object(m) => m,
        other => panic!("expected object, got {other:?}"),
    };
    // Scenario 1: only the OLD `source` key present — must still parse,
    // with run_source landing as None (documented data loss).
    let mut obj_old = full.clone();
    obj_old.remove("run_source");
    obj_old.insert(
        "source".to_string(),
        serde_json::Value::String("ci".to_string()),
    );
    let json_old = serde_json::Value::Object(obj_old).to_string();
    let parsed_old: SidecarResult = serde_json::from_str(&json_old).expect(
        "old-key sidecar must still deserialize — \
         SidecarResult does not set deny_unknown_fields, \
         so the unrecognised `\"source\"` key is silently dropped",
    );
    assert_eq!(
        parsed_old.run_source, None,
        "old `\"source\": \"ci\"` key must land run_source = None \
         per the documented data-loss contract; a regression that \
         added `#[serde(alias = \"source\")]` would yield Some(\"ci\") here",
    );
    // Scenario 2: only the NEW `run_source` key — populates normally.
    let mut obj_new = full.clone();
    obj_new.insert(
        "run_source".to_string(),
        serde_json::Value::String("ci".to_string()),
    );
    let json_new = serde_json::Value::Object(obj_new).to_string();
    let parsed_new: SidecarResult =
        serde_json::from_str(&json_new).expect("new-key sidecar must deserialize cleanly");
    assert_eq!(
        parsed_new.run_source.as_deref(),
        Some("ci"),
        "new `\"run_source\": \"ci\"` key must populate \
         run_source — a regression breaking the new-key path \
         would yield None here",
    );
    // Scenario 3: BOTH keys present with conflicting values — the new
    // key must win and the old one must not act as a fallback.
    let mut obj_both = full.clone();
    obj_both.insert(
        "run_source".to_string(),
        serde_json::Value::String("ci".to_string()),
    );
    obj_both.insert(
        "source".to_string(),
        serde_json::Value::String("local".to_string()),
    );
    let json_both = serde_json::Value::Object(obj_both).to_string();
    let parsed_both: SidecarResult =
        serde_json::from_str(&json_both).expect("both-keys sidecar must deserialize cleanly");
    assert_eq!(
        parsed_both.run_source.as_deref(),
        Some("ci"),
        "with both keys present, new `\"run_source\"` must win \
         — the old `\"source\"` is silently dropped, NOT used \
         as a fallback. A regression that processed `\"source\"` \
         as an alias would surface here as Some(\"local\")",
    );
}
// An existing but empty directory yields no sidecar results.
#[test]
fn collect_sidecars_empty_dir() {
    let dir = tempfile::TempDir::new().unwrap();
    assert!(collect_sidecars(dir.path()).is_empty());
}
// A path that does not exist is treated as "no sidecars", not an error.
#[test]
fn collect_sidecars_nonexistent_dir() {
    let missing = std::path::Path::new("/nonexistent/path");
    assert!(collect_sidecars(missing).is_empty());
}
// One valid `*.ktstr.json` sidecar plus one unrelated `.json` file:
// only the sidecar is parsed and collected.
#[test]
fn collect_sidecars_reads_json() {
    let dir = tempfile::TempDir::new().unwrap();
    let root = dir.path();
    let fixture = SidecarResult {
        test_name: "test_x".to_string(),
        topology: "1n1l2c1t".to_string(),
        ..SidecarResult::test_fixture()
    };
    std::fs::write(
        root.join("test_x.ktstr.json"),
        serde_json::to_string(&fixture).unwrap(),
    )
    .unwrap();
    std::fs::write(root.join("other.json"), r#"{"key":"val"}"#).unwrap();
    let collected = collect_sidecars(root);
    assert_eq!(collected.len(), 1);
    assert_eq!(collected[0].test_name, "test_x");
}
// A sidecar placed in an immediate subdirectory (e.g. a per-job dir)
// is still picked up by the collection pass.
#[test]
fn collect_sidecars_recurses_one_level() {
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let job_dir = tmp_dir.path().join("job-0");
    std::fs::create_dir_all(&job_dir).unwrap();
    let fixture = SidecarResult {
        test_name: "nested_test".to_string(),
        topology: "1n2l4c2t".to_string(),
        scheduler: "scx_mitosis".to_string(),
        passed: false,
        ..SidecarResult::test_fixture()
    };
    std::fs::write(
        job_dir.join("nested_test.ktstr.json"),
        serde_json::to_string(&fixture).unwrap(),
    )
    .unwrap();
    let collected = collect_sidecars(tmp_dir.path());
    assert_eq!(collected.len(), 1);
    assert_eq!(collected[0].test_name, "nested_test");
    assert!(!collected[0].passed);
}
// Recursion depth is bounded: a sidecar one level below the root is
// collected, but a sidecar two levels down is invisible.
#[test]
fn collect_sidecars_does_not_recurse_past_one_level() {
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let root = tmp_dir.path();
    let level_one = root.join("job-0");
    let level_two = level_one.join("replay-0");
    std::fs::create_dir_all(&level_two).unwrap();
    // Small factory so both fixtures differ only by name.
    let mk = |name: &str| SidecarResult {
        test_name: name.to_string(),
        ..SidecarResult::test_fixture()
    };
    std::fs::write(
        level_one.join("top_level.ktstr.json"),
        serde_json::to_string(&mk("top_level")).unwrap(),
    )
    .unwrap();
    std::fs::write(
        level_two.join("deep_level.ktstr.json"),
        serde_json::to_string(&mk("deep_level")).unwrap(),
    )
    .unwrap();
    let names: Vec<String> = collect_sidecars(root)
        .into_iter()
        .map(|r| r.test_name)
        .collect();
    assert_eq!(
        names,
        vec!["top_level"],
        "collect_sidecars must see only the one-level-deep sidecar, not the two-level one"
    );
}
// A sidecar-named file whose contents fail to parse is skipped
// silently rather than aborting the whole collection.
#[test]
fn collect_sidecars_skips_invalid_json() {
    let dir = tempfile::TempDir::new().unwrap();
    std::fs::write(dir.path().join("bad.ktstr.json"), "not json").unwrap();
    assert!(collect_sidecars(dir.path()).is_empty());
}
// A plain `.json` file without the `.ktstr.` infix is not a sidecar
// and must not be collected.
#[test]
fn collect_sidecars_skips_non_ktstr_json() {
    let dir = tempfile::TempDir::new().unwrap();
    std::fs::write(dir.path().join("other.json"), r#"{"test":"val"}"#).unwrap();
    assert!(collect_sidecars(dir.path()).is_empty());
}
// work_type is a free-form string and must survive a JSON round-trip.
#[test]
fn sidecar_result_work_type_field() {
    let fixture = SidecarResult {
        work_type: "Bursty".to_string(),
        ..SidecarResult::test_fixture()
    };
    let encoded = serde_json::to_string(&fixture).unwrap();
    let decoded: SidecarResult = serde_json::from_str(&encoded).unwrap();
    assert_eq!(decoded.work_type, "Bursty");
}
// With no KTSTR_SIDECAR_DIR override, sidecar_dir() must derive the
// default `runs_root()/<kernel>-<commit>` directory, and one
// write_sidecar call must land exactly one file there.
#[test]
fn write_sidecar_defaults_to_target_dir_without_env() {
    // Serialize env mutation across tests; guards restore on drop.
    let _lock = lock_env();
    let target_dir = tempfile::TempDir::new().unwrap();
    let _env_target = EnvVarGuard::set("CARGO_TARGET_DIR", target_dir.path());
    let _env_sidecar = EnvVarGuard::remove("KTSTR_SIDECAR_DIR");
    let _env_kernel = EnvVarGuard::remove("KTSTR_KERNEL");
    // The derived dir must match the documented default composition.
    let dir = sidecar_dir();
    let kernel = detect_kernel_version();
    let commit = detect_project_commit();
    let expected = runs_root().join(format_run_dirname(kernel.as_deref(), commit.as_deref()));
    assert_eq!(dir, expected);
    // Minimal always-passing test entry; auto_repro off so write_sidecar
    // performs no extra work beyond the sidecar itself.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__sidecar_default_dir__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let check_result = AssertResult::pass();
    write_sidecar(&entry, &vm_result, &[], &check_result, "CpuSpin", &[], &[]).unwrap();
    let paths = find_sidecars_by_prefix(&dir, "__sidecar_default_dir__-");
    assert_eq!(
        paths.len(),
        1,
        "single `write_sidecar` call against prefix \
         `__sidecar_default_dir__-` must produce exactly one \
         file; got {} ({paths:?}). If >1, either the variant \
         hash collided for this test's variant-field tuple or \
         `pre_clear_run_dir_once`'s per-directory keying failed \
         to wipe a stale sidecar from a prior crashed run.",
        paths.len(),
    );
}
// KTSTR_SIDECAR_DIR set to the EMPTY string must behave like unset:
// sidecar_dir() falls back to the derived default instead of
// returning an empty PathBuf.
#[test]
fn sidecar_dir_empty_override_falls_back_to_default() {
    let _lock = lock_env();
    let target_dir = tempfile::TempDir::new().unwrap();
    let _env_target = EnvVarGuard::set("CARGO_TARGET_DIR", target_dir.path());
    // Deliberately empty override — the case under test.
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", "");
    let _env_kernel = EnvVarGuard::remove("KTSTR_KERNEL");
    let dir = sidecar_dir();
    let kernel = detect_kernel_version();
    let commit = detect_project_commit();
    let expected = runs_root().join(format_run_dirname(kernel.as_deref(), commit.as_deref()));
    assert_eq!(
        dir, expected,
        "empty KTSTR_SIDECAR_DIR must fall back to the default \
         `runs_root().join(format_run_dirname(...))` path, NOT \
         return PathBuf::from(\"\"). A regression that dropped \
         the `is_empty()` filter on the override read would \
         surface here as `dir == PathBuf::from(\"\")`.",
    );
    // Belt-and-braces: the failure mode is specifically an empty path.
    assert_ne!(
        dir,
        std::path::PathBuf::new(),
        "sidecar_dir must never return an empty path",
    );
}
// Both components present and clean: simple `{kernel}-{commit}` join.
#[test]
fn format_run_dirname_clean_commit() {
    let dirname = format_run_dirname(Some("6.14.2"), Some("abc1234"));
    assert_eq!(
        dirname,
        "6.14.2-abc1234",
        "clean dirname must be `{{kernel}}-{{project_commit}}`",
    );
}
// A `-dirty` suffix on the commit must not be normalized away.
#[test]
fn format_run_dirname_dirty_commit() {
    let dirname = format_run_dirname(Some("6.14.2"), Some("abc1234-dirty"));
    assert_eq!(
        dirname,
        "6.14.2-abc1234-dirty",
        "dirty dirname must pass the `-dirty` suffix through verbatim",
    );
}
// Missing commit is replaced by the `unknown` sentinel.
#[test]
fn format_run_dirname_unknown_commit() {
    let dirname = format_run_dirname(Some("6.14.2"), None);
    assert_eq!(
        dirname,
        "6.14.2-unknown",
        "missing commit must collapse to `{{kernel}}-unknown` sentinel",
    );
}
// Missing kernel is replaced by the `unknown` sentinel.
#[test]
fn format_run_dirname_unknown_kernel() {
    let dirname = format_run_dirname(None, Some("abc1234"));
    assert_eq!(
        dirname,
        "unknown-abc1234",
        "missing kernel must collapse to `unknown-{{project_commit}}` sentinel",
    );
}
// Both components missing yields the documented colliding sentinel.
#[test]
fn format_run_dirname_both_unknown_collide() {
    let dirname = format_run_dirname(None, None);
    assert_eq!(
        dirname,
        "unknown-unknown",
        "both-missing case must produce `unknown-unknown` — the documented \
         collision the operator must disambiguate via KTSTR_SIDECAR_DIR or git",
    );
}
// Seed two sidecars, pre-clear, and verify the directory is emptied.
#[test]
fn pre_clear_run_dir_once_wipes_existing_sidecars() {
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let dir = tmp_dir.path();
    for name in ["test_a-0000.ktstr.json", "test_b-1111.ktstr.json"] {
        std::fs::write(dir.join(name), b"{}").unwrap();
    }
    assert_eq!(
        std::fs::read_dir(dir).unwrap().count(),
        2,
        "fixture precondition: tempdir must contain two sidecars",
    );
    pre_clear_run_dir_once(dir);
    // Collect whatever survived so a failure names the leftovers.
    let remaining: Vec<_> = std::fs::read_dir(dir)
        .unwrap()
        .flatten()
        .map(|entry| entry.file_name())
        .collect();
    assert!(
        remaining.is_empty(),
        "every *.ktstr.json file must be wiped; got {remaining:?}",
    );
}
// Only top-level `*.ktstr.json` files are wiped; other files, bare
// `.json`, `.tmp` leftovers, and subdirectories (with their contents)
// all survive the pre-clear.
#[test]
fn pre_clear_run_dir_once_skips_subdirs_and_non_sidecars() {
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let root = tmp_dir.path();
    std::fs::write(root.join("victim-0000.ktstr.json"), b"{}").unwrap();
    std::fs::write(root.join("README.md"), b"keep").unwrap();
    std::fs::write(root.join("other.json"), b"{}").unwrap();
    std::fs::write(root.join("partial.ktstr.json.tmp"), b"{}").unwrap();
    let nested = root.join("job-1");
    std::fs::create_dir(&nested).unwrap();
    std::fs::write(nested.join("nested-0000.ktstr.json"), b"{}").unwrap();
    pre_clear_run_dir_once(root);
    assert!(
        !root.join("victim-0000.ktstr.json").exists(),
        "top-level *.ktstr.json file must be wiped",
    );
    assert!(
        root.join("README.md").exists(),
        "non-sidecar file must survive",
    );
    assert!(
        root.join("other.json").exists(),
        "bare *.json (no .ktstr. infix) must survive",
    );
    assert!(
        root.join("partial.ktstr.json.tmp").exists(),
        "non-`.json` extension must survive even with .ktstr. infix",
    );
    assert!(nested.exists(), "subdirectory must survive");
    assert!(
        nested.join("nested-0000.ktstr.json").exists(),
        "sidecar inside subdirectory must survive (pre-clear is shallow)",
    );
}
// Pre-clearing a directory that was never created must neither panic
// nor create the directory as a side effect.
#[test]
fn pre_clear_run_dir_once_silent_on_missing_dir() {
    let parent = tempfile::TempDir::new().unwrap();
    let missing = parent.path().join("does_not_exist_yet");
    assert!(
        !missing.exists(),
        "fixture precondition: dir must not exist"
    );
    pre_clear_run_dir_once(&missing);
    assert!(
        !missing.exists(),
        "pre_clear must not create the dir as a side effect",
    );
}
// pre_clear_run_dir_once must be once-PER-DIRECTORY, not once-global:
// a second call on the same dir is a no-op (cache hit), while a first
// call on a different dir still wipes that dir.
#[test]
fn pre_clear_run_dir_once_keys_per_directory() {
    let tmp_a = tempfile::TempDir::new().unwrap();
    let tmp_b = tempfile::TempDir::new().unwrap();
    // First call against A: wipes A's sidecar.
    std::fs::write(tmp_a.path().join("a-0000.ktstr.json"), b"{}").unwrap();
    pre_clear_run_dir_once(tmp_a.path());
    assert!(
        !tmp_a.path().join("a-0000.ktstr.json").exists(),
        "first call against A must wipe A's sidecar",
    );
    // Second call against A: cached — the freshly written sidecar
    // must survive.
    std::fs::write(tmp_a.path().join("a-1111.ktstr.json"), b"{}").unwrap();
    pre_clear_run_dir_once(tmp_a.path());
    assert!(
        tmp_a.path().join("a-1111.ktstr.json").exists(),
        "second call against A must be a no-op (cache hit) — \
         the post-prime sidecar must survive. A regression to \
         OnceLock<()> or a HashSet that ignores the key would \
         leak this assertion.",
    );
    // First call against B: a distinct key, so it must still wipe.
    std::fs::write(tmp_b.path().join("b-0000.ktstr.json"), b"{}").unwrap();
    pre_clear_run_dir_once(tmp_b.path());
    assert!(
        !tmp_b.path().join("b-0000.ktstr.json").exists(),
        "first call against B must wipe B's sidecar — proves the \
         per-dir keying distinguishes A from B (a OnceLock<()> \
         that fired once for A would leak this assertion).",
    );
}
// A fresh gate means the warning has not fired yet: the first call
// must write something into the sink.
#[test]
fn warn_unknown_project_commit_inner_emits_on_first_call() {
    let gate = std::sync::OnceLock::new();
    let mut sink = Vec::<u8>::new();
    warn_unknown_project_commit_inner(&gate, &mut sink);
    assert!(
        !sink.is_empty(),
        "first call must emit bytes to the sink; got empty",
    );
}
// The one-shot warning text must carry both the WARNING severity tag
// and the KTSTR_SIDECAR_DIR remediation hint.
#[test]
fn warn_unknown_project_commit_inner_emits_expected_substring() {
    let gate = std::sync::OnceLock::new();
    let mut sink: Vec<u8> = Vec::new();
    warn_unknown_project_commit_inner(&gate, &mut sink);
    // Sink contents must decode as UTF-8 before substring checks.
    let captured = String::from_utf8(sink).expect("warning text must be UTF-8");
    assert!(
        captured.contains("WARNING:"),
        "warning must carry the WARNING severity tag; got: {captured:?}",
    );
    assert!(
        captured.contains("KTSTR_SIDECAR_DIR"),
        "warning must reference KTSTR_SIDECAR_DIR as the remediation \
         knob — operators rely on this hint to disambiguate \
         non-git runs; got: {captured:?}",
    );
}
// The warning is once-per-gate: a second call through the SAME gate
// must append nothing to the sink.
#[test]
fn warn_unknown_project_commit_inner_second_call_is_no_op() {
    let gate = std::sync::OnceLock::new();
    let mut sink: Vec<u8> = Vec::new();
    warn_unknown_project_commit_inner(&gate, &mut sink);
    // Record the sink length after the first (emitting) call.
    let after_first = sink.len();
    assert!(
        after_first > 0,
        "fixture precondition: first call must emit bytes",
    );
    warn_unknown_project_commit_inner(&gate, &mut sink);
    assert_eq!(
        sink.len(),
        after_first,
        "second call against the same gate must NOT append bytes — \
         the OnceLock<()> gating is the load-bearing invariant; got \
         len {} (expected {after_first})",
        sink.len(),
    );
}
// newest_run_dir picks by mtime but must ignore dot-directories: even
// when `.locks/` is created strictly later (newer mtime) than the real
// run dir, the real run dir must win.
#[test]
fn newest_run_dir_skips_dotfile_subdirectories() {
    use std::thread::sleep;
    use std::time::Duration;
    let _lock = lock_env();
    let target_dir = tempfile::TempDir::new().unwrap();
    let _env_target = EnvVarGuard::set("CARGO_TARGET_DIR", target_dir.path());
    let runs = target_dir.path().join("ktstr");
    std::fs::create_dir(&runs).expect("mkdir runs root");
    let real = runs.join("real-run");
    std::fs::create_dir(&real).expect("mkdir real run dir");
    // Sleep so `.locks/` gets a strictly newer mtime than `real-run`
    // even on filesystems with coarse timestamp granularity.
    sleep(Duration::from_millis(50));
    std::fs::create_dir(runs.join(".locks")).expect("mkdir .locks");
    let got = newest_run_dir().expect("non-empty runs root must yield Some");
    assert_eq!(
        got, real,
        "newest_run_dir must pick the real run dir even when \
         .locks/ has a newer mtime — a regression that drops \
         the dotfile filter would surface here as `.locks/` \
         winning the mtime contest",
    );
}
// Complement of the dotfile-filter test: when the runs root holds ONLY
// dot-directories there is no eligible run dir at all, so
// newest_run_dir must return None rather than a dotfile entry.
#[test]
fn newest_run_dir_yields_none_when_only_dotfiles_exist() {
    let _lock = lock_env();
    let target_dir = tempfile::TempDir::new().unwrap();
    let _env_target = EnvVarGuard::set("CARGO_TARGET_DIR", target_dir.path());
    let runs = target_dir.path().join("ktstr");
    std::fs::create_dir(&runs).expect("mkdir runs root");
    // Only dot-directories — no candidate run dirs.
    std::fs::create_dir(runs.join(".locks")).expect("mkdir .locks");
    std::fs::create_dir(runs.join(".cache")).expect("mkdir .cache");
    let got = newest_run_dir();
    assert!(
        got.is_none(),
        "runs root with only dotfile subdirs must yield None; got {got:?}",
    );
}
// An ordinary (non-dotfile) subdirectory qualifies as a run directory.
#[test]
fn is_run_directory_accepts_non_dotfile_subdir() {
    let tmp = tempfile::TempDir::new().unwrap();
    std::fs::create_dir(tmp.path().join("real-run")).unwrap();
    // The tempdir holds exactly one entry; grab its DirEntry.
    let mut entries = std::fs::read_dir(tmp.path()).unwrap();
    let entry = entries.next().unwrap().unwrap();
    assert!(
        super::is_run_directory(&entry),
        "non-dotfile subdir must be accepted",
    );
}
// A dot-prefixed subdirectory (e.g. `.locks`) is not a run directory.
#[test]
fn is_run_directory_rejects_dotfile_subdir() {
    let tmp = tempfile::TempDir::new().unwrap();
    std::fs::create_dir(tmp.path().join(".locks")).unwrap();
    // The tempdir holds exactly one entry; grab its DirEntry.
    let mut entries = std::fs::read_dir(tmp.path()).unwrap();
    let entry = entries.next().unwrap().unwrap();
    assert!(
        !super::is_run_directory(&entry),
        "dotfile subdir must be rejected",
    );
}
// Plain files are never run directories, whatever their name.
#[test]
fn is_run_directory_rejects_regular_files() {
    let tmp = tempfile::TempDir::new().unwrap();
    std::fs::write(tmp.path().join("regular-file"), b"x").unwrap();
    // The tempdir holds exactly one entry; grab its DirEntry.
    let mut entries = std::fs::read_dir(tmp.path()).unwrap();
    let entry = entries.next().unwrap().unwrap();
    assert!(
        !super::is_run_directory(&entry),
        "regular file must be rejected",
    );
}
// The lock file lives in a sibling `.locks/` directory and is named
// after the run directory with a `.lock` suffix.
#[test]
fn run_dir_lock_path_returns_expected_shape() {
    use std::path::{Path, PathBuf};
    let run_dir = Path::new("/runs-root/6.14.2-deadbee");
    let lock = super::run_dir_lock_path(run_dir).expect("non-root dir must yield Some");
    let expected = PathBuf::from("/runs-root/.locks/6.14.2-deadbee.lock");
    assert_eq!(lock, expected);
}
// The filesystem root has no parent to host `.locks/`, so no lock
// path can be derived for it.
#[test]
fn run_dir_lock_path_no_parent_returns_none() {
    let root = std::path::Path::new("/");
    let lock = super::run_dir_lock_path(root);
    assert!(
        lock.is_none(),
        "root path must yield None (no parent), got {lock:?}",
    );
}
// First acquire against a fresh run dir must create the sibling
// `.locks/` directory and the lockfile on demand; dropping the fd must
// leave the lockfile on disk (release is fd-bound, not an unlink).
#[test]
fn acquire_run_dir_flock_creates_locks_subdir_lazily() {
    let tmp = tempfile::TempDir::new().unwrap();
    let dir = tmp.path().join("6.14.2-deadbee");
    std::fs::create_dir_all(&dir).unwrap();
    let fd = super::acquire_run_dir_flock_with_timeout(&dir, std::time::Duration::from_secs(1))
        .expect("first acquire must succeed against an uncontended dir");
    assert!(
        tmp.path().join(".locks").exists(),
        ".locks/ subdirectory must be created lazily on first acquire",
    );
    assert!(
        tmp.path().join(".locks/6.14.2-deadbee.lock").exists(),
        "lockfile must exist on disk after acquire",
    );
    // Dropping the fd releases the lock but must not delete the file.
    drop(fd);
    assert!(
        tmp.path().join(".locks/6.14.2-deadbee.lock").exists(),
        "lockfile sentinel must persist after fd drop — \
         try_flock's contract is fd-bound release, not file unlink",
    );
}
// Dropping the returned fd must release the kernel flock so a
// subsequent acquire on the same key succeeds within the timeout.
#[test]
fn acquire_run_dir_flock_releases_on_drop() {
    let tmp = tempfile::TempDir::new().unwrap();
    let dir = tmp.path().join("key");
    std::fs::create_dir_all(&dir).unwrap();
    let fd1 =
        super::acquire_run_dir_flock_with_timeout(&dir, std::time::Duration::from_secs(1))
            .expect("first acquire");
    // Release by drop, then re-acquire the same lock key.
    drop(fd1);
    let fd2 =
        super::acquire_run_dir_flock_with_timeout(&dir, std::time::Duration::from_secs(1))
            .expect(
                "second acquire after drop must succeed — a regression that \
                 fails to release the kernel flock on OwnedFd::drop would \
                 leak this assertion",
            );
    drop(fd2);
}
// Contended path: while a peer holds an exclusive flock on the
// lockfile, acquire must wait ~the requested timeout and then fail
// with an error naming both the timeout and the flock mode.
#[test]
fn acquire_run_dir_flock_times_out_when_peer_holds_lock() {
    let tmp = tempfile::TempDir::new().unwrap();
    let dir = tmp.path().join("contended-key");
    std::fs::create_dir_all(&dir).unwrap();
    // Build the lockfile path and its parent manually so the peer can
    // grab the lock before the API under test runs.
    let lock_path = super::run_dir_lock_path(&dir).unwrap();
    std::fs::create_dir_all(lock_path.parent().unwrap()).unwrap();
    // Peer holds the exclusive flock for the whole test body.
    let _peer_fd = crate::flock::try_flock(&lock_path, crate::flock::FlockMode::Exclusive)
        .expect("peer flock attempt")
        .expect("peer must acquire on a fresh lockfile");
    let start = std::time::Instant::now();
    let err =
        super::acquire_run_dir_flock_with_timeout(&dir, std::time::Duration::from_millis(300))
            .expect_err("acquire must fail while peer holds LOCK_EX");
    let elapsed = start.elapsed();
    // 250ms lower bound leaves 50ms of scheduler slack under the 300ms
    // timeout while still proving the call actually waited.
    assert!(
        elapsed >= std::time::Duration::from_millis(250),
        "acquire must wait ~timeout before erroring; elapsed={elapsed:?}",
    );
    // `{:#}` (alternate form) is expected to include the error's
    // context/cause chain in the rendered message.
    let msg = format!("{err:#}");
    assert!(
        msg.contains("timed out"),
        "error must surface the timeout cause; got: {msg}",
    );
    assert!(
        msg.contains("LOCK_EX"),
        "error must name the flock mode for operator triage; got: {msg}",
    );
}
// Same-directory reuse is last-writer-wins: pre_clear_run_dir_once
// wipes earlier sidecars, after which the next write_sidecar call
// repopulates the directory with only its own output.
#[test]
fn write_sidecar_same_dir_is_last_writer_wins_after_pre_clear() {
    let _lock = lock_env();
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    // Minimal always-passing test body shared by both entries.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry_a = KtstrTestEntry {
        name: "__reuse_first_run__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let ok = AssertResult::pass();
    // First invocation writes its sidecar.
    write_sidecar(&entry_a, &vm_result, &[], &ok, "CpuSpin", &[], &[]).unwrap();
    assert_eq!(
        find_sidecars_by_prefix(tmp, "__reuse_first_run__-").len(),
        1,
        "first invocation must write its sidecar",
    );
    // Simulated second run: pre-clear wipes the earlier output.
    pre_clear_run_dir_once(tmp);
    assert_eq!(
        find_sidecars_by_prefix(tmp, "__reuse_first_run__-").len(),
        0,
        "pre-clear must wipe the first invocation's sidecar before \
         the second invocation writes — this is the last-writer-wins \
         contract",
    );
    let entry_b = KtstrTestEntry {
        name: "__reuse_second_run__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    write_sidecar(&entry_b, &vm_result, &[], &ok, "CpuSpin", &[], &[]).unwrap();
    // After the second write, only the second entry's sidecar remains.
    assert_eq!(
        find_sidecars_by_prefix(tmp, "__reuse_first_run__-").len(),
        0,
        "first invocation's sidecar must remain wiped after second invocation writes",
    );
    assert_eq!(
        find_sidecars_by_prefix(tmp, "__reuse_second_run__-").len(),
        1,
        "second invocation's sidecar must be the only sidecar in the dir",
    );
}
// When KTSTR_SIDECAR_DIR is an operator-chosen override, write_sidecar
// must NOT pre-clear: pre-existing sidecars in the override dir are
// operator data and must survive alongside the new sidecar.
#[test]
fn write_sidecar_override_does_not_pre_clear() {
    let _lock = lock_env();
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    // Pre-existing sidecar that must survive the write below.
    std::fs::write(tmp.join("__preserved__-0000.ktstr.json"), b"{}").unwrap();
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__override_skips_preclear__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let ok = AssertResult::pass();
    write_sidecar(&entry, &vm_result, &[], &ok, "CpuSpin", &[], &[]).unwrap();
    assert!(
        tmp.join("__preserved__-0000.ktstr.json").exists(),
        "pre-existing sidecar in override dir must NOT be pre-cleared — \
         operator-chosen directories are owned by the operator and \
         must not lose data on `write_sidecar`",
    );
    assert_eq!(
        find_sidecars_by_prefix(tmp, "__override_skips_preclear__-").len(),
        1,
        "new sidecar must be written alongside the preserved one",
    );
}
// On the DEFAULT (non-override) path, two write_sidecar calls in the
// same process must both leave their files behind: the pre-clear must
// fire at most once per run directory, not once per write.
#[test]
fn write_sidecar_default_path_two_writes_both_survive() {
    let _lock = lock_env();
    let target_dir = tempfile::TempDir::new().unwrap();
    let _env_target = EnvVarGuard::set("CARGO_TARGET_DIR", target_dir.path());
    let _env_sidecar = EnvVarGuard::remove("KTSTR_SIDECAR_DIR");
    let _env_kernel = EnvVarGuard::remove("KTSTR_KERNEL");
    let dir = sidecar_dir();
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry_first = KtstrTestEntry {
        name: "__b3_first__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let entry_second = KtstrTestEntry {
        name: "__b3_second__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let ok = AssertResult::pass();
    write_sidecar(&entry_first, &vm_result, &[], &ok, "CpuSpin", &[], &[]).unwrap();
    assert_eq!(
        find_sidecars_by_prefix(&dir, "__b3_first__-").len(),
        1,
        "first write must produce its sidecar",
    );
    // Second write must NOT re-trigger the pre-clear and wipe the first.
    write_sidecar(&entry_second, &vm_result, &[], &ok, "CpuSpin", &[], &[]).unwrap();
    let first_count = find_sidecars_by_prefix(&dir, "__b3_first__-").len();
    let second_count = find_sidecars_by_prefix(&dir, "__b3_second__-").len();
    assert_eq!(
        first_count, 1,
        "first sidecar must survive the second write — a count of 0 \
         reveals the canonicalize-cache-split regression: pre-clear \
         ran a second time and wiped sidecar 1. Move `create_dir_all` \
         before `pre_clear_run_dir_once` so canonicalize sees the \
         same dir on both calls.",
    );
    assert_eq!(second_count, 1, "second sidecar must land normally",);
}
// Happy path: write_sidecar produces a parseable sidecar file whose
// contents reflect the entry and check result, including a fully
// populated host context.
#[test]
fn write_sidecar_writes_file() {
    let _lock = lock_env();
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__sidecar_write_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let check_result = AssertResult::pass();
    write_sidecar(&entry, &vm_result, &[], &check_result, "CpuSpin", &[], &[]).unwrap();
    // Read the file back and parse it as a SidecarResult.
    let path = find_single_sidecar_by_prefix(tmp, "__sidecar_write_test__-");
    let data = std::fs::read_to_string(&path).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&data).unwrap();
    assert_eq!(loaded.test_name, "__sidecar_write_test__");
    assert!(loaded.passed);
    assert!(!loaded.skipped, "pass result is not a skip");
    // Host context must come from a real collection pass, not a
    // Default::default() substitute.
    let host = loaded
        .host
        .as_ref()
        .expect("write_sidecar must populate host field from collect_host_context");
    assert_eq!(host.kernel_name.as_deref(), Some("Linux"));
    assert!(
        host.kernel_cmdline.is_some(),
        "write_sidecar must capture full HostContext, not Default::default() — \
         /proc/cmdline is always readable on Linux (see host_context tests)",
    );
    assert!(
        host.kernel_release.is_some(),
        "write_sidecar must capture kernel_release — uname() is \
         filesystem-independent; a None here means the default \
         substitution bypassed the full collect_host_context()",
    );
}
// Two writes for the same test entry that differ only in their
// active_flags set must hash to two distinct sidecar filenames.
#[test]
fn write_sidecar_variant_hash_distinguishes_active_flags() {
    let _lock = lock_env();
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let out_dir = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", out_dir);
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__flagvariant_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let ok = AssertResult::pass();
    // One write per flag set; only the flags differ between the two.
    for flags in [
        vec!["llc".to_string()],
        vec!["llc".to_string(), "steal".to_string()],
    ] {
        write_sidecar(&entry, &vm_result, &[], &ok, "CpuSpin", &flags, &[]).unwrap();
    }
    let paths = find_sidecars_by_prefix(out_dir, "__flagvariant_test__-");
    assert_eq!(
        paths.len(),
        2,
        "two active_flags variants must produce two distinct files, got {paths:?}"
    );
}
// Reversed-order writes of the same active-flags SET must overwrite one
// canonical sidecar file, and the persisted flags must come out in
// canonical order — guards the canonicalize_active_flags call inside
// write_sidecar.
#[test]
fn write_sidecar_variant_hash_is_order_invariant_for_active_flags() {
    let _lock = lock_env();
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    // Always-passing stand-in body; only the entry metadata matters here.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__flagorder_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let ok = AssertResult::pass();
    // Same SET, two caller orders: the second write must overwrite the first.
    let forward = vec!["llc".to_string(), "steal".to_string()];
    let reversed = vec!["steal".to_string(), "llc".to_string()];
    write_sidecar(&entry, &vm_result, &[], &ok, "CpuSpin", &forward, &[]).unwrap();
    write_sidecar(&entry, &vm_result, &[], &ok, "CpuSpin", &reversed, &[]).unwrap();
    let paths = find_sidecars_by_prefix(tmp, "__flagorder_test__-");
    assert_eq!(
        paths.len(),
        1,
        "reversed-order writes of the same flag SET must \
         collapse to a single canonical sidecar filename \
         (overwrite); got {paths:?}. If this fails with \
         `paths.len() == 2`, the write path has regressed to \
         hashing caller-order flags — re-sort via \
         `canonicalize_active_flags` in both write_sidecar \
         and write_skip_sidecar.",
    );
    // The surviving file must also carry the flags in canonical order.
    let path = &paths[0];
    let data = std::fs::read_to_string(path).expect("read canonical sidecar");
    let loaded: SidecarResult =
        serde_json::from_str(&data).expect("deserialize canonical sidecar");
    assert_eq!(
        loaded.active_flags,
        vec!["llc".to_string(), "steal".to_string()],
        "on-disk active_flags must be sorted in \
         `scenario::flags::ALL` positional order; got: {:?}",
        loaded.active_flags,
    );
}
// Pure-function check of sidecar_variant_hash: sysctls and kargs are
// hashed as SETS (sorted before folding), so caller order must not change
// the hash. A partial case (only kargs reversed) catches a half-reverted
// sort where one of the two collections lost its canonicalization.
#[test]
fn sidecar_variant_hash_is_order_invariant_for_sysctls_and_kargs() {
    let forward = SidecarResult {
        sysctls: vec![
            "sysctl.a=1".to_string(),
            "sysctl.b=2".to_string(),
            "sysctl.c=3".to_string(),
        ],
        kargs: vec![
            "karg_alpha".to_string(),
            "karg_beta".to_string(),
            "karg_gamma".to_string(),
        ],
        ..SidecarResult::test_fixture()
    };
    let reversed = SidecarResult {
        sysctls: vec![
            "sysctl.c=3".to_string(),
            "sysctl.b=2".to_string(),
            "sysctl.a=1".to_string(),
        ],
        kargs: vec![
            "karg_gamma".to_string(),
            "karg_beta".to_string(),
            "karg_alpha".to_string(),
        ],
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&forward),
        sidecar_variant_hash(&reversed),
        "reversed-order sysctls/kargs must hash identically — \
         the hash sorts both collections lexically before \
         folding bytes in, matching the set-determines-hash \
         contract documented on `sidecar_variant_hash`. A \
         regression that dropped the sort block would produce \
         distinct hashes and duplicate sidecar files for the \
         same semantic variant.",
    );
    // Mixed case: forward sysctls, reversed kargs — catches a regression
    // where only one of the two sorts survived.
    let partial = SidecarResult {
        sysctls: forward.sysctls.clone(),
        kargs: reversed.kargs.clone(),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&forward),
        sidecar_variant_hash(&partial),
        "kargs-only reversal must still hash identically — \
         partial revert (one of the two sorts dropped) must \
         fail this assertion. Got distinct hashes for: \
         sysctls={:?}, kargs={:?} vs sysctls={:?}, kargs={:?}",
        forward.sysctls,
        forward.kargs,
        partial.sysctls,
        partial.kargs,
    );
}
// Same order-invariance contract as the run-path test above, but for the
// skip path: write_skip_sidecar must canonicalize active_flags too, so a
// reversed flag SET overwrites the same file and persists sorted flags.
#[test]
fn write_skip_sidecar_variant_hash_is_order_invariant_for_active_flags() {
    let _lock = lock_env();
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    // Always-passing stand-in body; only the entry metadata matters here.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__skipflagorder_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let forward = vec!["llc".to_string(), "steal".to_string()];
    let reversed = vec!["steal".to_string(), "llc".to_string()];
    write_skip_sidecar(&entry, &forward).unwrap();
    write_skip_sidecar(&entry, &reversed).unwrap();
    let paths = find_sidecars_by_prefix(tmp, "__skipflagorder_test__-");
    assert_eq!(
        paths.len(),
        1,
        "reversed-order skip-sidecar writes of the same flag \
         SET must collapse to a single canonical filename \
         (overwrite); got {paths:?}. If this fails with \
         `paths.len() == 2`, canonicalization was removed from \
         `write_skip_sidecar` even if the run-path test above \
         still passes — apply `canonicalize_active_flags` in \
         both write sites, not just one.",
    );
    // The surviving skip sidecar must carry canonically ordered flags.
    let path = &paths[0];
    let data = std::fs::read_to_string(path).expect("read canonical skip sidecar");
    let loaded: SidecarResult =
        serde_json::from_str(&data).expect("deserialize canonical skip sidecar");
    assert_eq!(
        loaded.active_flags,
        vec!["llc".to_string(), "steal".to_string()],
        "on-disk active_flags of a skip sidecar must be sorted \
         in `scenario::flags::ALL` positional order; got: {:?}",
        loaded.active_flags,
    );
}
// Ordering contract of canonicalize_active_flags: flags known to
// `scenario::flags::ALL` sort first by their position there; unknown
// flags follow, sorted lexically. The result must not depend on input
// order.
#[test]
fn canonicalize_active_flags_orders_unknown_lexically_after_known() {
    let input = vec![
        "zzz_unknown".to_string(),
        "llc".to_string(),
        "aaa_unknown".to_string(),
    ];
    let got = canonicalize_active_flags(&input);
    assert_eq!(
        got,
        vec![
            "llc".to_string(),
            "aaa_unknown".to_string(),
            "zzz_unknown".to_string(),
        ],
        "known flags must sort first by ALL position, unknown \
         flags must sort lexically after; got: {got:?}",
    );
    // Reversing the input must not change the canonical output.
    let reversed: Vec<String> = input.into_iter().rev().collect();
    let got_rev = canonicalize_active_flags(&reversed);
    assert_eq!(
        got_rev, got,
        "reversed input must canonicalize to the same output; \
         got: {got_rev:?}, expected: {got:?}",
    );
}
// Different work_type values are distinct variants: each must produce its
// own sidecar file rather than overwriting the other.
#[test]
fn write_sidecar_variant_hash_distinguishes_work_types() {
    let _lock = lock_env();
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    // Always-passing stand-in body; only the entry metadata matters here.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__variant_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let verdict = AssertResult::pass();
    // One write per work_type variant.
    for work_type in ["CpuSpin", "YieldHeavy"] {
        write_sidecar(&entry, &vm_result, &[], &verdict, work_type, &[], &[]).unwrap();
    }
    let paths = find_sidecars_by_prefix(tmp, "__variant_test__-");
    assert_eq!(
        paths.len(),
        2,
        "two work_type variants must produce two distinct files, got {paths:?}"
    );
}
// Golden-value pin of the variant hash over a fully populated fixture.
// Any intentional wire-format change must regenerate the constant.
#[test]
fn sidecar_variant_hash_stability_populated() {
    let sc = SidecarResult {
        topology: "1n2l4c1t".to_string(),
        scheduler: "scx-ktstr".to_string(),
        payload: None,
        work_type: "CpuSpin".to_string(),
        active_flags: vec!["llc".to_string(), "steal".to_string()],
        sysctls: vec!["sysctl.kernel.sched_cfs_bandwidth_slice_us=1000".to_string()],
        kargs: vec!["nosmt".to_string()],
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&sc),
        0xbc0f38005915a09f,
        "sidecar_variant_hash output drifted — regenerate expected only if \
         the wire format change is intentional and old sidecars are \
         disposable (which they are per ktstr's pre-1.0 stance)",
    );
}
// Golden-value pin of the variant hash when every hashed collection is
// empty — catches accidental changes to the hash's empty-case framing.
#[test]
fn sidecar_variant_hash_stability_empty_collections() {
    let sc = SidecarResult {
        topology: "1n1l1c1t".to_string(),
        scheduler: "eevdf".to_string(),
        payload: None,
        work_type: String::new(),
        active_flags: Vec::new(),
        sysctls: Vec::new(),
        kargs: Vec::new(),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(sidecar_variant_hash(&sc), 0x1b61394511b42e01);
}
// The payload name participates in the variant hash: None, Some("fio"),
// and Some("stress-ng") must all hash to different values.
#[test]
fn sidecar_variant_hash_distinguishes_payload() {
    // Factory producing a fixture that differs only in `payload`.
    let variant = |payload: Option<&str>| SidecarResult {
        payload: payload.map(str::to_string),
        ..SidecarResult::test_fixture()
    };
    let none = variant(None);
    assert!(
        none.payload.is_none(),
        "fixture default for payload must remain None"
    );
    let h_none = sidecar_variant_hash(&none);
    let h_fio = sidecar_variant_hash(&variant(Some("fio")));
    let h_stress = sidecar_variant_hash(&variant(Some("stress-ng")));
    assert_ne!(
        h_none, h_fio,
        "absent vs present payload must hash differently",
    );
    assert_ne!(
        h_fio, h_stress,
        "different payload names must hash differently",
    );
}
// An empty sidecar slice renders to an empty report.
#[test]
fn format_verifier_stats_empty() {
    let rendered = format_verifier_stats(&[]);
    assert!(rendered.is_empty());
}
// A sidecar with no verifier stats contributes nothing to the report.
#[test]
fn format_verifier_stats_no_data() {
    let fixture = SidecarResult::test_fixture();
    let rendered = format_verifier_stats(std::slice::from_ref(&fixture));
    assert!(rendered.is_empty());
}
// The rendered table carries the header, every program name, every insn
// count, and the total line — and no WARNING when nothing is near the
// verifier limit.
#[test]
fn format_verifier_stats_table() {
    let sc = SidecarResult {
        verifier_stats: vec![
            crate::monitor::bpf_prog::ProgVerifierStats {
                name: "dispatch".to_string(),
                verified_insns: 50000,
            },
            crate::monitor::bpf_prog::ProgVerifierStats {
                name: "enqueue".to_string(),
                verified_insns: 30000,
            },
        ],
        ..SidecarResult::test_fixture()
    };
    let rendered = format_verifier_stats(&[sc]);
    for needle in [
        "BPF VERIFIER STATS",
        "dispatch",
        "enqueue",
        "50000",
        "30000",
        "total verified insns: 80000",
    ] {
        assert!(rendered.contains(needle), "missing {needle:?} in: {rendered}");
    }
    assert!(!rendered.contains("WARNING"));
}
// A program close to the verifier instruction budget (800k of 1M = 80%)
// must trigger the WARNING line with its name and percentage.
#[test]
fn format_verifier_stats_warning() {
    let heavy = crate::monitor::bpf_prog::ProgVerifierStats {
        name: "heavy".to_string(),
        verified_insns: 800000,
    };
    let sc = SidecarResult {
        verifier_stats: vec![heavy],
        ..SidecarResult::test_fixture()
    };
    let rendered = format_verifier_stats(&[sc]);
    for needle in ["WARNING", "heavy", "80.0%"] {
        assert!(rendered.contains(needle), "missing {needle:?} in: {rendered}");
    }
}
// verifier_stats must survive a JSON encode/decode cycle intact.
#[test]
fn sidecar_verifier_stats_serde_roundtrip() {
    let original = SidecarResult {
        verifier_stats: vec![crate::monitor::bpf_prog::ProgVerifierStats {
            name: "init".to_string(),
            verified_insns: 5000,
        }],
        ..SidecarResult::test_fixture()
    };
    let encoded = serde_json::to_string(&original).unwrap();
    assert!(encoded.contains("verifier_stats"));
    let decoded: SidecarResult = serde_json::from_str(&encoded).unwrap();
    assert_eq!(decoded.verifier_stats.len(), 1);
    let stat = &decoded.verifier_stats[0];
    assert_eq!(stat.name, "init");
    assert_eq!(stat.verified_insns, 5000);
}
// An empty verifier_stats vec must serialize as an explicit empty array,
// not be dropped from the JSON via skip_serializing_if.
#[test]
fn sidecar_verifier_stats_empty_emits_as_empty_array() {
    let encoded = serde_json::to_string(&SidecarResult::test_fixture()).unwrap();
    assert!(
        encoded.contains("\"verifier_stats\":[]"),
        "empty verifier_stats must emit as `\"verifier_stats\":[]`: {encoded}",
    );
}
// Identical per-program stats appearing in multiple sidecars must be
// counted once, not summed per occurrence.
#[test]
fn format_verifier_stats_deduplicates() {
    let dispatch = crate::monitor::bpf_prog::ProgVerifierStats {
        name: "dispatch".to_string(),
        verified_insns: 50000,
    };
    // Two sidecars carrying the exact same stats entry.
    let duplicated: Vec<SidecarResult> = (0..2)
        .map(|_| SidecarResult {
            verifier_stats: vec![dispatch.clone()],
            ..SidecarResult::test_fixture()
        })
        .collect();
    let rendered = format_verifier_stats(&duplicated);
    assert!(rendered.contains("total verified insns: 50000"));
}
// The default (eevdf) entry fingerprints with no commit, no sysctls, and
// no kargs — there is no userspace scheduler binary behind it.
#[test]
fn scheduler_fingerprint_eevdf_empty_extras() {
    let entry = KtstrTestEntry {
        name: "eevdf_test",
        ..KtstrTestEntry::DEFAULT
    };
    let fingerprint = scheduler_fingerprint(&entry);
    assert_eq!(fingerprint.scheduler, "eevdf");
    let commit = &fingerprint.scheduler_commit;
    assert!(
        commit.is_none(),
        "Eevdf variant has no userspace binary; \
         scheduler_commit must be None. Got: {commit:?}",
    );
    assert!(fingerprint.sysctls.is_empty());
    assert!(fingerprint.kargs.is_empty());
}
// Sysctls declared on a Scheduler must surface in the fingerprint as
// "sysctl.<key>=<value>" strings, preserving declaration order.
#[test]
fn scheduler_fingerprint_formats_sysctls_with_prefix() {
    use super::super::entry::Sysctl;
    // Statics: Scheduler/Payload builders are const and the entry stores
    // &'static references.
    static SYSCTLS: &[Sysctl] = &[
        Sysctl::new("kernel.foo", "1"),
        Sysctl::new("kernel.bar", "yes"),
    ];
    static SCHED: super::super::entry::Scheduler =
        super::super::entry::Scheduler::new("s").sysctls(SYSCTLS);
    static SCHED_PAYLOAD: super::super::payload::Payload =
        super::super::payload::Payload::from_scheduler(&SCHED);
    let entry = KtstrTestEntry {
        name: "s_test",
        scheduler: &SCHED_PAYLOAD,
        ..KtstrTestEntry::DEFAULT
    };
    let SchedulerFingerprint {
        scheduler: name,
        scheduler_commit: _,
        sysctls,
        kargs,
    } = scheduler_fingerprint(&entry);
    assert_eq!(name, "s");
    assert_eq!(
        sysctls,
        vec![
            "sysctl.kernel.foo=1".to_string(),
            "sysctl.kernel.bar=yes".to_string(),
        ]
    );
    assert!(kargs.is_empty());
}
// Kernel args declared on a Scheduler pass through to the fingerprint
// verbatim, with no prefixing or reordering.
#[test]
fn scheduler_fingerprint_forwards_kargs_verbatim() {
    static SCHED: super::super::entry::Scheduler =
        super::super::entry::Scheduler::new("s").kargs(&["quiet", "splash"]);
    static SCHED_PAYLOAD: super::super::payload::Payload =
        super::super::payload::Payload::from_scheduler(&SCHED);
    let entry = KtstrTestEntry {
        name: "s_test",
        scheduler: &SCHED_PAYLOAD,
        ..KtstrTestEntry::DEFAULT
    };
    let fingerprint = scheduler_fingerprint(&entry);
    let expected: Vec<String> = ["quiet", "splash"].iter().map(|k| k.to_string()).collect();
    assert_eq!(fingerprint.kargs, expected);
    assert!(fingerprint.sysctls.is_empty());
}
// A Discover-binary scheduler reports its display name (not the binary
// name) and, currently, no scheduler_commit.
#[test]
fn scheduler_fingerprint_uses_display_name_for_discover() {
    use super::super::entry::SchedulerSpec;
    static SCHED: super::super::entry::Scheduler =
        super::super::entry::Scheduler::new("s").binary(SchedulerSpec::Discover("scx_relaxed"));
    static SCHED_PAYLOAD: super::super::payload::Payload =
        super::super::payload::Payload::from_scheduler(&SCHED);
    let entry = KtstrTestEntry {
        name: "rel_test",
        scheduler: &SCHED_PAYLOAD,
        ..KtstrTestEntry::DEFAULT
    };
    let SchedulerFingerprint {
        scheduler: name,
        scheduler_commit: commit,
        sysctls: _,
        kargs: _,
    } = scheduler_fingerprint(&entry);
    // Display name "s", not the discovered binary name "scx_relaxed".
    assert_eq!(name, "s");
    assert!(
        commit.is_none(),
        "Discover variant currently returns None via \
         `SchedulerSpec::scheduler_commit` — \
         `resolve_scheduler`'s cascade does not guarantee a \
         fresh build, so there is no authoritative source for \
         the scheduler binary's commit and `scheduler_commit` \
         reports None honestly. Got: {commit:?}",
    );
}
// A binary-kind payload (no scheduler at all) must fingerprint as the
// "kernel_default" label with no commit, sysctls, or kargs.
#[test]
fn scheduler_fingerprint_binary_payload_has_no_commit() {
    static BINARY_PAYLOAD: super::super::payload::Payload =
        super::super::payload::Payload::binary("bin_test", "some_binary");
    let entry = KtstrTestEntry {
        name: "bin_test",
        scheduler: &BINARY_PAYLOAD,
        ..KtstrTestEntry::DEFAULT
    };
    let SchedulerFingerprint {
        scheduler: name,
        scheduler_commit: commit,
        sysctls,
        kargs,
    } = scheduler_fingerprint(&entry);
    assert_eq!(
        name, "kernel_default",
        "binary-kind payload must report the intent-level \
         scheduler label; got: {name:?}",
    );
    assert!(
        commit.is_none(),
        "binary-kind payload has no scheduler binary at all — \
         scheduler_commit must be None via the `and_then` \
         short-circuit on `scheduler_binary() == None`. Got: \
         {commit:?}",
    );
    assert!(
        sysctls.is_empty(),
        "binary-kind payload reports no sysctls; got: {sysctls:?}",
    );
    assert!(
        kargs.is_empty(),
        "binary-kind payload reports no kargs; got: {kargs:?}",
    );
}
// The skip path must record passed=true + skipped=true, bucket the run
// under the "skipped" work_type, forward active_flags, and populate the
// full host context — same host contract as the run path.
#[test]
fn write_skip_sidecar_records_passed_true_skipped_true() {
    let _lock = lock_env();
    // Use a self-cleaning TempDir (as the other sidecar tests do) instead
    // of a fixed path under std::env::temp_dir(): the old trailing
    // remove_dir_all was skipped whenever an assertion panicked, leaking
    // state into later runs, and a fixed name can collide across
    // concurrent test-binary invocations.
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    // Always-passing stand-in body; only the entry metadata matters here.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__skip_sidecar_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let active_flags: Vec<String> = vec!["llc".to_string()];
    write_skip_sidecar(&entry, &active_flags).expect("skip sidecar must write");
    let path = find_single_sidecar_by_prefix(tmp, "__skip_sidecar_test__-");
    let data = std::fs::read_to_string(&path).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&data).unwrap();
    assert_eq!(loaded.test_name, "__skip_sidecar_test__");
    assert!(
        loaded.passed,
        "skip sidecar must set passed=true so the verdict gate does not flip fail",
    );
    assert!(
        loaded.skipped,
        "skip sidecar must set skipped=true so stats tooling excludes from pass count",
    );
    assert_eq!(
        loaded.work_type, "skipped",
        "skip path uses the 'skipped' work_type bucket so grouping keeps the skip distinguishable",
    );
    assert_eq!(loaded.active_flags, active_flags);
    let host = loaded
        .host
        .as_ref()
        .expect("write_skip_sidecar must populate host field from collect_host_context");
    assert_eq!(host.kernel_name.as_deref(), Some("Linux"));
    assert!(
        host.kernel_cmdline.is_some(),
        "write_skip_sidecar must capture full HostContext, not Default::default()",
    );
    assert!(
        host.kernel_release.is_some(),
        "write_skip_sidecar must capture kernel_release (syscall-sourced)",
    );
}
// When KTSTR_SIDECAR_DIR points at a regular file, create_dir_all cannot
// succeed and write_skip_sidecar must surface that as Err rather than
// silently dropping the sidecar.
#[test]
fn write_skip_sidecar_returns_err_when_dir_cannot_be_created() {
    let _lock = lock_env();
    // Place the blocking regular file inside a TempDir so it is removed
    // even if an assertion panics; the old fixed path under
    // std::env::temp_dir() leaked on failure and could collide between
    // concurrent test-binary invocations.
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let blocker = tmp_dir.path().join("ktstr-sidecar-skip-blocker");
    std::fs::write(&blocker, b"not a dir").unwrap();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", &blocker);
    // Always-passing stand-in body; only the entry metadata matters here.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__skip_sidecar_err_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let result = write_skip_sidecar(&entry, &[]);
    assert!(
        result.is_err(),
        "skip sidecar write must return Err when the target is a regular file",
    );
}
// Serialization contract for nullable/empty fields: everything must emit
// explicitly (null / []) rather than being skipped, and must round-trip
// symmetrically. The exhaustive destructure below forces this test to be
// revisited whenever a field is added to SidecarResult.
#[test]
fn sidecar_payload_and_metrics_always_emit_when_empty() {
    let sc = SidecarResult::test_fixture();
    let json = serde_json::to_string(&sc).unwrap();
    assert!(
        json.contains("\"payload\":null"),
        "empty payload must emit as `\"payload\":null`: {json}",
    );
    assert!(
        json.contains("\"metrics\":[]"),
        "empty metrics must emit as `\"metrics\":[]`: {json}",
    );
    assert!(
        json.contains("\"project_commit\":null"),
        "absent project_commit must emit as `\"project_commit\":null`, \
         not be omitted via `skip_serializing_if`: {json}",
    );
    assert!(
        json.contains("\"kernel_commit\":null"),
        "absent kernel_commit must emit as `\"kernel_commit\":null`, \
         not be omitted via `skip_serializing_if`: {json}",
    );
    let loaded: SidecarResult = serde_json::from_str(&json).unwrap();
    // Exhaustive (non-`..`) destructure: adding a field to SidecarResult
    // breaks this pattern at compile time, prompting a new assertion.
    let SidecarResult {
        test_name: _,
        topology: _,
        scheduler: _,
        scheduler_commit,
        project_commit,
        payload,
        metrics,
        passed: _,
        skipped: _,
        stats: _,
        monitor,
        stimulus_events,
        work_type: _,
        active_flags,
        verifier_stats,
        kvm_stats,
        sysctls,
        kargs,
        kernel_version,
        kernel_commit,
        timestamp: _,
        run_id: _,
        host,
        cleanup_duration_ms,
        run_source,
    } = loaded;
    assert!(payload.is_none());
    assert!(metrics.is_empty());
    assert!(scheduler_commit.is_none());
    assert!(project_commit.is_none());
    assert!(monitor.is_none());
    assert!(stimulus_events.is_empty());
    assert!(active_flags.is_empty());
    assert!(verifier_stats.is_empty());
    assert!(kvm_stats.is_none());
    assert!(sysctls.is_empty());
    assert!(kargs.is_empty());
    assert!(kernel_version.is_none());
    assert!(kernel_commit.is_none());
    assert!(host.is_none());
    assert!(cleanup_duration_ms.is_none());
    assert!(
        run_source.is_none(),
        "absent run_source must round-trip as None, \
         matching the symmetric serialize/deserialize \
         contract enforced for every other nullable field",
    );
}
// Populated payload + metrics must round-trip through JSON intact,
// including the per-metric stream tag used by review tooling.
#[test]
fn sidecar_payload_and_metrics_roundtrip_populated() {
    use crate::test_support::{Metric, MetricSource, MetricStream, PayloadMetrics, Polarity};
    let pm = PayloadMetrics {
        payload_index: 0,
        metrics: vec![Metric {
            name: "iops".to_string(),
            value: 5000.0,
            polarity: Polarity::HigherBetter,
            unit: "iops".to_string(),
            source: MetricSource::Json,
            stream: MetricStream::Stdout,
        }],
        exit_code: 0,
    };
    let sc = SidecarResult {
        test_name: "fio_run".to_string(),
        topology: "1n1l2c1t".to_string(),
        payload: Some("fio".to_string()),
        metrics: vec![pm],
        ..SidecarResult::test_fixture()
    };
    let json = serde_json::to_string(&sc).unwrap();
    assert!(json.contains("\"payload\":\"fio\""));
    assert!(json.contains("\"metrics\""));
    assert!(json.contains("\"iops\""));
    let loaded: SidecarResult = serde_json::from_str(&json).unwrap();
    assert_eq!(loaded.payload.as_deref(), Some("fio"));
    assert_eq!(loaded.metrics.len(), 1);
    assert_eq!(loaded.metrics[0].exit_code, 0);
    assert_eq!(loaded.metrics[0].metrics.len(), 1);
    assert_eq!(loaded.metrics[0].metrics[0].name, "iops");
    assert_eq!(loaded.metrics[0].metrics[0].value, 5000.0);
    assert_eq!(
        loaded.metrics[0].metrics[0].stream,
        MetricStream::Stdout,
        "metric stream tag must round-trip through sidecar \
         serde; a regression that lost `stream` serialization \
         or deserialized it to a different variant would break \
         review-tooling's stdout-vs-stderr attribution",
    );
}
// An entry with a declared payload must have the payload NAME recorded in
// its sidecar; metrics stay empty because no accumulator feeds them yet.
#[test]
fn write_sidecar_records_entry_payload_name() {
    use crate::test_support::{OutputFormat, Payload, PayloadKind};
    let _lock = lock_env();
    // Self-cleaning TempDir (as the other sidecar tests use) instead of a
    // fixed std::env::temp_dir() path: the old trailing remove_dir_all was
    // skipped on panic, leaking state, and a fixed name can collide across
    // concurrent test-binary invocations.
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    static FIO: Payload = Payload {
        name: "fio",
        kind: PayloadKind::Binary("fio"),
        output: OutputFormat::Json,
        default_args: &[],
        default_checks: &[],
        metrics: &[],
        include_files: &[],
        uses_parent_pgrp: false,
        known_flags: None,
        metric_bounds: None,
    };
    // Always-passing stand-in body; only the entry metadata matters here.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__payload_name_test__",
        func: dummy,
        auto_repro: false,
        payload: Some(&FIO),
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let ok = AssertResult::pass();
    write_sidecar(&entry, &vm_result, &[], &ok, "CpuSpin", &[], &[]).unwrap();
    let path = find_single_sidecar_by_prefix(tmp, "__payload_name_test__-");
    let data = std::fs::read_to_string(&path).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&data).unwrap();
    assert_eq!(loaded.payload.as_deref(), Some("fio"));
    assert!(
        loaded.metrics.is_empty(),
        "metrics stay empty until a Ctx-level accumulator lands",
    );
}
// The metrics slice handed to write_sidecar must be persisted verbatim:
// per-payload indices, metric entries, and exit codes all round-trip.
#[test]
fn write_sidecar_forwards_payload_metrics_slice() {
    use crate::test_support::{Metric, MetricSource, MetricStream, PayloadMetrics, Polarity};
    let _lock = lock_env();
    // Self-cleaning TempDir (as the other sidecar tests use) instead of a
    // fixed std::env::temp_dir() path: the old trailing remove_dir_all was
    // skipped on panic, leaking state, and a fixed name can collide across
    // concurrent test-binary invocations.
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    // Always-passing stand-in body; only the entry metadata matters here.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__metrics_slice_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let ok = AssertResult::pass();
    // Two payload slots: one with a metric, one failed (exit_code 2) with
    // no metrics — both must survive the round-trip unchanged.
    let metrics = vec![
        PayloadMetrics {
            payload_index: 0,
            metrics: vec![Metric {
                name: "iops".to_string(),
                value: 1200.0,
                polarity: Polarity::HigherBetter,
                unit: "iops".to_string(),
                source: MetricSource::Json,
                stream: MetricStream::Stdout,
            }],
            exit_code: 0,
        },
        PayloadMetrics {
            payload_index: 1,
            metrics: vec![],
            exit_code: 2,
        },
    ];
    write_sidecar(&entry, &vm_result, &[], &ok, "CpuSpin", &[], &metrics).unwrap();
    let path = find_single_sidecar_by_prefix(tmp, "__metrics_slice_test__-");
    let data = std::fs::read_to_string(&path).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&data).unwrap();
    assert_eq!(loaded.metrics.len(), 2);
    assert_eq!(loaded.metrics[0].exit_code, 0);
    assert_eq!(loaded.metrics[0].metrics.len(), 1);
    assert_eq!(loaded.metrics[0].metrics[0].name, "iops");
    assert_eq!(loaded.metrics[1].exit_code, 2);
    assert!(loaded.metrics[1].metrics.is_empty());
}
// The skip path must also record the entry's payload name, while metrics
// remain empty (a skipped run never executes the payload).
#[test]
fn write_skip_sidecar_records_entry_payload_name() {
    use crate::test_support::{OutputFormat, Payload, PayloadKind};
    let _lock = lock_env();
    // Self-cleaning TempDir (as the other sidecar tests use) instead of a
    // fixed std::env::temp_dir() path: the old trailing remove_dir_all was
    // skipped on panic, leaking state, and a fixed name can collide across
    // concurrent test-binary invocations.
    let tmp_dir = tempfile::TempDir::new().unwrap();
    let tmp = tmp_dir.path();
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp);
    static STRESS: Payload = Payload {
        name: "stress-ng",
        kind: PayloadKind::Binary("stress-ng"),
        output: OutputFormat::ExitCode,
        default_args: &[],
        default_checks: &[],
        metrics: &[],
        include_files: &[],
        uses_parent_pgrp: false,
        known_flags: None,
        metric_bounds: None,
    };
    // Always-passing stand-in body; only the entry metadata matters here.
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__skip_payload_name_test__",
        func: dummy,
        auto_repro: false,
        payload: Some(&STRESS),
        ..KtstrTestEntry::DEFAULT
    };
    write_skip_sidecar(&entry, &[]).unwrap();
    let path = find_single_sidecar_by_prefix(tmp, "__skip_payload_name_test__-");
    let data = std::fs::read_to_string(&path).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&data).unwrap();
    assert_eq!(loaded.payload.as_deref(), Some("stress-ng"));
    assert!(loaded.skipped);
    assert!(
        loaded.metrics.is_empty(),
        "skip path never accumulates metrics"
    );
}
// The host context describes WHERE a run happened, not WHICH variant it
// was — a fully populated host must not change the variant hash.
#[test]
fn sidecar_variant_hash_excludes_host_context() {
    use crate::host_context::HostContext;
    // Every field that plausibly could leak into a hash is populated.
    let populated = HostContext {
        cpu_model: Some("Example CPU".to_string()),
        cpu_vendor: Some("GenuineExample".to_string()),
        total_memory_kb: Some(16_384_000),
        hugepages_total: Some(0),
        hugepages_free: Some(0),
        hugepages_size_kb: Some(2048),
        thp_enabled: Some("always [madvise] never".to_string()),
        thp_defrag: Some("[always] defer madvise never".to_string()),
        sched_tunables: None,
        online_cpus: Some(8),
        numa_nodes: Some(2),
        cpufreq_governor: std::collections::BTreeMap::new(),
        kernel_name: Some("Linux".to_string()),
        kernel_release: Some("6.11.0".to_string()),
        arch: Some("x86_64".to_string()),
        kernel_cmdline: Some("preempt=lazy".to_string()),
        heap_state: None,
    };
    let without_host = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        ..SidecarResult::test_fixture()
    };
    let with_host = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        host: Some(populated),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&without_host),
        sidecar_variant_hash(&with_host),
        "host context must not influence variant hash",
    );
}
// scheduler_commit identifies a BUILD, not a variant — it must not feed
// the variant hash.
#[test]
fn sidecar_variant_hash_excludes_scheduler_commit() {
    // Factory producing fixtures that differ only in scheduler_commit.
    let variant = |commit: Option<&str>| SidecarResult {
        topology: "1n1l2c1t".to_string(),
        scheduler_commit: commit.map(str::to_string),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&variant(None)),
        sidecar_variant_hash(&variant(Some(
            "0000000000000000000000000000000000000000"
        ))),
        "scheduler_commit must not influence variant hash — \
         runs of the same semantic variant on different \
         scheduler-binary builds must remain comparable by \
         `stats compare`",
    );
}
// project_commit identifies a BUILD, not a variant — it must not feed the
// variant hash, in any of the None/Some, Some/Some, or clean/dirty cases.
#[test]
fn sidecar_variant_hash_excludes_project_commit() {
    // Hash a fixture that differs only in project_commit.
    let hash = |commit: Option<&str>| {
        sidecar_variant_hash(&SidecarResult {
            topology: "1n1l2c1t".to_string(),
            project_commit: commit.map(str::to_string),
            ..SidecarResult::test_fixture()
        })
    };
    assert_eq!(
        hash(None),
        hash(Some("abcdef1-dirty")),
        "project_commit must not influence variant hash — \
         None vs Some(...) case",
    );
    assert_eq!(
        hash(Some("abc1234")),
        hash(Some("def5678")),
        "project_commit must not influence variant hash — \
         two distinct populated commits case (catches XOR-style \
         regressions where None and one specific Some happen to \
         collide)",
    );
    assert_eq!(
        hash(Some("abc1234")),
        hash(Some("abc1234-dirty")),
        "project_commit must not influence variant hash — \
         clean vs `-dirty` of the same hex case (catches a \
         regression that distinguished only the dirty bit)",
    );
}
// kernel_commit identifies a kernel BUILD, not a variant — it must not
// feed the variant hash, in any of the None/Some, Some/Some, or
// clean/dirty cases.
#[test]
fn sidecar_variant_hash_excludes_kernel_commit() {
    // Hash a fixture that differs only in kernel_commit.
    let hash = |commit: Option<&str>| {
        sidecar_variant_hash(&SidecarResult {
            topology: "1n1l2c1t".to_string(),
            kernel_commit: commit.map(str::to_string),
            ..SidecarResult::test_fixture()
        })
    };
    assert_eq!(
        hash(None),
        hash(Some("abcdef1-dirty")),
        "kernel_commit must not influence variant hash — \
         None vs Some(...) case",
    );
    assert_eq!(
        hash(Some("abc1234")),
        hash(Some("def5678")),
        "kernel_commit must not influence variant hash — \
         two distinct populated commits case (catches XOR-style \
         regressions where None and one specific Some happen to \
         collide)",
    );
    assert_eq!(
        hash(Some("abc1234")),
        hash(Some("abc1234-dirty")),
        "kernel_commit must not influence variant hash — \
         clean vs `-dirty` of the same hex case (catches a \
         regression that distinguished only the dirty bit)",
    );
}
// run_source tags WHERE a sidecar came from (local/ci/archive), not which
// variant it is — no tag value may change the variant hash.
#[test]
fn sidecar_variant_hash_excludes_run_source() {
    // Hash a fixture that differs only in run_source.
    let hash = |run_source: Option<String>| {
        sidecar_variant_hash(&SidecarResult {
            topology: "1n1l2c1t".to_string(),
            run_source,
            ..SidecarResult::test_fixture()
        })
    };
    assert_eq!(
        hash(None),
        hash(Some(SIDECAR_RUN_SOURCE_LOCAL.to_string())),
        "run_source must not influence variant hash — None vs \
         Some(\"local\") case",
    );
    assert_eq!(
        hash(Some(SIDECAR_RUN_SOURCE_LOCAL.to_string())),
        hash(Some(SIDECAR_RUN_SOURCE_CI.to_string())),
        "run_source must not influence variant hash — \
         Some(\"local\") vs Some(\"ci\") case (catches XOR-style \
         regressions where two specific tags happen to collide)",
    );
    assert_eq!(
        hash(Some(SIDECAR_RUN_SOURCE_CI.to_string())),
        hash(Some(SIDECAR_RUN_SOURCE_ARCHIVE.to_string())),
        "run_source must not influence variant hash — \
         Some(\"ci\") vs Some(\"archive\") case",
    );
}
// Routing of detect_run_source on the KTSTR_CI variable: unset and empty
// both classify as `local`; any non-empty value classifies as `ci`.
// Guard lifetimes matter: each EnvVarGuard must be dropped before the
// next state is installed, or the earlier guard's restore would clobber it.
#[test]
fn detect_run_source_routes_on_ktstr_ci_env() {
    let _lock = lock_env();
    // Baseline: variable absent.
    let _restore = EnvVarGuard::remove(KTSTR_CI_ENV);
    assert_eq!(
        detect_run_source(),
        Some(SIDECAR_RUN_SOURCE_LOCAL.to_string()),
        "unset KTSTR_CI must classify as `local`",
    );
    let _set_empty = EnvVarGuard::set(KTSTR_CI_ENV, std::path::Path::new(""));
    assert_eq!(
        detect_run_source(),
        Some(SIDECAR_RUN_SOURCE_LOCAL.to_string()),
        "empty-string KTSTR_CI must classify as `local` so a \
         defensively-cleared variable does not accidentally \
         flip the tag",
    );
    // Explicitly drop the empty-string guard before installing "1" so the
    // two guards' restores do not interleave.
    drop(_set_empty);
    let _set_one = EnvVarGuard::set(KTSTR_CI_ENV, std::path::Path::new("1"));
    assert_eq!(
        detect_run_source(),
        Some(SIDECAR_RUN_SOURCE_CI.to_string()),
        "non-empty KTSTR_CI must classify as `ci`",
    );
}
// apply_archive_source_override must stamp every sidecar in the pool as
// `archive`, regardless of its prior tag (local, ci, or untagged).
#[test]
fn apply_archive_source_override_rewrites_every_entry() {
    // One entry per prior tag state.
    let prior_tags = [
        Some(SIDECAR_RUN_SOURCE_LOCAL.to_string()),
        Some(SIDECAR_RUN_SOURCE_CI.to_string()),
        None,
    ];
    let mut pool: Vec<SidecarResult> = prior_tags
        .into_iter()
        .map(|run_source| SidecarResult {
            run_source,
            ..SidecarResult::test_fixture()
        })
        .collect();
    apply_archive_source_override(&mut pool);
    for sc in &pool {
        assert_eq!(
            sc.run_source.as_deref(),
            Some(SIDECAR_RUN_SOURCE_ARCHIVE),
            "every sidecar in a --dir pool must surface as \
             archive after override",
        );
    }
}
// A fully populated HostContext (including sched_tunables and heap_state)
// must survive the JSON round-trip embedded in a SidecarResult,
// byte-for-byte equal by PartialEq.
#[test]
fn sidecar_result_roundtrip_with_populated_host_context() {
    use crate::host_context::HostContext;
    let mut tunables = std::collections::BTreeMap::new();
    tunables.insert("sched_migration_cost_ns".to_string(), "500000".to_string());
    // Every HostContext field populated so serde coverage is total.
    let ctx = HostContext {
        cpu_model: Some("Example CPU".to_string()),
        cpu_vendor: Some("GenuineExample".to_string()),
        total_memory_kb: Some(16_384_000),
        hugepages_total: Some(4),
        hugepages_free: Some(2),
        hugepages_size_kb: Some(2048),
        thp_enabled: Some("always [madvise] never".to_string()),
        thp_defrag: Some("[always] defer madvise never".to_string()),
        sched_tunables: Some(tunables),
        online_cpus: Some(8),
        numa_nodes: Some(2),
        cpufreq_governor: std::collections::BTreeMap::new(),
        kernel_name: Some("Linux".to_string()),
        kernel_release: Some("6.11.0".to_string()),
        arch: Some("x86_64".to_string()),
        kernel_cmdline: Some("preempt=lazy isolcpus=1-3".to_string()),
        heap_state: Some(crate::host_heap::HostHeapState::test_fixture()),
    };
    let sc = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        host: Some(ctx.clone()),
        ..SidecarResult::test_fixture()
    };
    let json = serde_json::to_string(&sc).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&json).unwrap();
    let host = loaded.host.expect("host must round-trip");
    assert_eq!(host, ctx);
}
// Linux-only: repeated collect_host_context() calls within one process must
// agree with each other. Fields expected to be stable for the life of the
// process are compared for exact equality; best-effort fields are only
// checked for Some/None parity, since their values may legitimately move
// between reads.
#[cfg(target_os = "linux")]
#[test]
fn sidecars_in_a_run_carry_identical_host_context() {
    const N: usize = 8;
    let samples: Vec<crate::host_context::HostContext> = (0..N)
        .map(|_| crate::host_context::collect_host_context())
        .collect();
    let first = samples
        .first()
        .expect("N > 0 samples must produce at least one host context");
    // Exact-equality checks: any drift here indicates the cache the
    // assertion messages reference (STATIC_HOST_INFO) is broken.
    for (i, s) in samples.iter().enumerate() {
        assert_eq!(
            s.kernel_name, first.kernel_name,
            "sidecar {i}: kernel_name drifted from first sample",
        );
        assert_eq!(
            s.kernel_release, first.kernel_release,
            "sidecar {i}: kernel_release drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.arch, first.arch,
            "sidecar {i}: arch drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.cpu_model, first.cpu_model,
            "sidecar {i}: cpu_model drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.cpu_vendor, first.cpu_vendor,
            "sidecar {i}: cpu_vendor drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.total_memory_kb, first.total_memory_kb,
            "sidecar {i}: total_memory_kb drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.hugepages_size_kb, first.hugepages_size_kb,
            "sidecar {i}: hugepages_size_kb drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.online_cpus, first.online_cpus,
            "sidecar {i}: online_cpus drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.numa_nodes, first.numa_nodes,
            "sidecar {i}: numa_nodes drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.kernel_cmdline, first.kernel_cmdline,
            "sidecar {i}: kernel_cmdline drifted — only a reboot can change it",
        );
    }
    // Presence-only checks: whether the probe succeeded must be stable
    // across samples even when the probed value itself can change.
    for (i, s) in samples.iter().enumerate() {
        assert_eq!(
            s.hugepages_total.is_some(),
            first.hugepages_total.is_some(),
            "sidecar {i}: hugepages_total presence flipped across sidecars",
        );
        assert_eq!(
            s.hugepages_free.is_some(),
            first.hugepages_free.is_some(),
            "sidecar {i}: hugepages_free presence flipped across sidecars",
        );
        assert_eq!(
            s.thp_enabled.is_some(),
            first.thp_enabled.is_some(),
            "sidecar {i}: thp_enabled presence flipped across sidecars",
        );
        assert_eq!(
            s.thp_defrag.is_some(),
            first.thp_defrag.is_some(),
            "sidecar {i}: thp_defrag presence flipped across sidecars",
        );
        assert_eq!(
            s.sched_tunables.is_some(),
            first.sched_tunables.is_some(),
            "sidecar {i}: sched_tunables presence flipped across sidecars",
        );
    }
}
// Installs fallback author name/email into the repository's in-memory
// configuration so test commits never depend on the host's git identity.
fn set_test_author_fallback(repo: &mut gix::Repository) {
    use gix::config::tree::gitoxide;
    let mut overrides = gix::config::File::new(gix::config::file::Metadata::api());
    overrides
        .set_raw_value(&gitoxide::Author::NAME_FALLBACK, "ktstr-test")
        .expect("set author name fallback");
    overrides
        .set_raw_value(
            &gitoxide::Author::EMAIL_FALLBACK,
            "ktstr-test@example.invalid",
        )
        .expect("set author email fallback");
    // The snapshot guard applies the appended values when it drops at the
    // end of this statement.
    repo.config_snapshot_mut().append(overrides);
}
// Builds a minimal clean repo at `dir` via gix plumbing: one blob
// ("original\n"), a single-entry tree ("file.txt"), an initial commit on
// HEAD, an index written from that tree, and a matching worktree file.
// Returns the commit id so callers can compare short hashes against it.
fn init_clean_repo_with_file(dir: &std::path::Path) -> gix::ObjectId {
    let mut repo = gix::init(dir).expect("gix::init");
    // Fallback committer/author identity so the commit below succeeds
    // without any host-level git configuration.
    let _ = repo
        .committer_or_set_generic_fallback()
        .expect("committer fallback");
    set_test_author_fallback(&mut repo);
    let blob_id: gix::ObjectId = repo.write_blob(b"original\n").expect("write blob").detach();
    let tree = gix::objs::Tree {
        entries: vec![gix::objs::tree::Entry {
            mode: gix::objs::tree::EntryKind::Blob.into(),
            filename: "file.txt".into(),
            oid: blob_id,
        }],
    };
    let tree_id: gix::ObjectId = repo.write_object(&tree).expect("write tree").detach();
    let commit_id: gix::ObjectId = repo
        .commit("HEAD", "init", tree_id, std::iter::empty::<gix::ObjectId>())
        .expect("commit")
        .detach();
    // Index and worktree must both match the committed tree, otherwise the
    // repo would read as dirty.
    let mut idx = repo.index_from_tree(&tree_id).expect("index_from_tree");
    idx.write(gix::index::write::Options::default())
        .expect("write index");
    std::fs::write(dir.join("file.txt"), b"original\n").expect("write worktree file");
    commit_id
}
// Clean repo: detect_commit_at must return HEAD's 7-char hex short hash
// with no -dirty suffix.
#[test]
fn detect_project_commit_clean_repo_returns_short_hash() {
    let tmp = tempfile::TempDir::new().unwrap();
    let head = init_clean_repo_with_file(tmp.path());
    let result = super::detect_commit_at(tmp.path()).expect("clean repo must yield Some");
    assert_eq!(
        result.len(),
        7,
        "clean result must be a 7-char hex hash, got {result:?}"
    );
    assert!(
        !result.contains('-'),
        "clean result must not carry a -dirty suffix, got {result:?}"
    );
    assert!(
        result.chars().all(|c| c.is_ascii_hexdigit()),
        "clean result must be pure hex, got {result:?}"
    );
    // Compare against the exact short hash of the fixture's HEAD commit.
    assert_eq!(
        result,
        head.to_hex_with_len(7).to_string(),
        "clean result must match the HEAD short hash exactly"
    );
}
// Dirty worktree: detect_commit_at must append "-dirty" to the short hash.
#[test]
fn detect_project_commit_dirty_repo_appends_dirty_suffix() {
    let tmp = tempfile::TempDir::new().unwrap();
    let head = init_clean_repo_with_file(tmp.path());
    // Modify the tracked file so the worktree diverges from HEAD.
    std::fs::write(tmp.path().join("file.txt"), b"modified\n").unwrap();
    let result = super::detect_commit_at(tmp.path()).expect("dirty repo must yield Some");
    let expected_prefix = head.to_hex_with_len(7).to_string();
    assert_eq!(
        result,
        format!("{expected_prefix}-dirty"),
        "dirty result must be {expected_prefix:?} + -dirty suffix"
    );
}
// `repo_is_dirty` must report Some(false) for a repo whose index and
// worktree match HEAD.
#[test]
fn repo_is_dirty_clean_repo_returns_some_false() {
    let workdir = tempfile::TempDir::new().unwrap();
    init_clean_repo_with_file(workdir.path());
    let repository = gix::open(workdir.path()).expect("gix::open clean repo");
    let dirty = super::repo_is_dirty(&repository);
    assert_eq!(dirty, Some(false), "clean repo must yield Some(false)");
}
// `repo_is_dirty` must report Some(true) once a tracked file is modified.
#[test]
fn repo_is_dirty_dirty_worktree_returns_some_true() {
    let workdir = tempfile::TempDir::new().unwrap();
    init_clean_repo_with_file(workdir.path());
    // Touch the tracked file so the worktree diverges from HEAD.
    std::fs::write(workdir.path().join("file.txt"), b"modified\n").unwrap();
    let repository = gix::open(workdir.path()).expect("gix::open dirty repo");
    let dirty = super::repo_is_dirty(&repository);
    assert_eq!(dirty, Some(true), "dirty worktree must yield Some(true)");
}
// Non-git directory: detect_commit_at must return None. The tempdir is
// created directly under the system temp dir, and the first assert verifies
// the precondition that no ancestor of it is a git repo.
#[test]
fn detect_project_commit_non_git_returns_none() {
    let tmp = tempfile::TempDir::new_in(std::env::temp_dir()).unwrap();
    assert!(
        gix::discover(tmp.path()).is_err(),
        "tempdir {} must not resolve to any ancestor git repo",
        tmp.path().display()
    );
    let result = super::detect_commit_at(tmp.path());
    assert!(
        result.is_none(),
        "non-git directory must yield None, got {result:?}"
    );
}
// A freshly-initialized repo has an unborn HEAD (no commits yet);
// detect_commit_at must return None rather than fabricate a hash.
#[test]
fn detect_project_commit_unborn_head_returns_none() {
    let tmp = tempfile::TempDir::new().unwrap();
    let _repo = gix::init(tmp.path()).expect("gix::init");
    let result = super::detect_commit_at(tmp.path());
    assert!(
        result.is_none(),
        "unborn HEAD must yield None, got {result:?}"
    );
}
// Races 8 threads through detect_commit_at on the same path; every thread
// must agree with a single-threaded baseline taken beforehand.
#[test]
fn detect_project_commit_concurrent_calls_agree() {
    let tmp = tempfile::TempDir::new().unwrap();
    init_clean_repo_with_file(tmp.path());
    let path = tmp.path();
    let baseline =
        super::detect_commit_at(path).expect("baseline single-thread call must yield Some");
    const THREADS: usize = 8;
    // std::thread::scope lets the closures borrow `path` without 'static.
    let results = std::thread::scope(|scope| {
        let mut handles = Vec::with_capacity(THREADS);
        for _ in 0..THREADS {
            handles.push(scope.spawn(|| super::detect_commit_at(path)));
        }
        handles
            .into_iter()
            .map(|h| h.join().expect("thread join"))
            .collect::<Vec<_>>()
    });
    for (i, r) in results.iter().enumerate() {
        assert_eq!(
            r.as_deref(),
            Some(baseline.as_str()),
            "thread {i} disagreed with baseline {baseline:?}: got {r:?}"
        );
    }
}
// An uninitialized submodule — a gitlink tree entry pointing at the all-zero
// OID plus an empty checkout directory — must not make detect_commit_at
// report the repo as dirty.
#[test]
fn detect_project_commit_submodule_uninit_is_clean() {
    let tmp = tempfile::TempDir::new().unwrap();
    let mut repo = gix::init(tmp.path()).expect("gix::init");
    let _ = repo
        .committer_or_set_generic_fallback()
        .expect("committer fallback");
    set_test_author_fallback(&mut repo);
    // .gitmodules declares the submodule; the URL is deliberately
    // unresolvable so nothing can ever fetch it.
    let gitmodules_content = b"\
[submodule \"submod\"]\n\
\tpath = submod\n\
\turl = https://example.invalid/submod.git\n";
    let gitmodules_blob: gix::ObjectId = repo
        .write_blob(gitmodules_content)
        .expect("write .gitmodules blob")
        .detach();
    // The gitlink (Commit-kind) tree entry records the null OID.
    let null_commit_id = gix::ObjectId::null(gix::hash::Kind::Sha1);
    let tree = gix::objs::Tree {
        entries: vec![
            gix::objs::tree::Entry {
                mode: gix::objs::tree::EntryKind::Blob.into(),
                filename: ".gitmodules".into(),
                oid: gitmodules_blob,
            },
            gix::objs::tree::Entry {
                mode: gix::objs::tree::EntryKind::Commit.into(),
                filename: "submod".into(),
                oid: null_commit_id,
            },
        ],
    };
    let tree_id: gix::ObjectId = repo.write_object(&tree).expect("write tree").detach();
    let head: gix::ObjectId = repo
        .commit("HEAD", "init", tree_id, std::iter::empty::<gix::ObjectId>())
        .expect("commit")
        .detach();
    let mut idx = repo.index_from_tree(&tree_id).expect("index_from_tree");
    idx.write(gix::index::write::Options::default())
        .expect("write index");
    std::fs::write(tmp.path().join(".gitmodules"), gitmodules_content)
        .expect("write .gitmodules worktree");
    // The submodule checkout directory exists but is empty.
    std::fs::create_dir(tmp.path().join("submod")).expect("create submod dir");
    let result =
        super::detect_commit_at(tmp.path()).expect("submodule repo must still yield Some");
    assert_eq!(
        result,
        head.to_hex_with_len(7).to_string(),
        "uninitialized submodule must not trigger -dirty suffix"
    );
}
// Mirror of the detect_project_commit clean-repo test, probing
// detect_kernel_commit: a clean repo must yield HEAD's 7-char short hash.
#[test]
fn detect_kernel_commit_clean_repo_returns_short_hash() {
    let tmp = tempfile::TempDir::new().unwrap();
    let head = init_clean_repo_with_file(tmp.path());
    let result = super::detect_kernel_commit(tmp.path()).expect("clean repo must yield Some");
    assert_eq!(
        result.len(),
        7,
        "clean result must be a 7-char hex hash, got {result:?}"
    );
    assert!(
        !result.contains('-'),
        "clean result must not carry a -dirty suffix, got {result:?}"
    );
    assert!(
        result.chars().all(|c| c.is_ascii_hexdigit()),
        "clean result must be pure hex, got {result:?}"
    );
    assert_eq!(
        result,
        head.to_hex_with_len(7).to_string(),
        "clean result must match the HEAD short hash exactly"
    );
}
// Mirror of the detect_project_commit dirty-repo test, probing
// detect_kernel_commit: a modified tracked file must add "-dirty".
#[test]
fn detect_kernel_commit_dirty_repo_appends_dirty_suffix() {
    let tmp = tempfile::TempDir::new().unwrap();
    let head = init_clean_repo_with_file(tmp.path());
    std::fs::write(tmp.path().join("file.txt"), b"modified\n").unwrap();
    let result = super::detect_kernel_commit(tmp.path()).expect("dirty repo must yield Some");
    let expected_prefix = head.to_hex_with_len(7).to_string();
    assert_eq!(
        result,
        format!("{expected_prefix}-dirty"),
        "dirty result must be {expected_prefix:?} + -dirty suffix"
    );
}
// The nested dir's PARENT is a repo, so an upward `gix::discover` would
// succeed — the precondition assert proves that. detect_kernel_commit must
// still return None for the nested dir, demonstrating (per the assertion
// message) that it opens the exact path rather than discovering upward.
#[test]
fn detect_kernel_commit_non_git_directory_returns_none() {
    let parent = tempfile::TempDir::new().unwrap();
    init_clean_repo_with_file(parent.path());
    let nested = parent.path().join("not_a_repo");
    std::fs::create_dir(&nested).expect("create nested non-git subdir");
    assert!(
        gix::discover(&nested).is_ok(),
        "gix::discover must succeed from the nested path (parent IS a repo) — \
        this precondition validates that detect_kernel_commit's open-vs-discover \
        choice is the correct one for the test scenario",
    );
    let result = super::detect_kernel_commit(&nested);
    assert!(
        result.is_none(),
        "non-git directory must yield None — `detect_kernel_commit` uses \
        `gix::open` (NOT `gix::discover`), so the parent's HEAD must \
        NOT leak through. Got {result:?}",
    );
}
// A repo with no commits (unborn HEAD) must yield None from
// detect_kernel_commit.
#[test]
fn detect_kernel_commit_unborn_head_returns_none() {
    let tmp = tempfile::TempDir::new().unwrap();
    let _repo = gix::init(tmp.path()).expect("gix::init");
    let result = super::detect_kernel_commit(tmp.path());
    assert!(
        result.is_none(),
        "unborn HEAD must yield None, got {result:?}"
    );
}
// Mirror of the detect_project_commit submodule test, probing
// detect_kernel_commit: an uninitialized submodule (null-OID gitlink entry
// plus an empty checkout directory) must not trigger the -dirty suffix.
#[test]
fn detect_kernel_commit_submodule_uninit_is_clean() {
    let tmp = tempfile::TempDir::new().unwrap();
    let mut repo = gix::init(tmp.path()).expect("gix::init");
    let _ = repo
        .committer_or_set_generic_fallback()
        .expect("committer fallback");
    set_test_author_fallback(&mut repo);
    // Unresolvable URL: the submodule can never be fetched or initialized.
    let gitmodules_content = b"\
[submodule \"submod\"]\n\
\tpath = submod\n\
\turl = https://example.invalid/submod.git\n";
    let gitmodules_blob: gix::ObjectId = repo
        .write_blob(gitmodules_content)
        .expect("write .gitmodules blob")
        .detach();
    let null_commit_id = gix::ObjectId::null(gix::hash::Kind::Sha1);
    let tree = gix::objs::Tree {
        entries: vec![
            gix::objs::tree::Entry {
                mode: gix::objs::tree::EntryKind::Blob.into(),
                filename: ".gitmodules".into(),
                oid: gitmodules_blob,
            },
            gix::objs::tree::Entry {
                mode: gix::objs::tree::EntryKind::Commit.into(),
                filename: "submod".into(),
                oid: null_commit_id,
            },
        ],
    };
    let tree_id: gix::ObjectId = repo.write_object(&tree).expect("write tree").detach();
    let head: gix::ObjectId = repo
        .commit("HEAD", "init", tree_id, std::iter::empty::<gix::ObjectId>())
        .expect("commit")
        .detach();
    let mut idx = repo.index_from_tree(&tree_id).expect("index_from_tree");
    idx.write(gix::index::write::Options::default())
        .expect("write index");
    std::fs::write(tmp.path().join(".gitmodules"), gitmodules_content)
        .expect("write .gitmodules worktree");
    std::fs::create_dir(tmp.path().join("submod")).expect("create submod dir");
    let result =
        super::detect_kernel_commit(tmp.path()).expect("submodule repo must still yield Some");
    assert_eq!(
        result,
        head.to_hex_with_len(7).to_string(),
        "uninitialized submodule must not trigger -dirty suffix"
    );
}
// Three consecutive detect_project_commit calls must all return the same
// Option<String>; per the assertion messages the value is memoized in a
// OnceLock. The value itself depends on the environment the test runs in,
// so only call-to-call agreement is asserted.
#[test]
fn detect_project_commit_memoizes_across_consecutive_calls() {
    let first = super::detect_project_commit();
    let second = super::detect_project_commit();
    assert_eq!(
        first, second,
        "consecutive detect_project_commit calls must return \
        identical Option<String> via the OnceLock cache; \
        got first={first:?}, second={second:?}",
    );
    let third = super::detect_project_commit();
    assert_eq!(
        first, third,
        "third detect_project_commit call must still match the \
        first; got first={first:?}, third={third:?}",
    );
}
// Repeated detect_kernel_commit calls with the SAME path must agree; per
// the assertion messages the results are cached in a Mutex<HashMap> keyed
// by path. The first call is also pinned to the known clean short hash.
#[test]
fn detect_kernel_commit_memoizes_across_consecutive_calls_same_path() {
    let tmp = tempfile::TempDir::new().expect("tempdir");
    let head = init_clean_repo_with_file(tmp.path());
    let expected = head.to_hex_with_len(7).to_string();
    let first = super::detect_kernel_commit(tmp.path());
    let second = super::detect_kernel_commit(tmp.path());
    let third = super::detect_kernel_commit(tmp.path());
    assert_eq!(
        first.as_deref(),
        Some(expected.as_str()),
        "first call must return the clean short hash {expected:?}; \
        got {first:?}",
    );
    assert_eq!(
        first, second,
        "consecutive detect_kernel_commit calls with the same \
        path must agree via the Mutex<HashMap> cache; got \
        first={first:?}, second={second:?}",
    );
    assert_eq!(
        first, third,
        "third detect_kernel_commit call with the same path must \
        still match; got first={first:?}, third={third:?}",
    );
}
// Two repos with distinct HEADs, probed in interleaved order: each path
// must consistently return its own HEAD's short hash, proving the per-path
// cache keeps entries separate rather than collapsing to one value.
#[test]
fn detect_kernel_commit_distinct_paths_do_not_cross_contaminate() {
    let tmp_a = tempfile::TempDir::new().expect("tempdir A");
    let tmp_b = tempfile::TempDir::new().expect("tempdir B");
    let head_a = init_clean_repo_with_file(tmp_a.path());
    // Repo B is built by hand with different blob content so its HEAD hash
    // differs from repo A's.
    let mut repo_b = gix::init(tmp_b.path()).expect("gix::init B");
    let _ = repo_b
        .committer_or_set_generic_fallback()
        .expect("committer fallback B");
    set_test_author_fallback(&mut repo_b);
    let blob_id_b: gix::ObjectId = repo_b
        .write_blob(b"different\n")
        .expect("write blob B")
        .detach();
    let tree_b = gix::objs::Tree {
        entries: vec![gix::objs::tree::Entry {
            mode: gix::objs::tree::EntryKind::Blob.into(),
            filename: "file.txt".into(),
            oid: blob_id_b,
        }],
    };
    let tree_id_b: gix::ObjectId = repo_b.write_object(&tree_b).expect("write tree B").detach();
    let head_b: gix::ObjectId = repo_b
        .commit(
            "HEAD",
            "init B",
            tree_id_b,
            std::iter::empty::<gix::ObjectId>(),
        )
        .expect("commit B")
        .detach();
    let mut idx_b = repo_b
        .index_from_tree(&tree_id_b)
        .expect("index_from_tree B");
    idx_b
        .write(gix::index::write::Options::default())
        .expect("write index B");
    std::fs::write(tmp_b.path().join("file.txt"), b"different\n")
        .expect("write worktree file B");
    let expected_a = head_a.to_hex_with_len(7).to_string();
    let expected_b = head_b.to_hex_with_len(7).to_string();
    assert_ne!(
        expected_a, expected_b,
        "fixture precondition: the two repos must have distinct \
        HEADs for this test to mean anything; got a={expected_a} \
        b={expected_b}",
    );
    // Interleave the probes: A, B, A, B.
    let a1 = super::detect_kernel_commit(tmp_a.path());
    let b1 = super::detect_kernel_commit(tmp_b.path());
    let a2 = super::detect_kernel_commit(tmp_a.path());
    let b2 = super::detect_kernel_commit(tmp_b.path());
    assert_eq!(
        a1.as_deref(),
        Some(expected_a.as_str()),
        "first call against path A must return A's HEAD short \
        hash {expected_a:?}; got {a1:?}",
    );
    assert_eq!(
        b1.as_deref(),
        Some(expected_b.as_str()),
        "first call against path B must return B's HEAD short \
        hash {expected_b:?}; got {b1:?}",
    );
    assert_eq!(
        a1, a2,
        "second call against path A must match the first \
        (cache hit on the A entry); got a1={a1:?}, a2={a2:?}",
    );
    assert_eq!(
        b1, b2,
        "second call against path B must match the first \
        (cache hit on the B entry, NOT contaminated by A); \
        got b1={b1:?}, b2={b2:?}",
    );
    assert_ne!(
        a2, b2,
        "after interleaved calls, A and B must STILL hold \
        distinct values — a regression that lost per-key \
        distinction would equate them; got a2={a2:?}, b2={b2:?}",
    );
}
// Unix-only: probing through a symlink alias of an already-probed path must
// hit the cached value. The worktree is dirtied AFTER the first probe, so a
// "-dirty" result from the alias would prove it bypassed the cache — i.e.
// the cache key was not canonicalized.
#[cfg(unix)]
#[test]
fn detect_kernel_commit_canonicalizes_symlink_aliases() {
    let tmp = tempfile::TempDir::new().expect("tempdir");
    let real = tmp.path().join("real");
    std::fs::create_dir(&real).expect("mkdir real");
    let head = init_clean_repo_with_file(&real);
    let alias = tmp.path().join("alias");
    std::os::unix::fs::symlink(&real, &alias).expect("symlink alias -> real");
    // First probe via the canonical path primes the cache while the repo is
    // still clean.
    let real_clean =
        super::detect_kernel_commit(&real).expect("clean canonical-path probe must yield Some");
    assert_eq!(
        real_clean,
        head.to_hex_with_len(7).to_string(),
        "fixture precondition: canonical-path probe must return \
        the clean 7-char head hash; got {real_clean:?}",
    );
    // Dirty the worktree so a cache bypass would be observable as -dirty.
    std::fs::write(real.join("file.txt"), b"modified-after-prime\n")
        .expect("dirty the worktree");
    let alias_result =
        super::detect_kernel_commit(&alias).expect("alias-path probe must yield Some");
    assert!(
        !alias_result.ends_with("-dirty"),
        "alias call must hit the cached pre-dirt entry — a \
        `-dirty` suffix proves the alias bypassed the cache \
        and re-probed the now-dirty repo, which is the \
        regression this test guards against. got {alias_result:?}",
    );
    assert_eq!(
        alias_result, real_clean,
        "alias call must return the EXACT cached clean value \
        from the canonical-path probe; got alias={alias_result:?}, \
        cached={real_clean:?}",
    );
}
// Builds a Local-source KernelMetadata fixture carrying a persisted source
// tree path plus the version and config hashes the resolver inspects.
fn local_metadata_with_source_tree(
    version: &str,
    source_tree_path: std::path::PathBuf,
) -> crate::cache::KernelMetadata {
    let source = crate::cache::KernelSource::Local {
        source_tree_path: Some(source_tree_path),
        git_hash: None,
    };
    let base = crate::cache::KernelMetadata::new(
        source,
        std::env::consts::ARCH.to_string(),
        "bzImage".to_string(),
        "2026-04-26T00:00:00Z".to_string(),
    );
    base.with_version(Some(version.to_string()))
        .with_config_hash(Some("abc123".to_string()))
        .with_ktstr_kconfig_hash(Some("def456".to_string()))
}
// Drops a placeholder kernel image named `bzImage` into `dir` and returns
// its full path.
fn create_fake_image_in(dir: &std::path::Path) -> std::path::PathBuf {
    let image_path = dir.join("bzImage");
    std::fs::write(&image_path, b"fake kernel image").expect("write fake image");
    image_path
}
// A Local-source entry stored under the tarball-shaped key for a version
// must be found by the resolver's direct lookup.
#[test]
fn resolve_kernel_source_dir_with_cache_version_tarball_key_local_source() {
    let root = tempfile::TempDir::new().expect("cache tempdir");
    let cache = crate::cache::CacheDir::with_root(root.path().to_path_buf());
    let source_tree = tempfile::TempDir::new().expect("src tempdir");
    let image_home = tempfile::TempDir::new().expect("image tempdir");
    let image = create_fake_image_in(image_home.path());
    let arch = std::env::consts::ARCH;
    let key = format!("6.14.2-tarball-{arch}-kc{}", crate::cache_key_suffix());
    let metadata =
        local_metadata_with_source_tree("6.14.2", source_tree.path().to_path_buf());
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &metadata)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    let resolved = super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert_eq!(
        resolved.as_deref(),
        Some(source_tree.path()),
        "tarball-shaped Local entry must resolve via direct lookup",
    );
}
// The entry is stored under a "local-…" key, so the tarball-shaped direct
// lookup misses; the resolver's fallback scan must still find this Local
// entry by its recorded version.
#[test]
fn resolve_kernel_source_dir_with_cache_version_falls_back_to_scan_for_local() {
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let src = tempfile::TempDir::new().expect("src tempdir");
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let image = create_fake_image_in(image_dir.path());
    let key = format!(
        "local-deadbee-{arch}-kc{suffix}",
        arch = std::env::consts::ARCH,
        suffix = crate::cache_key_suffix(),
    );
    let meta = local_metadata_with_source_tree("6.14.2", src.path().to_path_buf());
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    let resolved = super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert_eq!(
        resolved.as_deref(),
        Some(src.path()),
        "fallback scan must find a Local entry by version when \
        the tarball-shaped lookup misses",
    );
}
// Fallback scan must skip entries whose source is not Local — here a
// Tarball-source entry that even carries the requested version.
#[test]
fn resolve_kernel_source_dir_with_cache_version_skips_non_local_in_fallback() {
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let image = create_fake_image_in(image_dir.path());
    // Non-matching key shape forces the resolver onto the fallback scan.
    let key = format!(
        "weird-key-{arch}-kc{suffix}",
        arch = std::env::consts::ARCH,
        suffix = crate::cache_key_suffix(),
    );
    let meta = crate::cache::KernelMetadata::new(
        crate::cache::KernelSource::Tarball,
        std::env::consts::ARCH.to_string(),
        "bzImage".to_string(),
        "2026-04-26T00:00:00Z".to_string(),
    )
    .with_version(Some("6.14.2".to_string()))
    .with_config_hash(Some("abc123".to_string()))
    .with_ktstr_kconfig_hash(Some("def456".to_string()));
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    let resolved = super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert!(
        resolved.is_none(),
        "non-Local entries are transient and must not be returned by the fallback scan; got {resolved:?}",
    );
}
// Fallback scan must skip Local entries whose recorded version (6.13.0)
// does not match the requested one (6.14.2).
#[test]
fn resolve_kernel_source_dir_with_cache_version_skips_mismatched_version_in_fallback() {
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let src = tempfile::TempDir::new().expect("src tempdir");
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let image = create_fake_image_in(image_dir.path());
    let key = format!(
        "local-deadbee-{arch}-kc{suffix}",
        arch = std::env::consts::ARCH,
        suffix = crate::cache_key_suffix(),
    );
    let meta = local_metadata_with_source_tree("6.13.0", src.path().to_path_buf());
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    let resolved = super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert!(
        resolved.is_none(),
        "Local entry with mismatched version must not be returned; got {resolved:?}",
    );
}
// KernelId::CacheKey: a direct lookup of a Local-source entry must yield
// its persisted source tree path.
#[test]
fn resolve_kernel_source_dir_with_cache_cache_key_direct_lookup_local() {
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let src = tempfile::TempDir::new().expect("src tempdir");
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let image = create_fake_image_in(image_dir.path());
    let key = format!(
        "local-deadbee-{arch}-kc{suffix}",
        arch = std::env::consts::ARCH,
        suffix = crate::cache_key_suffix(),
    );
    let meta = local_metadata_with_source_tree("6.14.2", src.path().to_path_buf());
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    // The id carries the exact key the entry was stored under.
    let id = crate::kernel_path::KernelId::CacheKey(key);
    let resolved = super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert_eq!(resolved.as_deref(), Some(src.path()));
}
// KernelId::CacheKey pointing at a Git-source entry must yield None — per
// the assertion message, Git sources have no persisted source tree.
#[test]
fn resolve_kernel_source_dir_with_cache_cache_key_non_local_yields_none() {
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let image = create_fake_image_in(image_dir.path());
    let key = format!(
        "main-git-deadbee-{arch}-kc{suffix}",
        arch = std::env::consts::ARCH,
        suffix = crate::cache_key_suffix(),
    );
    let meta = crate::cache::KernelMetadata::new(
        crate::cache::KernelSource::Git {
            git_hash: Some("deadbee".to_string()),
            git_ref: Some("main".to_string()),
        },
        std::env::consts::ARCH.to_string(),
        "bzImage".to_string(),
        "2026-04-26T00:00:00Z".to_string(),
    )
    .with_version(Some("6.14.2".to_string()))
    .with_config_hash(Some("abc123".to_string()))
    .with_ktstr_kconfig_hash(Some("def456".to_string()));
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::CacheKey(key);
    let resolved = super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert!(
        resolved.is_none(),
        "Git source has no persisted source tree; got {resolved:?}",
    );
}
// With nothing stored in the cache at all, version resolution must miss.
#[test]
fn resolve_kernel_source_dir_with_cache_version_empty_cache_yields_none() {
    let root = tempfile::TempDir::new().expect("cache tempdir");
    let cache = crate::cache::CacheDir::with_root(root.path().to_path_buf());
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    assert!(super::resolve_kernel_source_dir_with_cache(&id, &cache).is_none());
}
// KTSTR_KERNEL pointing at a cache-entry directory with Local-source
// metadata.json: the Path arm must recover source_tree_path from the
// metadata instead of returning the env value itself.
#[test]
fn resolve_kernel_source_dir_path_metadata_local_returns_source_tree() {
    use super::super::test_helpers::{EnvVarGuard, lock_env};
    // Env vars are process-global; serialize with the shared env lock.
    let _lock = lock_env();
    let cache_entry = tempfile::TempDir::new().expect("cache entry tempdir");
    let src_tree = tempfile::TempDir::new().expect("src tree tempdir");
    let meta = local_metadata_with_source_tree("6.14.2", src_tree.path().to_path_buf());
    std::fs::write(
        cache_entry.path().join("metadata.json"),
        serde_json::to_string(&meta).expect("serialize metadata"),
    )
    .expect("write metadata.json");
    let _guard = EnvVarGuard::set("KTSTR_KERNEL", cache_entry.path());
    assert_eq!(
        super::resolve_kernel_source_dir().as_deref(),
        Some(src_tree.path()),
        "Path arm must recover source_tree_path from metadata.json \
        when the env value points at a cache entry with a Local source",
    );
}
// When KTSTR_KERNEL points at a directory without metadata.json, the Path
// arm must hand back the env value untouched.
#[test]
fn resolve_kernel_source_dir_path_no_metadata_returns_env_value() {
    use super::super::test_helpers::{EnvVarGuard, lock_env};
    let _lock = lock_env();
    let kernel_dir = tempfile::TempDir::new().expect("dir tempdir");
    let _guard = EnvVarGuard::set("KTSTR_KERNEL", kernel_dir.path());
    let resolved = super::resolve_kernel_source_dir();
    assert_eq!(
        resolved.as_deref(),
        Some(kernel_dir.path()),
        "Path arm with no metadata.json must return the env value verbatim",
    );
}
// KTSTR_KERNEL pointing at a cache entry whose metadata source is Tarball
// (non-Local): the Path arm must return the env value verbatim, since a
// Tarball entry records no source_tree_path to recover.
#[test]
fn resolve_kernel_source_dir_path_metadata_non_local_falls_through() {
    use super::super::test_helpers::{EnvVarGuard, lock_env};
    let _lock = lock_env();
    let cache_entry = tempfile::TempDir::new().expect("cache entry tempdir");
    let meta = crate::cache::KernelMetadata::new(
        crate::cache::KernelSource::Tarball,
        std::env::consts::ARCH.to_string(),
        "bzImage".to_string(),
        "2026-04-26T00:00:00Z".to_string(),
    )
    .with_version(Some("6.14.2".to_string()));
    std::fs::write(
        cache_entry.path().join("metadata.json"),
        serde_json::to_string(&meta).expect("serialize metadata"),
    )
    .expect("write metadata.json");
    let _guard = EnvVarGuard::set("KTSTR_KERNEL", cache_entry.path());
    assert_eq!(
        super::resolve_kernel_source_dir().as_deref(),
        Some(cache_entry.path()),
        "Path arm with non-Local source metadata must fall back \
        to the env value verbatim — Tarball entries have no \
        persisted source_tree_path to recover",
    );
}
}