use super::super::super::test_helpers::{EnvVarGuard, lock_env};
use super::super::*;
use super::find_single_sidecar_by_prefix;
use crate::assert::AssertResult;
use crate::scenario::Ctx;
use anyhow::Result;
// Verifies the skip-path sidecar contract: `write_skip_sidecar` must record
// passed=true AND skipped=true (verdict gate stays green, stats tooling can
// still exclude the run), bucket it under the "skipped" work_type, preserve
// the active flag list, and populate the host context.
#[test]
fn write_skip_sidecar_records_passed_true_skipped_true() {
    // Serialize env-var mutation across tests; KTSTR_SIDECAR_DIR is process-global.
    let _lock = lock_env();
    let tmp = tempfile::Builder::new()
        .prefix("ktstr-sidecar-skip-writes-test-")
        .tempdir()
        .expect("create tempdir");
    // Redirect sidecar output into the tempdir; the guard restores on drop.
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp.path());
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__skip_sidecar_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let active_flags: Vec<String> = vec!["llc".to_string()];
    write_skip_sidecar(&entry, &active_flags).expect("skip sidecar must write");
    let path = find_single_sidecar_by_prefix(tmp.path(), "__skip_sidecar_test__-");
    let data = std::fs::read_to_string(&path).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&data).unwrap();
    assert_eq!(loaded.test_name, "__skip_sidecar_test__");
    assert!(
        loaded.passed,
        "skip sidecar must set passed=true so the verdict gate does not flip fail",
    );
    assert!(
        loaded.skipped,
        "skip sidecar must set skipped=true so stats tooling excludes from pass count",
    );
    assert_eq!(
        loaded.work_type, "skipped",
        "skip path uses the 'skipped' work_type bucket so grouping keeps the skip distinguishable",
    );
    assert_eq!(loaded.active_flags, active_flags);
    // Host presence is OS-independent and must always hold.
    assert!(
        loaded.host.is_some(),
        "write_skip_sidecar must populate host field from collect_host_context",
    );
    // The assertions below are inherently Linux-specific (kernel_name "Linux",
    // /proc-sourced cmdline), so gate them like the cfg-gated sibling test
    // `sidecars_in_a_run_carry_identical_host_context` instead of failing the
    // whole test on other host OSes.
    #[cfg(target_os = "linux")]
    {
        let host = loaded
            .host
            .as_ref()
            .expect("write_skip_sidecar must populate host field from collect_host_context");
        assert_eq!(host.kernel_name.as_deref(), Some("Linux"));
        assert!(
            host.kernel_cmdline.is_some(),
            "write_skip_sidecar must capture full HostContext, not Default::default()",
        );
        assert!(
            host.kernel_release.is_some(),
            "write_skip_sidecar must capture kernel_release (syscall-sourced)",
        );
    }
}
// Error-path check: pointing KTSTR_SIDECAR_DIR at an existing regular file
// makes directory creation impossible, and write_skip_sidecar must surface
// that as Err rather than silently dropping the sidecar.
#[test]
fn write_skip_sidecar_returns_err_when_dir_cannot_be_created() {
    // Serialize env-var mutation across tests.
    let _lock = lock_env();
    // A temp *file* (not a directory) acts as the blocker: creating a
    // directory at this path must fail.
    let blocker = tempfile::Builder::new()
        .prefix("ktstr-sidecar-skip-blocker-")
        .tempfile()
        .expect("create blocker tempfile");
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", blocker.path());
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__skip_sidecar_err_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let result = write_skip_sidecar(&entry, &[]);
    assert!(
        result.is_err(),
        "skip sidecar write must return Err when the target is a regular file",
    );
}
// Serde-contract test for SidecarResult: empty/absent fields must still be
// emitted explicitly (`null` / `[]`) rather than omitted via
// `skip_serializing_if`, and every field must round-trip symmetrically.
#[test]
fn sidecar_payload_and_metrics_always_emit_when_empty() {
    let sc = SidecarResult::test_fixture();
    let json = serde_json::to_string(&sc).unwrap();
    assert!(
        json.contains("\"payload\":null"),
        "empty payload must emit as `\"payload\":null`: {json}",
    );
    assert!(
        json.contains("\"metrics\":[]"),
        "empty metrics must emit as `\"metrics\":[]`: {json}",
    );
    assert!(
        json.contains("\"project_commit\":null"),
        "absent project_commit must emit as `\"project_commit\":null`, \
        not be omitted via `skip_serializing_if`: {json}",
    );
    assert!(
        json.contains("\"kernel_commit\":null"),
        "absent kernel_commit must emit as `\"kernel_commit\":null`, \
        not be omitted via `skip_serializing_if`: {json}",
    );
    let loaded: SidecarResult = serde_json::from_str(&json).unwrap();
    // Exhaustive destructure (deliberately no `..`): adding a field to
    // SidecarResult breaks compilation here, forcing this test to cover
    // the new field's empty-state round-trip too.
    let SidecarResult {
        test_name: _,
        topology: _,
        scheduler: _,
        scheduler_commit,
        project_commit,
        payload,
        metrics,
        passed: _,
        skipped: _,
        stats: _,
        monitor,
        stimulus_events,
        work_type: _,
        active_flags,
        verifier_stats,
        kvm_stats,
        sysctls,
        kargs,
        kernel_version,
        kernel_commit,
        timestamp: _,
        run_id: _,
        host,
        cleanup_duration_ms,
        run_source,
    } = loaded;
    assert!(payload.is_none());
    assert!(metrics.is_empty());
    assert!(scheduler_commit.is_none());
    assert!(project_commit.is_none());
    assert!(monitor.is_none());
    assert!(stimulus_events.is_empty());
    assert!(active_flags.is_empty());
    assert!(verifier_stats.is_empty());
    assert!(kvm_stats.is_none());
    assert!(sysctls.is_empty());
    assert!(kargs.is_empty());
    assert!(kernel_version.is_none());
    assert!(kernel_commit.is_none());
    assert!(host.is_none());
    assert!(cleanup_duration_ms.is_none());
    assert!(
        run_source.is_none(),
        "absent run_source must round-trip as None, \
        matching the symmetric serialize/deserialize \
        contract enforced for every other nullable field",
    );
}
// Populated-case counterpart to the empty-state test: a payload name and a
// PayloadMetrics entry (including the stdout/stderr stream tag) must survive
// a JSON serialize/deserialize round trip unchanged.
#[test]
fn sidecar_payload_and_metrics_roundtrip_populated() {
    use crate::test_support::{Metric, MetricSource, MetricStream, PayloadMetrics, Polarity};
    let pm = PayloadMetrics {
        payload_index: 0,
        metrics: vec![Metric {
            name: "iops".to_string(),
            value: 5000.0,
            polarity: Polarity::HigherBetter,
            unit: "iops".to_string(),
            source: MetricSource::Json,
            stream: MetricStream::Stdout,
        }],
        exit_code: 0,
    };
    let sc = SidecarResult {
        test_name: "fio_run".to_string(),
        topology: "1n1l2c1t".to_string(),
        payload: Some("fio".to_string()),
        metrics: vec![pm],
        ..SidecarResult::test_fixture()
    };
    let json = serde_json::to_string(&sc).unwrap();
    assert!(json.contains("\"payload\":\"fio\""));
    assert!(json.contains("\"metrics\""));
    assert!(json.contains("\"iops\""));
    let loaded: SidecarResult = serde_json::from_str(&json).unwrap();
    assert_eq!(loaded.payload.as_deref(), Some("fio"));
    assert_eq!(loaded.metrics.len(), 1);
    assert_eq!(loaded.metrics[0].exit_code, 0);
    assert_eq!(loaded.metrics[0].metrics.len(), 1);
    assert_eq!(loaded.metrics[0].metrics[0].name, "iops");
    // Exact float comparison is safe: 5000.0 round-trips exactly through JSON.
    assert_eq!(loaded.metrics[0].metrics[0].value, 5000.0);
    assert_eq!(
        loaded.metrics[0].metrics[0].stream,
        MetricStream::Stdout,
        "metric stream tag must round-trip through sidecar \
        serde; a regression that lost `stream` serialization \
        or deserialized it to a different variant would break \
        review-tooling's stdout-vs-stderr attribution",
    );
}
// write_sidecar must copy the entry's Payload name ("fio") into the sidecar,
// while the metrics field stays empty when an empty slice is passed.
#[test]
fn write_sidecar_records_entry_payload_name() {
    use crate::test_support::{OutputFormat, Payload, PayloadKind};
    // Serialize env-var mutation across tests.
    let _lock = lock_env();
    let tmp = tempfile::Builder::new()
        .prefix("ktstr-sidecar-payload-name-test-")
        .tempdir()
        .expect("create tempdir");
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp.path());
    // `static` because KtstrTestEntry.payload holds a &'static Payload.
    static FIO: Payload = Payload {
        name: "fio",
        kind: PayloadKind::Binary("fio"),
        output: OutputFormat::Json,
        default_args: &[],
        default_checks: &[],
        metrics: &[],
        include_files: &[],
        uses_parent_pgrp: false,
        known_flags: None,
        metric_bounds: None,
    };
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__payload_name_test__",
        func: dummy,
        auto_repro: false,
        payload: Some(&FIO),
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let ok = AssertResult::pass();
    write_sidecar(&entry, &vm_result, &[], &ok, "SpinWait", &[], &[]).unwrap();
    let path = find_single_sidecar_by_prefix(tmp.path(), "__payload_name_test__-");
    let data = std::fs::read_to_string(&path).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&data).unwrap();
    assert_eq!(loaded.payload.as_deref(), Some("fio"));
    assert!(
        loaded.metrics.is_empty(),
        "metrics stay empty until a Ctx-level accumulator lands",
    );
}
// write_sidecar must forward the caller-supplied PayloadMetrics slice
// verbatim — both a populated entry and an empty/failing one — into the
// persisted sidecar.
#[test]
fn write_sidecar_forwards_payload_metrics_slice() {
    use crate::test_support::{Metric, MetricSource, MetricStream, PayloadMetrics, Polarity};
    // Serialize env-var mutation across tests.
    let _lock = lock_env();
    let tmp = tempfile::Builder::new()
        .prefix("ktstr-sidecar-metrics-slice-test-")
        .tempdir()
        .expect("create tempdir");
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp.path());
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__metrics_slice_test__",
        func: dummy,
        auto_repro: false,
        ..KtstrTestEntry::DEFAULT
    };
    let vm_result = crate::vmm::VmResult::test_fixture();
    let ok = AssertResult::pass();
    // Two entries: one with a metric, one empty with a non-zero exit code,
    // so both shapes are proven to survive the write.
    let metrics = vec![
        PayloadMetrics {
            payload_index: 0,
            metrics: vec![Metric {
                name: "iops".to_string(),
                value: 1200.0,
                polarity: Polarity::HigherBetter,
                unit: "iops".to_string(),
                source: MetricSource::Json,
                stream: MetricStream::Stdout,
            }],
            exit_code: 0,
        },
        PayloadMetrics {
            payload_index: 1,
            metrics: vec![],
            exit_code: 2,
        },
    ];
    write_sidecar(&entry, &vm_result, &[], &ok, "SpinWait", &[], &metrics).unwrap();
    let path = find_single_sidecar_by_prefix(tmp.path(), "__metrics_slice_test__-");
    let data = std::fs::read_to_string(&path).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&data).unwrap();
    assert_eq!(loaded.metrics.len(), 2);
    assert_eq!(loaded.metrics[0].exit_code, 0);
    assert_eq!(loaded.metrics[0].metrics.len(), 1);
    assert_eq!(loaded.metrics[0].metrics[0].name, "iops");
    assert_eq!(loaded.metrics[1].exit_code, 2);
    assert!(loaded.metrics[1].metrics.is_empty());
}
// Skip-path analogue of write_sidecar_records_entry_payload_name: the skip
// sidecar must still carry the entry's payload name, set skipped=true, and
// never accumulate metrics.
#[test]
fn write_skip_sidecar_records_entry_payload_name() {
    use crate::test_support::{OutputFormat, Payload, PayloadKind};
    // Serialize env-var mutation across tests.
    let _lock = lock_env();
    let tmp = tempfile::Builder::new()
        .prefix("ktstr-sidecar-skip-payload-test-")
        .tempdir()
        .expect("create tempdir");
    let _env_sidecar = EnvVarGuard::set("KTSTR_SIDECAR_DIR", tmp.path());
    // `static` because KtstrTestEntry.payload holds a &'static Payload.
    static STRESS: Payload = Payload {
        name: "stress-ng",
        kind: PayloadKind::Binary("stress-ng"),
        output: OutputFormat::ExitCode,
        default_args: &[],
        default_checks: &[],
        metrics: &[],
        include_files: &[],
        uses_parent_pgrp: false,
        known_flags: None,
        metric_bounds: None,
    };
    fn dummy(_ctx: &Ctx) -> Result<AssertResult> {
        Ok(AssertResult::pass())
    }
    let entry = KtstrTestEntry {
        name: "__skip_payload_name_test__",
        func: dummy,
        auto_repro: false,
        payload: Some(&STRESS),
        ..KtstrTestEntry::DEFAULT
    };
    write_skip_sidecar(&entry, &[]).unwrap();
    let path = find_single_sidecar_by_prefix(tmp.path(), "__skip_payload_name_test__-");
    let data = std::fs::read_to_string(&path).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&data).unwrap();
    assert_eq!(loaded.payload.as_deref(), Some("stress-ng"));
    assert!(loaded.skipped);
    assert!(
        loaded.metrics.is_empty(),
        "skip path never accumulates metrics"
    );
}
// The variant hash identifies a semantic test variant; host-machine details
// must not feed into it, or runs from different machines would never group
// together. Proven by hashing two sidecars that differ only in `host`.
#[test]
fn sidecar_variant_hash_excludes_host_context() {
    use crate::host_context::HostContext;
    // Fully-populated host context so the exclusion is proven against real
    // data, not just the all-None default.
    let populated = HostContext {
        cpu_model: Some("Example CPU".to_string()),
        cpu_vendor: Some("GenuineExample".to_string()),
        total_memory_kb: Some(16_384_000),
        hugepages_total: Some(0),
        hugepages_free: Some(0),
        hugepages_size_kb: Some(2048),
        thp_enabled: Some("always [madvise] never".to_string()),
        thp_defrag: Some("[always] defer madvise never".to_string()),
        sched_tunables: None,
        online_cpus: Some(8),
        numa_nodes: Some(2),
        cpufreq_governor: std::collections::BTreeMap::new(),
        kernel_name: Some("Linux".to_string()),
        kernel_release: Some("6.11.0".to_string()),
        arch: Some("x86_64".to_string()),
        kernel_cmdline: Some("preempt=lazy".to_string()),
        heap_state: None,
    };
    let without_host = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        ..SidecarResult::test_fixture()
    };
    let with_host = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        host: Some(populated),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&without_host),
        sidecar_variant_hash(&with_host),
        "host context must not influence variant hash",
    );
}
// `scheduler_commit` must be excluded from the variant hash: the same
// semantic variant run against different scheduler-binary builds has to
// stay comparable by `stats compare`.
#[test]
fn sidecar_variant_hash_excludes_scheduler_commit() {
    // Builder closure: both fixtures are identical except for the commit.
    let fixture = |scheduler_commit: Option<String>| SidecarResult {
        topology: "1n1l2c1t".to_string(),
        scheduler_commit,
        ..SidecarResult::test_fixture()
    };
    let without_commit = fixture(None);
    let with_commit = fixture(Some(
        "0000000000000000000000000000000000000000".to_string(),
    ));
    assert_eq!(
        sidecar_variant_hash(&without_commit),
        sidecar_variant_hash(&with_commit),
        "scheduler_commit must not influence variant hash — \
        runs of the same semantic variant on different \
        scheduler-binary builds must remain comparable by \
        `stats compare`",
    );
}
// project_commit must be excluded from the variant hash. Three cases guard
// three distinct regression shapes: None-vs-Some, two different populated
// commits (catches XOR-style collisions), and clean-vs-dirty of the same hex.
#[test]
fn sidecar_variant_hash_excludes_project_commit() {
    // Case 1: None vs Some.
    let without_commit = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        project_commit: None,
        ..SidecarResult::test_fixture()
    };
    let with_commit = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        project_commit: Some("abcdef1-dirty".to_string()),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&without_commit),
        sidecar_variant_hash(&with_commit),
        "project_commit must not influence variant hash — \
        None vs Some(...) case",
    );
    // Case 2: two distinct populated commits.
    let with_commit_a = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        project_commit: Some("abc1234".to_string()),
        ..SidecarResult::test_fixture()
    };
    let with_commit_b = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        project_commit: Some("def5678".to_string()),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&with_commit_a),
        sidecar_variant_hash(&with_commit_b),
        "project_commit must not influence variant hash — \
        two distinct populated commits case (catches XOR-style \
        regressions where None and one specific Some happen to \
        collide)",
    );
    // Case 3: same hex, clean vs -dirty suffix.
    let with_commit_clean = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        project_commit: Some("abc1234".to_string()),
        ..SidecarResult::test_fixture()
    };
    let with_commit_dirty = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        project_commit: Some("abc1234-dirty".to_string()),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&with_commit_clean),
        sidecar_variant_hash(&with_commit_dirty),
        "project_commit must not influence variant hash — \
        clean vs `-dirty` of the same hex case (catches a \
        regression that distinguished only the dirty bit)",
    );
}
// kernel_commit must be excluded from the variant hash; same three-case
// structure as the project_commit test above: None-vs-Some, two distinct
// populated commits, clean-vs-dirty of the same hex.
#[test]
fn sidecar_variant_hash_excludes_kernel_commit() {
    // Case 1: None vs Some.
    let without_commit = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        kernel_commit: None,
        ..SidecarResult::test_fixture()
    };
    let with_commit = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        kernel_commit: Some("abcdef1-dirty".to_string()),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&without_commit),
        sidecar_variant_hash(&with_commit),
        "kernel_commit must not influence variant hash — \
        None vs Some(...) case",
    );
    // Case 2: two distinct populated commits.
    let with_commit_a = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        kernel_commit: Some("abc1234".to_string()),
        ..SidecarResult::test_fixture()
    };
    let with_commit_b = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        kernel_commit: Some("def5678".to_string()),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&with_commit_a),
        sidecar_variant_hash(&with_commit_b),
        "kernel_commit must not influence variant hash — \
        two distinct populated commits case (catches XOR-style \
        regressions where None and one specific Some happen to \
        collide)",
    );
    // Case 3: same hex, clean vs -dirty suffix.
    let with_commit_clean = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        kernel_commit: Some("abc1234".to_string()),
        ..SidecarResult::test_fixture()
    };
    let with_commit_dirty = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        kernel_commit: Some("abc1234-dirty".to_string()),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&with_commit_clean),
        sidecar_variant_hash(&with_commit_dirty),
        "kernel_commit must not influence variant hash — \
        clean vs `-dirty` of the same hex case (catches a \
        regression that distinguished only the dirty bit)",
    );
}
// run_source (local/ci/archive tag) must be excluded from the variant hash;
// pairwise checks cover None-vs-local, local-vs-ci, and ci-vs-archive so no
// single-pair collision can mask a regression.
#[test]
fn sidecar_variant_hash_excludes_run_source() {
    let none = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        run_source: None,
        ..SidecarResult::test_fixture()
    };
    let local = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        run_source: Some(SIDECAR_RUN_SOURCE_LOCAL.to_string()),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&none),
        sidecar_variant_hash(&local),
        "run_source must not influence variant hash — None vs \
        Some(\"local\") case",
    );
    let ci = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        run_source: Some(SIDECAR_RUN_SOURCE_CI.to_string()),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&local),
        sidecar_variant_hash(&ci),
        "run_source must not influence variant hash — \
        Some(\"local\") vs Some(\"ci\") case (catches XOR-style \
        regressions where two specific tags happen to collide)",
    );
    let archive = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        run_source: Some(SIDECAR_RUN_SOURCE_ARCHIVE.to_string()),
        ..SidecarResult::test_fixture()
    };
    assert_eq!(
        sidecar_variant_hash(&ci),
        sidecar_variant_hash(&archive),
        "run_source must not influence variant hash — \
        Some(\"ci\") vs Some(\"archive\") case",
    );
}
// KTSTR_CI routing contract: unset or empty-string → "local",
// any non-empty value → "ci".
#[test]
fn detect_run_source_routes_on_ktstr_ci_env() {
    // Serialize env-var mutation across tests.
    let _lock = lock_env();
    // Start from a known state: variable removed; guard restores on drop.
    let _restore = EnvVarGuard::remove(KTSTR_CI_ENV);
    assert_eq!(
        detect_run_source(),
        Some(SIDECAR_RUN_SOURCE_LOCAL.to_string()),
        "unset KTSTR_CI must classify as `local`",
    );
    let _set_empty = EnvVarGuard::set(KTSTR_CI_ENV, std::path::Path::new(""));
    assert_eq!(
        detect_run_source(),
        Some(SIDECAR_RUN_SOURCE_LOCAL.to_string()),
        "empty-string KTSTR_CI must classify as `local` so a \
        defensively-cleared variable does not accidentally \
        flip the tag",
    );
    // Drop the empty-string guard explicitly before installing the next one
    // so the guards' restore actions don't stack out of order.
    drop(_set_empty);
    let _set_one = EnvVarGuard::set(KTSTR_CI_ENV, std::path::Path::new("1"));
    assert_eq!(
        detect_run_source(),
        Some(SIDECAR_RUN_SOURCE_CI.to_string()),
        "non-empty KTSTR_CI must classify as `ci`",
    );
}
// After apply_archive_source_override, every sidecar loaded from a --dir
// pool must report run_source == archive, regardless of what tag (or None)
// it carried before.
#[test]
fn apply_archive_source_override_rewrites_every_entry() {
    // One fixture per pre-existing tag state: local, ci, and untagged.
    let prior_tags = [
        Some(SIDECAR_RUN_SOURCE_LOCAL.to_string()),
        Some(SIDECAR_RUN_SOURCE_CI.to_string()),
        None,
    ];
    let mut pool: Vec<SidecarResult> = prior_tags
        .into_iter()
        .map(|run_source| SidecarResult {
            run_source,
            ..SidecarResult::test_fixture()
        })
        .collect();
    apply_archive_source_override(&mut pool);
    for sc in &pool {
        assert_eq!(
            sc.run_source.as_deref(),
            Some(SIDECAR_RUN_SOURCE_ARCHIVE),
            "every sidecar in a --dir pool must surface as \
            archive after override",
        );
    }
}
// A fully-populated HostContext (every Option set, plus sched tunables and
// heap state) must survive a SidecarResult JSON round trip field-for-field.
#[test]
fn sidecar_result_roundtrip_with_populated_host_context() {
    use crate::host_context::HostContext;
    let mut tunables = std::collections::BTreeMap::new();
    tunables.insert("sched_migration_cost_ns".to_string(), "500000".to_string());
    let ctx = HostContext {
        cpu_model: Some("Example CPU".to_string()),
        cpu_vendor: Some("GenuineExample".to_string()),
        total_memory_kb: Some(16_384_000),
        hugepages_total: Some(4),
        hugepages_free: Some(2),
        hugepages_size_kb: Some(2048),
        thp_enabled: Some("always [madvise] never".to_string()),
        thp_defrag: Some("[always] defer madvise never".to_string()),
        sched_tunables: Some(tunables),
        online_cpus: Some(8),
        numa_nodes: Some(2),
        cpufreq_governor: std::collections::BTreeMap::new(),
        kernel_name: Some("Linux".to_string()),
        kernel_release: Some("6.11.0".to_string()),
        arch: Some("x86_64".to_string()),
        kernel_cmdline: Some("preempt=lazy isolcpus=1-3".to_string()),
        heap_state: Some(crate::host_heap::HostHeapState::test_fixture()),
    };
    let sc = SidecarResult {
        topology: "1n1l2c1t".to_string(),
        // Clone so the original `ctx` stays available for the equality check.
        host: Some(ctx.clone()),
        ..SidecarResult::test_fixture()
    };
    let json = serde_json::to_string(&sc).unwrap();
    let loaded: SidecarResult = serde_json::from_str(&json).unwrap();
    let host = loaded.host.expect("host must round-trip");
    // Whole-struct equality covers every field at once.
    assert_eq!(host, ctx);
}
// Samples collect_host_context N times and checks the stable fields never
// drift between samples — a drift would indicate the STATIC_HOST_INFO cache
// is broken. Linux-only: the collected fields are sourced from /proc-style
// interfaces on this platform.
#[cfg(target_os = "linux")]
#[test]
fn sidecars_in_a_run_carry_identical_host_context() {
    const N: usize = 8;
    let samples: Vec<crate::host_context::HostContext> = (0..N)
        .map(|_| crate::host_context::collect_host_context())
        .collect();
    let first = samples
        .first()
        .expect("N > 0 samples must produce at least one host context");
    // First pass: value-stable fields must be exactly equal across samples.
    for (i, s) in samples.iter().enumerate() {
        assert_eq!(
            s.kernel_name, first.kernel_name,
            "sidecar {i}: kernel_name drifted from first sample",
        );
        assert_eq!(
            s.kernel_release, first.kernel_release,
            "sidecar {i}: kernel_release drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.arch, first.arch,
            "sidecar {i}: arch drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.cpu_model, first.cpu_model,
            "sidecar {i}: cpu_model drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.cpu_vendor, first.cpu_vendor,
            "sidecar {i}: cpu_vendor drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.total_memory_kb, first.total_memory_kb,
            "sidecar {i}: total_memory_kb drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.hugepages_size_kb, first.hugepages_size_kb,
            "sidecar {i}: hugepages_size_kb drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.online_cpus, first.online_cpus,
            "sidecar {i}: online_cpus drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.numa_nodes, first.numa_nodes,
            "sidecar {i}: numa_nodes drifted — STATIC_HOST_INFO cache broken?",
        );
        assert_eq!(
            s.kernel_cmdline, first.kernel_cmdline,
            "sidecar {i}: kernel_cmdline drifted — only a reboot can change it",
        );
    }
    // Second pass: volatile counters (free hugepages etc.) may change value
    // between samples, so only their Some/None presence is pinned.
    for (i, s) in samples.iter().enumerate() {
        assert_eq!(
            s.hugepages_total.is_some(),
            first.hugepages_total.is_some(),
            "sidecar {i}: hugepages_total presence flipped across sidecars",
        );
        assert_eq!(
            s.hugepages_free.is_some(),
            first.hugepages_free.is_some(),
            "sidecar {i}: hugepages_free presence flipped across sidecars",
        );
        assert_eq!(
            s.thp_enabled.is_some(),
            first.thp_enabled.is_some(),
            "sidecar {i}: thp_enabled presence flipped across sidecars",
        );
        assert_eq!(
            s.thp_defrag.is_some(),
            first.thp_defrag.is_some(),
            "sidecar {i}: thp_defrag presence flipped across sidecars",
        );
        assert_eq!(
            s.sched_tunables.is_some(),
            first.sched_tunables.is_some(),
            "sidecar {i}: sched_tunables presence flipped across sidecars",
        );
    }
}
// Installs in-memory author name/email fallbacks on the repo's config so
// test commits succeed without touching the user's global git identity.
fn set_test_author_fallback(repo: &mut gix::Repository) {
    use gix::config::tree::gitoxide;
    let mut overrides = gix::config::File::new(gix::config::file::Metadata::api());
    overrides
        .set_raw_value(&gitoxide::Author::NAME_FALLBACK, "ktstr-test")
        .expect("set author name fallback");
    overrides
        .set_raw_value(
            &gitoxide::Author::EMAIL_FALLBACK,
            "ktstr-test@example.invalid",
        )
        .expect("set author email fallback");
    // The snapshot guard applies the appended values when it drops at the
    // end of this statement.
    repo.config_snapshot_mut().append(overrides);
}
// Fixture helper: creates a git repo at `dir` containing one committed file
// ("file.txt") with index and worktree in sync, i.e. a fully CLEAN repo.
// Returns the commit id so callers can assert against the HEAD short hash.
// The order matters: blob → tree → commit → index-from-tree → worktree file,
// so that status comparisons see no difference anywhere.
fn init_clean_repo_with_file(dir: &std::path::Path) -> gix::ObjectId {
    let mut repo = gix::init(dir).expect("gix::init");
    // Committer/author fallbacks so the commit works without global config.
    let _ = repo
        .committer_or_set_generic_fallback()
        .expect("committer fallback");
    set_test_author_fallback(&mut repo);
    let blob_id: gix::ObjectId = repo.write_blob(b"original\n").expect("write blob").detach();
    let tree = gix::objs::Tree {
        entries: vec![gix::objs::tree::Entry {
            mode: gix::objs::tree::EntryKind::Blob.into(),
            filename: "file.txt".into(),
            oid: blob_id,
        }],
    };
    let tree_id: gix::ObjectId = repo.write_object(&tree).expect("write tree").detach();
    let commit_id: gix::ObjectId = repo
        .commit("HEAD", "init", tree_id, std::iter::empty::<gix::ObjectId>())
        .expect("commit")
        .detach();
    // Write an index matching the committed tree so the repo reads as clean.
    let mut idx = repo.index_from_tree(&tree_id).expect("index_from_tree");
    idx.write(gix::index::write::Options::default())
        .expect("write index");
    // Worktree content must match the committed blob byte-for-byte.
    std::fs::write(dir.join("file.txt"), b"original\n").expect("write worktree file");
    commit_id
}
// On a clean repo, detect_commit_at must return exactly the 7-char short
// HEAD hash: right length, pure hex, no -dirty suffix, matching HEAD.
#[test]
fn detect_project_commit_clean_repo_returns_short_hash() {
    let tmp = tempfile::TempDir::new().unwrap();
    let head = init_clean_repo_with_file(tmp.path());
    let result = super::super::detect_commit_at(tmp.path()).expect("clean repo must yield Some");
    assert_eq!(
        result.len(),
        7,
        "clean result must be a 7-char hex hash, got {result:?}"
    );
    assert!(
        !result.contains('-'),
        "clean result must not carry a -dirty suffix, got {result:?}"
    );
    assert!(
        result.chars().all(|c| c.is_ascii_hexdigit()),
        "clean result must be pure hex, got {result:?}"
    );
    assert_eq!(
        result,
        head.to_hex_with_len(7).to_string(),
        "clean result must match the HEAD short hash exactly"
    );
}
// A modified tracked file must make detect_commit_at report "<short>-dirty".
#[test]
fn detect_project_commit_dirty_repo_appends_dirty_suffix() {
    let tmp = tempfile::TempDir::new().unwrap();
    let head = init_clean_repo_with_file(tmp.path());
    let expected_prefix = head.to_hex_with_len(7).to_string();
    // Dirty the worktree by rewriting the committed file's content.
    std::fs::write(tmp.path().join("file.txt"), b"modified\n").unwrap();
    let result = super::super::detect_commit_at(tmp.path()).expect("dirty repo must yield Some");
    assert_eq!(
        result,
        format!("{expected_prefix}-dirty"),
        "dirty result must be {expected_prefix:?} + -dirty suffix"
    );
}
// repo_is_dirty on a freshly-committed, unmodified worktree: Some(false).
#[test]
fn repo_is_dirty_clean_repo_returns_some_false() {
    let tmp = tempfile::TempDir::new().unwrap();
    init_clean_repo_with_file(tmp.path());
    let repo = gix::open(tmp.path()).expect("gix::open clean repo");
    let dirty = super::super::repo_is_dirty(&repo);
    assert_eq!(dirty, Some(false), "clean repo must yield Some(false)");
}
// repo_is_dirty with a modified tracked file: Some(true).
#[test]
fn repo_is_dirty_dirty_worktree_returns_some_true() {
    let tmp = tempfile::TempDir::new().unwrap();
    init_clean_repo_with_file(tmp.path());
    // Rewrite the committed file so the worktree diverges from HEAD.
    std::fs::write(tmp.path().join("file.txt"), b"modified\n").unwrap();
    let repo = gix::open(tmp.path()).expect("gix::open dirty repo");
    let dirty = super::super::repo_is_dirty(&repo);
    assert_eq!(dirty, Some(true), "dirty worktree must yield Some(true)");
}
// A plain (non-git) directory must yield None. If the temp directory itself
// happens to live under an ancestor git repo, the test self-skips because
// the "non-git path" premise cannot hold in that environment.
#[test]
fn detect_project_commit_non_git_returns_none() {
    let tmp = tempfile::TempDir::new_in(std::env::temp_dir()).unwrap();
    if super::super::super::test_helpers::tempdir_resolves_to_ancestor_git(tmp.path()) {
        skip!(
            "tempdir {} resolves to an ancestor git repo; cannot pin \
            non-git path semantics in this environment",
            tmp.path().display()
        );
    }
    let result = super::super::detect_commit_at(tmp.path());
    assert!(
        result.is_none(),
        "non-git directory must yield None, got {result:?}"
    );
}
// A repo with no commits yet (unborn HEAD) has no hash to report: None.
#[test]
fn detect_project_commit_unborn_head_returns_none() {
    let tmp = tempfile::TempDir::new().unwrap();
    // Init only — no commit is made, so HEAD stays unborn.
    gix::init(tmp.path()).expect("gix::init");
    let result = super::super::detect_commit_at(tmp.path());
    assert!(
        result.is_none(),
        "unborn HEAD must yield None, got {result:?}"
    );
}
// Concurrency check: eight threads calling detect_commit_at on the same
// clean repo must all agree with a single-threaded baseline result.
#[test]
fn detect_project_commit_concurrent_calls_agree() {
    let tmp = tempfile::TempDir::new().unwrap();
    init_clean_repo_with_file(tmp.path());
    let path = tmp.path();
    let baseline =
        super::super::detect_commit_at(path).expect("baseline single-thread call must yield Some");
    const THREADS: usize = 8;
    // thread::scope lets the spawned closures borrow `path` without 'static.
    let results = std::thread::scope(|scope| {
        let mut handles = Vec::with_capacity(THREADS);
        for _ in 0..THREADS {
            handles.push(scope.spawn(|| super::super::detect_commit_at(path)));
        }
        handles
            .into_iter()
            .map(|h| h.join().expect("thread join"))
            .collect::<Vec<_>>()
    });
    for (i, r) in results.iter().enumerate() {
        assert_eq!(
            r.as_deref(),
            Some(baseline.as_str()),
            "thread {i} disagreed with baseline {baseline:?}: got {r:?}"
        );
    }
}
// An uninitialized submodule (gitlink entry in the tree, empty directory in
// the worktree) must NOT be reported as dirty: the result is the plain short
// hash without a -dirty suffix.
#[test]
fn detect_project_commit_submodule_uninit_is_clean() {
    let tmp = tempfile::TempDir::new().unwrap();
    let mut repo = gix::init(tmp.path()).expect("gix::init");
    let _ = repo
        .committer_or_set_generic_fallback()
        .expect("committer fallback");
    set_test_author_fallback(&mut repo);
    let gitmodules_content = b"\
[submodule \"submod\"]\n\
\tpath = submod\n\
\turl = https://example.invalid/submod.git\n";
    let gitmodules_blob: gix::ObjectId = repo
        .write_blob(gitmodules_content)
        .expect("write .gitmodules blob")
        .detach();
    // A gitlink (EntryKind::Commit) with the null OID models a submodule
    // that has never been initialized/cloned.
    let null_commit_id = gix::ObjectId::null(gix::hash::Kind::Sha1);
    let tree = gix::objs::Tree {
        entries: vec![
            gix::objs::tree::Entry {
                mode: gix::objs::tree::EntryKind::Blob.into(),
                filename: ".gitmodules".into(),
                oid: gitmodules_blob,
            },
            gix::objs::tree::Entry {
                mode: gix::objs::tree::EntryKind::Commit.into(),
                filename: "submod".into(),
                oid: null_commit_id,
            },
        ],
    };
    let tree_id: gix::ObjectId = repo.write_object(&tree).expect("write tree").detach();
    let head: gix::ObjectId = repo
        .commit("HEAD", "init", tree_id, std::iter::empty::<gix::ObjectId>())
        .expect("commit")
        .detach();
    // Keep index and worktree consistent with the committed tree.
    let mut idx = repo.index_from_tree(&tree_id).expect("index_from_tree");
    idx.write(gix::index::write::Options::default())
        .expect("write index");
    std::fs::write(tmp.path().join(".gitmodules"), gitmodules_content)
        .expect("write .gitmodules worktree");
    // Empty directory is what an uninitialized submodule looks like on disk.
    std::fs::create_dir(tmp.path().join("submod")).expect("create submod dir");
    let result =
        super::super::detect_commit_at(tmp.path()).expect("submodule repo must still yield Some");
    assert_eq!(
        result,
        head.to_hex_with_len(7).to_string(),
        "uninitialized submodule must not trigger -dirty suffix"
    );
}
// Kernel-repo analogue of the clean-hash test: detect_kernel_commit on a
// clean repo must return the 7-char, pure-hex, suffix-free HEAD short hash.
#[test]
fn detect_kernel_commit_clean_repo_returns_short_hash() {
    let tmp = tempfile::TempDir::new().unwrap();
    let head = init_clean_repo_with_file(tmp.path());
    let result =
        super::super::detect_kernel_commit(tmp.path()).expect("clean repo must yield Some");
    assert_eq!(
        result.len(),
        7,
        "clean result must be a 7-char hex hash, got {result:?}"
    );
    assert!(
        !result.contains('-'),
        "clean result must not carry a -dirty suffix, got {result:?}"
    );
    assert!(
        result.chars().all(|c| c.is_ascii_hexdigit()),
        "clean result must be pure hex, got {result:?}"
    );
    assert_eq!(
        result,
        head.to_hex_with_len(7).to_string(),
        "clean result must match the HEAD short hash exactly"
    );
}
// Kernel-repo analogue of the dirty-suffix test: a modified worktree file
// must make detect_kernel_commit report "<short>-dirty".
#[test]
fn detect_kernel_commit_dirty_repo_appends_dirty_suffix() {
    let tmp = tempfile::TempDir::new().unwrap();
    let head = init_clean_repo_with_file(tmp.path());
    let expected_prefix = head.to_hex_with_len(7).to_string();
    // Dirty the worktree by rewriting the committed file's content.
    std::fs::write(tmp.path().join("file.txt"), b"modified\n").unwrap();
    let result =
        super::super::detect_kernel_commit(tmp.path()).expect("dirty repo must yield Some");
    assert_eq!(
        result,
        format!("{expected_prefix}-dirty"),
        "dirty result must be {expected_prefix:?} + -dirty suffix"
    );
}
// Pins detect_kernel_commit's open-vs-discover behavior: a non-git subdir
// of a repo would be FOUND by gix::discover (parent lookup), so the
// precondition assert proves the fixture, and the final assert proves the
// function uses gix::open and does not inherit the parent's HEAD.
#[test]
fn detect_kernel_commit_non_git_directory_returns_none() {
    let parent = tempfile::TempDir::new().unwrap();
    init_clean_repo_with_file(parent.path());
    let nested = parent.path().join("not_a_repo");
    std::fs::create_dir(&nested).expect("create nested non-git subdir");
    assert!(
        gix::discover(&nested).is_ok(),
        "gix::discover must succeed from the nested path (parent IS a repo) — \
        this precondition validates that detect_kernel_commit's open-vs-discover \
        choice is the correct one for the test scenario",
    );
    let result = super::super::detect_kernel_commit(&nested);
    assert!(
        result.is_none(),
        "non-git directory must yield None — `detect_kernel_commit` uses \
        `gix::open` (NOT `gix::discover`), so the parent's HEAD must \
        NOT leak through. Got {result:?}",
    );
}
// detect_kernel_commit on a repo with no commits (unborn HEAD) yields None.
#[test]
fn detect_kernel_commit_unborn_head_returns_none() {
    let tmp = tempfile::TempDir::new().unwrap();
    // Init only — no commit is made, so HEAD stays unborn.
    gix::init(tmp.path()).expect("gix::init");
    let result = super::super::detect_kernel_commit(tmp.path());
    assert!(
        result.is_none(),
        "unborn HEAD must yield None, got {result:?}"
    );
}
// Kernel-repo analogue of the submodule test: an uninitialized submodule
// (gitlink in the tree, empty directory on disk) must not make
// detect_kernel_commit append the -dirty suffix.
#[test]
fn detect_kernel_commit_submodule_uninit_is_clean() {
    let tmp = tempfile::TempDir::new().unwrap();
    let mut repo = gix::init(tmp.path()).expect("gix::init");
    let _ = repo
        .committer_or_set_generic_fallback()
        .expect("committer fallback");
    set_test_author_fallback(&mut repo);
    let gitmodules_content = b"\
[submodule \"submod\"]\n\
\tpath = submod\n\
\turl = https://example.invalid/submod.git\n";
    let gitmodules_blob: gix::ObjectId = repo
        .write_blob(gitmodules_content)
        .expect("write .gitmodules blob")
        .detach();
    // A gitlink (EntryKind::Commit) with the null OID models a submodule
    // that has never been initialized/cloned.
    let null_commit_id = gix::ObjectId::null(gix::hash::Kind::Sha1);
    let tree = gix::objs::Tree {
        entries: vec![
            gix::objs::tree::Entry {
                mode: gix::objs::tree::EntryKind::Blob.into(),
                filename: ".gitmodules".into(),
                oid: gitmodules_blob,
            },
            gix::objs::tree::Entry {
                mode: gix::objs::tree::EntryKind::Commit.into(),
                filename: "submod".into(),
                oid: null_commit_id,
            },
        ],
    };
    let tree_id: gix::ObjectId = repo.write_object(&tree).expect("write tree").detach();
    let head: gix::ObjectId = repo
        .commit("HEAD", "init", tree_id, std::iter::empty::<gix::ObjectId>())
        .expect("commit")
        .detach();
    // Keep index and worktree consistent with the committed tree.
    let mut idx = repo.index_from_tree(&tree_id).expect("index_from_tree");
    idx.write(gix::index::write::Options::default())
        .expect("write index");
    std::fs::write(tmp.path().join(".gitmodules"), gitmodules_content)
        .expect("write .gitmodules worktree");
    // Empty directory is what an uninitialized submodule looks like on disk.
    std::fs::create_dir(tmp.path().join("submod")).expect("create submod dir");
    let result = super::super::detect_kernel_commit(tmp.path())
        .expect("submodule repo must still yield Some");
    assert_eq!(
        result,
        head.to_hex_with_len(7).to_string(),
        "uninitialized submodule must not trigger -dirty suffix"
    );
}
// Three back-to-back detect_project_commit calls must return the same
// Option<String>: the value is memoized behind a OnceLock.
#[test]
fn detect_project_commit_memoizes_across_consecutive_calls() {
    let first = super::super::detect_project_commit();
    let second = super::super::detect_project_commit();
    let third = super::super::detect_project_commit();
    assert_eq!(
        first, second,
        "consecutive detect_project_commit calls must return \
        identical Option<String> via the OnceLock cache; \
        got first={first:?}, second={second:?}",
    );
    assert_eq!(
        first, third,
        "third detect_project_commit call must still match the \
        first; got first={first:?}, third={third:?}",
    );
}
// detect_kernel_commit is memoized per path (Mutex<HashMap> cache): three
// calls with the same path must agree, and the first must be the clean
// short hash of the fixture repo's HEAD.
#[test]
fn detect_kernel_commit_memoizes_across_consecutive_calls_same_path() {
    let tmp = tempfile::TempDir::new().expect("tempdir");
    let head = init_clean_repo_with_file(tmp.path());
    let expected = head.to_hex_with_len(7).to_string();
    let first = super::super::detect_kernel_commit(tmp.path());
    let second = super::super::detect_kernel_commit(tmp.path());
    let third = super::super::detect_kernel_commit(tmp.path());
    assert_eq!(
        first.as_deref(),
        Some(expected.as_str()),
        "first call must return the clean short hash {expected:?}; \
        got {first:?}",
    );
    assert_eq!(
        first, second,
        "consecutive detect_kernel_commit calls with the same \
        path must agree via the Mutex<HashMap> cache; got \
        first={first:?}, second={second:?}",
    );
    assert_eq!(
        first, third,
        "third detect_kernel_commit call with the same path must \
        still match; got first={first:?}, third={third:?}",
    );
}
#[test]
fn detect_kernel_commit_distinct_paths_do_not_cross_contaminate() {
    // Build two independent clean repos — A via the shared helper, B by
    // hand with different file content so the HEAD hashes differ — then
    // interleave detect_kernel_commit calls against both paths and
    // assert the per-path cache entries never bleed into each other.
    let tmp_a = tempfile::TempDir::new().expect("tempdir A");
    let tmp_b = tempfile::TempDir::new().expect("tempdir B");
    let head_a = init_clean_repo_with_file(tmp_a.path());
    // Repo B is constructed manually: init, committer fallback, blob,
    // tree, commit, index, and a matching worktree file. The order
    // matters — each object must exist before the next references it.
    let mut repo_b = gix::init(tmp_b.path()).expect("gix::init B");
    let _ = repo_b
        .committer_or_set_generic_fallback()
        .expect("committer fallback B");
    set_test_author_fallback(&mut repo_b);
    let blob_id_b: gix::ObjectId = repo_b
        .write_blob(b"different\n")
        .expect("write blob B")
        .detach();
    let tree_b = gix::objs::Tree {
        entries: vec![gix::objs::tree::Entry {
            mode: gix::objs::tree::EntryKind::Blob.into(),
            filename: "file.txt".into(),
            oid: blob_id_b,
        }],
    };
    let tree_id_b: gix::ObjectId = repo_b.write_object(&tree_b).expect("write tree B").detach();
    let head_b: gix::ObjectId = repo_b
        .commit(
            "HEAD",
            "init B",
            tree_id_b,
            std::iter::empty::<gix::ObjectId>(),
        )
        .expect("commit B")
        .detach();
    // Write the index and the worktree file so B reads as a CLEAN
    // checkout — otherwise its hash would carry a -dirty suffix.
    let mut idx_b = repo_b
        .index_from_tree(&tree_id_b)
        .expect("index_from_tree B");
    idx_b
        .write(gix::index::write::Options::default())
        .expect("write index B");
    std::fs::write(tmp_b.path().join("file.txt"), b"different\n").expect("write worktree file B");
    let expected_a = head_a.to_hex_with_len(7).to_string();
    let expected_b = head_b.to_hex_with_len(7).to_string();
    assert_ne!(
        expected_a, expected_b,
        "fixture precondition: the two repos must have distinct \
         HEADs for this test to mean anything; got a={expected_a} \
         b={expected_b}",
    );
    // Interleave A/B probes: first calls populate the cache, second
    // calls must hit the right per-key entry.
    let a1 = super::super::detect_kernel_commit(tmp_a.path());
    let b1 = super::super::detect_kernel_commit(tmp_b.path());
    let a2 = super::super::detect_kernel_commit(tmp_a.path());
    let b2 = super::super::detect_kernel_commit(tmp_b.path());
    assert_eq!(
        a1.as_deref(),
        Some(expected_a.as_str()),
        "first call against path A must return A's HEAD short \
         hash {expected_a:?}; got {a1:?}",
    );
    assert_eq!(
        b1.as_deref(),
        Some(expected_b.as_str()),
        "first call against path B must return B's HEAD short \
         hash {expected_b:?}; got {b1:?}",
    );
    assert_eq!(
        a1, a2,
        "second call against path A must match the first \
         (cache hit on the A entry); got a1={a1:?}, a2={a2:?}",
    );
    assert_eq!(
        b1, b2,
        "second call against path B must match the first \
         (cache hit on the B entry, NOT contaminated by A); \
         got b1={b1:?}, b2={b2:?}",
    );
    assert_ne!(
        a2, b2,
        "after interleaved calls, A and B must STILL hold \
         distinct values — a regression that lost per-key \
         distinction would equate them; got a2={a2:?}, b2={b2:?}",
    );
}
#[test]
fn detect_kernel_commit_failure_does_not_poison_cache() {
    // First probe an empty repo (unborn HEAD) so detection fails, then
    // turn the SAME path into a valid clean checkout and probe again:
    // the earlier None must not have been cached for the path.
    let tmp = tempfile::TempDir::new().expect("tempdir");
    let mut repo = gix::init(tmp.path()).expect("gix::init");
    let _ = repo
        .committer_or_set_generic_fallback()
        .expect("committer fallback");
    set_test_author_fallback(&mut repo);
    // Probe while HEAD is still unborn — this is the failure we must
    // NOT memoize.
    let first = super::super::detect_kernel_commit(tmp.path());
    assert!(
        first.is_none(),
        "fixture precondition: unborn HEAD probe must return \
         None; got {first:?}",
    );
    // Now build a real commit: blob -> tree -> commit, then index and
    // worktree file so the checkout reads as clean (no -dirty suffix).
    let blob_id: gix::ObjectId = repo.write_blob(b"original\n").expect("write blob").detach();
    let tree = gix::objs::Tree {
        entries: vec![gix::objs::tree::Entry {
            mode: gix::objs::tree::EntryKind::Blob.into(),
            filename: "file.txt".into(),
            oid: blob_id,
        }],
    };
    let tree_id: gix::ObjectId = repo.write_object(&tree).expect("write tree").detach();
    let head: gix::ObjectId = repo
        .commit("HEAD", "init", tree_id, std::iter::empty::<gix::ObjectId>())
        .expect("commit")
        .detach();
    let mut idx = repo.index_from_tree(&tree_id).expect("index_from_tree");
    idx.write(gix::index::write::Options::default())
        .expect("write index");
    std::fs::write(tmp.path().join("file.txt"), b"original\n").expect("write worktree file");
    // Re-probe: a fresh resolution must succeed, proving the None was
    // not stuck in the cache for this path.
    let second = super::super::detect_kernel_commit(tmp.path());
    let expected = head.to_hex_with_len(7).to_string();
    assert_eq!(
        second.as_deref(),
        Some(expected.as_str()),
        "second call after the path becomes a valid checkout \
         must return the resolved short hash {expected:?} — a \
         regression that cached the first None would surface as \
         None here, locking the path at `unknown` for the rest \
         of the process. got {second:?}",
    );
}
#[cfg(unix)]
#[test]
fn detect_kernel_commit_canonicalizes_symlink_aliases() {
    // A symlink alias of an already-probed repo must resolve to the
    // SAME cache entry as the canonical path. We prime the cache via
    // the real path, dirty the worktree afterwards, then probe through
    // the alias: only a cache hit can still report the clean hash.
    let tmp = tempfile::TempDir::new().expect("tempdir");
    let real = tmp.path().join("real");
    std::fs::create_dir(&real).expect("mkdir real");
    let expected_head = init_clean_repo_with_file(&real)
        .to_hex_with_len(7)
        .to_string();
    let alias = tmp.path().join("alias");
    std::os::unix::fs::symlink(&real, &alias).expect("symlink alias -> real");
    // Prime: canonical-path probe while the checkout is still clean.
    let real_clean = super::super::detect_kernel_commit(&real)
        .expect("clean canonical-path probe must yield Some");
    assert_eq!(
        real_clean, expected_head,
        "fixture precondition: canonical-path probe must return \
         the clean 7-char head hash; got {real_clean:?}",
    );
    // Dirty the worktree AFTER priming, so any fresh re-probe would
    // now observe a -dirty state.
    std::fs::write(real.join("file.txt"), b"modified-after-prime\n").expect("dirty the worktree");
    let alias_result =
        super::super::detect_kernel_commit(&alias).expect("alias-path probe must yield Some");
    assert!(
        !alias_result.ends_with("-dirty"),
        "alias call must hit the cached pre-dirt entry — a \
         `-dirty` suffix proves the alias bypassed the cache \
         and re-probed the now-dirty repo, which is the \
         regression this test guards against. got {alias_result:?}",
    );
    assert_eq!(
        alias_result, real_clean,
        "alias call must return the EXACT cached clean value \
         from the canonical-path probe; got alias={alias_result:?}, \
         cached={real_clean:?}",
    );
}
/// Builds a `KernelMetadata` fixture whose source is `Local`, pointing
/// at `source_tree_path`, tagged with `version` and fixed config hashes.
fn local_metadata_with_source_tree(
    version: &str,
    source_tree_path: std::path::PathBuf,
) -> crate::cache::KernelMetadata {
    let source = crate::cache::KernelSource::Local {
        source_tree_path: Some(source_tree_path),
        git_hash: None,
    };
    crate::cache::KernelMetadata::new(
        source,
        std::env::consts::ARCH.to_string(),
        "bzImage".to_string(),
        "2026-04-26T00:00:00Z".to_string(),
    )
    .with_version(Some(version.to_string()))
    .with_config_hash(Some("abc123".to_string()))
    .with_ktstr_kconfig_hash(Some("def456".to_string()))
}
/// Writes a placeholder `bzImage` file into `dir` and returns its path;
/// the cache tests only need an artifact on disk, not a real kernel.
fn create_fake_image_in(dir: &std::path::Path) -> std::path::PathBuf {
    let image_path = dir.join("bzImage");
    std::fs::write(&image_path, b"fake kernel image").expect("write fake image");
    image_path
}
#[test]
fn resolve_kernel_source_dir_with_cache_version_tarball_key_local_source() {
    // A Local entry stored under the tarball-shaped key for the exact
    // requested version must be found by the direct cache lookup.
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let src = tempfile::TempDir::new().expect("src tempdir");
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let image = create_fake_image_in(image_dir.path());
    let key = format!(
        "6.14.2-tarball-{arch}-kc{suffix}",
        arch = std::env::consts::ARCH,
        suffix = crate::cache_key_suffix(),
    );
    let meta = local_metadata_with_source_tree("6.14.2", src.path().to_path_buf());
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    let resolved = super::super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert_eq!(
        resolved.as_deref(),
        Some(src.path()),
        "tarball-shaped Local entry must resolve via direct lookup",
    );
}
#[test]
fn resolve_kernel_source_dir_with_cache_version_falls_back_to_scan_for_local() {
    // The key is local-shaped rather than tarball-shaped, so the direct
    // lookup for a Version id misses; the fallback scan must then match
    // the entry on the version recorded in its metadata.
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let src = tempfile::TempDir::new().expect("src tempdir");
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let image = create_fake_image_in(image_dir.path());
    let arch = std::env::consts::ARCH;
    let suffix = crate::cache_key_suffix();
    let key = format!("local-deadbee-{arch}-kc{suffix}");
    let meta = local_metadata_with_source_tree("6.14.2", src.path().to_path_buf());
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    let resolved = super::super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert_eq!(
        resolved.as_deref(),
        Some(src.path()),
        "fallback scan must find a Local entry by version when \
         the tarball-shaped lookup misses",
    );
}
#[test]
fn resolve_kernel_source_dir_with_cache_version_skips_non_local_in_fallback() {
    // The only stored entry is a Tarball source: the fallback scan must
    // ignore it even though its recorded version matches the request.
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let image = create_fake_image_in(image_dir.path());
    let arch = std::env::consts::ARCH;
    let suffix = crate::cache_key_suffix();
    let key = format!("weird-key-{arch}-kc{suffix}");
    let base = crate::cache::KernelMetadata::new(
        crate::cache::KernelSource::Tarball,
        arch.to_string(),
        "bzImage".to_string(),
        "2026-04-26T00:00:00Z".to_string(),
    );
    let meta = base
        .with_version(Some("6.14.2".to_string()))
        .with_config_hash(Some("abc123".to_string()))
        .with_ktstr_kconfig_hash(Some("def456".to_string()));
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    let resolved = super::super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert!(
        resolved.is_none(),
        "non-Local entries are transient and must not be returned by the fallback scan; got {resolved:?}",
    );
}
#[test]
fn resolve_kernel_source_dir_with_cache_version_skips_mismatched_version_in_fallback() {
    // A local-shaped entry exists, but it records version 6.13.0 —
    // a request for 6.14.2 must come back empty rather than match the
    // wrong source tree.
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let src = tempfile::TempDir::new().expect("src tempdir");
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let image = create_fake_image_in(image_dir.path());
    let arch = std::env::consts::ARCH;
    let suffix = crate::cache_key_suffix();
    let key = format!("local-deadbee-{arch}-kc{suffix}");
    let meta = local_metadata_with_source_tree("6.13.0", src.path().to_path_buf());
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    let resolved = super::super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert!(
        resolved.is_none(),
        "Local entry with mismatched version must not be returned; got {resolved:?}",
    );
}
#[test]
fn resolve_kernel_source_dir_with_cache_cache_key_direct_lookup_local() {
    // A CacheKey id bypasses the version heuristics entirely: the Local
    // entry stored under that exact key must resolve straight to its
    // persisted source tree.
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let src = tempfile::TempDir::new().expect("src tempdir");
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let image = create_fake_image_in(image_dir.path());
    let key = format!(
        "local-deadbee-{arch}-kc{suffix}",
        arch = std::env::consts::ARCH,
        suffix = crate::cache_key_suffix(),
    );
    let meta = local_metadata_with_source_tree("6.14.2", src.path().to_path_buf());
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::CacheKey(key);
    let resolved = super::super::resolve_kernel_source_dir_with_cache(&id, &cache);
    // Fix: this assert previously had no diagnostic message, unlike
    // every sibling test in this module.
    assert_eq!(
        resolved.as_deref(),
        Some(src.path()),
        "CacheKey id must resolve the Local entry's persisted \
         source tree via direct lookup; got {resolved:?}",
    );
}
#[test]
fn resolve_kernel_source_dir_with_cache_cache_key_non_local_yields_none() {
    // Git-sourced entries never persist a source tree, so a direct
    // CacheKey lookup that lands on one must produce None.
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let image_dir = tempfile::TempDir::new().expect("image tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let image = create_fake_image_in(image_dir.path());
    let arch = std::env::consts::ARCH;
    let suffix = crate::cache_key_suffix();
    let key = format!("main-git-deadbee-{arch}-kc{suffix}");
    let source = crate::cache::KernelSource::Git {
        git_hash: Some("deadbee".to_string()),
        git_ref: Some("main".to_string()),
    };
    let meta = crate::cache::KernelMetadata::new(
        source,
        arch.to_string(),
        "bzImage".to_string(),
        "2026-04-26T00:00:00Z".to_string(),
    )
    .with_version(Some("6.14.2".to_string()))
    .with_config_hash(Some("abc123".to_string()))
    .with_ktstr_kconfig_hash(Some("def456".to_string()));
    cache
        .store(&key, &crate::cache::CacheArtifacts::new(&image), &meta)
        .expect("store cache entry");
    let id = crate::kernel_path::KernelId::CacheKey(key);
    let resolved = super::super::resolve_kernel_source_dir_with_cache(&id, &cache);
    assert!(
        resolved.is_none(),
        "Git source has no persisted source tree; got {resolved:?}",
    );
}
#[test]
fn resolve_kernel_source_dir_with_cache_version_empty_cache_yields_none() {
    // With no stored entries at all, both the direct lookup and the
    // fallback scan must come up empty for a Version id.
    let cache_root = tempfile::TempDir::new().expect("cache tempdir");
    let cache = crate::cache::CacheDir::with_root(cache_root.path().to_path_buf());
    let id = crate::kernel_path::KernelId::Version("6.14.2".to_string());
    let resolved = super::super::resolve_kernel_source_dir_with_cache(&id, &cache);
    // Fix: the bare assert had no diagnostic message; every sibling
    // test in this module reports the unexpected value on failure.
    assert!(
        resolved.is_none(),
        "empty cache must yield no source dir for a Version id; got {resolved:?}",
    );
}
#[test]
fn resolve_kernel_source_dir_path_metadata_local_returns_source_tree() {
    use super::super::super::test_helpers::{EnvVarGuard, lock_env};
    // Point KTSTR_KERNEL at a cache entry whose metadata.json records a
    // Local source; the resolver must follow it to the source tree.
    let _lock = lock_env();
    let cache_entry = tempfile::TempDir::new().expect("cache entry tempdir");
    let src_tree = tempfile::TempDir::new().expect("src tree tempdir");
    let meta = local_metadata_with_source_tree("6.14.2", src_tree.path().to_path_buf());
    let json = serde_json::to_string(&meta).expect("serialize metadata");
    std::fs::write(cache_entry.path().join("metadata.json"), json).expect("write metadata.json");
    let _guard = EnvVarGuard::set("KTSTR_KERNEL", cache_entry.path());
    assert_eq!(
        super::super::resolve_kernel_source_dir().as_deref(),
        Some(src_tree.path()),
        "Path arm must recover source_tree_path from metadata.json \
         when the env value points at a cache entry with a Local source",
    );
}
#[test]
fn resolve_kernel_source_dir_path_no_metadata_returns_env_value() {
    use super::super::super::test_helpers::{EnvVarGuard, lock_env};
    // No metadata.json in the directory: the resolver has nothing to
    // follow and must hand back the env-provided path unchanged.
    let _lock = lock_env();
    let dir = tempfile::TempDir::new().expect("dir tempdir");
    let _guard = EnvVarGuard::set("KTSTR_KERNEL", dir.path());
    let resolved = super::super::resolve_kernel_source_dir();
    assert_eq!(
        resolved.as_deref(),
        Some(dir.path()),
        "Path arm with no metadata.json must return the env value verbatim",
    );
}
#[test]
fn resolve_kernel_source_dir_path_metadata_non_local_falls_through() {
    use super::super::super::test_helpers::{EnvVarGuard, lock_env};
    // metadata.json exists but records a Tarball source, which carries
    // no source_tree_path — the resolver must keep the env value.
    let _lock = lock_env();
    let cache_entry = tempfile::TempDir::new().expect("cache entry tempdir");
    let meta = crate::cache::KernelMetadata::new(
        crate::cache::KernelSource::Tarball,
        std::env::consts::ARCH.to_string(),
        "bzImage".to_string(),
        "2026-04-26T00:00:00Z".to_string(),
    )
    .with_version(Some("6.14.2".to_string()));
    let json = serde_json::to_string(&meta).expect("serialize metadata");
    std::fs::write(cache_entry.path().join("metadata.json"), json).expect("write metadata.json");
    let _guard = EnvVarGuard::set("KTSTR_KERNEL", cache_entry.path());
    assert_eq!(
        super::super::resolve_kernel_source_dir().as_deref(),
        Some(cache_entry.path()),
        "Path arm with non-Local source metadata must fall back \
         to the env value verbatim — Tarball entries have no \
         persisted source_tree_path to recover",
    );
}