use super::*;
#[test]
fn parse_rq_offsets_from_vmlinux() {
    // Resolve struct-rq offsets from a test vmlinux; skip silently when
    // no test vmlinux is available on this machine.
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    let offsets = crate::test_support::require_kernel_offsets(&path);
    // nr_running and clock are separate fields of struct rq, so their
    // resolved offsets must never coincide.
    assert_ne!(
        offsets.rq_nr_running, offsets.rq_clock,
        "rq_nr_running and rq_clock offsets must be distinct"
    );
    // None of these fields sits at the very start of struct rq.
    assert!(offsets.rq_clock > 0);
    assert!(offsets.rq_scx > 0);
    assert!(offsets.dsq_nr > 0);
}
#[test]
fn parse_event_offsets_from_vmlinux() {
    // Event-counter offsets are optional; when they resolved, every
    // counter (mandatory and optional alike) must map to a distinct offset.
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    let offsets = crate::test_support::require_kernel_offsets(&path);
    let Some(ev) = &offsets.event_offsets else {
        return;
    };
    // Mandatory counters first ...
    let mut all = vec![
        ev.ev_select_cpu_fallback,
        ev.ev_dispatch_local_dsq_offline,
        ev.ev_dispatch_keep_last,
        ev.ev_enq_skip_exiting,
        ev.ev_enq_skip_migration_disabled,
    ];
    // ... then whichever optional counters resolved on this kernel.
    let optional = [
        ev.ev_reenq_immed,
        ev.ev_reenq_local_repeat,
        ev.ev_refill_slice_dfl,
        ev.ev_bypass_duration,
        ev.ev_bypass_dispatch,
        ev.ev_bypass_activate,
        ev.ev_insert_not_owned,
        ev.ev_sub_bypass_dispatch,
    ];
    all.extend(optional.into_iter().flatten());
    // Pairwise distinctness over the combined set.
    for i in 0..all.len() {
        for j in (i + 1)..all.len() {
            assert_ne!(all[i], all[j], "event counter offsets must be distinct");
        }
    }
}
#[test]
fn parse_schedstat_offsets_from_vmlinux() {
    // Validate optional schedstat offsets: sched_info invariants plus the
    // per-rq counter fields.
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    let offsets = crate::test_support::require_kernel_offsets(&path);
    let Some(ss) = &offsets.schedstat_offsets else {
        return;
    };
    assert!(ss.rq_sched_info > 0);
    assert!(
        ss.sched_info_run_delay > 0,
        "run_delay must follow pcount in struct sched_info"
    );
    assert_ne!(
        ss.sched_info_pcount, ss.sched_info_run_delay,
        "pcount and run_delay offsets must be distinct"
    );
    // Per-rq schedstat counters: every offset nonzero and pairwise distinct.
    let rq_fields = [
        ss.rq_yld_count,
        ss.rq_sched_count,
        ss.rq_sched_goidle,
        ss.rq_ttwu_count,
        ss.rq_ttwu_local,
    ];
    for &off in &rq_fields {
        assert!(off > 0, "schedstat rq field offset must be nonzero");
    }
    for (i, a) in rq_fields.iter().enumerate() {
        for b in &rq_fields[i + 1..] {
            assert_ne!(
                a, b,
                "schedstat rq field offsets must be distinct"
            );
        }
    }
}
#[test]
// Validates struct sched_domain offsets resolved from a test vmlinux:
// topology pointers, always-present runtime fields, optional newidle
// fields, and (when present) the per-domain stats offsets. Skips silently
// when no test vmlinux is available or the optional group did not resolve.
fn parse_sched_domain_offsets_from_vmlinux() {
let path = match crate::monitor::find_test_vmlinux() {
Some(p) => p,
None => return,
};
let offsets = crate::test_support::require_kernel_offsets(&path);
if let Some(sd) = &offsets.sched_domain_offsets {
assert!(sd.rq_sd > 0, "rq.sd must be at nonzero offset");
// level/parent/name are distinct fields of struct sched_domain.
assert_ne!(
sd.sd_level, sd.sd_parent,
"level and parent offsets must be distinct"
);
assert_ne!(
sd.sd_name, sd.sd_parent,
"name and parent offsets must be distinct"
);
// These runtime fields exist on every supported kernel config.
let always_present = [
sd.sd_balance_interval,
sd.sd_nr_balance_failed,
sd.sd_max_newidle_lb_cost,
];
for &off in &always_present {
assert!(off > 0, "sched_domain runtime field offset must be nonzero");
}
// Optional newidle fields: only validated when they resolved.
for off in [
sd.sd_newidle_call,
sd.sd_newidle_success,
sd.sd_newidle_ratio,
]
.into_iter()
.flatten()
{
assert!(
off > 0,
"optional newidle field offset must be nonzero when present"
);
}
// Stats offsets are a second-level optional group.
if let Some(so) = &sd.stats_offsets {
// Load-balance counters indexed per idle type: pairwise distinct.
let array_fields = [
so.sd_lb_count,
so.sd_lb_failed,
so.sd_lb_balanced,
so.sd_lb_imbalance_load,
so.sd_lb_imbalance_util,
so.sd_lb_imbalance_task,
so.sd_lb_imbalance_misfit,
so.sd_lb_gained,
so.sd_lb_hot_gained,
so.sd_lb_nobusyg,
so.sd_lb_nobusyq,
];
for i in 0..array_fields.len() {
for j in (i + 1)..array_fields.len() {
assert_ne!(
array_fields[i], array_fields[j],
"sched_domain array field offsets must be distinct"
);
}
}
// Scalar counters: nonzero and pairwise distinct.
let scalar_fields = [
so.sd_alb_count,
so.sd_alb_failed,
so.sd_alb_pushed,
so.sd_ttwu_wake_remote,
so.sd_ttwu_move_affine,
so.sd_ttwu_move_balance,
];
for &off in &scalar_fields {
assert!(off > 0, "sched_domain scalar field offset must be nonzero");
}
for i in 0..scalar_fields.len() {
for j in (i + 1)..scalar_fields.len() {
assert_ne!(
scalar_fields[i], scalar_fields[j],
"sched_domain scalar field offsets must be distinct"
);
}
}
}
}
}
#[test]
fn parse_bpf_map_offsets_from_vmlinux() {
    // All resolved bpf_map-related fields must live past offset zero, and
    // the BTF data size field must sit after the data pointer field.
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    let offsets = crate::test_support::require_bpf_map_offsets(&path);
    for off in [
        offsets.map_name,
        offsets.map_type,
        offsets.value_size,
        offsets.array_value,
        offsets.map_btf,
        offsets.map_btf_value_type_id,
    ] {
        assert!(off > 0);
    }
    assert!(offsets.btf_data_size > offsets.btf_data);
}
#[test]
fn parse_bpf_prog_offsets_from_vmlinux() {
    // bpf_prog / bpf_prog_aux offsets must all resolve past offset zero.
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    let offsets = crate::test_support::require_bpf_prog_offsets(&path);
    for off in [offsets.prog_aux, offsets.aux_verified_insns, offsets.aux_name] {
        assert!(off > 0);
    }
}
#[test]
// Cross-checks invariants between the optional offset groups: event and
// watchdog offsets. Uses `skip!` (rather than a panic) when BTF resolution
// itself fails, so an unusable vmlinux is reported as a skip.
fn btf_optional_offsets_consistent() {
let path = match crate::monitor::find_test_vmlinux() {
Some(p) => p,
None => return,
};
let offsets = match KernelOffsets::from_vmlinux(&path) {
Ok(o) => o,
Err(e) => skip!("vmlinux BTF resolution failed: {e}"),
};
assert_ne!(
offsets.rq_nr_running, offsets.rq_scx,
"rq_nr_running and rq_scx offsets must be distinct"
);
if let Some(ref ev) = offsets.event_offsets {
assert!(ev.percpu_ptr_off > 0);
}
if let Some(ref wd) = offsets.watchdog_offsets {
assert!(
wd.scx_sched_watchdog_timeout_off > 0,
"watchdog_timeout offset must be nonzero within scx_sched"
);
// Resolver invariant: watchdog offsets can only resolve when event
// offsets did too.
assert!(
offsets.event_offsets.is_some(),
"watchdog_offsets present implies event_offsets must also resolve"
);
}
}
#[test]
fn from_vmlinux_nonexistent() {
    // A path that cannot exist must surface as an Err, never a panic.
    let missing = std::path::Path::new("/nonexistent/vmlinux");
    let result = KernelOffsets::from_vmlinux(missing);
    assert!(result.is_err());
}
#[test]
fn from_vmlinux_empty_file() {
    // A zero-byte vmlinux carries no ELF header and no BTF, so resolution
    // must fail with an error rather than panic.
    //
    // Use tempfile::TempDir (already used throughout this module) so the
    // directory is removed even when the assertion below panics; the
    // previous manual `remove_dir_all` leaked the directory on failure and
    // could collide across runs that reuse a PID.
    let dir = tempfile::TempDir::new().expect("create tempdir for empty vmlinux");
    let f = dir.path().join("vmlinux");
    std::fs::write(&f, b"").unwrap();
    assert!(KernelOffsets::from_vmlinux(&f).is_err());
}
#[test]
fn btf_sidecar_path_appends_dot_btf() {
    // An extension-less vmlinux path gains a plain `.btf` suffix.
    let input = std::path::Path::new("/cache/vmlinux");
    let expected = std::path::PathBuf::from("/cache/vmlinux.btf");
    assert_eq!(btf_sidecar_path(input), expected);
}
#[test]
fn btf_sidecar_path_preserves_existing_extension() {
    // `.btf` is appended after any existing extension, not substituted.
    let input = std::path::Path::new("/cache/vmlinux.elf");
    let expected = std::path::PathBuf::from("/cache/vmlinux.elf.btf");
    assert_eq!(btf_sidecar_path(input), expected);
}
#[test]
fn is_raw_btf_accepts_little_endian_magic() {
    // 0xEB9F stored little-endian is the raw BTF magic.
    let magic = [0x9F, 0xEB, 0x01, 0x00];
    assert!(is_raw_btf(&magic));
}
#[test]
fn is_raw_btf_rejects_wrong_magic_and_short_input() {
    // ELF magic, byte-swapped BTF magic, a truncated prefix, and empty
    // input must all be rejected.
    let rejected: [&[u8]; 4] = [
        &[0x7F, b'E', b'L', b'F'],
        &[0xEB, 0x9F, 0x01, 0x00],
        &[0x9F],
        &[],
    ];
    for bytes in rejected {
        assert!(!is_raw_btf(bytes));
    }
}
#[test]
// `sidecar_fresh` must report false when either file is absent, and true
// only when both exist (the sidecar here is written after the vmlinux, so
// its mtime is at least as new).
fn sidecar_fresh_false_when_either_file_missing() {
let dir =
std::env::temp_dir().join(format!("ktstr-btf-sidecar-missing-{}", std::process::id()));
std::fs::create_dir_all(&dir).unwrap();
let vmlinux = dir.join("vmlinux");
let sidecar = dir.join("vmlinux.btf");
// Only the vmlinux exists: not fresh.
std::fs::write(&vmlinux, b"vmlinux-bytes").unwrap();
assert!(!sidecar_fresh(&sidecar, &vmlinux));
// Both exist, sidecar written second: fresh.
std::fs::write(&sidecar, b"cached-btf").unwrap();
assert!(sidecar_fresh(&sidecar, &vmlinux));
// vmlinux removed while sidecar remains: not fresh.
std::fs::remove_file(&vmlinux).unwrap();
assert!(!sidecar_fresh(&sidecar, &vmlinux));
let _ = std::fs::remove_dir_all(&dir);
}
// Test fixture: a vmlinux copy staged inside a temporary cache root, with
// KTSTR_CACHE_DIR pointed at that root for the fixture's lifetime.
struct CacheStagedVmlinux {
// Keeps KTSTR_CACHE_DIR set to `_root` until the fixture is dropped.
_cache_env: crate::test_support::test_helpers::EnvVarGuard,
// Directory inside the cache root that holds the staged vmlinux.
entry_dir: std::path::PathBuf,
// Path of the staged vmlinux copy itself.
vmlinux: std::path::PathBuf,
// Owns the temporary cache root; deleted when the fixture drops.
_root: tempfile::TempDir,
}
/// Copies `src` into a fresh temporary cache root (under an entry dir named
/// `kentry`) and points `KTSTR_CACHE_DIR` at that root via an env guard.
///
/// Callers should hold the env lock (`lock_env`) before calling, since this
/// mutates process-wide environment state.
fn stage_in_cache(src: &std::path::Path) -> CacheStagedVmlinux {
let root = tempfile::TempDir::new().expect("cache-root tempdir");
let entry_dir = root.path().join("kentry");
std::fs::create_dir_all(&entry_dir).expect("create cache entry dir");
let vmlinux = entry_dir.join("vmlinux");
std::fs::copy(src, &vmlinux).expect("copy vmlinux into cache-staged dir");
// Guard is stored in the returned fixture so the env var stays set for
// the fixture's lifetime.
let _cache_env =
crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_CACHE_DIR", root.path());
CacheStagedVmlinux {
_cache_env,
entry_dir,
vmlinux,
_root: root,
}
}
#[test]
// First load of an ELF vmlinux inside the cache root must extract BTF and
// write a `.btf` sidecar; a second load must be served from that sidecar
// (proven by the sidecar's mtime not changing between loads).
fn load_btf_writes_sidecar_then_hits_cache_on_second_load() {
use std::time::Duration;
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
// /sys/kernel/btf/vmlinux is a raw BTF blob, not an ELF — skip.
if path.starts_with("/sys/") {
return;
}
// Serialize env access: stage_in_cache sets KTSTR_CACHE_DIR.
let _env = crate::test_support::test_helpers::lock_env();
let staged = stage_in_cache(&path);
let vmlinux = staged.vmlinux.as_path();
let sidecar = btf_sidecar_path(vmlinux);
// Small delay so a sidecar written below gets an mtime ≥ the staged
// vmlinux even on coarse filesystem timestamps — TODO confirm intent.
std::thread::sleep(Duration::from_millis(10));
assert!(
!sidecar.exists(),
"precondition: sidecar should not exist before first load",
);
let btf1 = load_btf_from_path(vmlinux).expect("first load must succeed");
// Touch the BTF to prove it is usable, without asserting on contents.
let _ = format!("{:?}", btf1.resolve_types_by_name("task_struct").is_ok());
assert!(
sidecar.exists(),
"first load must write sidecar at {}",
sidecar.display(),
);
let sidecar_bytes = std::fs::read(&sidecar).unwrap();
assert!(
is_raw_btf(&sidecar_bytes),
"sidecar contents must carry the raw BTF 0x9FEB magic",
);
assert!(
sidecar_fresh(&sidecar, vmlinux),
"sidecar mtime must be ≥ vmlinux mtime after first load",
);
// Capture mtime, wait long enough for a rewrite to be observable, then
// load again: an unchanged mtime proves the cache hit (no rewrite).
let sidecar_mtime_before = std::fs::metadata(&sidecar).unwrap().modified().unwrap();
std::thread::sleep(Duration::from_millis(50));
let btf2 = load_btf_from_path(vmlinux).expect("second load must succeed");
let _ = format!("{:?}", btf2.resolve_types_by_name("task_struct").is_ok());
let sidecar_mtime_after = std::fs::metadata(&sidecar).unwrap().modified().unwrap();
assert_eq!(
sidecar_mtime_before, sidecar_mtime_after,
"second load must hit sidecar cache — mtime bump proves a \
redundant rewrite",
);
}
#[test]
// A sidecar older than its vmlinux must be ignored: the load falls back to
// ELF extraction and rewrites the sidecar with fresh BTF bytes.
fn load_btf_rejects_stale_sidecar() {
use std::time::{Duration, SystemTime};
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
// Raw-BTF source (no ELF) cannot exercise the fallback path — skip.
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
let staged = stage_in_cache(&path);
let vmlinux = staged.vmlinux.as_path();
let sidecar = btf_sidecar_path(vmlinux);
// Plant garbage sidecar content, then backdate its mtime by an hour so
// the freshness check sees it as stale.
std::fs::write(&sidecar, b"stale-sidecar-bytes").unwrap();
let past = SystemTime::now() - Duration::from_secs(3600);
let f = std::fs::File::options().write(true).open(&sidecar).unwrap();
f.set_modified(past).unwrap();
drop(f);
assert!(
!sidecar_fresh(&sidecar, vmlinux),
"precondition: planted sidecar must be stale",
);
let btf = load_btf_from_path(vmlinux)
.expect("load must succeed via ELF fallback despite stale sidecar");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
// The stale sidecar must have been replaced with real BTF ...
let sidecar_bytes = std::fs::read(&sidecar).unwrap();
assert!(
is_raw_btf(&sidecar_bytes),
"load must overwrite stale sidecar with fresh BTF bytes",
);
// ... and must now pass the freshness check again.
assert!(
sidecar_fresh(&sidecar, vmlinux),
"sidecar must be fresh again after re-extraction",
);
}
#[test]
// A sidecar that is mtime-fresh but contains invalid BTF must not poison
// the load: the loader must detect the corruption, fall back to ELF
// extraction, and overwrite the sidecar.
fn load_btf_recovers_from_corrupt_sidecar() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
// Raw-BTF source (no ELF) cannot exercise the fallback path — skip.
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
let staged = stage_in_cache(&path);
let vmlinux = staged.vmlinux.as_path();
let sidecar = btf_sidecar_path(vmlinux);
// Written after the vmlinux copy, so it passes the mtime check while
// carrying bytes that are not BTF.
std::fs::write(&sidecar, b"not-btf-bytes").unwrap();
assert!(
sidecar_fresh(&sidecar, vmlinux),
"precondition: planted sidecar must be mtime-fresh",
);
let btf =
load_btf_from_path(vmlinux).expect("load must recover when sidecar is fresh-but-corrupt");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
let sidecar_bytes = std::fs::read(&sidecar).unwrap();
assert!(
is_raw_btf(&sidecar_bytes),
"corrupt sidecar must be overwritten on next load",
);
}
#[test]
#[cfg(unix)]
// A sidecar-write failure (read-only directory) must be non-fatal: the
// load itself still succeeds and no sidecar appears.
fn load_btf_survives_readonly_sidecar_dir() {
use std::os::unix::fs::PermissionsExt;
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
if path.starts_with("/sys/") {
return;
}
// root ignores directory write permissions, so the read-only setup
// below would not actually block the write — skip under root.
if unsafe { libc::geteuid() } == 0 {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
let staged = stage_in_cache(&path);
let vmlinux = staged.vmlinux.as_path();
let entry_dir = staged.entry_dir.as_path();
// r-x only: creating the sidecar file inside must fail.
std::fs::set_permissions(entry_dir, std::fs::Permissions::from_mode(0o555)).unwrap();
let btf =
load_btf_from_path(vmlinux).expect("load must succeed even when sidecar dir is read-only");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
let sidecar = btf_sidecar_path(vmlinux);
assert!(
!sidecar.exists(),
"sidecar must not exist after write to read-only dir",
);
// Restore permissions so TempDir cleanup can delete the tree.
let _ = std::fs::set_permissions(entry_dir, std::fs::Permissions::from_mode(0o755));
}
#[test]
fn load_btf_skips_sidecar_for_raw_btf_input() {
    // A raw BTF blob (as opposed to an ELF vmlinux) must load directly and
    // must never spawn a `.btf` sidecar next to itself. When the test
    // vmlinux is an ELF, its `.BTF` section is extracted and staged as a
    // stand-alone raw blob; when it already is raw BTF, it is copied as-is.
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if path.starts_with("/sys/") {
        return;
    }
    // `load_btf_from_path` consults cache-related environment variables;
    // hold the env lock like every sibling test that calls it, so a
    // parallel test's EnvVarGuard cannot race with these loads.
    let _env = crate::test_support::test_helpers::lock_env();
    let dir = std::env::temp_dir().join(format!("ktstr-btf-sidecar-raw-{}", std::process::id()));
    std::fs::create_dir_all(&dir).unwrap();
    let src_data = std::fs::read(&path).unwrap();
    let raw = dir.join("vmlinux.btf-raw");
    match goblin::elf::Elf::parse(&src_data) {
        Ok(elf) => {
            // ELF input: find the .BTF section and stage its bytes.
            let btf_shdr = elf
                .section_headers
                .iter()
                .find(|sh| elf.shdr_strtab.get_at(sh.sh_name) == Some(".BTF"));
            let Some(shdr) = btf_shdr else {
                // No .BTF section in this vmlinux — nothing to stage.
                let _ = std::fs::remove_dir_all(&dir);
                return;
            };
            let offset = shdr.sh_offset as usize;
            let size = shdr.sh_size as usize;
            // Checked slicing: a corrupt section header should fail with a
            // clear message, not an opaque slice-index panic.
            let raw_bytes = src_data
                .get(offset..offset + size)
                .expect(".BTF section header points past end of file");
            std::fs::write(&raw, raw_bytes).unwrap();
        }
        Err(_) => {
            // Not an ELF: the test vmlinux is already a raw BTF blob.
            std::fs::copy(&path, &raw).unwrap();
        }
    }
    let _ = load_btf_from_path(&raw).expect("raw-BTF load must succeed");
    let sidecar = btf_sidecar_path(&raw);
    assert!(
        !sidecar.exists(),
        "raw-BTF input must not produce a sidecar at {}",
        sidecar.display(),
    );
    let _ = std::fs::remove_dir_all(&dir);
}
#[test]
// A vmlinux that lives outside the configured cache root must load fine,
// but no sidecar may be written next to it (no source-tree pollution).
fn sidecar_skipped_when_path_outside_cache_root() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
// Cache root and vmlinux location are deliberately disjoint tempdirs.
let cache_root = tempfile::TempDir::new().expect("cache root tempdir");
let _cache_env =
crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_CACHE_DIR", cache_root.path());
let source_tree = tempfile::TempDir::new().expect("source-tree tempdir");
let vmlinux = source_tree.path().join("vmlinux");
std::fs::copy(&path, &vmlinux).expect("copy vmlinux into source-tree dir");
let btf =
load_btf_from_path(&vmlinux).expect("load must succeed even when sidecar is suppressed");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
let sidecar = btf_sidecar_path(&vmlinux);
assert!(
!sidecar.exists(),
"sidecar must not be written when vmlinux path is outside cache root, got {}",
sidecar.display(),
);
}
#[test]
// Positive counterpart of the outside-cache-root test: a vmlinux staged
// inside the cache root must get a sidecar containing raw BTF.
fn sidecar_written_when_path_inside_cache_root() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
let staged = stage_in_cache(&path);
let vmlinux = staged.vmlinux.as_path();
let sidecar = btf_sidecar_path(vmlinux);
assert!(
!sidecar.exists(),
"precondition: sidecar must not exist before the load — \
a leftover from a prior test would falsely pass the post-load \
existence check",
);
let btf = load_btf_from_path(vmlinux).expect("load must succeed inside cache root");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
assert!(
sidecar.exists(),
"sidecar must be written when vmlinux path is inside cache root, expected at {}",
sidecar.display(),
);
// The written sidecar must be raw BTF, not some other serialization.
let bytes = std::fs::read(&sidecar).unwrap();
assert!(
is_raw_btf(&bytes),
"sidecar must contain raw BTF (0x9FEB magic) when written inside cache root",
);
}
#[test]
// With every cache-root source removed (KTSTR_CACHE_DIR, XDG_CACHE_HOME,
// HOME), the loader cannot resolve a cache root: loads still succeed but
// no sidecar may be written.
fn sidecar_skipped_when_cache_root_unresolvable() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
// Remove the whole resolution cascade, most-specific first.
let _no_ktstr = crate::test_support::test_helpers::EnvVarGuard::remove("KTSTR_CACHE_DIR");
let _no_xdg = crate::test_support::test_helpers::EnvVarGuard::remove("XDG_CACHE_HOME");
let _no_home = crate::test_support::test_helpers::EnvVarGuard::remove("HOME");
let source_tree = tempfile::TempDir::new().expect("source-tree tempdir");
let vmlinux = source_tree.path().join("vmlinux");
std::fs::copy(&path, &vmlinux).expect("copy vmlinux");
let btf =
load_btf_from_path(&vmlinux).expect("load must succeed when cache root is unresolvable");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
let sidecar = btf_sidecar_path(&vmlinux);
assert!(
!sidecar.exists(),
"sidecar must not be written when cache root is unresolvable, got {}",
sidecar.display(),
);
}
#[test]
#[cfg(unix)]
// Loading via a symlink that points INTO the cache must write the sidecar
// next to the canonical target (inside the cache), never next to the
// symlink in the source tree.
fn load_btf_symlink_into_cache_writes_sidecar_in_cache_only() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
let staged = stage_in_cache(&path);
let real_vmlinux = staged.vmlinux.as_path();
let real_sidecar = btf_sidecar_path(real_vmlinux);
assert!(
!real_sidecar.exists(),
"precondition: real sidecar must not exist before the load",
);
// Symlink lives outside the cache, target lives inside it.
let source_tree = tempfile::TempDir::new().expect("source-tree tempdir");
let symlink_path = source_tree.path().join("vmlinux");
std::os::unix::fs::symlink(real_vmlinux, &symlink_path)
.expect("create symlink to real vmlinux");
// "Lexical" sidecar: where a naive, non-canonicalizing implementation
// would have written it.
let lexical_sidecar = btf_sidecar_path(&symlink_path);
let btf = load_btf_from_path(&symlink_path)
.expect("load via symlink must succeed and resolve the target");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
assert!(
real_sidecar.exists(),
"sidecar must land at the canonical path inside cache, expected {}",
real_sidecar.display(),
);
assert!(
!lexical_sidecar.exists(),
"sidecar must NOT land next to the symlink in the source tree, \
got pollution at {}",
lexical_sidecar.display(),
);
}
#[test]
#[cfg(unix)]
// Inverse symlink case: a symlink INSIDE the cache pointing at a vmlinux
// OUTSIDE it. Canonicalization resolves to the out-of-cache target, so no
// sidecar may appear at either location.
fn load_btf_symlink_out_of_cache_writes_no_sidecar() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
let cache_root = tempfile::TempDir::new().expect("cache-root tempdir");
let _cache_env =
crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_CACHE_DIR", cache_root.path());
let source_tree = tempfile::TempDir::new().expect("source-tree tempdir");
let real_vmlinux = source_tree.path().join("vmlinux");
std::fs::copy(&path, &real_vmlinux).expect("copy vmlinux into source tree");
// Symlink sits lexically inside the cache root.
let symlink_in_cache = cache_root.path().join("vmlinux");
std::os::unix::fs::symlink(&real_vmlinux, &symlink_in_cache)
.expect("create symlink to source-tree vmlinux");
let btf = load_btf_from_path(&symlink_in_cache).expect("load via symlink must succeed");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
let real_sidecar = btf_sidecar_path(&real_vmlinux);
let lexical_sidecar = btf_sidecar_path(&symlink_in_cache);
assert!(
!real_sidecar.exists(),
"sidecar must not land in source tree (outside cache), got {}",
real_sidecar.display(),
);
assert!(
!lexical_sidecar.exists(),
"sidecar must not land at the symlink path in cache either — \
canonicalize-at-top resolves to the source-tree real file, \
which is outside the cache",
);
}
#[test]
// A relative vmlinux path must never produce a sidecar — neither at the
// absolute target nor at the relative path's lexical location.
fn load_btf_relative_path_suppresses_sidecar() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
let cache_root = tempfile::TempDir::new().expect("cache-root tempdir");
let _cache_env =
crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_CACHE_DIR", cache_root.path());
let outside = tempfile::TempDir::new().expect("outside tempdir");
let abs_vmlinux = outside.path().join("vmlinux");
std::fs::copy(&path, &abs_vmlinux).expect("copy vmlinux into outside dir");
// Strip the leading '/' to fabricate a relative path. NOTE(review):
// how this resolves depends on the process CWD, so the load's result
// is deliberately ignored below — only sidecar absence is asserted.
let rel_str = abs_vmlinux
.to_str()
.expect("test vmlinux path must be UTF-8")
.strip_prefix('/')
.expect("absolute path expected to start with /");
let rel = std::path::Path::new(rel_str);
assert!(
!rel.is_absolute(),
"precondition: constructed path must be relative, got {}",
rel.display(),
);
let _ = load_btf_from_path(rel);
let abs_sidecar = btf_sidecar_path(&abs_vmlinux);
let rel_sidecar = btf_sidecar_path(rel);
assert!(
!abs_sidecar.exists(),
"sidecar must not appear at the absolute target, got {}",
abs_sidecar.display(),
);
assert!(
!rel_sidecar.exists(),
"sidecar must not appear at the relative path's lexical \
location, got {}",
rel_sidecar.display(),
);
}
#[test]
// KTSTR_CACHE_DIR set to the empty string must not be treated as a valid
// root: resolution falls through to XDG_CACHE_HOME/ktstr/kernels, and a
// vmlinux staged there still gets its sidecar.
fn load_btf_empty_ktstr_cache_dir_falls_through() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
let xdg = tempfile::TempDir::new().expect("xdg tempdir");
// Empty string, not unset — exercises the "set but empty" branch.
let _g_ktstr = crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_CACHE_DIR", "");
let _g_xdg = crate::test_support::test_helpers::EnvVarGuard::set("XDG_CACHE_HOME", xdg.path());
// Mirrors the layout the XDG fallback is expected to resolve to.
let resolved_root = xdg.path().join("ktstr").join("kernels");
let entry = resolved_root.join("kentry");
std::fs::create_dir_all(&entry).expect("create cache entry under XDG fallback");
let vmlinux = entry.join("vmlinux");
std::fs::copy(&path, &vmlinux).expect("copy vmlinux into XDG-derived cache");
let sidecar = btf_sidecar_path(&vmlinux);
assert!(
!sidecar.exists(),
"precondition: sidecar must not pre-exist",
);
let btf =
load_btf_from_path(&vmlinux).expect("load must succeed inside XDG-derived cache root");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
assert!(
sidecar.exists(),
"sidecar must be written even when cascade resolves via XDG_CACHE_HOME \
(KTSTR_CACHE_DIR=\"\")",
);
}
#[test]
// The cache root must be re-resolved from the environment on EVERY call:
// flipping KTSTR_CACHE_DIR between two loads of the same vmlinux must flip
// whether the sidecar is written.
fn load_btf_fresh_resolution_per_call() {
let Some(path) = crate::monitor::find_test_vmlinux() else {
return;
};
if path.starts_with("/sys/") {
return;
}
let _env = crate::test_support::test_helpers::lock_env();
let cache_a = tempfile::TempDir::new().expect("cache_a tempdir");
let cache_b = tempfile::TempDir::new().expect("cache_b tempdir");
// The vmlinux physically lives inside cache_a only.
let entry_a = cache_a.path().join("kentry");
std::fs::create_dir_all(&entry_a).expect("create cache_a entry");
let vmlinux = entry_a.join("vmlinux");
std::fs::copy(&path, &vmlinux).expect("copy vmlinux into cache_a");
let sidecar = btf_sidecar_path(&vmlinux);
// Scope 1: root = cache_a → vmlinux is inside the root → sidecar written.
{
let _g =
crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_CACHE_DIR", cache_a.path());
assert!(
!sidecar.exists(),
"precondition: sidecar must not pre-exist"
);
let btf = load_btf_from_path(&vmlinux).expect("first load must succeed");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
assert!(
sidecar.exists(),
"first load (KTSTR_CACHE_DIR=cache_a) must write sidecar",
);
std::fs::remove_file(&sidecar).expect("remove sidecar between calls");
}
// Scope 2: root = cache_b → same vmlinux is now outside → no sidecar.
{
let _g =
crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_CACHE_DIR", cache_b.path());
let btf = load_btf_from_path(&vmlinux).expect("second load must succeed");
let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
assert!(
!sidecar.exists(),
"second load (KTSTR_CACHE_DIR=cache_b) must NOT write sidecar — \
the vmlinux is now outside the active cache root",
);
}
}
#[test]
// Regression guard on the compiled BPF object: the `tp_btf/sched_ext_exit`
// program must contain at least one atomic cmpxchg instruction, proving
// `__sync_val_compare_and_swap` was not lowered to a plain store.
fn probe_bpf_object_emits_atomic_for_err_exit_latch() {
// probe.o is produced by build.rs into OUT_DIR.
let probe_obj_path = std::path::PathBuf::from(env!("OUT_DIR")).join("probe.o");
let bytes = std::fs::read(&probe_obj_path).unwrap_or_else(|e| {
panic!(
"probe.o missing or unreadable at {}: {e}. \
build.rs failed to produce the BPF skeleton — fix the \
build pipeline before re-running this test.",
probe_obj_path.display()
)
});
let elf = goblin::elf::Elf::parse(&bytes).unwrap_or_else(|e| {
panic!(
"probe.o at {} is not a valid ELF: {e}. \
The BPF skeleton emitter changed format or the file is \
corrupted — re-run the build to regenerate.",
probe_obj_path.display()
)
});
const TARGET_SECTION: &str = "tp_btf/sched_ext_exit";
let mut found_section = false;
let mut atomic_count: usize = 0;
for sh in &elf.section_headers {
let Some(name) = elf.shdr_strtab.get_at(sh.sh_name) else {
continue;
};
if name != TARGET_SECTION {
continue;
}
found_section = true;
let off = sh.sh_offset as usize;
let sz = sh.sh_size as usize;
// Classic BPF instructions are fixed 8-byte words.
assert!(
sz.is_multiple_of(8),
"BPF section size {sz} must be a multiple of 8 (instruction width)"
);
let prog = &bytes[off..off + sz];
// Opcodes for BPF_STX|BPF_ATOMIC with 32-bit (0xc3) and 64-bit
// (0xdb) operand size; the atomic op kind lives in the imm field.
const STX_ATOMIC_W: u8 = 0xc3;
const STX_ATOMIC_DW: u8 = 0xdb;
const BPF_CMPXCHG_IMM: i32 = 0xf1;
for chunk in prog.chunks_exact(8) {
// Byte 0 is the opcode; bytes 4..8 hold the little-endian imm.
let opcode = chunk[0];
if opcode == STX_ATOMIC_W || opcode == STX_ATOMIC_DW {
let imm = i32::from_le_bytes([chunk[4], chunk[5], chunk[6], chunk[7]]);
if imm == BPF_CMPXCHG_IMM {
atomic_count += 1;
}
}
}
}
assert!(
found_section,
"probe.o is missing the expected `{TARGET_SECTION}` section — \
SEC() macro changed?"
);
assert!(
atomic_count >= 1,
"probe.o `{TARGET_SECTION}` section has no BPF_STX|BPF_ATOMIC|cmpxchg \
instruction — `__sync_val_compare_and_swap` was silently \
lowered to a non-atomic store. Cross-core ordering on aarch64 \
would be broken by this regression."
);
}
#[test]
// Pins the `missing_groups()` reporting contract of ScxWalkerOffsets:
// a fully-populated value reports nothing, dropping any single group
// reports exactly that group's name string, and an all-None value reports
// every name in the resolver's if-chain order.
fn scx_walker_missing_groups_pins_every_group_name() {
// Builds an ScxWalkerOffsets with every group present (all-zero offsets
// are fine here: only Some/None matters to missing_groups()).
fn full() -> ScxWalkerOffsets {
ScxWalkerOffsets {
rq: Some(RqStructOffsets { scx: 0, curr: 0 }),
scx_rq: Some(ScxRqOffsets {
local_dsq: 0,
runnable_list: 0,
nr_running: 0,
flags: 0,
cpu_released: 0,
ops_qseq: 0,
kick_sync: Some(0),
nr_immed: Some(0),
clock: Some(0),
}),
task: Some(TaskStructCoreOffsets {
comm: 0,
pid: 0,
scx: 0,
}),
see: Some(SchedExtEntityOffsets {
runnable_node: 0,
runnable_at: 0,
weight: 0,
slice: 0,
dsq_vtime: 0,
dsq: 0,
dsq_list: 0,
flags: 0,
dsq_flags: 0,
sticky_cpu: 0,
holding_cpu: 0,
tasks_node: 0,
}),
dsq_lnode: Some(ScxDsqListNodeOffsets { node: 0, flags: 0 }),
dsq: Some(ScxDispatchQOffsets {
list: 0,
nr: 0,
seq: 0,
id: 0,
hash_node: 0,
}),
sched: Some(ScxSchedOffsets {
dsq_hash: 0,
pnode: Some(0),
pcpu: Some(0),
aborting: Some(0),
bypass_depth: Some(0),
exit_kind: 0,
}),
sched_pnode: Some(ScxSchedPnodeOffsets {
global_dsq: Some(0),
}),
sched_pcpu: Some(ScxSchedPcpuOffsets {
bypass_dsq: Some(0),
}),
rht: Some(RhashtableOffsets {
tbl: 0,
nelems: 0,
bucket_table_size: 0,
bucket_table_buckets: 0,
rhash_head_next: 0,
}),
}
}
// Fully populated → nothing missing.
let all = full();
assert!(
all.missing_groups().is_empty(),
"fully-populated offsets must report no missing groups; got {:?}",
all.missing_groups(),
);
// One case per group: a closure that drops that group, paired with the
// exact name string missing_groups() must report for it.
#[allow(clippy::type_complexity)]
let cases: &[(fn(&mut ScxWalkerOffsets), &'static str)] = &[
(
(|o: &mut ScxWalkerOffsets| o.rq = None) as fn(&mut ScxWalkerOffsets),
"rq",
),
(|o: &mut ScxWalkerOffsets| o.scx_rq = None, "scx_rq"),
(|o: &mut ScxWalkerOffsets| o.task = None, "task_struct"),
(|o: &mut ScxWalkerOffsets| o.see = None, "sched_ext_entity"),
(
|o: &mut ScxWalkerOffsets| o.dsq_lnode = None,
"scx_dsq_list_node",
),
(|o: &mut ScxWalkerOffsets| o.dsq = None, "scx_dispatch_q"),
(|o: &mut ScxWalkerOffsets| o.sched = None, "scx_sched"),
(
|o: &mut ScxWalkerOffsets| o.sched_pnode = None,
"scx_sched_pnode",
),
(
|o: &mut ScxWalkerOffsets| o.sched_pcpu = None,
"scx_sched_pcpu",
),
(
|o: &mut ScxWalkerOffsets| o.rht = None,
"rhashtable/bucket_table/rhash_head",
),
];
for (drop_fn, expected_name) in cases {
let mut o = full();
drop_fn(&mut o);
let missing = o.missing_groups();
assert_eq!(
missing.len(),
1,
"exactly one group should be missing; expected {expected_name:?}, got {missing:?}",
);
assert_eq!(
missing[0], *expected_name,
"missing-group name string drifted: expected {expected_name:?}, got {:?}",
missing[0],
);
}
// All-None → every name, in declaration/if-chain order.
let empty = ScxWalkerOffsets {
rq: None,
scx_rq: None,
task: None,
see: None,
dsq_lnode: None,
dsq: None,
sched: None,
sched_pnode: None,
sched_pcpu: None,
rht: None,
};
let missing = empty.missing_groups();
assert_eq!(
missing,
vec![
"rq",
"scx_rq",
"task_struct",
"sched_ext_entity",
"scx_dsq_list_node",
"scx_dispatch_q",
"scx_sched",
"scx_sched_pnode",
"scx_sched_pcpu",
"rhashtable/bucket_table/rhash_head",
],
"all-missing order must match the if-chain order in `missing_groups()`",
);
}
#[test]
fn parse_struct_ops_offsets_from_vmlinux() {
    // struct_ops offsets are an optional group; validate only when present.
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    let offsets = crate::test_support::require_bpf_map_offsets(&path);
    let Some(so) = &offsets.struct_ops_offsets else {
        return;
    };
    assert!(
        so.kvalue > 0,
        "kvalue must follow bpf_map prefix in bpf_struct_ops_map"
    );
    assert!(
        so.value_data >= 8,
        "value_data must follow bpf_struct_ops_common_value (refcnt + state)"
    );
}
#[test]
// Validates optional task-local-storage offsets: resolver invariants on
// bpf_local_storage_map and its element layout.
fn parse_task_storage_offsets_from_vmlinux() {
let path = match crate::monitor::find_test_vmlinux() {
Some(p) => p,
None => return,
};
let offsets = crate::test_support::require_bpf_map_offsets(&path);
let Some(ts) = &offsets.task_storage_offsets else {
return;
};
// The walker relies on hlist_node.next being the first field.
assert_eq!(
ts.hlist_node_next, 0,
"hlist_node.next must be at offset 0 (resolver invariant)"
);
assert!(
ts.smap_buckets > 0,
"smap_buckets must follow bpf_map prefix"
);
assert!(
ts.smap_bucket_log > 0,
"smap_bucket_log must follow bpf_map prefix"
);
assert_ne!(
ts.smap_buckets, ts.smap_bucket_log,
"buckets pointer and bucket_log must be distinct fields"
);
assert!(ts.bucket_size > 0, "bpf_local_storage_map_bucket size > 0");
assert!(
ts.elem_local_storage > 0,
"elem.local_storage follows map_node"
);
assert!(ts.elem_sdata > 0, "elem.sdata follows map_node");
assert_ne!(
ts.elem_local_storage, ts.elem_sdata,
"elem.local_storage and elem.sdata must be distinct fields"
);
}
#[test]
fn parse_ringbuf_offsets_from_vmlinux() {
    // Validate optional ringbuf offsets: the embedded map, the four
    // position-tracking fields, and their relative ordering.
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    let offsets = crate::test_support::require_bpf_map_offsets(&path);
    let Some(rb) = &offsets.ringbuf_offsets else {
        return;
    };
    assert!(
        rb.rbm_rb > 0,
        "bpf_ringbuf_map.rb must follow embedded bpf_map"
    );
    // All four position fields must resolve to pairwise-distinct offsets.
    let position_fields = [
        rb.rb_mask,
        rb.rb_consumer_pos,
        rb.rb_producer_pos,
        rb.rb_pending_pos,
    ];
    for (i, a) in position_fields.iter().enumerate() {
        for b in &position_fields[i + 1..] {
            assert_ne!(
                a, b,
                "ringbuf position offsets must be distinct: \
                 mask={}, consumer_pos={}, producer_pos={}, pending_pos={}",
                rb.rb_mask, rb.rb_consumer_pos, rb.rb_producer_pos, rb.rb_pending_pos,
            );
        }
    }
    assert!(
        rb.rb_consumer_pos > rb.rb_mask,
        "consumer_pos must follow mask in bpf_ringbuf"
    );
}
#[test]
fn parse_stackmap_offsets_from_vmlinux() {
    // Validate optional stackmap offsets: the embedded map fields and the
    // stack_map_bucket layout.
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    let offsets = crate::test_support::require_bpf_map_offsets(&path);
    let Some(sm) = &offsets.stackmap_offsets else {
        return;
    };
    assert!(
        sm.smap_n_buckets > 0,
        "n_buckets must follow embedded bpf_map"
    );
    assert!(sm.smap_buckets > 0, "buckets must follow embedded bpf_map");
    assert_ne!(
        sm.smap_n_buckets, sm.smap_buckets,
        "n_buckets and buckets pointer must be distinct fields"
    );
    // Either data sits past nr, or nr itself is past offset 0 — both
    // outcomes prove the two fields have a distinguishable layout.
    assert!(
        sm.smb_data > sm.smb_nr || sm.smb_nr > 0,
        "stack_map_bucket layout: nr and data must be distinguishable \
         (data follows nr OR nr is past offset 0)"
    );
    assert_ne!(
        sm.smb_nr, sm.smb_data,
        "stack_map_bucket nr and data must be distinct fields"
    );
}
#[test]
fn cached_vmlinux_btf_hits_on_second_call() {
    // First call may legitimately yield None (no vmlinux available) — skip.
    let Some(first) = super::cached_vmlinux_btf() else {
        return;
    };
    let second = super::cached_vmlinux_btf().expect(
        "second call must succeed when first did — the cache slot is populated and \
         no error path is taken on cache hit",
    );
    // Arc pointer equality proves the cached value was returned rather
    // than rebuilt.
    assert!(
        std::sync::Arc::ptr_eq(&first, &second),
        "cached_vmlinux_btf must return the same Arc on every call once populated; \
         got fresh allocations, indicating the cache hit path did not fire",
    );
}