use std::path::Path;
use anyhow::{Context, Result, bail};
use btf_rs::{Btf, Type};
/// Load BTF from `path`, which may be either a raw BTF blob (0x9FEB magic)
/// or an ELF vmlinux image containing a `.BTF` section.
///
/// For ELF inputs whose canonical path lies inside the cache root, the
/// extracted `.BTF` bytes are memoized in a `<path>.btf` sidecar so later
/// loads skip ELF parsing. Any sidecar read/parse/write problem is logged
/// and falls back to fresh ELF extraction; it never fails the load itself.
pub(crate) fn load_btf_from_path(path: &Path) -> Result<Btf> {
    let data = std::fs::read(path).context("read file")?;
    // Raw BTF input: parse directly; the sidecar machinery is ELF-only.
    if is_raw_btf(&data) {
        return Btf::from_bytes(&data).map_err(|e| anyhow::anyhow!("{e}"));
    }
    // Sidecar writes are restricted to canonicalizable paths inside the
    // cache root so we never drop `.btf` files next to arbitrary
    // user-supplied vmlinux images.
    let (canon_path, sidecar_allowed) = match std::fs::canonicalize(path) {
        Ok(c) => {
            let inside = crate::cache::path_inside_cache_root(&c);
            (c, inside)
        }
        Err(e) => {
            tracing::debug!(
                path = %path.display(),
                err = %e,
                "btf input path canonicalize failed; sidecar suppressed for this load",
            );
            (path.to_path_buf(), false)
        }
    };
    let sidecar = btf_sidecar_path(&canon_path);
    if sidecar_allowed {
        // Fast path: reuse the cached raw-BTF sidecar when it is at least
        // as new as the vmlinux it was extracted from.
        if sidecar_fresh(&sidecar, &canon_path) {
            match std::fs::read(&sidecar) {
                Ok(cached) if is_raw_btf(&cached) => {
                    match Btf::from_bytes(&cached) {
                        Ok(btf) => return Ok(btf),
                        Err(e) => {
                            tracing::warn!(
                                path = %sidecar.display(),
                                err = %e,
                                "btf sidecar parse failed; falling back to ELF extraction",
                            );
                        }
                    }
                }
                Ok(_) => {
                    tracing::warn!(
                        path = %sidecar.display(),
                        "btf sidecar lacks 0x9FEB magic; falling back to ELF extraction",
                    );
                }
                Err(e) => {
                    tracing::warn!(
                        path = %sidecar.display(),
                        err = %e,
                        "btf sidecar read failed; falling back to ELF extraction",
                    );
                }
            }
        }
    } else {
        tracing::debug!(
            path = %canon_path.display(),
            "btf sidecar suppressed: vmlinux path is outside the cache root",
        );
    }
    // Slow path: locate the `.BTF` section inside the ELF image.
    let elf = goblin::elf::Elf::parse(&data).map_err(|_| {
        anyhow::anyhow!(
            "{}: not recognized as raw BTF (missing 0x9FEB magic) or ELF vmlinux",
            path.display()
        )
    })?;
    let btf_shdr = elf
        .section_headers
        .iter()
        .find(|shdr| elf.shdr_strtab.get_at(shdr.sh_name) == Some(".BTF"));
    let shdr = match btf_shdr {
        Some(s) => s,
        None => bail!("vmlinux ELF has no .BTF section"),
    };
    let offset = shdr.sh_offset as usize;
    let size = shdr.sh_size as usize;
    // checked_add guards against crafted section headers whose
    // offset + size would wrap around usize.
    let btf_data = offset
        .checked_add(size)
        .and_then(|end| data.get(offset..end))
        .context(".BTF section data out of bounds")?;
    let btf = Btf::from_bytes(btf_data).map_err(|e| anyhow::anyhow!("{e}"))?;
    // Only parse-validated BTF bytes are written to the sidecar; a failed
    // write is non-fatal and merely costs a re-extraction next time.
    if sidecar_allowed && let Err(e) = write_btf_sidecar(&sidecar, btf_data) {
        tracing::warn!(
            path = %sidecar.display(),
            err = %e,
            "btf sidecar write failed; BTF will be re-extracted from ELF on next load",
        );
    }
    Ok(btf)
}
/// Return the sidecar cache path for `path` by appending `.btf` to the
/// whole file name, preserving any existing extension
/// (e.g. `vmlinux.elf` -> `vmlinux.elf.btf`).
fn btf_sidecar_path(path: &Path) -> std::path::PathBuf {
    let mut with_suffix = std::ffi::OsString::from(path.as_os_str());
    with_suffix.push(".btf");
    with_suffix.into()
}
/// True when `data` starts with the raw BTF magic bytes 0x9F 0xEB
/// (the little-endian encoding of the BTF magic 0xEB9F).
fn is_raw_btf(data: &[u8]) -> bool {
    matches!(data, [0x9F, 0xEB, ..])
}
/// A sidecar is usable only when both files are statable and the sidecar's
/// mtime is at least as new as the vmlinux it was derived from; any stat
/// failure counts as stale.
fn sidecar_fresh(sidecar: &Path, vmlinux: &Path) -> bool {
    fn mtime(p: &Path) -> Option<std::time::SystemTime> {
        std::fs::metadata(p).and_then(|m| m.modified()).ok()
    }
    match (mtime(sidecar), mtime(vmlinux)) {
        (Some(side), Some(vm)) => side >= vm,
        _ => false,
    }
}
/// Atomically persist `bytes` as the sidecar file.
///
/// Writes to a temp file in the same directory, fsyncs it, then renames it
/// over the final path, so concurrent readers never observe a partially
/// written sidecar. The tempfile-in-same-directory step matters: rename is
/// only atomic within one filesystem.
fn write_btf_sidecar(sidecar: &Path, bytes: &[u8]) -> Result<()> {
    use std::io::Write;
    let parent = sidecar
        .parent()
        .context("btf sidecar path has no parent directory")?;
    let mut tmp =
        tempfile::NamedTempFile::new_in(parent).context("create tempfile for btf sidecar")?;
    tmp.write_all(bytes).context("write btf sidecar contents")?;
    // fsync before rename so a crash cannot leave a truncated file behind
    // the final name.
    tmp.as_file()
        .sync_all()
        .context("fsync btf sidecar before rename")?;
    tmp.persist(sidecar)
        .map_err(|e| anyhow::anyhow!("persist btf sidecar: {}", e.error))?;
    Ok(())
}
/// Byte offsets into kernel scheduler structures for the running kernel
/// build, resolved from vmlinux BTF.
#[derive(Debug, Clone)]
pub struct KernelOffsets {
    /// `struct rq` field offsets.
    pub rq_nr_running: usize,
    pub rq_clock: usize,
    /// Offset of the embedded `scx` member within `struct rq`.
    pub rq_scx: usize,
    /// `struct scx_rq` field offsets (relative to `rq_scx`).
    pub scx_rq_nr_running: usize,
    pub scx_rq_local_dsq: usize,
    pub scx_rq_flags: usize,
    /// `nr` field of the dispatch-queue struct (resolved from the type of
    /// `scx_rq.local_dsq`).
    pub dsq_nr: usize,
    /// Optional offset groups: `None` when this kernel's BTF lacks the
    /// corresponding structures or fields.
    pub event_offsets: Option<ScxEventOffsets>,
    pub schedstat_offsets: Option<SchedstatOffsets>,
    pub sched_domain_offsets: Option<SchedDomainOffsets>,
    pub watchdog_offsets: Option<ScxWatchdogOffsets>,
}
/// Offset of `watchdog_timeout` within `struct scx_sched`.
#[derive(Debug, Clone)]
pub struct ScxWatchdogOffsets {
    pub scx_sched_watchdog_timeout_off: usize,
}
/// Offsets for reading the sched_ext per-CPU event counters.
///
/// `percpu_ptr_off` locates the per-CPU pointer within `struct scx_sched`,
/// and `event_stats_off` the stats struct within the pointed-to data (0 in
/// the layout where the per-CPU member is the stats struct itself).
/// `Option` counters are absent from some kernels' BTF.
#[derive(Debug, Clone)]
pub struct ScxEventOffsets {
    pub percpu_ptr_off: usize,
    pub event_stats_off: usize,
    pub ev_select_cpu_fallback: usize,
    pub ev_dispatch_local_dsq_offline: usize,
    pub ev_dispatch_keep_last: usize,
    pub ev_enq_skip_exiting: usize,
    pub ev_enq_skip_migration_disabled: usize,
    pub ev_reenq_immed: Option<usize>,
    pub ev_reenq_local_repeat: Option<usize>,
    pub ev_refill_slice_dfl: Option<usize>,
    pub ev_bypass_duration: Option<usize>,
    pub ev_bypass_dispatch: Option<usize>,
    pub ev_bypass_activate: Option<usize>,
    pub ev_insert_not_owned: Option<usize>,
    pub ev_sub_bypass_dispatch: Option<usize>,
}
impl KernelOffsets {
    /// Resolve all scheduler offsets from the BTF in the vmlinux (or raw
    /// BTF) file at `path`.
    ///
    /// The core `rq`/`scx_rq`/dsq offsets are required and cause an error
    /// when missing; the optional groups (events, schedstat, sched_domain,
    /// watchdog) degrade to `None` when this kernel's BTF lacks them.
    pub fn from_vmlinux(path: &Path) -> Result<Self> {
        let btf =
            load_btf_from_path(path).with_context(|| format!("btf: open {}", path.display()))?;
        let (rq_struct, _) = find_struct(&btf, "rq")?;
        let rq_nr_running = member_byte_offset(&btf, &rq_struct, "nr_running")?;
        let rq_clock = member_byte_offset(&btf, &rq_struct, "clock")?;
        // Keep the `scx` member so its type (struct scx_rq) can be
        // resolved and descended into.
        let (rq_scx, scx_member) = member_byte_offset_with_member(&btf, &rq_struct, "scx")?;
        let scx_rq_struct =
            resolve_member_struct(&btf, &scx_member).context("btf: resolve type of rq.scx")?;
        let scx_rq_nr_running = member_byte_offset(&btf, &scx_rq_struct, "nr_running")?;
        let (scx_rq_local_dsq, local_dsq_member) =
            member_byte_offset_with_member(&btf, &scx_rq_struct, "local_dsq")?;
        let scx_rq_flags = member_byte_offset(&btf, &scx_rq_struct, "flags")?;
        // The dispatch-queue struct is taken from the type of
        // scx_rq.local_dsq rather than looked up by name.
        let dsq_struct = resolve_member_struct(&btf, &local_dsq_member)
            .context("btf: resolve type of scx_rq.local_dsq")?;
        let dsq_nr = member_byte_offset(&btf, &dsq_struct, "nr")?;
        // Optional groups: .ok() converts resolution failure into None.
        let event_offsets = resolve_event_offsets(&btf).ok();
        let schedstat_offsets = resolve_schedstat_offsets(&btf).ok();
        let sched_domain_offsets = resolve_sched_domain_offsets(&btf, &rq_struct).ok();
        let watchdog_offsets = resolve_watchdog_offsets(&btf).ok();
        Ok(Self {
            rq_nr_running,
            rq_clock,
            rq_scx,
            scx_rq_nr_running,
            scx_rq_local_dsq,
            scx_rq_flags,
            dsq_nr,
            event_offsets,
            schedstat_offsets,
            sched_domain_offsets,
            watchdog_offsets,
        })
    }
}
/// Resolve offsets for the sched_ext event counters.
///
/// Two layouts are supported: `scx_sched.pcpu` ->
/// `scx_sched_pcpu.event_stats` (preferred), and a direct per-CPU
/// `scx_sched.event_stats_cpu` member, in which case `event_stats_off` is
/// 0. Counters absent from this kernel's BTF resolve to `None`.
fn resolve_event_offsets(btf: &Btf) -> Result<ScxEventOffsets> {
    let (scx_sched_struct, _) = find_struct(btf, "scx_sched")?;
    // Preferred path: scx_sched.pcpu -> scx_sched_pcpu.event_stats.
    // Any failure along the chain yields None and triggers the fallback.
    let pcpu_path = member_byte_offset(btf, &scx_sched_struct, "pcpu")
        .ok()
        .and_then(|pcpu_off| {
            let (pcpu_struct, _) = find_struct(btf, "scx_sched_pcpu").ok()?;
            let (stats_off, stats_member) =
                member_byte_offset_with_member(btf, &pcpu_struct, "event_stats").ok()?;
            let stats_struct = resolve_member_struct(btf, &stats_member).ok()?;
            Some((pcpu_off, stats_off, stats_struct))
        });
    let (percpu_ptr_off, event_stats_off, event_stats_struct) = match pcpu_path {
        Some(resolved) => resolved,
        None => {
            // Fallback path: event_stats_cpu is itself the per-CPU stats
            // member, so the intra-struct stats offset is 0.
            let (esc_off, esc_member) =
                member_byte_offset_with_member(btf, &scx_sched_struct, "event_stats_cpu")
                    .context("btf: neither scx_sched.pcpu nor scx_sched.event_stats_cpu found")?;
            let stats_struct = resolve_member_struct(btf, &esc_member)
                .context("btf: resolve type of scx_sched.event_stats_cpu")?;
            (esc_off, 0, stats_struct)
        }
    };
    // Required counters: resolution failure is an error.
    let ev_select_cpu_fallback =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_SELECT_CPU_FALLBACK")?;
    let ev_dispatch_local_dsq_offline = member_byte_offset(
        btf,
        &event_stats_struct,
        "SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE",
    )?;
    let ev_dispatch_keep_last =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_DISPATCH_KEEP_LAST")?;
    let ev_enq_skip_exiting =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_ENQ_SKIP_EXITING")?;
    let ev_enq_skip_migration_disabled = member_byte_offset(
        btf,
        &event_stats_struct,
        "SCX_EV_ENQ_SKIP_MIGRATION_DISABLED",
    )?;
    // Optional counters: .ok() maps absence to None.
    let ev_reenq_immed = member_byte_offset(btf, &event_stats_struct, "SCX_EV_REENQ_IMMED").ok();
    let ev_reenq_local_repeat =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_REENQ_LOCAL_REPEAT").ok();
    // Accept either spelling of the slice-refill counter
    // (SCX_EV_ENQ_SLICE_DFL appears to be the older name -- TODO confirm).
    let ev_refill_slice_dfl =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_REFILL_SLICE_DFL")
            .or_else(|_| member_byte_offset(btf, &event_stats_struct, "SCX_EV_ENQ_SLICE_DFL"))
            .ok();
    let ev_bypass_duration =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_BYPASS_DURATION").ok();
    let ev_bypass_dispatch =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_BYPASS_DISPATCH").ok();
    let ev_bypass_activate =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_BYPASS_ACTIVATE").ok();
    let ev_insert_not_owned =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_INSERT_NOT_OWNED").ok();
    let ev_sub_bypass_dispatch =
        member_byte_offset(btf, &event_stats_struct, "SCX_EV_SUB_BYPASS_DISPATCH").ok();
    Ok(ScxEventOffsets {
        percpu_ptr_off,
        event_stats_off,
        ev_select_cpu_fallback,
        ev_dispatch_local_dsq_offline,
        ev_dispatch_keep_last,
        ev_enq_skip_exiting,
        ev_enq_skip_migration_disabled,
        ev_reenq_immed,
        ev_reenq_local_repeat,
        ev_refill_slice_dfl,
        ev_bypass_duration,
        ev_bypass_dispatch,
        ev_bypass_activate,
        ev_insert_not_owned,
        ev_sub_bypass_dispatch,
    })
}
/// Resolve the byte offset of `watchdog_timeout` within `struct scx_sched`.
fn resolve_watchdog_offsets(btf: &Btf) -> Result<ScxWatchdogOffsets> {
    let (scx_sched, _) = find_struct(btf, "scx_sched")?;
    Ok(ScxWatchdogOffsets {
        scx_sched_watchdog_timeout_off: member_byte_offset(btf, &scx_sched, "watchdog_timeout")?,
    })
}
pub(crate) fn find_struct(btf: &Btf, name: &str) -> Result<(btf_rs::Struct, String)> {
let types = btf
.resolve_types_by_name(name)
.with_context(|| format!("btf: type '{name}' not found"))?;
for t in &types {
if let Type::Struct(s) = t {
let resolved_name = btf.resolve_name(s).unwrap_or_default();
return Ok((s.clone(), resolved_name));
}
}
bail!("btf: '{name}' exists but is not a struct");
}
/// Resolve `field` to its byte offset within struct `s`, descending into
/// anonymous nested structs/unions. Fails if the field is missing or not
/// byte-aligned (a mid-byte bitfield).
pub(crate) fn member_byte_offset(btf: &Btf, s: &btf_rs::Struct, field: &str) -> Result<usize> {
    member_byte_offset_recursive(btf, s, field, 0)
}
/// Recursive worker for [`member_byte_offset`]: searches `s` for `field`,
/// accumulating `base_offset` while descending into anonymous members.
fn member_byte_offset_recursive(
    btf: &Btf,
    s: &btf_rs::Struct,
    field: &str,
    base_offset: usize,
) -> Result<usize> {
    for member in &s.members {
        let name = btf.resolve_name(member).unwrap_or_default();
        let bits = member.bit_offset();
        // Non-byte-aligned members (bitfields): a direct hit is an error
        // because the caller expects a byte offset; otherwise skip.
        if bits % 8 != 0 {
            if name == field {
                bail!("btf: field '{field}' has non-byte-aligned offset ({bits} bits)");
            }
            continue;
        }
        let member_offset = base_offset + (bits / 8) as usize;
        if name == field {
            return Ok(member_offset);
        }
        // Unnamed members are anonymous structs/unions: recurse into them
        // so fields of anonymous aggregates are found transparently.
        if name.is_empty()
            && let Ok(inner) = resolve_member_composite(btf, member)
            && let Ok(offset) = member_byte_offset_recursive(btf, &inner, field, member_offset)
        {
            return Ok(offset);
        }
    }
    bail!("btf: field '{field}' not found in struct");
}
/// Resolve a member's type to the underlying struct/union, peeling type
/// modifiers (`const`, `volatile`, typedefs, `restrict`, type tags) along
/// the way. Errors for non-composite types, for malformed modifier nodes,
/// and for modifier chains deeper than 20 (cycle guard).
fn resolve_member_composite(btf: &Btf, member: &btf_rs::Member) -> Result<btf_rs::Struct> {
    let mut t = btf.resolve_chained_type(member)?;
    // Bound the walk so a malformed (cyclic) BTF type chain cannot loop
    // forever.
    for _ in 0..20 {
        match t {
            Type::Struct(s) | Type::Union(s) => return Ok(s),
            Type::Const(_)
            | Type::Volatile(_)
            | Type::Typedef(_)
            | Type::Restrict(_)
            | Type::TypeTag(_) => {
                // Previously an unwrap(): make a missing inner type a
                // proper error instead of a panic on malformed BTF.
                t = btf.resolve_chained_type(
                    t.as_btf_type()
                        .context("btf: modifier type without underlying btf type")?,
                )?;
            }
            _ => bail!("btf: not a composite type"),
        }
    }
    bail!("btf: type chain too deep")
}
/// Non-recursive lookup of `field` directly in `s`: returns its byte
/// offset together with a clone of the member, so callers can resolve the
/// member's own type afterwards. Fails for missing fields and for members
/// at non-byte-aligned (bitfield) offsets.
fn member_byte_offset_with_member(
    btf: &Btf,
    s: &btf_rs::Struct,
    field: &str,
) -> Result<(usize, btf_rs::Member)> {
    for candidate in &s.members {
        if btf.resolve_name(candidate).unwrap_or_default() != field {
            continue;
        }
        let bits = candidate.bit_offset();
        if bits % 8 != 0 {
            bail!("btf: field '{field}' has non-byte-aligned offset ({bits} bits)");
        }
        return Ok(((bits / 8) as usize, candidate.clone()));
    }
    bail!("btf: field '{field}' not found in struct");
}
/// Resolve a member to the struct it refers to, delegating to the shared
/// type-walker in `bpf_map` via the member's raw BTF type id.
fn resolve_member_struct(btf: &Btf, member: &btf_rs::Member) -> Result<btf_rs::Struct> {
    // BtfType is imported for the get_type_id() trait method.
    use btf_rs::BtfType;
    let tid = member.get_type_id().context("btf: member type_id")?;
    super::bpf_map::resolve_to_struct(btf, tid).context("btf: could not resolve member to struct")
}
/// Offsets of the schedstat counters: `rq.rq_sched_info` plus fields inside
/// `struct sched_info`, and scalar counters directly on `struct rq`.
#[derive(Debug, Clone)]
pub struct SchedstatOffsets {
    /// Offset of the `rq_sched_info` member within `struct rq`.
    pub rq_sched_info: usize,
    /// Offsets within `struct sched_info` (relative to `rq_sched_info`).
    pub sched_info_run_delay: usize,
    pub sched_info_pcount: usize,
    /// Counter offsets directly within `struct rq`.
    pub rq_yld_count: usize,
    pub rq_sched_count: usize,
    pub rq_sched_goidle: usize,
    pub rq_ttwu_count: usize,
    pub rq_ttwu_local: usize,
}
/// Resolve all schedstat offsets; any missing field fails the whole group
/// (the caller maps failure to `None`).
fn resolve_schedstat_offsets(btf: &Btf) -> Result<SchedstatOffsets> {
    let (rq_struct, _) = find_struct(btf, "rq")?;
    // Keep the member so the sched_info struct can be resolved from its
    // type rather than looked up by name.
    let (rq_sched_info, sched_info_member) =
        member_byte_offset_with_member(btf, &rq_struct, "rq_sched_info")?;
    let sched_info_struct = resolve_member_struct(btf, &sched_info_member)
        .context("btf: resolve type of rq.rq_sched_info")?;
    let sched_info_run_delay = member_byte_offset(btf, &sched_info_struct, "run_delay")?;
    let sched_info_pcount = member_byte_offset(btf, &sched_info_struct, "pcount")?;
    let rq_yld_count = member_byte_offset(btf, &rq_struct, "yld_count")?;
    let rq_sched_count = member_byte_offset(btf, &rq_struct, "sched_count")?;
    let rq_sched_goidle = member_byte_offset(btf, &rq_struct, "sched_goidle")?;
    let rq_ttwu_count = member_byte_offset(btf, &rq_struct, "ttwu_count")?;
    let rq_ttwu_local = member_byte_offset(btf, &rq_struct, "ttwu_local")?;
    Ok(SchedstatOffsets {
        rq_sched_info,
        sched_info_run_delay,
        sched_info_pcount,
        rq_yld_count,
        rq_sched_count,
        rq_sched_goidle,
        rq_ttwu_count,
        rq_ttwu_local,
    })
}
/// Offsets for walking the `struct sched_domain` hierarchy starting at
/// `rq.sd`.
#[derive(Debug, Clone)]
pub struct SchedDomainOffsets {
    /// Offset of the `sd` pointer within `struct rq`.
    pub rq_sd: usize,
    /// Field offsets within `struct sched_domain`.
    pub sd_parent: usize,
    pub sd_level: usize,
    pub sd_name: usize,
    pub sd_flags: usize,
    pub sd_span_weight: usize,
    pub sd_balance_interval: usize,
    pub sd_nr_balance_failed: usize,
    /// Newidle-balance fields; resolved all-or-nothing, so either all
    /// three are `Some` or all three are `None`.
    pub sd_newidle_call: Option<usize>,
    pub sd_newidle_success: Option<usize>,
    pub sd_newidle_ratio: Option<usize>,
    pub sd_max_newidle_lb_cost: usize,
    /// Schedstat fields of `struct sched_domain`; `None` when absent.
    pub stats_offsets: Option<SchedDomainStatsOffsets>,
}
/// Offsets of the per-domain load-balance statistics counters within
/// `struct sched_domain`.
#[derive(Debug, Clone)]
pub struct SchedDomainStatsOffsets {
    pub sd_lb_count: usize,
    pub sd_lb_failed: usize,
    pub sd_lb_balanced: usize,
    pub sd_lb_imbalance_load: usize,
    pub sd_lb_imbalance_util: usize,
    pub sd_lb_imbalance_task: usize,
    pub sd_lb_imbalance_misfit: usize,
    pub sd_lb_gained: usize,
    pub sd_lb_hot_gained: usize,
    pub sd_lb_nobusyg: usize,
    pub sd_lb_nobusyq: usize,
    pub sd_alb_count: usize,
    pub sd_alb_failed: usize,
    pub sd_alb_pushed: usize,
    pub sd_sbe_count: usize,
    pub sd_sbe_balanced: usize,
    pub sd_sbe_pushed: usize,
    pub sd_sbf_count: usize,
    pub sd_sbf_balanced: usize,
    pub sd_sbf_pushed: usize,
    pub sd_ttwu_wake_remote: usize,
    pub sd_ttwu_move_affine: usize,
    pub sd_ttwu_move_balance: usize,
}
/// Number of per-idle-type slots in the sched_domain load-balance stats
/// arrays -- presumably mirrors the kernel's CPU_MAX_IDLE_TYPES; confirm
/// against the target kernel headers.
pub const CPU_MAX_IDLE_TYPES: usize = 3;
/// Resolve sched_domain traversal offsets. Core fields are required;
/// newidle fields are all-or-nothing optional; stats offsets are optional
/// as a group.
fn resolve_sched_domain_offsets(
    btf: &Btf,
    rq_struct: &btf_rs::Struct,
) -> Result<SchedDomainOffsets> {
    let rq_sd = member_byte_offset(btf, rq_struct, "sd")?;
    let (sd_struct, _) = find_struct(btf, "sched_domain")?;
    let sd_parent = member_byte_offset(btf, &sd_struct, "parent")?;
    let sd_level = member_byte_offset(btf, &sd_struct, "level")?;
    let sd_name = member_byte_offset(btf, &sd_struct, "name")?;
    let sd_flags = member_byte_offset(btf, &sd_struct, "flags")?;
    let sd_span_weight = member_byte_offset(btf, &sd_struct, "span_weight")?;
    let sd_balance_interval = member_byte_offset(btf, &sd_struct, "balance_interval")?;
    let sd_nr_balance_failed = member_byte_offset(btf, &sd_struct, "nr_balance_failed")?;
    let sd_max_newidle_lb_cost = member_byte_offset(btf, &sd_struct, "max_newidle_lb_cost")?;
    // Newidle fields are only kept when all three resolve; a partial set
    // is treated as absent.
    let (sd_newidle_call, sd_newidle_success, sd_newidle_ratio) = match (
        member_byte_offset(btf, &sd_struct, "newidle_call").ok(),
        member_byte_offset(btf, &sd_struct, "newidle_success").ok(),
        member_byte_offset(btf, &sd_struct, "newidle_ratio").ok(),
    ) {
        (Some(c), Some(s), Some(r)) => (Some(c), Some(s), Some(r)),
        _ => (None, None, None),
    };
    let stats_offsets = resolve_sched_domain_stats_offsets(btf, &sd_struct).ok();
    Ok(SchedDomainOffsets {
        rq_sd,
        sd_parent,
        sd_level,
        sd_name,
        sd_flags,
        sd_span_weight,
        sd_balance_interval,
        sd_nr_balance_failed,
        sd_newidle_call,
        sd_newidle_success,
        sd_newidle_ratio,
        sd_max_newidle_lb_cost,
        stats_offsets,
    })
}
/// Resolve the sched_domain schedstat counter offsets; any missing field
/// fails the whole group (the caller maps failure to `None`).
fn resolve_sched_domain_stats_offsets(
    btf: &Btf,
    sd_struct: &btf_rs::Struct,
) -> Result<SchedDomainStatsOffsets> {
    Ok(SchedDomainStatsOffsets {
        sd_lb_count: member_byte_offset(btf, sd_struct, "lb_count")?,
        sd_lb_failed: member_byte_offset(btf, sd_struct, "lb_failed")?,
        sd_lb_balanced: member_byte_offset(btf, sd_struct, "lb_balanced")?,
        sd_lb_imbalance_load: member_byte_offset(btf, sd_struct, "lb_imbalance_load")?,
        sd_lb_imbalance_util: member_byte_offset(btf, sd_struct, "lb_imbalance_util")?,
        sd_lb_imbalance_task: member_byte_offset(btf, sd_struct, "lb_imbalance_task")?,
        sd_lb_imbalance_misfit: member_byte_offset(btf, sd_struct, "lb_imbalance_misfit")?,
        sd_lb_gained: member_byte_offset(btf, sd_struct, "lb_gained")?,
        sd_lb_hot_gained: member_byte_offset(btf, sd_struct, "lb_hot_gained")?,
        sd_lb_nobusyg: member_byte_offset(btf, sd_struct, "lb_nobusyg")?,
        sd_lb_nobusyq: member_byte_offset(btf, sd_struct, "lb_nobusyq")?,
        sd_alb_count: member_byte_offset(btf, sd_struct, "alb_count")?,
        sd_alb_failed: member_byte_offset(btf, sd_struct, "alb_failed")?,
        sd_alb_pushed: member_byte_offset(btf, sd_struct, "alb_pushed")?,
        sd_sbe_count: member_byte_offset(btf, sd_struct, "sbe_count")?,
        sd_sbe_balanced: member_byte_offset(btf, sd_struct, "sbe_balanced")?,
        sd_sbe_pushed: member_byte_offset(btf, sd_struct, "sbe_pushed")?,
        sd_sbf_count: member_byte_offset(btf, sd_struct, "sbf_count")?,
        sd_sbf_balanced: member_byte_offset(btf, sd_struct, "sbf_balanced")?,
        sd_sbf_pushed: member_byte_offset(btf, sd_struct, "sbf_pushed")?,
        sd_ttwu_wake_remote: member_byte_offset(btf, sd_struct, "ttwu_wake_remote")?,
        sd_ttwu_move_affine: member_byte_offset(btf, sd_struct, "ttwu_move_affine")?,
        sd_ttwu_move_balance: member_byte_offset(btf, sd_struct, "ttwu_move_balance")?,
    })
}
/// Offsets needed to walk a kernel IDR (xarray-backed id allocator).
#[derive(Debug, Clone, Copy)]
pub struct IdrOffsets {
    /// `struct xa_node` fields.
    pub xa_node_slots: usize,
    pub xa_node_shift: usize,
    /// Combined offset of `idr.idr_rt.xa_head` within `struct idr`.
    pub idr_xa_head: usize,
    /// Offset of `idr_next` within `struct idr`.
    pub idr_next: usize,
}
impl IdrOffsets {
    /// Resolve all IDR-walking offsets from already-loaded BTF.
    pub fn from_btf(btf: &Btf) -> Result<Self> {
        let (xa_node, _) = find_struct(btf, "xa_node")?;
        let xa_node_slots = member_byte_offset(btf, &xa_node, "slots")?;
        let xa_node_shift = member_byte_offset(btf, &xa_node, "shift")?;
        let (idr_struct, _) = find_struct(btf, "idr")?;
        // idr embeds an xarray (idr_rt); flatten idr_rt + xa_head into a
        // single offset so readers dereference once.
        let (idr_rt_off, idr_rt_member) =
            member_byte_offset_with_member(btf, &idr_struct, "idr_rt")?;
        let xa_struct = resolve_member_struct(btf, &idr_rt_member)
            .context("btf: resolve type of idr.idr_rt")?;
        let xa_head_off = member_byte_offset(btf, &xa_struct, "xa_head")?;
        let idr_xa_head = idr_rt_off + xa_head_off;
        let idr_next = member_byte_offset(btf, &idr_struct, "idr_next")?;
        Ok(Self {
            xa_node_slots,
            xa_node_shift,
            idr_xa_head,
            idr_next,
        })
    }
}
/// Offsets for reading BPF map metadata and contents from kernel memory:
/// `struct bpf_map` fields, array-map values, the map IDR, and the map's
/// attached BTF blob.
#[derive(Debug, Clone)]
#[allow(dead_code)]
pub struct BpfMapOffsets {
    /// `struct bpf_map` fields.
    pub map_name: usize,
    pub map_type: usize,
    pub map_flags: usize,
    pub key_size: usize,
    pub value_size: usize,
    pub max_entries: usize,
    /// Offset of `value` within `struct bpf_array`.
    pub array_value: usize,
    /// IDR-walking offsets (flattened copy of [`IdrOffsets`]).
    pub xa_node_slots: usize,
    pub xa_node_shift: usize,
    pub idr_xa_head: usize,
    pub idr_next: usize,
    /// `bpf_map.btf` pointer and `bpf_map.btf_value_type_id`.
    pub map_btf: usize,
    pub map_btf_value_type_id: usize,
    /// `struct btf` data pointer and size fields.
    pub btf_data: usize,
    pub btf_data_size: usize,
    /// Hashtab-map offsets; `None` when unresolvable in this BTF.
    pub htab_offsets: Option<HtabOffsets>,
}
impl BpfMapOffsets {
    /// All-zero placeholder for unit tests that need a value without a
    /// vmlinux to resolve one from.
    #[cfg(all(test, target_arch = "x86_64"))]
    pub(crate) const EMPTY: Self = Self {
        map_name: 0,
        map_type: 0,
        map_flags: 0,
        key_size: 0,
        value_size: 0,
        max_entries: 0,
        array_value: 0,
        xa_node_slots: 0,
        xa_node_shift: 0,
        idr_xa_head: 0,
        idr_next: 0,
        map_btf: 0,
        map_btf_value_type_id: 0,
        btf_data: 0,
        btf_data_size: 0,
        htab_offsets: None,
    };
    /// Resolve offsets from the vmlinux (or raw BTF) file at `path`.
    pub fn from_vmlinux(path: &Path) -> Result<Self> {
        let btf =
            load_btf_from_path(path).with_context(|| format!("btf: open {}", path.display()))?;
        Self::from_btf(&btf)
    }
    /// Resolve all bpf_map-related offsets from already-loaded BTF.
    /// Everything is required except the hashtab group, which degrades to
    /// `None`.
    pub fn from_btf(btf: &Btf) -> Result<Self> {
        let (bpf_map, _) = find_struct(btf, "bpf_map")?;
        let map_name = member_byte_offset(btf, &bpf_map, "name")?;
        let map_type = member_byte_offset(btf, &bpf_map, "map_type")?;
        let map_flags = member_byte_offset(btf, &bpf_map, "map_flags")?;
        let key_size = member_byte_offset(btf, &bpf_map, "key_size")?;
        let value_size = member_byte_offset(btf, &bpf_map, "value_size")?;
        let max_entries = member_byte_offset(btf, &bpf_map, "max_entries")?;
        let (bpf_array, _) = find_struct(btf, "bpf_array")?;
        let array_value = member_byte_offset(btf, &bpf_array, "value")?;
        let idr = IdrOffsets::from_btf(btf)?;
        let map_btf = member_byte_offset(btf, &bpf_map, "btf")?;
        let map_btf_value_type_id = member_byte_offset(btf, &bpf_map, "btf_value_type_id")?;
        let (btf_struct, _) = find_struct(btf, "btf")?;
        let btf_data = member_byte_offset(btf, &btf_struct, "data")?;
        let btf_data_size = member_byte_offset(btf, &btf_struct, "data_size")?;
        let htab_offsets = resolve_htab_offsets(btf).ok();
        Ok(Self {
            map_name,
            map_type,
            map_flags,
            key_size,
            value_size,
            max_entries,
            array_value,
            xa_node_slots: idr.xa_node_slots,
            xa_node_shift: idr.xa_node_shift,
            idr_xa_head: idr.idr_xa_head,
            idr_next: idr.idr_next,
            map_btf,
            map_btf_value_type_id,
            btf_data,
            btf_data_size,
            htab_offsets,
        })
    }
}
/// Offsets for walking a BPF hashtab map (`struct bpf_htab`): its bucket
/// array, the hlist_nulls chains inside each bucket, and the element
/// header size.
#[derive(Debug, Clone)]
#[allow(dead_code)]
pub struct HtabOffsets {
    /// `bpf_htab.buckets` and `bpf_htab.n_buckets`.
    pub htab_buckets: usize,
    pub htab_n_buckets: usize,
    /// Size of `struct bucket` and offset of its `head` member.
    pub bucket_size: usize,
    pub bucket_head: usize,
    /// hlist_nulls list-walking offsets.
    pub hlist_nulls_head_first: usize,
    pub hlist_nulls_node_next: usize,
    /// Size of `struct htab_elem` (the fixed header before key/value).
    pub htab_elem_size_base: usize,
}
/// Locate the hashtab `struct bucket` definition. BTF may contain several
/// types named "bucket", so pick the first struct that actually has a
/// `head` member and return it with `head`'s byte offset.
fn find_bucket_struct(btf: &Btf) -> Result<(btf_rs::Struct, usize)> {
    let candidates = btf
        .resolve_types_by_name("bucket")
        .with_context(|| "btf: type 'bucket' not found")?;
    for candidate in &candidates {
        let Type::Struct(s) = candidate else { continue };
        if let Ok(head_off) = member_byte_offset(btf, s, "head") {
            return Ok((s.clone(), head_off));
        }
    }
    bail!("btf: no 'bucket' struct with 'head' field found");
}
/// Resolve all hashtab-walking offsets; any missing type or field fails
/// the whole group (the caller maps failure to `None`).
fn resolve_htab_offsets(btf: &Btf) -> Result<HtabOffsets> {
    let (bpf_htab, _) = find_struct(btf, "bpf_htab")?;
    let htab_buckets = member_byte_offset(btf, &bpf_htab, "buckets")?;
    let htab_n_buckets = member_byte_offset(btf, &bpf_htab, "n_buckets")?;
    // "bucket" is ambiguous in BTF; find_bucket_struct disambiguates by
    // requiring a `head` member.
    let (bucket_struct, bucket_head) = find_bucket_struct(btf)?;
    let bucket_size = bucket_struct.size();
    let (hlist_nulls_head, _) = find_struct(btf, "hlist_nulls_head")?;
    let hlist_nulls_head_first = member_byte_offset(btf, &hlist_nulls_head, "first")?;
    let (hlist_nulls_node, _) = find_struct(btf, "hlist_nulls_node")?;
    let hlist_nulls_node_next = member_byte_offset(btf, &hlist_nulls_node, "next")?;
    let (htab_elem, _) = find_struct(btf, "htab_elem")?;
    let htab_elem_size_base = htab_elem.size();
    Ok(HtabOffsets {
        htab_buckets,
        htab_n_buckets,
        bucket_size,
        bucket_head,
        hlist_nulls_head_first,
        hlist_nulls_node_next,
        htab_elem_size_base,
    })
}
/// Offsets for reading BPF program metadata: `struct bpf_prog` fields, its
/// aux struct, the prog IDR, and the per-CPU runtime stats.
#[derive(Debug, Clone)]
pub struct BpfProgOffsets {
    /// `bpf_prog.type` and `bpf_prog.aux` pointer.
    pub prog_type: usize,
    pub prog_aux: usize,
    /// `struct bpf_prog_aux` fields.
    pub aux_verified_insns: usize,
    pub aux_name: usize,
    /// IDR-walking offsets (flattened copy of [`IdrOffsets`]).
    pub xa_node_slots: usize,
    pub xa_node_shift: usize,
    pub idr_xa_head: usize,
    pub idr_next: usize,
    /// `bpf_prog.stats` plus fields within `struct bpf_prog_stats`.
    pub prog_stats: usize,
    pub stats_cnt: usize,
    pub stats_nsecs: usize,
}
impl BpfProgOffsets {
    /// Resolve all bpf_prog-related offsets from already-loaded BTF; every
    /// field is required.
    pub fn from_btf(btf: &Btf) -> Result<Self> {
        let (bpf_prog, _) = find_struct(btf, "bpf_prog")?;
        let prog_type = member_byte_offset(btf, &bpf_prog, "type")?;
        let prog_aux = member_byte_offset(btf, &bpf_prog, "aux")?;
        let (bpf_prog_aux, _) = find_struct(btf, "bpf_prog_aux")?;
        let aux_verified_insns = member_byte_offset(btf, &bpf_prog_aux, "verified_insns")?;
        let aux_name = member_byte_offset(btf, &bpf_prog_aux, "name")?;
        let idr = IdrOffsets::from_btf(btf)?;
        let prog_stats = member_byte_offset(btf, &bpf_prog, "stats")?;
        let (bpf_prog_stats, _) = find_struct(btf, "bpf_prog_stats")?;
        let stats_cnt = member_byte_offset(btf, &bpf_prog_stats, "cnt")?;
        let stats_nsecs = member_byte_offset(btf, &bpf_prog_stats, "nsecs")?;
        Ok(Self {
            prog_type,
            prog_aux,
            aux_verified_insns,
            aux_name,
            xa_node_slots: idr.xa_node_slots,
            xa_node_shift: idr.xa_node_shift,
            idr_xa_head: idr.idr_xa_head,
            idr_next: idr.idr_next,
            prog_stats,
            stats_cnt,
            stats_nsecs,
        })
    }
    /// Resolve offsets from the vmlinux (or raw BTF) file at `path`.
    pub fn from_vmlinux(path: &Path) -> Result<Self> {
        let btf =
            load_btf_from_path(path).with_context(|| format!("btf: open {}", path.display()))?;
        Self::from_btf(&btf)
    }
}
#[cfg(test)]
mod tests {
use super::*;
/// Core rq/scx offsets must resolve and be mutually distinct.
/// Skips silently when no test vmlinux is available.
#[test]
fn parse_rq_offsets_from_vmlinux() {
    let path = match crate::monitor::find_test_vmlinux() {
        Some(p) => p,
        None => return, // environment has no usable vmlinux; skip
    };
    let offsets = crate::test_support::require_kernel_offsets(&path);
    assert_ne!(
        offsets.rq_nr_running, offsets.rq_clock,
        "rq_nr_running and rq_clock offsets must be distinct"
    );
    assert!(offsets.rq_clock > 0);
    assert!(offsets.rq_scx > 0);
    assert!(offsets.dsq_nr > 0);
}
/// When event offsets resolve, every required counter offset plus every
/// present optional one must be pairwise distinct.
#[test]
fn parse_event_offsets_from_vmlinux() {
    let path = match crate::monitor::find_test_vmlinux() {
        Some(p) => p,
        None => return, // environment has no usable vmlinux; skip
    };
    let offsets = crate::test_support::require_kernel_offsets(&path);
    if let Some(ev) = &offsets.event_offsets {
        let mut all = vec![
            ev.ev_select_cpu_fallback,
            ev.ev_dispatch_local_dsq_offline,
            ev.ev_dispatch_keep_last,
            ev.ev_enq_skip_exiting,
            ev.ev_enq_skip_migration_disabled,
        ];
        // Append whichever optional counters this kernel exposes.
        for off in [
            ev.ev_reenq_immed,
            ev.ev_reenq_local_repeat,
            ev.ev_refill_slice_dfl,
            ev.ev_bypass_duration,
            ev.ev_bypass_dispatch,
            ev.ev_bypass_activate,
            ev.ev_insert_not_owned,
            ev.ev_sub_bypass_dispatch,
        ]
        .into_iter()
        .flatten()
        {
            all.push(off);
        }
        // Pairwise distinctness over the full set.
        for i in 0..all.len() {
            for j in (i + 1)..all.len() {
                assert_ne!(all[i], all[j], "event counter offsets must be distinct");
            }
        }
    }
}
/// When schedstat offsets resolve, sched_info and rq counter offsets must
/// be nonzero and pairwise distinct.
#[test]
fn parse_schedstat_offsets_from_vmlinux() {
    let path = match crate::monitor::find_test_vmlinux() {
        Some(p) => p,
        None => return, // environment has no usable vmlinux; skip
    };
    let offsets = crate::test_support::require_kernel_offsets(&path);
    if let Some(ss) = &offsets.schedstat_offsets {
        assert!(ss.rq_sched_info > 0);
        assert!(
            ss.sched_info_run_delay > 0,
            "run_delay must follow pcount in struct sched_info"
        );
        assert_ne!(
            ss.sched_info_pcount, ss.sched_info_run_delay,
            "pcount and run_delay offsets must be distinct"
        );
        let rq_fields = [
            ss.rq_yld_count,
            ss.rq_sched_count,
            ss.rq_sched_goidle,
            ss.rq_ttwu_count,
            ss.rq_ttwu_local,
        ];
        for &off in &rq_fields {
            assert!(off > 0, "schedstat rq field offset must be nonzero");
        }
        for i in 0..rq_fields.len() {
            for j in (i + 1)..rq_fields.len() {
                assert_ne!(
                    rq_fields[i], rq_fields[j],
                    "schedstat rq field offsets must be distinct"
                );
            }
        }
    }
}
/// When sched_domain offsets resolve, core fields must be distinct, runtime
/// fields nonzero, and (when present) the stats counters pairwise distinct.
#[test]
fn parse_sched_domain_offsets_from_vmlinux() {
    let path = match crate::monitor::find_test_vmlinux() {
        Some(p) => p,
        None => return, // environment has no usable vmlinux; skip
    };
    let offsets = crate::test_support::require_kernel_offsets(&path);
    if let Some(sd) = &offsets.sched_domain_offsets {
        assert!(sd.rq_sd > 0, "rq.sd must be at nonzero offset");
        assert_ne!(
            sd.sd_level, sd.sd_parent,
            "level and parent offsets must be distinct"
        );
        assert_ne!(
            sd.sd_name, sd.sd_parent,
            "name and parent offsets must be distinct"
        );
        let always_present = [
            sd.sd_balance_interval,
            sd.sd_nr_balance_failed,
            sd.sd_max_newidle_lb_cost,
        ];
        for &off in &always_present {
            assert!(off > 0, "sched_domain runtime field offset must be nonzero");
        }
        // Optional newidle fields: only check the ones that resolved.
        for off in [
            sd.sd_newidle_call,
            sd.sd_newidle_success,
            sd.sd_newidle_ratio,
        ]
        .into_iter()
        .flatten()
        {
            assert!(
                off > 0,
                "optional newidle field offset must be nonzero when present"
            );
        }
        if let Some(so) = &sd.stats_offsets {
            let array_fields = [
                so.sd_lb_count,
                so.sd_lb_failed,
                so.sd_lb_balanced,
                so.sd_lb_imbalance_load,
                so.sd_lb_imbalance_util,
                so.sd_lb_imbalance_task,
                so.sd_lb_imbalance_misfit,
                so.sd_lb_gained,
                so.sd_lb_hot_gained,
                so.sd_lb_nobusyg,
                so.sd_lb_nobusyq,
            ];
            for i in 0..array_fields.len() {
                for j in (i + 1)..array_fields.len() {
                    assert_ne!(
                        array_fields[i], array_fields[j],
                        "sched_domain array field offsets must be distinct"
                    );
                }
            }
            let scalar_fields = [
                so.sd_alb_count,
                so.sd_alb_failed,
                so.sd_alb_pushed,
                so.sd_ttwu_wake_remote,
                so.sd_ttwu_move_affine,
                so.sd_ttwu_move_balance,
            ];
            for &off in &scalar_fields {
                assert!(off > 0, "sched_domain scalar field offset must be nonzero");
            }
            for i in 0..scalar_fields.len() {
                for j in (i + 1)..scalar_fields.len() {
                    assert_ne!(
                        scalar_fields[i], scalar_fields[j],
                        "sched_domain scalar field offsets must be distinct"
                    );
                }
            }
        }
    }
}
/// bpf_map offsets: key metadata fields must be nonzero and the btf data
/// pointer must precede its size field.
#[test]
fn parse_bpf_map_offsets_from_vmlinux() {
    let path = match crate::monitor::find_test_vmlinux() {
        Some(p) => p,
        None => return, // environment has no usable vmlinux; skip
    };
    let offsets = crate::test_support::require_bpf_map_offsets(&path);
    assert!(offsets.map_name > 0);
    assert!(offsets.map_type > 0);
    assert!(offsets.value_size > 0);
    assert!(offsets.array_value > 0);
    assert!(offsets.map_btf > 0);
    assert!(offsets.map_btf_value_type_id > 0);
    assert!(offsets.btf_data_size > offsets.btf_data);
}
/// bpf_prog offsets: aux pointer and aux fields must be nonzero.
#[test]
fn parse_bpf_prog_offsets_from_vmlinux() {
    let path = match crate::monitor::find_test_vmlinux() {
        Some(p) => p,
        None => return, // environment has no usable vmlinux; skip
    };
    let offsets = crate::test_support::require_bpf_prog_offsets(&path);
    assert!(offsets.prog_aux > 0);
    assert!(offsets.aux_verified_insns > 0);
    assert!(offsets.aux_name > 0);
}
/// Consistency across optional groups: watchdog offsets should never
/// resolve without event offsets (both live in struct scx_sched).
#[test]
fn btf_optional_offsets_consistent() {
    let path = match crate::monitor::find_test_vmlinux() {
        Some(p) => p,
        None => return, // environment has no usable vmlinux; skip
    };
    let offsets = match KernelOffsets::from_vmlinux(&path) {
        Ok(o) => o,
        Err(e) => skip!("vmlinux BTF resolution failed: {e}"),
    };
    assert_ne!(
        offsets.rq_nr_running, offsets.rq_scx,
        "rq_nr_running and rq_scx offsets must be distinct"
    );
    if let Some(ref ev) = offsets.event_offsets {
        assert!(ev.percpu_ptr_off > 0);
    }
    if let Some(ref wd) = offsets.watchdog_offsets {
        assert!(
            wd.scx_sched_watchdog_timeout_off > 0,
            "watchdog_timeout offset must be nonzero within scx_sched"
        );
        assert!(
            offsets.event_offsets.is_some(),
            "watchdog_offsets present implies event_offsets must also resolve"
        );
    }
}
/// A missing file must surface as an error, not a panic.
#[test]
fn from_vmlinux_nonexistent() {
    let path = std::path::Path::new("/nonexistent/vmlinux");
    assert!(KernelOffsets::from_vmlinux(path).is_err());
}
/// An empty file is neither raw BTF nor ELF and must error cleanly.
#[test]
fn from_vmlinux_empty_file() {
    let dir = std::env::temp_dir().join(format!("ktstr-btf-empty-{}", std::process::id()));
    std::fs::create_dir_all(&dir).unwrap();
    let f = dir.join("vmlinux");
    std::fs::write(&f, b"").unwrap();
    assert!(KernelOffsets::from_vmlinux(&f).is_err());
    let _ = std::fs::remove_dir_all(&dir);
}
/// Sidecar path is the vmlinux path plus a literal ".btf" suffix.
#[test]
fn btf_sidecar_path_appends_dot_btf() {
    let p = std::path::Path::new("/cache/vmlinux");
    assert_eq!(
        btf_sidecar_path(p),
        std::path::PathBuf::from("/cache/vmlinux.btf"),
    );
}
/// Appending, not replacing: an existing extension is kept.
#[test]
fn btf_sidecar_path_preserves_existing_extension() {
    let p = std::path::Path::new("/cache/vmlinux.elf");
    assert_eq!(
        btf_sidecar_path(p),
        std::path::PathBuf::from("/cache/vmlinux.elf.btf"),
    );
}
/// 0x9F 0xEB is the little-endian encoding of the BTF magic.
#[test]
fn is_raw_btf_accepts_little_endian_magic() {
    assert!(is_raw_btf(&[0x9F, 0xEB, 0x01, 0x00]));
}
/// Wrong magic, byte-swapped magic, and too-short inputs all reject.
#[test]
fn is_raw_btf_rejects_wrong_magic_and_short_input() {
    assert!(!is_raw_btf(&[0x7F, b'E', b'L', b'F']));
    assert!(!is_raw_btf(&[0xEB, 0x9F, 0x01, 0x00]));
    assert!(!is_raw_btf(&[0x9F]));
    assert!(!is_raw_btf(&[]));
}
/// Freshness requires both files to exist; a sidecar written after the
/// vmlinux counts as fresh, and losing either file makes it stale.
#[test]
fn sidecar_fresh_false_when_either_file_missing() {
    let dir =
        std::env::temp_dir().join(format!("ktstr-btf-sidecar-missing-{}", std::process::id()));
    std::fs::create_dir_all(&dir).unwrap();
    let vmlinux = dir.join("vmlinux");
    let sidecar = dir.join("vmlinux.btf");
    std::fs::write(&vmlinux, b"vmlinux-bytes").unwrap();
    assert!(!sidecar_fresh(&sidecar, &vmlinux));
    std::fs::write(&sidecar, b"cached-btf").unwrap();
    assert!(sidecar_fresh(&sidecar, &vmlinux));
    std::fs::remove_file(&vmlinux).unwrap();
    assert!(!sidecar_fresh(&sidecar, &vmlinux));
    let _ = std::fs::remove_dir_all(&dir);
}
/// Test fixture: a vmlinux copied into a temp cache root, with
/// KTSTR_CACHE_DIR pointed at that root so sidecar writes are permitted.
/// Held fields keep the env override and tempdir alive for the test's
/// duration.
struct CacheStagedVmlinux {
    _cache_env: crate::test_support::test_helpers::EnvVarGuard,
    entry_dir: std::path::PathBuf,
    vmlinux: std::path::PathBuf,
    _root: tempfile::TempDir,
}
/// Copy `src` into a fresh cache-root layout (`<root>/kentry/vmlinux`) and
/// redirect the cache env var there.
fn stage_in_cache(src: &std::path::Path) -> CacheStagedVmlinux {
    let root = tempfile::TempDir::new().expect("cache-root tempdir");
    let entry_dir = root.path().join("kentry");
    std::fs::create_dir_all(&entry_dir).expect("create cache entry dir");
    let vmlinux = entry_dir.join("vmlinux");
    std::fs::copy(src, &vmlinux).expect("copy vmlinux into cache-staged dir");
    let _cache_env =
        crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_CACHE_DIR", root.path());
    CacheStagedVmlinux {
        _cache_env,
        entry_dir,
        vmlinux,
        _root: root,
    }
}
/// First load extracts BTF from ELF and writes the sidecar; second load
/// must serve from the sidecar without rewriting it (proved via unchanged
/// mtime).
#[test]
fn load_btf_writes_sidecar_then_hits_cache_on_second_load() {
    use std::time::Duration;
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return; // environment has no usable vmlinux; skip
    };
    // /sys/kernel/btf/vmlinux is raw BTF, which bypasses the sidecar path.
    if path.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    let staged = stage_in_cache(&path);
    let vmlinux = staged.vmlinux.as_path();
    let sidecar = btf_sidecar_path(vmlinux);
    std::thread::sleep(Duration::from_millis(10));
    assert!(
        !sidecar.exists(),
        "precondition: sidecar should not exist before first load",
    );
    let btf1 = load_btf_from_path(vmlinux).expect("first load must succeed");
    let _ = format!("{:?}", btf1.resolve_types_by_name("task_struct").is_ok());
    assert!(
        sidecar.exists(),
        "first load must write sidecar at {}",
        sidecar.display(),
    );
    let sidecar_bytes = std::fs::read(&sidecar).unwrap();
    assert!(
        is_raw_btf(&sidecar_bytes),
        "sidecar contents must carry the raw BTF 0x9FEB magic",
    );
    assert!(
        sidecar_fresh(&sidecar, vmlinux),
        "sidecar mtime must be ≥ vmlinux mtime after first load",
    );
    let sidecar_mtime_before = std::fs::metadata(&sidecar).unwrap().modified().unwrap();
    std::thread::sleep(Duration::from_millis(50));
    let btf2 = load_btf_from_path(vmlinux).expect("second load must succeed");
    let _ = format!("{:?}", btf2.resolve_types_by_name("task_struct").is_ok());
    let sidecar_mtime_after = std::fs::metadata(&sidecar).unwrap().modified().unwrap();
    assert_eq!(
        sidecar_mtime_before, sidecar_mtime_after,
        "second load must hit sidecar cache — mtime bump proves a \
         redundant rewrite",
    );
}
#[test]
fn load_btf_rejects_stale_sidecar() {
    use std::time::{Duration, SystemTime};
    // Requires a real vmlinux fixture outside sysfs.
    let Some(fixture) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if fixture.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    let staged = stage_in_cache(&fixture);
    let vmlinux_path = staged.vmlinux.as_path();
    let sidecar_path = btf_sidecar_path(vmlinux_path);

    // Plant a sidecar, then push its mtime one hour into the past so it is
    // stale relative to the freshly copied vmlinux.
    std::fs::write(&sidecar_path, b"stale-sidecar-bytes").unwrap();
    let one_hour_ago = SystemTime::now() - Duration::from_secs(3600);
    {
        let handle = std::fs::File::options()
            .write(true)
            .open(&sidecar_path)
            .unwrap();
        handle.set_modified(one_hour_ago).unwrap();
        // handle closed at end of scope, flushing the timestamp change
    }
    assert!(
        !sidecar_fresh(&sidecar_path, vmlinux_path),
        "precondition: planted sidecar must be stale",
    );

    let btf = load_btf_from_path(vmlinux_path)
        .expect("load must succeed via ELF fallback despite stale sidecar");
    let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());

    // The stale sidecar must have been replaced with real BTF and be fresh again.
    let rewritten = std::fs::read(&sidecar_path).unwrap();
    assert!(
        is_raw_btf(&rewritten),
        "load must overwrite stale sidecar with fresh BTF bytes",
    );
    assert!(
        sidecar_fresh(&sidecar_path, vmlinux_path),
        "sidecar must be fresh again after re-extraction",
    );
}
#[test]
fn load_btf_recovers_from_corrupt_sidecar() {
    // Requires a real vmlinux fixture outside sysfs.
    let Some(fixture) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if fixture.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    let staged = stage_in_cache(&fixture);
    let vmlinux_path = staged.vmlinux.as_path();
    let sidecar_path = btf_sidecar_path(vmlinux_path);

    // Plant garbage at the sidecar path. Written after the vmlinux copy, it is
    // mtime-fresh yet unparseable — the corrupt-but-fresh case.
    std::fs::write(&sidecar_path, b"not-btf-bytes").unwrap();
    assert!(
        sidecar_fresh(&sidecar_path, vmlinux_path),
        "precondition: planted sidecar must be mtime-fresh",
    );

    let btf = load_btf_from_path(vmlinux_path)
        .expect("load must recover when sidecar is fresh-but-corrupt");
    let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());

    let rewritten = std::fs::read(&sidecar_path).unwrap();
    assert!(
        is_raw_btf(&rewritten),
        "corrupt sidecar must be overwritten on next load",
    );
}
#[test]
#[cfg(unix)]
fn load_btf_survives_readonly_sidecar_dir() {
    use std::os::unix::fs::PermissionsExt;
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if path.starts_with("/sys/") {
        return;
    }
    // root ignores directory permission bits, so the read-only setup below
    // would not actually block the sidecar write; skip in that environment.
    if unsafe { libc::geteuid() } == 0 {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    let staged = stage_in_cache(&path);
    let vmlinux = staged.vmlinux.as_path();
    let entry_dir = staged.entry_dir.as_path();

    // Restores directory permissions on drop so the tempdir can be removed
    // even if an assertion below panics while the dir is still read-only.
    // (The original trailing set_permissions never ran on panic, leaving
    // undeletable temp residue.)
    struct RestorePerms<'a>(&'a std::path::Path);
    impl Drop for RestorePerms<'_> {
        fn drop(&mut self) {
            let _ = std::fs::set_permissions(self.0, std::fs::Permissions::from_mode(0o755));
        }
    }

    std::fs::set_permissions(entry_dir, std::fs::Permissions::from_mode(0o555)).unwrap();
    let _restore = RestorePerms(entry_dir);

    // The load must degrade gracefully: extract from ELF, fail the sidecar
    // write silently, and still return usable BTF.
    let btf = load_btf_from_path(vmlinux)
        .expect("load must succeed even when sidecar dir is read-only");
    let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
    let sidecar = btf_sidecar_path(vmlinux);
    assert!(
        !sidecar.exists(),
        "sidecar must not exist after write to read-only dir",
    );
}
#[test]
fn load_btf_skips_sidecar_for_raw_btf_input() {
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    // Guard clause instead of wrapping the whole body in `if !…` — same skip
    // behavior for sysfs-provided fixtures.
    if path.starts_with("/sys/") {
        return;
    }
    let dir = std::env::temp_dir().join(format!("ktstr-btf-sidecar-raw-{}", std::process::id()));
    std::fs::create_dir_all(&dir).unwrap();
    let src_data = std::fs::read(&path).unwrap();
    let elf = match goblin::elf::Elf::parse(&src_data) {
        Ok(e) => e,
        Err(_) => {
            // Fixture is already raw BTF (not ELF): load it directly and
            // verify no sidecar appears next to it.
            let raw = dir.join("vmlinux.btf-raw");
            std::fs::copy(&path, &raw).unwrap();
            let _ = load_btf_from_path(&raw).expect("raw-BTF load must succeed");
            let sidecar = btf_sidecar_path(&raw);
            assert!(
                !sidecar.exists(),
                "raw-BTF input must not produce a sidecar",
            );
            let _ = std::fs::remove_dir_all(&dir);
            return;
        }
    };
    let btf_shdr = elf
        .section_headers
        .iter()
        .find(|sh| elf.shdr_strtab.get_at(sh.sh_name) == Some(".BTF"));
    let shdr = match btf_shdr {
        Some(s) => s,
        None => {
            // No .BTF section to extract; nothing to test on this fixture.
            let _ = std::fs::remove_dir_all(&dir);
            return;
        }
    };
    let offset = shdr.sh_offset as usize;
    let size = shdr.sh_size as usize;
    // Bounds-checked slice: a malformed section header must skip the test,
    // not panic on an out-of-bounds index or an offset+size overflow. This
    // mirrors the checked extraction in load_btf_from_path itself.
    let Some(raw_bytes) = offset
        .checked_add(size)
        .and_then(|end| src_data.get(offset..end))
    else {
        let _ = std::fs::remove_dir_all(&dir);
        return;
    };
    // Stand-alone raw BTF input must short-circuit before any sidecar logic.
    let raw = dir.join("vmlinux.btf-raw");
    std::fs::write(&raw, raw_bytes).unwrap();
    let _ = load_btf_from_path(&raw).expect("raw-BTF load must succeed");
    let sidecar = btf_sidecar_path(&raw);
    assert!(
        !sidecar.exists(),
        "raw-BTF input must not produce a sidecar at {}",
        sidecar.display(),
    );
    let _ = std::fs::remove_dir_all(&dir);
}
#[test]
fn sidecar_skipped_when_path_outside_cache_root() {
    let Some(fixture) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if fixture.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    // Point the cache at one tempdir but place the vmlinux in another, so the
    // canonical vmlinux path falls outside the active cache root.
    let cache_root = tempfile::TempDir::new().expect("cache root tempdir");
    let source_tree = tempfile::TempDir::new().expect("source-tree tempdir");
    let _cache_env = crate::test_support::test_helpers::EnvVarGuard::set(
        "KTSTR_CACHE_DIR",
        cache_root.path(),
    );
    let vmlinux = source_tree.path().join("vmlinux");
    std::fs::copy(&fixture, &vmlinux).expect("copy vmlinux into source-tree dir");

    let btf = load_btf_from_path(&vmlinux)
        .expect("load must succeed even when sidecar is suppressed");
    let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());

    let sidecar = btf_sidecar_path(&vmlinux);
    assert!(
        !sidecar.exists(),
        "sidecar must not be written when vmlinux path is outside cache root, got {}",
        sidecar.display(),
    );
}
#[test]
fn sidecar_written_when_path_inside_cache_root() {
    let Some(fixture) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if fixture.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    // Stage the vmlinux inside the active cache root — the one configuration
    // in which a sidecar write is permitted.
    let staged = stage_in_cache(&fixture);
    let staged_vmlinux = staged.vmlinux.as_path();
    let sidecar = btf_sidecar_path(staged_vmlinux);
    assert!(
        !sidecar.exists(),
        "precondition: sidecar must not exist before the load — \
        a leftover from a prior test would falsely pass the post-load \
        existence check",
    );

    let btf = load_btf_from_path(staged_vmlinux).expect("load must succeed inside cache root");
    let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());

    assert!(
        sidecar.exists(),
        "sidecar must be written when vmlinux path is inside cache root, expected at {}",
        sidecar.display(),
    );
    let written = std::fs::read(&sidecar).unwrap();
    assert!(
        is_raw_btf(&written),
        "sidecar must contain raw BTF (0x9FEB magic) when written inside cache root",
    );
}
#[test]
fn sidecar_skipped_when_cache_root_unresolvable() {
    let Some(fixture) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if fixture.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    let source_tree = tempfile::TempDir::new().expect("source-tree tempdir");
    // Strip every env var the cache-root resolution can fall back to, so no
    // cache root can be resolved at all for this load.
    let _no_ktstr = crate::test_support::test_helpers::EnvVarGuard::remove("KTSTR_CACHE_DIR");
    let _no_xdg = crate::test_support::test_helpers::EnvVarGuard::remove("XDG_CACHE_HOME");
    let _no_home = crate::test_support::test_helpers::EnvVarGuard::remove("HOME");

    let vmlinux = source_tree.path().join("vmlinux");
    std::fs::copy(&fixture, &vmlinux).expect("copy vmlinux");

    let btf = load_btf_from_path(&vmlinux)
        .expect("load must succeed when cache root is unresolvable");
    let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());

    let sidecar = btf_sidecar_path(&vmlinux);
    assert!(
        !sidecar.exists(),
        "sidecar must not be written when cache root is unresolvable, got {}",
        sidecar.display(),
    );
}
#[test]
#[cfg(unix)]
fn load_btf_symlink_into_cache_writes_sidecar_in_cache_only() {
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if path.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    // Real vmlinux lives inside the cache root; a symlink to it lives in an
    // unrelated source tree.
    let staged = stage_in_cache(&path);
    let real_vmlinux = staged.vmlinux.as_path();
    let real_sidecar = btf_sidecar_path(real_vmlinux);
    assert!(
        !real_sidecar.exists(),
        "precondition: real sidecar must not exist before the load",
    );
    let source_tree = tempfile::TempDir::new().expect("source-tree tempdir");
    let symlink_path = source_tree.path().join("vmlinux");
    std::os::unix::fs::symlink(real_vmlinux, &symlink_path)
        .expect("create symlink to real vmlinux");
    // Sidecar path computed lexically from the symlink itself (no
    // canonicalization) — this one must never be created.
    let lexical_sidecar = btf_sidecar_path(&symlink_path);
    let btf = load_btf_from_path(&symlink_path)
        .expect("load via symlink must succeed and resolve the target");
    let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
    // load_btf_from_path canonicalizes first, so the sidecar lands next to
    // the real file inside the cache, not beside the symlink.
    assert!(
        real_sidecar.exists(),
        "sidecar must land at the canonical path inside cache, expected {}",
        real_sidecar.display(),
    );
    assert!(
        !lexical_sidecar.exists(),
        "sidecar must NOT land next to the symlink in the source tree, \
        got pollution at {}",
        lexical_sidecar.display(),
    );
}
#[test]
#[cfg(unix)]
fn load_btf_symlink_out_of_cache_writes_no_sidecar() {
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if path.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    // Inverse of the sibling symlink test: the symlink sits INSIDE the cache
    // root, but its target (the real vmlinux) sits OUTSIDE in a source tree.
    let cache_root = tempfile::TempDir::new().expect("cache-root tempdir");
    let _cache_env = crate::test_support::test_helpers::EnvVarGuard::set(
        "KTSTR_CACHE_DIR",
        cache_root.path(),
    );
    let source_tree = tempfile::TempDir::new().expect("source-tree tempdir");
    let real_vmlinux = source_tree.path().join("vmlinux");
    std::fs::copy(&path, &real_vmlinux).expect("copy vmlinux into source tree");
    let symlink_in_cache = cache_root.path().join("vmlinux");
    std::os::unix::fs::symlink(&real_vmlinux, &symlink_in_cache)
        .expect("create symlink to source-tree vmlinux");
    let btf = load_btf_from_path(&symlink_in_cache).expect("load via symlink must succeed");
    let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
    // Neither the canonical location (source tree, outside cache) nor the
    // lexical symlink location (inside cache) may receive a sidecar:
    // canonicalization resolves to the out-of-cache real file, which
    // suppresses the write entirely.
    let real_sidecar = btf_sidecar_path(&real_vmlinux);
    let lexical_sidecar = btf_sidecar_path(&symlink_in_cache);
    assert!(
        !real_sidecar.exists(),
        "sidecar must not land in source tree (outside cache), got {}",
        real_sidecar.display(),
    );
    assert!(
        !lexical_sidecar.exists(),
        "sidecar must not land at the symlink path in cache either — \
        canonicalize-at-top resolves to the source-tree real file, \
        which is outside the cache",
    );
}
#[test]
fn load_btf_relative_path_suppresses_sidecar() {
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if path.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    let cache_root = tempfile::TempDir::new().expect("cache-root tempdir");
    let _cache_env = crate::test_support::test_helpers::EnvVarGuard::set(
        "KTSTR_CACHE_DIR",
        cache_root.path(),
    );
    let outside = tempfile::TempDir::new().expect("outside tempdir");
    let abs_vmlinux = outside.path().join("vmlinux");
    std::fs::copy(&path, &abs_vmlinux).expect("copy vmlinux into outside dir");
    // Build a relative path by stripping the leading "/" from the absolute
    // one. Such a path only resolves from the process CWD — typically it
    // does not resolve at all, exercising the canonicalize-failure branch.
    let rel_str = abs_vmlinux
        .to_str()
        .expect("test vmlinux path must be UTF-8")
        .strip_prefix('/')
        .expect("absolute path expected to start with /");
    let rel = std::path::Path::new(rel_str);
    assert!(
        !rel.is_absolute(),
        "precondition: constructed path must be relative, got {}",
        rel.display(),
    );
    // The load result is deliberately ignored: it may fail (path does not
    // resolve from CWD) or succeed (CWD happens to be "/", but the target is
    // still outside the cache root). Either way no sidecar may appear.
    let _ = load_btf_from_path(rel);
    let abs_sidecar = btf_sidecar_path(&abs_vmlinux);
    let rel_sidecar = btf_sidecar_path(rel);
    assert!(
        !abs_sidecar.exists(),
        "sidecar must not appear at the absolute target, got {}",
        abs_sidecar.display(),
    );
    assert!(
        !rel_sidecar.exists(),
        "sidecar must not appear at the relative path's lexical \
        location, got {}",
        rel_sidecar.display(),
    );
}
#[test]
fn load_btf_empty_ktstr_cache_dir_falls_through() {
    let Some(fixture) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if fixture.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    // An empty KTSTR_CACHE_DIR must behave as unset: the cascade falls
    // through to XDG_CACHE_HOME, yielding <xdg>/ktstr/kernels as cache root.
    let xdg = tempfile::TempDir::new().expect("xdg tempdir");
    let _g_ktstr = crate::test_support::test_helpers::EnvVarGuard::set("KTSTR_CACHE_DIR", "");
    let _g_xdg =
        crate::test_support::test_helpers::EnvVarGuard::set("XDG_CACHE_HOME", xdg.path());

    let entry = xdg.path().join("ktstr").join("kernels").join("kentry");
    std::fs::create_dir_all(&entry).expect("create cache entry under XDG fallback");
    let vmlinux = entry.join("vmlinux");
    std::fs::copy(&fixture, &vmlinux).expect("copy vmlinux into XDG-derived cache");

    let sidecar = btf_sidecar_path(&vmlinux);
    assert!(
        !sidecar.exists(),
        "precondition: sidecar must not pre-exist",
    );
    let btf =
        load_btf_from_path(&vmlinux).expect("load must succeed inside XDG-derived cache root");
    let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
    assert!(
        sidecar.exists(),
        "sidecar must be written even when cascade resolves via XDG_CACHE_HOME \
        (KTSTR_CACHE_DIR=\"\")",
    );
}
#[test]
fn load_btf_fresh_resolution_per_call() {
    let Some(path) = crate::monitor::find_test_vmlinux() else {
        return;
    };
    if path.starts_with("/sys/") {
        return;
    }
    let _env = crate::test_support::test_helpers::lock_env();
    // Two cache roots; the vmlinux is staged in cache_a only. Switching
    // KTSTR_CACHE_DIR between calls must change the sidecar decision, proving
    // the cache root is resolved per call rather than cached process-wide.
    let cache_a = tempfile::TempDir::new().expect("cache_a tempdir");
    let cache_b = tempfile::TempDir::new().expect("cache_b tempdir");
    let entry_a = cache_a.path().join("kentry");
    std::fs::create_dir_all(&entry_a).expect("create cache_a entry");
    let vmlinux = entry_a.join("vmlinux");
    std::fs::copy(&path, &vmlinux).expect("copy vmlinux into cache_a");
    let sidecar = btf_sidecar_path(&vmlinux);
    // First call: vmlinux is inside the active cache root (cache_a), so the
    // sidecar must be written. The env guard is scoped to this block.
    {
        let _g = crate::test_support::test_helpers::EnvVarGuard::set(
            "KTSTR_CACHE_DIR",
            cache_a.path(),
        );
        assert!(
            !sidecar.exists(),
            "precondition: sidecar must not pre-exist"
        );
        let btf = load_btf_from_path(&vmlinux).expect("first load must succeed");
        let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
        assert!(
            sidecar.exists(),
            "first load (KTSTR_CACHE_DIR=cache_a) must write sidecar",
        );
        // Remove it so the second call starts from a clean slate.
        std::fs::remove_file(&sidecar).expect("remove sidecar between calls");
    }
    // Second call: same vmlinux, but the active cache root is now cache_b, so
    // the path is outside the cache and the sidecar must be suppressed.
    {
        let _g = crate::test_support::test_helpers::EnvVarGuard::set(
            "KTSTR_CACHE_DIR",
            cache_b.path(),
        );
        let btf = load_btf_from_path(&vmlinux).expect("second load must succeed");
        let _ = format!("{:?}", btf.resolve_types_by_name("task_struct").is_ok());
        assert!(
            !sidecar.exists(),
            "second load (KTSTR_CACHE_DIR=cache_b) must NOT write sidecar — \
            the vmlinux is now outside the active cache root",
        );
    }
}
}