use std::path::Path;
use anyhow::{Context, Result, bail};
use btf_rs::{Btf, Type};
/// Loads BTF from `path`, accepting either a raw `.BTF` blob, a vmlinux ELF
/// image (the `.BTF` section is extracted), or anything `Btf::from_file`
/// itself understands as a last resort.
///
/// # Errors
/// Fails if the file cannot be read, the ELF has no `.BTF` section, the
/// section bounds are invalid, or the BTF data cannot be parsed.
pub(crate) fn load_btf_from_path(path: &Path) -> Result<Btf> {
    let data = std::fs::read(path).context("read file")?;
    // Raw BTF blob: the little-endian BTF magic 0xEB9F appears as the bytes
    // 0x9F, 0xEB at the start of the header.
    if data.len() >= 4 && data[0] == 0x9F && data[1] == 0xEB {
        return Btf::from_bytes(&data).map_err(|e| anyhow::anyhow!("{e}"));
    }
    // vmlinux ELF image: locate and parse the `.BTF` section.
    if let Ok(elf) = goblin::elf::Elf::parse(&data) {
        let btf_shdr = elf
            .section_headers
            .iter()
            .find(|shdr| elf.shdr_strtab.get_at(shdr.sh_name) == Some(".BTF"));
        if let Some(shdr) = btf_shdr {
            let offset = shdr.sh_offset as usize;
            let size = shdr.sh_size as usize;
            // checked_add guards against usize overflow on malformed section
            // headers; a wrapping add could otherwise produce a small,
            // in-bounds range and silently slice the wrong bytes.
            let end = offset
                .checked_add(size)
                .context(".BTF section bounds overflow")?;
            let btf_data = data
                .get(offset..end)
                .context(".BTF section data out of bounds")?;
            return Btf::from_bytes(btf_data).map_err(|e| anyhow::anyhow!("{e}"));
        }
        bail!("vmlinux ELF has no .BTF section");
    }
    // Not a recognizable blob or ELF: let btf-rs try the file directly.
    Btf::from_file(path).map_err(|e| anyhow::anyhow!("{e}"))
}
/// Byte offsets into kernel scheduler structures, resolved from vmlinux BTF.
///
/// The mandatory fields are offsets within `struct rq`, its embedded sched_ext
/// run queue (`rq.scx`), and that run queue's local dispatch queue. The
/// `Option` groups are resolved best-effort because the corresponding structs
/// and fields are not present on every kernel version/config.
#[derive(Debug, Clone)]
pub struct KernelOffsets {
    // Offset of `rq.nr_running`.
    pub rq_nr_running: usize,
    // Offset of `rq.clock`.
    pub rq_clock: usize,
    // Offset of the embedded `rq.scx` struct.
    pub rq_scx: usize,
    // Offset of `nr_running` within the `rq.scx` struct.
    pub scx_rq_nr_running: usize,
    // Offset of `local_dsq` within the `rq.scx` struct.
    pub scx_rq_local_dsq: usize,
    // Offset of `flags` within the `rq.scx` struct.
    pub scx_rq_flags: usize,
    // Offset of `nr` within the local dispatch queue struct.
    pub dsq_nr: usize,
    // sched_ext event counter offsets; `None` when `scx_sched`/`scx_sched_pcpu`
    // could not be resolved.
    pub event_offsets: Option<ScxEventOffsets>,
    // Schedstat counter offsets; `None` when the fields are absent.
    pub schedstat_offsets: Option<SchedstatOffsets>,
    // `struct sched_domain` offsets; `None` when unresolvable.
    pub sched_domain_offsets: Option<SchedDomainOffsets>,
}
/// Byte offsets for reading per-CPU sched_ext event counters:
/// `scx_sched.pcpu` -> `scx_sched_pcpu.event_stats` -> individual counters.
#[derive(Debug, Clone)]
pub struct ScxEventOffsets {
    // Offset of `pcpu` within `struct scx_sched`.
    pub scx_sched_pcpu_off: usize,
    // Offset of `event_stats` within `struct scx_sched_pcpu`.
    pub event_stats_off: usize,
    // Offsets of the individual counters within the event-stats struct.
    pub ev_select_cpu_fallback: usize,
    pub ev_dispatch_local_dsq_offline: usize,
    pub ev_dispatch_keep_last: usize,
    pub ev_enq_skip_exiting: usize,
    pub ev_enq_skip_migration_disabled: usize,
}
impl KernelOffsets {
    /// Resolves all scheduler offsets from the BTF in the file at `path`
    /// (raw `.BTF` blob or vmlinux ELF; see `load_btf_from_path`).
    ///
    /// # Errors
    /// Fails when the BTF cannot be loaded or any mandatory `rq`/`scx_rq`/
    /// local-DSQ lookup fails. The event, schedstat, and sched-domain groups
    /// are `.ok()`-ed into `None` instead of failing, since they depend on
    /// kernel version/config.
    pub fn from_vmlinux(path: &Path) -> Result<Self> {
        let btf =
            load_btf_from_path(path).with_context(|| format!("btf: open {}", path.display()))?;
        // `rq` is the kernel's per-CPU run queue struct.
        let (rq_struct, _) = find_struct(&btf, "rq")?;
        let rq_nr_running = member_byte_offset(&btf, &rq_struct, "nr_running")?;
        let rq_clock = member_byte_offset(&btf, &rq_struct, "clock")?;
        // Keep the member handle so we can resolve the concrete type of `rq.scx`.
        let (rq_scx, scx_member) = member_byte_offset_with_member(&btf, &rq_struct, "scx")?;
        let scx_rq_struct =
            resolve_member_struct(&btf, &scx_member).context("btf: resolve type of rq.scx")?;
        let scx_rq_nr_running = member_byte_offset(&btf, &scx_rq_struct, "nr_running")?;
        let (scx_rq_local_dsq, local_dsq_member) =
            member_byte_offset_with_member(&btf, &scx_rq_struct, "local_dsq")?;
        let scx_rq_flags = member_byte_offset(&btf, &scx_rq_struct, "flags")?;
        // The local dispatch queue struct is the type of `scx_rq.local_dsq`.
        let dsq_struct = resolve_member_struct(&btf, &local_dsq_member)
            .context("btf: resolve type of scx_rq.local_dsq")?;
        let dsq_nr = member_byte_offset(&btf, &dsq_struct, "nr")?;
        // Optional groups: absence is expected on some kernels, so failures
        // are swallowed into `None` rather than failing the whole call.
        let event_offsets = resolve_event_offsets(&btf).ok();
        let schedstat_offsets = resolve_schedstat_offsets(&btf).ok();
        let sched_domain_offsets = resolve_sched_domain_offsets(&btf, &rq_struct).ok();
        Ok(Self {
            rq_nr_running,
            rq_clock,
            rq_scx,
            scx_rq_nr_running,
            scx_rq_local_dsq,
            scx_rq_flags,
            dsq_nr,
            event_offsets,
            schedstat_offsets,
            sched_domain_offsets,
        })
    }
}
/// Resolves the offsets needed to read per-CPU sched_ext event counters:
/// `scx_sched.pcpu` -> `scx_sched_pcpu.event_stats` -> individual counters.
fn resolve_event_offsets(btf: &Btf) -> Result<ScxEventOffsets> {
    let (scx_sched, _) = find_struct(btf, "scx_sched")?;
    let scx_sched_pcpu_off = member_byte_offset(btf, &scx_sched, "pcpu")?;
    let (pcpu, _) = find_struct(btf, "scx_sched_pcpu")?;
    let (event_stats_off, ev_member) =
        member_byte_offset_with_member(btf, &pcpu, "event_stats")?;
    let ev_struct = resolve_member_struct(btf, &ev_member)
        .context("btf: resolve type of scx_sched_pcpu.event_stats")?;
    // Small helper to keep the per-counter lookups compact.
    let counter = |name: &str| member_byte_offset(btf, &ev_struct, name);
    Ok(ScxEventOffsets {
        scx_sched_pcpu_off,
        event_stats_off,
        ev_select_cpu_fallback: counter("SCX_EV_SELECT_CPU_FALLBACK")?,
        ev_dispatch_local_dsq_offline: counter("SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE")?,
        ev_dispatch_keep_last: counter("SCX_EV_DISPATCH_KEEP_LAST")?,
        ev_enq_skip_exiting: counter("SCX_EV_ENQ_SKIP_EXITING")?,
        ev_enq_skip_migration_disabled: counter("SCX_EV_ENQ_SKIP_MIGRATION_DISABLED")?,
    })
}
pub(crate) fn find_struct(btf: &Btf, name: &str) -> Result<(btf_rs::Struct, String)> {
let types = btf
.resolve_types_by_name(name)
.with_context(|| format!("btf: type '{name}' not found"))?;
for t in &types {
if let Type::Struct(s) = t {
let resolved_name = btf.resolve_name(s).unwrap_or_default();
return Ok((s.clone(), resolved_name));
}
}
bail!("btf: '{name}' exists but is not a struct");
}
/// Byte offset of `field` within struct `s`, descending into anonymous
/// nested structs/unions. Fails if the field is absent or not byte-aligned.
pub(crate) fn member_byte_offset(btf: &Btf, s: &btf_rs::Struct, field: &str) -> Result<usize> {
    member_byte_offset_recursive(btf, s, field, 0)
}
/// Recursive worker for [`member_byte_offset`]: searches `s` for `field`,
/// accumulating `base_offset` as it descends into anonymous members.
fn member_byte_offset_recursive(
    btf: &Btf,
    s: &btf_rs::Struct,
    field: &str,
    base_offset: usize,
) -> Result<usize> {
    for member in &s.members {
        let name = btf.resolve_name(member).unwrap_or_default();
        let bits = member.bit_offset();
        if bits % 8 != 0 {
            // Bitfield members cannot be addressed byte-wise; this is only an
            // error if it is the field the caller actually asked for.
            if name == field {
                bail!("btf: field '{field}' has non-byte-aligned offset ({bits} bits)");
            }
            continue;
        }
        let member_offset = base_offset + (bits / 8) as usize;
        if name == field {
            return Ok(member_offset);
        }
        // Anonymous members (empty name) may be embedded structs/unions whose
        // fields behave as if flattened into the parent; recurse with this
        // member's offset as the new base. Lookup failures in the nested type
        // are ignored so the scan of the remaining members continues.
        if name.is_empty()
            && let Ok(inner) = resolve_member_composite(btf, member)
            && let Ok(offset) = member_byte_offset_recursive(btf, &inner, field, member_offset)
        {
            return Ok(offset);
        }
    }
    bail!("btf: field '{field}' not found in struct");
}
/// Resolves a struct member's type to the underlying struct or union,
/// peeling `const`/`volatile`/`restrict` qualifiers, typedefs, and type
/// tags along the way.
///
/// # Errors
/// Fails when the chain ends in a non-composite type, the chain is deeper
/// than 20 links (guards against malformed/cyclic BTF), or a modifier type
/// unexpectedly has no underlying type.
fn resolve_member_composite(btf: &Btf, member: &btf_rs::Member) -> Result<btf_rs::Struct> {
    let mut t = btf.resolve_chained_type(member)?;
    for _ in 0..20 {
        match t {
            Type::Struct(s) | Type::Union(s) => return Ok(s),
            Type::Const(_)
            | Type::Volatile(_)
            | Type::Typedef(_)
            | Type::Restrict(_)
            | Type::TypeTag(_) => {
                // Return an error instead of panicking (was `.unwrap()`):
                // malformed BTF input should not abort the process.
                let inner = t
                    .as_btf_type()
                    .context("btf: modifier type has no underlying type")?;
                t = btf.resolve_chained_type(inner)?;
            }
            _ => bail!("btf: not a composite type"),
        }
    }
    bail!("btf: type chain too deep")
}
/// Direct (non-recursive) lookup of `field` in `s`, returning both its byte
/// offset and a clone of the member so the caller can resolve its type.
/// Fails if the field is missing or not byte-aligned.
fn member_byte_offset_with_member(
    btf: &Btf,
    s: &btf_rs::Struct,
    field: &str,
) -> Result<(usize, btf_rs::Member)> {
    let member = s
        .members
        .iter()
        .find(|m| btf.resolve_name(*m).unwrap_or_default() == field)
        .with_context(|| format!("btf: field '{field}' not found in struct"))?;
    let bits = member.bit_offset();
    if bits % 8 != 0 {
        // Bitfields cannot be addressed byte-wise.
        bail!("btf: field '{field}' has non-byte-aligned offset ({bits} bits)");
    }
    Ok(((bits / 8) as usize, member.clone()))
}
/// Resolves a member's type id to a concrete struct via the shared
/// `resolve_to_struct` helper in the sibling `bpf_map` module.
fn resolve_member_struct(btf: &Btf, member: &btf_rs::Member) -> Result<btf_rs::Struct> {
    // `get_type_id` comes from the `BtfType` trait, hence the local import.
    use btf_rs::BtfType;
    let tid = member.get_type_id().context("btf: member type_id")?;
    super::bpf_map::resolve_to_struct(btf, tid).context("btf: could not resolve member to struct")
}
/// Byte offsets of schedstat counters in `struct rq` and the embedded
/// `struct sched_info` (`rq.rq_sched_info`).
#[derive(Debug, Clone)]
pub struct SchedstatOffsets {
    // Offset of the embedded `rq.rq_sched_info` struct.
    pub rq_sched_info: usize,
    // Offsets within `struct sched_info`.
    pub sched_info_run_delay: usize,
    pub sched_info_pcount: usize,
    // Offsets of schedstat counters directly within `struct rq`.
    pub rq_yld_count: usize,
    pub rq_sched_count: usize,
    pub rq_sched_goidle: usize,
    pub rq_ttwu_count: usize,
    pub rq_ttwu_local: usize,
}
/// Resolves schedstat counter offsets from `struct rq` and its embedded
/// `struct sched_info` (`rq.rq_sched_info`).
fn resolve_schedstat_offsets(btf: &Btf) -> Result<SchedstatOffsets> {
    let (rq, _) = find_struct(btf, "rq")?;
    let (rq_sched_info, si_member) =
        member_byte_offset_with_member(btf, &rq, "rq_sched_info")?;
    let si = resolve_member_struct(btf, &si_member)
        .context("btf: resolve type of rq.rq_sched_info")?;
    Ok(SchedstatOffsets {
        rq_sched_info,
        sched_info_run_delay: member_byte_offset(btf, &si, "run_delay")?,
        sched_info_pcount: member_byte_offset(btf, &si, "pcount")?,
        rq_yld_count: member_byte_offset(btf, &rq, "yld_count")?,
        rq_sched_count: member_byte_offset(btf, &rq, "sched_count")?,
        rq_sched_goidle: member_byte_offset(btf, &rq, "sched_goidle")?,
        rq_ttwu_count: member_byte_offset(btf, &rq, "ttwu_count")?,
        rq_ttwu_local: member_byte_offset(btf, &rq, "ttwu_local")?,
    })
}
/// Byte offsets for walking the `struct sched_domain` hierarchy starting at
/// `rq.sd`.
#[derive(Debug, Clone)]
pub struct SchedDomainOffsets {
    // Offset of `rq.sd` (pointer to the lowest sched domain).
    pub rq_sd: usize,
    // Offsets within `struct sched_domain`.
    pub sd_parent: usize,
    pub sd_level: usize,
    pub sd_name: usize,
    pub sd_flags: usize,
    pub sd_span_weight: usize,
    pub sd_balance_interval: usize,
    pub sd_nr_balance_failed: usize,
    pub sd_newidle_call: usize,
    pub sd_newidle_success: usize,
    pub sd_newidle_ratio: usize,
    pub sd_max_newidle_lb_cost: usize,
    // Schedstat-dependent counters; `None` when the fields are absent
    // (e.g. kernel built without CONFIG_SCHEDSTATS).
    pub stats_offsets: Option<SchedDomainStatsOffsets>,
}
/// Byte offsets of the schedstat counters within `struct sched_domain`.
///
/// The `lb_*` fields are per-idle-type arrays in the kernel; the offsets here
/// point at the start of each array.
#[derive(Debug, Clone)]
pub struct SchedDomainStatsOffsets {
    // Load-balance counters (arrays indexed by idle type).
    pub sd_lb_count: usize,
    pub sd_lb_failed: usize,
    pub sd_lb_balanced: usize,
    pub sd_lb_imbalance_load: usize,
    pub sd_lb_imbalance_util: usize,
    pub sd_lb_imbalance_task: usize,
    pub sd_lb_imbalance_misfit: usize,
    pub sd_lb_gained: usize,
    pub sd_lb_hot_gained: usize,
    pub sd_lb_nobusyg: usize,
    pub sd_lb_nobusyq: usize,
    // Active load-balance counters.
    pub sd_alb_count: usize,
    pub sd_alb_failed: usize,
    pub sd_alb_pushed: usize,
    // select_task_rq counters (exec / fork balancing).
    pub sd_sbe_count: usize,
    pub sd_sbe_balanced: usize,
    pub sd_sbe_pushed: usize,
    pub sd_sbf_count: usize,
    pub sd_sbf_balanced: usize,
    pub sd_sbf_pushed: usize,
    // try_to_wake_up counters.
    pub sd_ttwu_wake_remote: usize,
    pub sd_ttwu_move_affine: usize,
    pub sd_ttwu_move_balance: usize,
}
// Number of `enum cpu_idle_type` values used to size the per-idle-type
// schedstat arrays in `struct sched_domain` (e.g. `lb_count[]`).
// NOTE(review): assumes a kernel where CPU_MAX_IDLE_TYPES == 3 — confirm
// against the target kernel's <linux/sched/topology.h>.
pub const CPU_MAX_IDLE_TYPES: usize = 3;
/// Resolves `struct sched_domain` offsets plus the `rq.sd` pointer offset.
/// The schedstat-dependent counters are resolved best-effort into `None`.
fn resolve_sched_domain_offsets(
    btf: &Btf,
    rq_struct: &btf_rs::Struct,
) -> Result<SchedDomainOffsets> {
    let rq_sd = member_byte_offset(btf, rq_struct, "sd")?;
    let (sd, _) = find_struct(btf, "sched_domain")?;
    // Small helper to keep the per-field lookups compact.
    let field = |name: &str| member_byte_offset(btf, &sd, name);
    Ok(SchedDomainOffsets {
        rq_sd,
        sd_parent: field("parent")?,
        sd_level: field("level")?,
        sd_name: field("name")?,
        sd_flags: field("flags")?,
        sd_span_weight: field("span_weight")?,
        sd_balance_interval: field("balance_interval")?,
        sd_nr_balance_failed: field("nr_balance_failed")?,
        sd_newidle_call: field("newidle_call")?,
        sd_newidle_success: field("newidle_success")?,
        sd_newidle_ratio: field("newidle_ratio")?,
        sd_max_newidle_lb_cost: field("max_newidle_lb_cost")?,
        stats_offsets: resolve_sched_domain_stats_offsets(btf, &sd).ok(),
    })
}
/// Resolves the schedstat counter offsets within `struct sched_domain`.
/// Any missing field fails the whole lookup (the caller treats that as
/// "schedstats unavailable").
fn resolve_sched_domain_stats_offsets(
    btf: &Btf,
    sd_struct: &btf_rs::Struct,
) -> Result<SchedDomainStatsOffsets> {
    // Small helper to keep the per-field lookups compact.
    let field = |name: &str| member_byte_offset(btf, sd_struct, name);
    Ok(SchedDomainStatsOffsets {
        sd_lb_count: field("lb_count")?,
        sd_lb_failed: field("lb_failed")?,
        sd_lb_balanced: field("lb_balanced")?,
        sd_lb_imbalance_load: field("lb_imbalance_load")?,
        sd_lb_imbalance_util: field("lb_imbalance_util")?,
        sd_lb_imbalance_task: field("lb_imbalance_task")?,
        sd_lb_imbalance_misfit: field("lb_imbalance_misfit")?,
        sd_lb_gained: field("lb_gained")?,
        sd_lb_hot_gained: field("lb_hot_gained")?,
        sd_lb_nobusyg: field("lb_nobusyg")?,
        sd_lb_nobusyq: field("lb_nobusyq")?,
        sd_alb_count: field("alb_count")?,
        sd_alb_failed: field("alb_failed")?,
        sd_alb_pushed: field("alb_pushed")?,
        sd_sbe_count: field("sbe_count")?,
        sd_sbe_balanced: field("sbe_balanced")?,
        sd_sbe_pushed: field("sbe_pushed")?,
        sd_sbf_count: field("sbf_count")?,
        sd_sbf_balanced: field("sbf_balanced")?,
        sd_sbf_pushed: field("sbf_pushed")?,
        sd_ttwu_wake_remote: field("ttwu_wake_remote")?,
        sd_ttwu_move_affine: field("ttwu_move_affine")?,
        sd_ttwu_move_balance: field("ttwu_move_balance")?,
    })
}
/// Byte offsets needed to walk kernel BPF map structures (`struct bpf_map`,
/// `struct bpf_array`, the map idr/xarray, and `struct btf`).
#[derive(Debug, Clone)]
pub struct BpfMapOffsets {
    // Offsets within `struct bpf_map`.
    pub map_name: usize,
    pub map_type: usize,
    pub map_flags: usize,
    pub key_size: usize,
    pub value_size: usize,
    pub max_entries: usize,
    // Offset of `value` within `struct bpf_array`.
    pub array_value: usize,
    // Offsets within `struct xa_node` (xarray internals).
    pub xa_node_slots: usize,
    pub xa_node_shift: usize,
    // Combined offset of `idr.idr_rt.xa_head` and offset of `idr.idr_next`.
    pub idr_xa_head: usize,
    pub idr_next: usize,
    // Offsets of `bpf_map.btf` and `bpf_map.btf_value_type_id`.
    pub map_btf: usize,
    pub map_btf_value_type_id: usize,
    // Offsets of `data` / `data_size` within `struct btf`.
    pub btf_data: usize,
    pub btf_data_size: usize,
    // Hash-table map internals; `None` when `bpf_htab` is unresolvable.
    pub htab_offsets: Option<HtabOffsets>,
}
impl BpfMapOffsets {
    /// Loads BTF from `path` and resolves all BPF-map offsets from it.
    pub fn from_vmlinux(path: &Path) -> Result<Self> {
        let btf =
            load_btf_from_path(path).with_context(|| format!("btf: open {}", path.display()))?;
        Self::from_btf(&btf)
    }
    /// Resolves all BPF-map offsets from an already-loaded BTF.
    ///
    /// # Errors
    /// Fails when any mandatory struct/field lookup fails; only the htab
    /// offsets are optional (resolved best-effort into `None`).
    pub fn from_btf(btf: &Btf) -> Result<Self> {
        let (bpf_map, _) = find_struct(btf, "bpf_map")?;
        let map_name = member_byte_offset(btf, &bpf_map, "name")?;
        let map_type = member_byte_offset(btf, &bpf_map, "map_type")?;
        let map_flags = member_byte_offset(btf, &bpf_map, "map_flags")?;
        let key_size = member_byte_offset(btf, &bpf_map, "key_size")?;
        let value_size = member_byte_offset(btf, &bpf_map, "value_size")?;
        let max_entries = member_byte_offset(btf, &bpf_map, "max_entries")?;
        let (bpf_array, _) = find_struct(btf, "bpf_array")?;
        let array_value = member_byte_offset(btf, &bpf_array, "value")?;
        // xarray internals, used to iterate the kernel's map idr.
        let (xa_node, _) = find_struct(btf, "xa_node")?;
        let xa_node_slots = member_byte_offset(btf, &xa_node, "slots")?;
        let xa_node_shift = member_byte_offset(btf, &xa_node, "shift")?;
        let (idr_struct, _) = find_struct(btf, "idr")?;
        let (idr_rt_off, idr_rt_member) =
            member_byte_offset_with_member(btf, &idr_struct, "idr_rt")?;
        let xa_struct = resolve_member_struct(btf, &idr_rt_member)
            .context("btf: resolve type of idr.idr_rt")?;
        let xa_head_off = member_byte_offset(btf, &xa_struct, "xa_head")?;
        // `idr.idr_rt.xa_head` flattened into a single offset from the idr base.
        let idr_xa_head = idr_rt_off + xa_head_off;
        let idr_next = member_byte_offset(btf, &idr_struct, "idr_next")?;
        let map_btf = member_byte_offset(btf, &bpf_map, "btf")?;
        let map_btf_value_type_id = member_byte_offset(btf, &bpf_map, "btf_value_type_id")?;
        let (btf_struct, _) = find_struct(btf, "btf")?;
        let btf_data = member_byte_offset(btf, &btf_struct, "data")?;
        let btf_data_size = member_byte_offset(btf, &btf_struct, "data_size")?;
        // Optional: hash-table internals may be unresolvable on some kernels.
        let htab_offsets = resolve_htab_offsets(btf).ok();
        Ok(Self {
            map_name,
            map_type,
            map_flags,
            key_size,
            value_size,
            max_entries,
            array_value,
            xa_node_slots,
            xa_node_shift,
            idr_xa_head,
            idr_next,
            map_btf,
            map_btf_value_type_id,
            btf_data,
            btf_data_size,
            htab_offsets,
        })
    }
}
/// Byte offsets/sizes for walking kernel BPF hash-table maps
/// (`struct bpf_htab`, its buckets, and the nulls-list links).
#[derive(Debug, Clone)]
pub struct HtabOffsets {
    // Offsets of `buckets` / `n_buckets` within `struct bpf_htab`.
    pub htab_buckets: usize,
    pub htab_n_buckets: usize,
    // Size of one `struct bucket` and offset of its `head` field.
    pub bucket_size: usize,
    pub bucket_head: usize,
    // Offsets within `hlist_nulls_head` / `hlist_nulls_node`.
    pub hlist_nulls_head_first: usize,
    pub hlist_nulls_node_next: usize,
    // Size of `struct htab_elem` (base, before key/value payload).
    pub htab_elem_size_base: usize,
}
fn find_bucket_struct(btf: &Btf) -> Result<(btf_rs::Struct, usize)> {
let types = btf
.resolve_types_by_name("bucket")
.with_context(|| "btf: type 'bucket' not found")?;
for t in &types {
if let Type::Struct(s) = t
&& let Ok(head_off) = member_byte_offset(btf, s, "head")
{
return Ok((s.clone(), head_off));
}
}
bail!("btf: no 'bucket' struct with 'head' field found");
}
/// Resolves the offsets and sizes needed to walk a kernel BPF hash-table map
/// (`struct bpf_htab` -> buckets -> nulls lists -> `htab_elem`).
fn resolve_htab_offsets(btf: &Btf) -> Result<HtabOffsets> {
    let (bpf_htab, _) = find_struct(btf, "bpf_htab")?;
    let htab_buckets = member_byte_offset(btf, &bpf_htab, "buckets")?;
    let htab_n_buckets = member_byte_offset(btf, &bpf_htab, "n_buckets")?;
    let (bucket_struct, bucket_head) = find_bucket_struct(btf)?;
    let (nulls_head, _) = find_struct(btf, "hlist_nulls_head")?;
    let hlist_nulls_head_first = member_byte_offset(btf, &nulls_head, "first")?;
    let (nulls_node, _) = find_struct(btf, "hlist_nulls_node")?;
    let hlist_nulls_node_next = member_byte_offset(btf, &nulls_node, "next")?;
    let (htab_elem, _) = find_struct(btf, "htab_elem")?;
    Ok(HtabOffsets {
        htab_buckets,
        htab_n_buckets,
        bucket_size: bucket_struct.size(),
        bucket_head,
        hlist_nulls_head_first,
        hlist_nulls_node_next,
        // Base struct size only; the key/value payload follows the element.
        htab_elem_size_base: htab_elem.size(),
    })
}
/// Byte offsets needed to walk kernel BPF program structures
/// (`struct bpf_prog`, `struct bpf_prog_aux`, the prog idr/xarray, and
/// `struct bpf_prog_stats`).
#[derive(Debug, Clone)]
pub struct BpfProgOffsets {
    // Offsets of `type` / `aux` within `struct bpf_prog`.
    pub prog_type: usize,
    pub prog_aux: usize,
    // Offsets within `struct bpf_prog_aux`.
    pub aux_verified_insns: usize,
    pub aux_name: usize,
    // xarray/idr internals (same layout helpers as for maps).
    pub xa_node_slots: usize,
    pub xa_node_shift: usize,
    pub idr_xa_head: usize,
    pub idr_next: usize,
    // Offset of `bpf_prog.stats` and offsets within `struct bpf_prog_stats`.
    pub prog_stats: usize,
    pub stats_cnt: usize,
    pub stats_nsecs: usize,
}
impl BpfProgOffsets {
    /// Resolves all BPF-program offsets from an already-loaded BTF.
    ///
    /// # Errors
    /// Fails when any struct/field lookup fails; there are no optional
    /// groups here.
    pub fn from_btf(btf: &Btf) -> Result<Self> {
        let (bpf_prog, _) = find_struct(btf, "bpf_prog")?;
        let prog_type = member_byte_offset(btf, &bpf_prog, "type")?;
        let prog_aux = member_byte_offset(btf, &bpf_prog, "aux")?;
        let (bpf_prog_aux, _) = find_struct(btf, "bpf_prog_aux")?;
        let aux_verified_insns = member_byte_offset(btf, &bpf_prog_aux, "verified_insns")?;
        let aux_name = member_byte_offset(btf, &bpf_prog_aux, "name")?;
        // xarray internals, used to iterate the kernel's prog idr.
        let (xa_node, _) = find_struct(btf, "xa_node")?;
        let xa_node_slots = member_byte_offset(btf, &xa_node, "slots")?;
        let xa_node_shift = member_byte_offset(btf, &xa_node, "shift")?;
        let (idr_struct, _) = find_struct(btf, "idr")?;
        let (idr_rt_off, idr_rt_member) =
            member_byte_offset_with_member(btf, &idr_struct, "idr_rt")?;
        let xa_struct = resolve_member_struct(btf, &idr_rt_member)
            .context("btf: resolve type of idr.idr_rt")?;
        let xa_head_off = member_byte_offset(btf, &xa_struct, "xa_head")?;
        // `idr.idr_rt.xa_head` flattened into a single offset from the idr base.
        let idr_xa_head = idr_rt_off + xa_head_off;
        let idr_next = member_byte_offset(btf, &idr_struct, "idr_next")?;
        let prog_stats = member_byte_offset(btf, &bpf_prog, "stats")?;
        let (bpf_prog_stats, _) = find_struct(btf, "bpf_prog_stats")?;
        let stats_cnt = member_byte_offset(btf, &bpf_prog_stats, "cnt")?;
        let stats_nsecs = member_byte_offset(btf, &bpf_prog_stats, "nsecs")?;
        Ok(Self {
            prog_type,
            prog_aux,
            aux_verified_insns,
            aux_name,
            xa_node_slots,
            xa_node_shift,
            idr_xa_head,
            idr_next,
            prog_stats,
            stats_cnt,
            stats_nsecs,
        })
    }
    /// Loads BTF from `path` and resolves all BPF-program offsets from it.
    pub fn from_vmlinux(path: &Path) -> Result<Self> {
        let btf =
            load_btf_from_path(path).with_context(|| format!("btf: open {}", path.display()))?;
        Self::from_btf(&btf)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Asserts that every pair of offsets in `offs` is distinct.
    fn assert_all_distinct(offs: &[usize], msg: &str) {
        for (i, &a) in offs.iter().enumerate() {
            for &b in &offs[i + 1..] {
                assert_ne!(a, b, "{msg}");
            }
        }
    }

    // These tests skip themselves (early return) when no test vmlinux is
    // available in the environment.

    #[test]
    fn parse_rq_offsets_from_vmlinux() {
        let Some(path) = crate::monitor::find_test_vmlinux() else {
            return;
        };
        let offsets = KernelOffsets::from_vmlinux(&path).unwrap();
        assert!(offsets.rq_nr_running > 0);
        assert!(offsets.rq_clock > 0);
        assert!(offsets.rq_scx > 0);
        assert!(offsets.dsq_nr > 0);
    }

    #[test]
    fn parse_event_offsets_from_vmlinux() {
        let Some(path) = crate::monitor::find_test_vmlinux() else {
            return;
        };
        let offsets = KernelOffsets::from_vmlinux(&path).unwrap();
        if let Some(ev) = &offsets.event_offsets {
            assert_all_distinct(
                &[
                    ev.ev_select_cpu_fallback,
                    ev.ev_dispatch_local_dsq_offline,
                    ev.ev_dispatch_keep_last,
                    ev.ev_enq_skip_exiting,
                    ev.ev_enq_skip_migration_disabled,
                ],
                "event counter offsets must be distinct",
            );
        }
    }

    #[test]
    fn parse_schedstat_offsets_from_vmlinux() {
        let Some(path) = crate::monitor::find_test_vmlinux() else {
            return;
        };
        let offsets = KernelOffsets::from_vmlinux(&path).unwrap();
        if let Some(ss) = &offsets.schedstat_offsets {
            assert!(ss.rq_sched_info > 0);
            assert!(
                ss.sched_info_run_delay > 0,
                "run_delay must follow pcount in struct sched_info"
            );
            assert_ne!(
                ss.sched_info_pcount, ss.sched_info_run_delay,
                "pcount and run_delay offsets must be distinct"
            );
            let rq_fields = [
                ss.rq_yld_count,
                ss.rq_sched_count,
                ss.rq_sched_goidle,
                ss.rq_ttwu_count,
                ss.rq_ttwu_local,
            ];
            for &off in &rq_fields {
                assert!(off > 0, "schedstat rq field offset must be nonzero");
            }
            assert_all_distinct(&rq_fields, "schedstat rq field offsets must be distinct");
        }
    }

    #[test]
    fn parse_sched_domain_offsets_from_vmlinux() {
        let Some(path) = crate::monitor::find_test_vmlinux() else {
            return;
        };
        let offsets = KernelOffsets::from_vmlinux(&path).unwrap();
        if let Some(sd) = &offsets.sched_domain_offsets {
            assert!(sd.rq_sd > 0, "rq.sd must be at nonzero offset");
            assert_ne!(
                sd.sd_level, sd.sd_parent,
                "level and parent offsets must be distinct"
            );
            assert_ne!(
                sd.sd_name, sd.sd_parent,
                "name and parent offsets must be distinct"
            );
            for &off in &[
                sd.sd_balance_interval,
                sd.sd_nr_balance_failed,
                sd.sd_newidle_call,
                sd.sd_newidle_success,
                sd.sd_newidle_ratio,
                sd.sd_max_newidle_lb_cost,
            ] {
                assert!(off > 0, "sched_domain runtime field offset must be nonzero");
            }
            if let Some(so) = &sd.stats_offsets {
                assert_all_distinct(
                    &[
                        so.sd_lb_count,
                        so.sd_lb_failed,
                        so.sd_lb_balanced,
                        so.sd_lb_imbalance_load,
                        so.sd_lb_imbalance_util,
                        so.sd_lb_imbalance_task,
                        so.sd_lb_imbalance_misfit,
                        so.sd_lb_gained,
                        so.sd_lb_hot_gained,
                        so.sd_lb_nobusyg,
                        so.sd_lb_nobusyq,
                    ],
                    "sched_domain array field offsets must be distinct",
                );
                let scalar_fields = [
                    so.sd_alb_count,
                    so.sd_alb_failed,
                    so.sd_alb_pushed,
                    so.sd_ttwu_wake_remote,
                    so.sd_ttwu_move_affine,
                    so.sd_ttwu_move_balance,
                ];
                for &off in &scalar_fields {
                    assert!(off > 0, "sched_domain scalar field offset must be nonzero");
                }
                assert_all_distinct(
                    &scalar_fields,
                    "sched_domain scalar field offsets must be distinct",
                );
            }
        }
    }

    #[test]
    fn parse_bpf_map_offsets_from_vmlinux() {
        let Some(path) = crate::monitor::find_test_vmlinux() else {
            return;
        };
        let offsets = BpfMapOffsets::from_vmlinux(&path).unwrap();
        assert!(offsets.map_name > 0);
        assert!(offsets.map_type > 0);
        assert!(offsets.value_size > 0);
        assert!(offsets.array_value > 0);
        assert!(offsets.map_btf > 0);
        assert!(offsets.map_btf_value_type_id > 0);
        assert!(offsets.btf_data_size > offsets.btf_data);
    }

    #[test]
    fn parse_bpf_prog_offsets_from_vmlinux() {
        let Some(path) = crate::monitor::find_test_vmlinux() else {
            return;
        };
        let offsets = BpfProgOffsets::from_vmlinux(&path).unwrap();
        assert!(offsets.prog_aux > 0);
        assert!(offsets.aux_verified_insns > 0);
        assert!(offsets.aux_name > 0);
    }

    #[test]
    fn from_vmlinux_nonexistent() {
        // A missing file must surface as an error, not a panic.
        let missing = std::path::Path::new("/nonexistent/vmlinux");
        assert!(KernelOffsets::from_vmlinux(missing).is_err());
    }

    #[test]
    fn from_vmlinux_empty_file() {
        // An empty file is neither a BTF blob nor an ELF; must be an error.
        let tmp = std::env::temp_dir().join(format!("ktstr-btf-empty-{}", std::process::id()));
        std::fs::create_dir_all(&tmp).unwrap();
        let vmlinux = tmp.join("vmlinux");
        std::fs::write(&vmlinux, b"").unwrap();
        assert!(KernelOffsets::from_vmlinux(&vmlinux).is_err());
        let _ = std::fs::remove_dir_all(&tmp);
    }
}