use crate::monitor::bpf_map::GuestMemMapAccessorOwned;
use crate::monitor::btf_offsets::ScxWalkerOffsets;
use crate::monitor::symbols::KernelSymbols;
/// Precomputed addresses a walker needs to read guest SCX (sched_ext-style)
/// scheduler state: one runqueue address pair per CPU plus the global symbol
/// KVAs. Derives added so the type is debuggable, comparable in tests, and
/// constructible empty via `Default` (the degraded-build shape).
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub(crate) struct ScxWalkerOwned {
    /// Kernel virtual address of each per-CPU runqueue, one entry per CPU.
    pub(crate) rq_kvas: Vec<u64>,
    /// Physical address of each per-CPU runqueue, paired index-for-index
    /// with `rq_kvas` (see `compute_owned`, which builds both together).
    pub(crate) rq_pas: Vec<u64>,
    /// KVA of the `scx_root` symbol; 0 when the symbol was not resolved.
    pub(crate) scx_root_kva: u64,
    /// KVA of the `scx_tasks` symbol; 0 when the symbol was not resolved.
    pub(crate) scx_tasks_kva: u64,
}
// Build the owned SCX walker state from resolved symbols and per-CPU layout.
//
// Returns `None` when either the BTF offsets or the kernel symbols are
// unavailable — both are hard prerequisites. A missing `per_cpu_offsets`
// is NOT fatal: the walker degrades to empty runqueue arrays while the
// global `scx_tasks` walk and the `*scx_root` read stay usable.
// Unresolved `scx_root`/`scx_tasks` symbols degrade to KVA 0.
#[allow(dead_code)] // NOTE(review): not referenced from this file; presumably wired up elsewhere — confirm.
pub(crate) fn build(
    owned_accessor: &GuestMemMapAccessorOwned,
    offsets: Option<&ScxWalkerOffsets>,
    symbols: Option<&KernelSymbols>,
    per_cpu_offsets: Option<&[u64]>,
) -> Option<ScxWalkerOwned> {
    // Offsets gate the build even though the value itself is unused here.
    let _ = offsets?;
    let syms = symbols?;
    let page_offset = owned_accessor.guest_kernel().page_offset();
    let scx_root_kva = syms.scx_root.unwrap_or(0);
    let scx_tasks_kva = syms.scx_tasks.unwrap_or(0);
    match per_cpu_offsets {
        Some(pco) => Some(compute_owned(
            page_offset,
            syms.runqueues,
            scx_root_kva,
            scx_tasks_kva,
            pco,
        )),
        None => {
            tracing::debug!(
                "capture_scx::build: per_cpu_offsets absent — degraded \
                capture with no rq arrays; global scx_tasks walk and \
                *scx_root read still active",
            );
            Some(ScxWalkerOwned {
                rq_kvas: Vec::new(),
                rq_pas: Vec::new(),
                scx_root_kva,
                scx_tasks_kva,
            })
        }
    }
}
// Materialize the walker arrays from the resolved `runqueues` symbol address
// and the guest's per-CPU offsets.
//
// For each CPU: kva = `runqueues_off + per_cpu_offset` (wrapping), the pa is
// derived via `kva_to_pa`, and the stored kva is re-derived from that pa so
// `rq_kvas[i]` and `rq_pas[i]` always form a consistent pair.
fn compute_owned(
    page_offset: u64,
    runqueues_off: u64,
    scx_root_kva: u64,
    scx_tasks_kva: u64,
    per_cpu_offsets: &[u64],
) -> ScxWalkerOwned {
    // Build both address arrays in lock-step from a single pass.
    let (rq_pas, rq_kvas): (Vec<u64>, Vec<u64>) = per_cpu_offsets
        .iter()
        .map(|&cpu_off| {
            let pa = crate::monitor::symbols::kva_to_pa(
                runqueues_off.wrapping_add(cpu_off),
                page_offset,
            );
            (pa, pa.wrapping_add(page_offset))
        })
        .unzip();
    ScxWalkerOwned {
        rq_kvas,
        rq_pas,
        scx_root_kva,
        scx_tasks_kva,
    }
}
// Unit tests exercise `compute_owned` directly and the shape of the degraded
// (no per-CPU offsets) walker. `build` itself requires a live
// `GuestMemMapAccessorOwned` and is not covered here.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::monitor::symbols::DEFAULT_PAGE_OFFSET;

    // Full path: symbol KVAs pass through untouched, one pa/kva pair is
    // produced per CPU, and the pas agree with the reference
    // `compute_rq_pas` helper.
    #[test]
    fn compute_owned_happy_path() {
        let page_offset = DEFAULT_PAGE_OFFSET;
        let runqueues_off: u64 = 0x20_0000;
        let per_cpu = [0x10_0000u64, 0x14_0000u64, 0x18_0000u64];
        let scx_root_kva = 0xffff_ffff_8230_0000;
        let scx_tasks_kva = 0xffff_ffff_8240_0000;
        let owned = compute_owned(
            page_offset,
            runqueues_off,
            scx_root_kva,
            scx_tasks_kva,
            &per_cpu,
        );
        assert_eq!(owned.scx_root_kva, scx_root_kva);
        assert_eq!(owned.scx_tasks_kva, scx_tasks_kva);
        assert_eq!(owned.rq_kvas.len(), 3);
        assert_eq!(owned.rq_pas.len(), 3);
        let expected_pas =
            crate::monitor::symbols::compute_rq_pas(runqueues_off, &per_cpu, page_offset);
        assert_eq!(owned.rq_pas, expected_pas);
        // Each kva must be its pa shifted back up by the linear-map offset.
        for (cpu, expected_pa) in expected_pas.iter().enumerate() {
            assert_eq!(owned.rq_kvas[cpu], expected_pa.wrapping_add(page_offset),);
        }
    }

    // Both symbols unresolved (KVA 0): rq arrays are still fully built.
    #[test]
    fn compute_owned_partial_scx_root_zero() {
        let page_offset = DEFAULT_PAGE_OFFSET;
        let runqueues_off: u64 = 0x20_0000;
        let per_cpu = [0x10_0000u64, 0x14_0000u64];
        let owned = compute_owned(page_offset, runqueues_off, 0, 0, &per_cpu);
        assert_eq!(owned.scx_root_kva, 0);
        assert_eq!(owned.scx_tasks_kva, 0);
        assert_eq!(owned.rq_kvas.len(), 2);
        assert_eq!(owned.rq_pas.len(), 2);
        let expected_pas =
            crate::monitor::symbols::compute_rq_pas(runqueues_off, &per_cpu, page_offset);
        assert_eq!(owned.rq_pas, expected_pas);
    }

    // Only scx_tasks unresolved: scx_root still passes through unchanged.
    #[test]
    fn compute_owned_partial_scx_tasks_zero() {
        let page_offset = DEFAULT_PAGE_OFFSET;
        let runqueues_off: u64 = 0x20_0000;
        let per_cpu = [0x10_0000u64];
        let scx_root_kva = 0xffff_ffff_8230_0000;
        let owned = compute_owned(page_offset, runqueues_off, scx_root_kva, 0, &per_cpu);
        assert_eq!(owned.scx_root_kva, scx_root_kva);
        assert_eq!(owned.scx_tasks_kva, 0);
        assert_eq!(owned.rq_kvas.len(), 1);
        assert_eq!(owned.rq_pas.len(), 1);
    }

    // Shape check for the degraded build (`per_cpu_offsets` absent in
    // `build`): empty rq arrays but symbol KVAs intact.
    #[test]
    fn degraded_build_shape_empty_rq_with_symbol_kvas() {
        let scx_root_kva = 0xffff_ffff_8230_0000;
        let scx_tasks_kva = 0xffff_ffff_8240_0000;
        let owned = ScxWalkerOwned {
            rq_kvas: Vec::new(),
            rq_pas: Vec::new(),
            scx_root_kva,
            scx_tasks_kva,
        };
        assert!(owned.rq_kvas.is_empty());
        assert!(owned.rq_pas.is_empty());
        assert_eq!(owned.scx_root_kva, scx_root_kva);
        assert_eq!(owned.scx_tasks_kva, scx_tasks_kva);
        // Pairwise iteration over the two arrays degenerates safely to empty.
        let zipped: Vec<_> = owned.rq_kvas.iter().zip(owned.rq_pas.iter()).collect();
        assert!(zipped.is_empty());
    }

    // scx_tasks_kva is usable even when both rq arrays and scx_root are absent.
    #[test]
    fn degraded_build_scx_tasks_kva_independent_of_rq_arrays() {
        let scx_tasks_kva = 0xffff_ffff_82e5_e840;
        let owned = ScxWalkerOwned {
            rq_kvas: Vec::new(),
            rq_pas: Vec::new(),
            scx_root_kva: 0,
            scx_tasks_kva,
        };
        assert_eq!(owned.scx_tasks_kva, scx_tasks_kva);
        assert_eq!(owned.scx_root_kva, 0);
    }

    // Zero CPUs: compute_owned yields empty arrays, symbols still pass through.
    #[test]
    fn compute_owned_empty_per_cpu_offsets() {
        let page_offset = DEFAULT_PAGE_OFFSET;
        let runqueues_off: u64 = 0x20_0000;
        let scx_root_kva = 0xffff_ffff_8230_0000;
        let scx_tasks_kva = 0xffff_ffff_8240_0000;
        let owned = compute_owned(page_offset, runqueues_off, scx_root_kva, scx_tasks_kva, &[]);
        assert!(owned.rq_kvas.is_empty());
        assert!(owned.rq_pas.is_empty());
        assert_eq!(owned.scx_root_kva, scx_root_kva);
        assert_eq!(owned.scx_tasks_kva, scx_tasks_kva);
    }

    // Offset chosen so kva == page_offset, forcing pa to wrap to 0; both
    // wrapping_add/wrapping_sub paths must stay consistent without panicking.
    #[test]
    fn compute_owned_wrapping_arithmetic() {
        let page_offset = DEFAULT_PAGE_OFFSET;
        let runqueues_off: u64 = 0x1000;
        let per_cpu = [page_offset.wrapping_sub(runqueues_off)];
        let owned = compute_owned(page_offset, runqueues_off, 0, 0, &per_cpu);
        assert_eq!(owned.rq_pas, vec![0u64]);
        assert_eq!(owned.rq_kvas, vec![page_offset]);
    }

    // Invariant: rq_kvas[i] == rq_pas[i] + page_offset for every CPU index.
    #[test]
    fn compute_owned_kva_pa_pairwise_consistent() {
        let page_offset = DEFAULT_PAGE_OFFSET;
        let runqueues_off: u64 = 0x4_0000;
        let per_cpu = [
            0x10_0000u64,
            0x14_0000u64,
            0x18_0000u64,
            0x1c_0000u64,
            0x20_0000u64,
        ];
        let owned = compute_owned(
            page_offset,
            runqueues_off,
            0xffff_ffff_8000_0000,
            0xffff_ffff_8001_0000,
            &per_cpu,
        );
        assert_eq!(owned.rq_kvas.len(), per_cpu.len());
        assert_eq!(owned.rq_pas.len(), per_cpu.len());
        for cpu in 0..per_cpu.len() {
            assert_eq!(
                owned.rq_kvas[cpu],
                owned.rq_pas[cpu].wrapping_add(page_offset),
                "kva/pa pair mismatch on cpu {cpu}",
            );
        }
    }
}