use crate::monitor::bpf_map::GuestMemMapAccessorOwned;
use crate::monitor::btf_offsets::{ScxWalkerOffsets, TaskEnrichmentOffsets};
use crate::monitor::dump::TaskWalkerEntry;
use crate::monitor::idr::translate_any_kva;
use crate::monitor::reader::GuestMem;
use crate::monitor::task_enrichment::{LockSlowpathRegistry, SchedClassRegistry};
use crate::vmm::exit_dispatch::VcpuRegSnapshot;
use super::capture_scx::ScxWalkerOwned;
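/// Per-task scheduling snapshot harvested from the guest, together with the
/// symbol registries used to decode scheduler classes and lock slowpaths.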
pub(crate) struct TaskEnrichmentOwned {
pub(crate) tasks: Vec<TaskWalkerEntry>,
pub(crate) sched_classes: SchedClassRegistry,
pub(crate) lock_slowpaths: LockSlowpathRegistry,
}
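/// Upper bound on nodes followed per runnable list, guarding against cycles
/// and corrupted `next` pointers in guest memory.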
const MAX_NODES_PER_LIST: u32 = 4096;
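/// Builds the task-enrichment snapshot. Returns `None` unless the sched_ext
/// walker state and every required BTF offset subgroup are present.
///
/// Tasks are discovered from two sources: the global `scx_tasks` list is the
/// primary source, and each CPU's `rq->scx.runnable_list` is unioned in so a
/// task that is runnable but absent from the global walk still surfaces.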
pub(crate) fn build(
owned_accessor: &GuestMemMapAccessorOwned,
scx_owned: Option<&ScxWalkerOwned>,
scx_walker_offsets: Option<&ScxWalkerOffsets>,
offsets: Option<&TaskEnrichmentOffsets>,
vcpu_regs: &[Option<VcpuRegSnapshot>],
) -> Option<TaskEnrichmentOwned> {
let scx_owned = scx_owned?;
let scx_offs = scx_walker_offsets?;
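// Gate on the enrichment offsets having been resolved; only their presence
// matters here, not their contents.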
let _ = offsets?;
let rq_offs = scx_offs.rq.as_ref()?;
let scx_rq_offs = scx_offs.scx_rq.as_ref()?;
let task_offs = scx_offs.task.as_ref()?;
let see_offs = scx_offs.see.as_ref()?;
let kernel = owned_accessor.guest_kernel();
let mem = kernel.mem();
let walk = kernel.walk_context();
let tasks_node_off_in_task = task_offs.scx.checked_add(see_offs.tasks_node)?;
let runnable_node_off_in_task = task_offs.scx.checked_add(see_offs.runnable_node)?;
let runnable_list_off = rq_offs.scx.checked_add(scx_rq_offs.runnable_list)?;
let mut all_task_kvas: Vec<u64> = crate::monitor::scx_walker::walk_scx_tasks_global(
kernel,
scx_owned.scx_tasks_kva,
tasks_node_off_in_task,
see_offs.tasks_node,
see_offs.flags,
);
let mut all_task_kvas_set: std::collections::HashSet<u64> =
all_task_kvas.iter().copied().collect();
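// Maps task KVA -> instruction pointer of the vCPU currently running it
// (None when the task is runnable but not on-CPU). Presence in this map is
// what marks a task as runnable in the final entries.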
let mut runnable_on_cpu: std::collections::HashMap<u64, Option<u64>> =
std::collections::HashMap::with_capacity(all_task_kvas.len());
for (cpu, (&rq_kva, &rq_pa)) in scx_owned
.rq_kvas
.iter()
.zip(scx_owned.rq_pas.iter())
.enumerate()
{
let curr_kva = mem.read_u64(rq_pa, rq_offs.curr);
let vcpu_pc = vcpu_regs
.get(cpu)
.and_then(|reg| reg.as_ref())
.map(|reg| reg.instruction_pointer);
let Some(head_kva) = rq_kva.checked_add(runnable_list_off as u64) else {
continue;
};
let Some(head_pa) = rq_pa.checked_add(runnable_list_off as u64) else {
continue;
};
let task_kvas = walk_runnable_list(mem, walk, head_kva, head_pa, runnable_node_off_in_task);
for task_kva in task_kvas {
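// Attribute the vCPU's instruction pointer only to the task installed as
// rq->curr on this CPU; every other runnable task gets no PC.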
let running_pc = if task_kva == curr_kva && curr_kva != 0 {
vcpu_pc
} else {
None
};
runnable_on_cpu
.entry(task_kva)
.and_modify(|e| {
if e.is_none() {
*e = running_pc;
}
})
.or_insert(running_pc);
if all_task_kvas_set.insert(task_kva) {
all_task_kvas.push(task_kva);
}
}
}
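// Tasks present in `runnable_on_cpu` are flagged runnable; tasks seen only
// by the global walk are marked not runnable, with no PC.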
let tasks: Vec<TaskWalkerEntry> = all_task_kvas
.into_iter()
.map(|task_kva| match runnable_on_cpu.get(&task_kva) {
Some(running_pc) => TaskWalkerEntry {
task_kva,
is_runnable_in_scx: true,
running_pc: *running_pc,
},
None => TaskWalkerEntry {
task_kva,
is_runnable_in_scx: false,
running_pc: None,
},
})
.collect();
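// Resolve sched-class and lock-slowpath symbol addresses so per-task PCs
// can be decoded later.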
let sched_classes = SchedClassRegistry::from_guest_kernel(kernel);
let lock_slowpaths = LockSlowpathRegistry::from_guest_kernel(kernel);
Some(TaskEnrichmentOwned {
tasks,
sched_classes,
lock_slowpaths,
})
}
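/// Follows a circular `list_head` starting at `head_pa`, applying
/// container_of with `runnable_node_off_in_task` to recover task KVAs.
/// Terminates at the head, at a NULL `next` pointer, after
/// `MAX_NODES_PER_LIST` nodes, or when a node cannot be translated. A node
/// is recorded before its `next` pointer is read, so an untranslatable tail
/// KVA still appears in the result.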
fn walk_runnable_list(
mem: &GuestMem,
walk: crate::monitor::reader::WalkContext,
head_kva: u64,
head_pa: u64,
runnable_node_off_in_task: usize,
) -> Vec<u64> {
let mut task_kvas: Vec<u64> = Vec::new();
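// The head's `next` pointer is read through its known physical address, so
// the head itself never needs KVA translation.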
let mut node_kva = mem.read_u64(head_pa, 0);
if node_kva == 0 {
return task_kvas;
}
let mut visited: u32 = 0;
while node_kva != head_kva {
if visited >= MAX_NODES_PER_LIST {
return task_kvas;
}
visited += 1;
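// container_of: the runnable node is embedded at this offset in task_struct.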
let task_kva = node_kva.wrapping_sub(runnable_node_off_in_task as u64);
task_kvas.push(task_kva);
let Some(node_pa) = translate_any_kva(
mem,
walk.cr3_pa,
walk.page_offset,
node_kva,
walk.l5,
walk.tcr_el1,
) else {
return task_kvas;
};
let next_kva = mem.read_u64(node_pa, 0);
if next_kva == 0 {
return task_kvas;
}
node_kva = next_kva;
}
task_kvas
}
#[cfg(test)]
mod tests {
use super::*;
use crate::monitor::btf_offsets::{
RqStructOffsets, SchedExtEntityOffsets, ScxRqOffsets, ScxWalkerOffsets,
TaskStructCoreOffsets,
};
use crate::monitor::guest::GuestKernel;
use crate::monitor::reader::GuestMem;
use std::collections::HashMap;
const TEST_PAGE_OFFSET: u64 = 0;
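// With a zero page offset, test KVAs translate 1:1 to offsets in the buffer.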
fn make_kernel<'a>(mem: &'a GuestMem) -> GuestKernel<'a> {
GuestKernel::new_for_test(mem, HashMap::new(), TEST_PAGE_OFFSET, 0, false)
}
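// Fully populated offsets; the values are arbitrary but consistent with the
// assertions in `offset_arithmetic_stable`.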
fn test_scx_offsets() -> ScxWalkerOffsets {
ScxWalkerOffsets {
rq: Some(RqStructOffsets { scx: 0, curr: 8 }),
scx_rq: Some(ScxRqOffsets {
local_dsq: 0,
runnable_list: 0,
nr_running: 96,
flags: 100,
cpu_released: 104,
ops_qseq: 112,
kick_sync: Some(120),
nr_immed: Some(128),
clock: Some(136),
}),
task: Some(TaskStructCoreOffsets {
comm: 100,
pid: 200,
scx: 0,
}),
see: Some(SchedExtEntityOffsets {
runnable_node: 0,
runnable_at: 16,
weight: 24,
slice: 32,
dsq_vtime: 40,
dsq: 48,
dsq_list: 56,
flags: 72,
dsq_flags: 76,
sticky_cpu: 80,
holding_cpu: 84,
tasks_node: 88,
}),
dsq_lnode: None,
dsq: None,
sched: None,
sched_pnode: None,
sched_pcpu: None,
rht: None,
}
}
#[test]
fn walk_runnable_list_basic_two_tasks() {
let mut buf = vec![0u8; 0x1000];
let head = 0x100usize;
let n1 = 0x200usize;
let n2 = 0x300usize;
buf[head..head + 8].copy_from_slice(&(n1 as u64).to_le_bytes());
buf[n1..n1 + 8].copy_from_slice(&(n2 as u64).to_le_bytes());
buf[n2..n2 + 8].copy_from_slice(&(head as u64).to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kvas = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
head as u64,
head as u64,
0,
);
assert_eq!(kvas, vec![n1 as u64, n2 as u64]);
}
#[test]
fn walk_runnable_list_empty() {
let mut buf = vec![0u8; 0x1000];
let head = 0x100usize;
buf[head..head + 8].copy_from_slice(&(head as u64).to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kvas = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
head as u64,
head as u64,
0,
);
assert!(kvas.is_empty());
}
#[test]
fn walk_runnable_list_null_next_bails() {
let mut buf = vec![0u8; 0x1000];
let head = 0x100usize;
buf[head..head + 8].copy_from_slice(&0u64.to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kvas = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
head as u64,
head as u64,
0,
);
assert!(kvas.is_empty());
}
#[test]
fn walk_runnable_list_container_of_subtraction() {
let mut buf = vec![0u8; 0x1000];
let head = 0x100usize;
let n1 = 0x200usize;
let off = 0x40usize;
buf[head..head + 8].copy_from_slice(&(n1 as u64).to_le_bytes());
buf[n1..n1 + 8].copy_from_slice(&(head as u64).to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kvas = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
head as u64,
head as u64,
off,
);
assert_eq!(kvas, vec![(n1 - off) as u64]);
}
#[test]
fn walk_runnable_list_truncates_at_cap() {
let mut buf = vec![0u8; 0x1000];
let head = 0x100usize;
let n1 = 0x200usize;
let n2 = 0x300usize;
buf[head..head + 8].copy_from_slice(&(n1 as u64).to_le_bytes());
buf[n1..n1 + 8].copy_from_slice(&(n2 as u64).to_le_bytes());
buf[n2..n2 + 8].copy_from_slice(&(n1 as u64).to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kvas = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
head as u64,
head as u64,
0,
);
assert_eq!(kvas.len() as u32, MAX_NODES_PER_LIST);
}
#[test]
fn walk_runnable_list_unmapped_memory_terminates_walk() {
let mut buf = vec![0u8; 0x1000];
let head = 0x100usize;
let n1 = 0x200usize;
let garbage_next: u64 = 0xffff_ffff_ffff_0000;
buf[head..head + 8].copy_from_slice(&(n1 as u64).to_le_bytes());
buf[n1..n1 + 8].copy_from_slice(&garbage_next.to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kvas = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
head as u64,
head as u64,
0,
);
assert_eq!(kvas, vec![n1 as u64, garbage_next]);
}
#[test]
fn multi_cpu_walk_concatenates_tasks() {
let mut buf = vec![0u8; 0x2000];
let cpu0_head = 0x100usize;
let cpu0_n1 = 0x200usize;
let cpu1_head = 0x800usize;
let cpu1_n1 = 0x900usize;
buf[cpu0_head..cpu0_head + 8].copy_from_slice(&(cpu0_n1 as u64).to_le_bytes());
buf[cpu0_n1..cpu0_n1 + 8].copy_from_slice(&(cpu0_head as u64).to_le_bytes());
buf[cpu1_head..cpu1_head + 8].copy_from_slice(&(cpu1_n1 as u64).to_le_bytes());
buf[cpu1_n1..cpu1_n1 + 8].copy_from_slice(&(cpu1_head as u64).to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kvas0 = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
cpu0_head as u64,
cpu0_head as u64,
0,
);
let kvas1 = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
cpu1_head as u64,
cpu1_head as u64,
0,
);
let mut combined: Vec<u64> = Vec::new();
combined.extend(kvas0);
combined.extend(kvas1);
assert_eq!(combined, vec![cpu0_n1 as u64, cpu1_n1 as u64]);
}
#[test]
fn scx_offsets_with_optional_fields_none_still_walks() {
let mut buf = vec![0u8; 0x1000];
let head = 0x100usize;
let n1 = 0x200usize;
buf[head..head + 8].copy_from_slice(&(n1 as u64).to_le_bytes());
buf[n1..n1 + 8].copy_from_slice(&(head as u64).to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let offsets = ScxWalkerOffsets {
rq: Some(RqStructOffsets { scx: 0, curr: 8 }),
scx_rq: Some(ScxRqOffsets {
local_dsq: 0,
runnable_list: 0,
nr_running: 96,
flags: 100,
cpu_released: 104,
ops_qseq: 112,
kick_sync: None,
nr_immed: None,
clock: None,
}),
task: Some(TaskStructCoreOffsets {
comm: 100,
pid: 200,
scx: 0,
}),
see: Some(SchedExtEntityOffsets {
runnable_node: 0,
runnable_at: 16,
weight: 24,
slice: 32,
dsq_vtime: 40,
dsq: 48,
dsq_list: 56,
flags: 72,
dsq_flags: 76,
sticky_cpu: 80,
holding_cpu: 84,
tasks_node: 88,
}),
dsq_lnode: None,
dsq: None,
sched: None,
sched_pnode: None,
sched_pcpu: None,
rht: None,
};
let runnable_list_off =
offsets.rq.as_ref().unwrap().scx + offsets.scx_rq.as_ref().unwrap().runnable_list;
let runnable_node_off_in_task =
offsets.task.as_ref().unwrap().scx + offsets.see.as_ref().unwrap().runnable_node;
assert_eq!(runnable_list_off, 0);
let kvas = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
head as u64,
head as u64,
runnable_node_off_in_task,
);
assert_eq!(kvas, vec![n1 as u64]);
}
#[test]
fn missing_required_subgroup_gates_to_none() {
let offsets = ScxWalkerOffsets {
rq: Some(RqStructOffsets { scx: 0, curr: 8 }),
scx_rq: Some(ScxRqOffsets {
local_dsq: 0,
runnable_list: 0,
nr_running: 96,
flags: 100,
cpu_released: 104,
ops_qseq: 112,
kick_sync: None,
nr_immed: None,
clock: None,
}),
task: Some(TaskStructCoreOffsets {
comm: 100,
pid: 200,
scx: 0,
}),
see: None,
dsq_lnode: None,
dsq: None,
sched: None,
sched_pnode: None,
sched_pcpu: None,
rht: None,
};
assert!(offsets.see.is_none());
}
#[test]
fn sched_class_registry_empty_symbols_yields_none_slots() {
let mut buf = vec![0u8; 0x1000];
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kernel = make_kernel(&mem);
let r = SchedClassRegistry::from_guest_kernel(&kernel);
assert!(r.fair.is_none());
assert!(r.rt.is_none());
assert!(r.dl.is_none());
assert!(r.idle.is_none());
assert!(r.stop.is_none());
assert!(r.ext.is_none());
assert!(r.decode(0xffff_ffff_8000_1000).is_none());
}
#[test]
fn lock_slowpath_registry_empty_symbols_yields_none_slots() {
let mut buf = vec![0u8; 0x1000];
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kernel = make_kernel(&mem);
let r = LockSlowpathRegistry::from_guest_kernel(&kernel);
assert!(r.queued_spin_lock_slowpath.is_none());
assert!(r.mutex_lock_slowpath.is_none());
assert!(r.rwsem_down_read_slowpath.is_none());
assert!(r.rwsem_down_write_slowpath.is_none());
assert!(r.match_pc(0xdeadbeef).is_none());
}
#[test]
fn sched_class_registry_populated_symbols_decode_known() {
let fair_kva: u64 = 0xffff_ffff_8000_1000;
let mut symbols = HashMap::new();
symbols.insert("fair_sched_class".to_string(), fair_kva);
symbols.insert("ext_sched_class".to_string(), 0xffff_ffff_8000_1300);
let mut buf = vec![0u8; 0x1000];
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kernel = GuestKernel::new_for_test(&mem, symbols, TEST_PAGE_OFFSET, 0, false);
let r = SchedClassRegistry::from_guest_kernel(&kernel);
assert_eq!(r.fair, Some(fair_kva));
assert_eq!(r.ext, Some(0xffff_ffff_8000_1300));
assert_eq!(r.rt, None);
assert_eq!(r.decode(fair_kva), Some("fair"));
assert_eq!(r.decode(0xffff_ffff_8000_1300), Some("ext"));
}
#[test]
fn offset_arithmetic_stable() {
let off = test_scx_offsets();
let rq = off.rq.as_ref().unwrap();
let scx_rq = off.scx_rq.as_ref().unwrap();
let task = off.task.as_ref().unwrap();
let see = off.see.as_ref().unwrap();
assert_eq!(rq.scx + scx_rq.runnable_list, 0);
assert_eq!(task.scx + see.runnable_node, 0);
assert_eq!(task.scx + see.tasks_node, 88);
assert_eq!(rq.curr, 8);
}
#[test]
fn merge_global_walk_and_runnable_list_flags_correctly() {
let t1: u64 = 0x1000;
let t2: u64 = 0x2000;
let global = vec![t1, t2];
let runnable_per_cpu: Vec<(u64, u64)> = vec![(t1, 0xdead_beef)];
let entries = merge_global_and_runnable(&global, &runnable_per_cpu);
assert_eq!(entries.len(), 2);
let e1 = entries.iter().find(|e| e.task_kva == t1).unwrap();
assert!(e1.is_runnable_in_scx);
assert_eq!(e1.running_pc, Some(0xdead_beef));
let e2 = entries.iter().find(|e| e.task_kva == t2).unwrap();
assert!(!e2.is_runnable_in_scx);
assert_eq!(e2.running_pc, None);
}
#[test]
fn merge_includes_runnable_only_task() {
let t1: u64 = 0x1000;
let global: Vec<u64> = vec![];
let runnable_per_cpu: Vec<(u64, u64)> = vec![(t1, 0)];
let entries = merge_global_and_runnable(&global, &runnable_per_cpu);
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].task_kva, t1);
assert!(entries[0].is_runnable_in_scx);
}
#[test]
fn merge_both_empty_yields_empty() {
let entries = merge_global_and_runnable(&[], &[]);
assert!(entries.is_empty());
}
#[test]
fn merge_promotes_running_pc_across_cpus() {
let t: u64 = 0x1000;
let global = vec![t];
let runnable_per_cpu: Vec<(u64, u64)> = vec![(t, 0), (t, 0xc0de)];
let entries = merge_global_and_runnable(&global, &runnable_per_cpu);
assert_eq!(entries.len(), 1);
assert!(entries[0].is_runnable_in_scx);
assert_eq!(entries[0].running_pc, Some(0xc0de));
}
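// Test-local mirror of the merge performed by `build`: the global walk is
// the primary source, runnable-list tasks are unioned in, and a PC of zero
// stands for "runnable but not on-CPU".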
fn merge_global_and_runnable(
global: &[u64],
runnable_per_cpu: &[(u64, u64)],
) -> Vec<TaskWalkerEntry> {
let mut runnable_on_cpu: HashMap<u64, Option<u64>> = HashMap::with_capacity(global.len());
for &(task_kva, pc) in runnable_per_cpu {
let pc_opt = if pc == 0 { None } else { Some(pc) };
runnable_on_cpu
.entry(task_kva)
.and_modify(|e| {
if e.is_none() {
*e = pc_opt;
}
})
.or_insert(pc_opt);
}
let mut all: Vec<u64> = global.to_vec();
let mut all_set: std::collections::HashSet<u64> = all.iter().copied().collect();
for &(task_kva, _) in runnable_per_cpu {
if all_set.insert(task_kva) {
all.push(task_kva);
}
}
all.into_iter()
.map(|task_kva| match runnable_on_cpu.get(&task_kva) {
Some(running_pc) => TaskWalkerEntry {
task_kva,
is_runnable_in_scx: true,
running_pc: *running_pc,
},
None => TaskWalkerEntry {
task_kva,
is_runnable_in_scx: false,
running_pc: None,
},
})
.collect()
}
#[test]
fn build_uses_global_scx_tasks_as_primary_source() {
let head_kva = crate::monitor::symbols::START_KERNEL_MAP + 0x100;
let head_pa = 0x100usize;
let t1_node_kva: u64 = 0x800;
let t2_node_kva: u64 = 0xa00;
let t3_node_kva: u64 = 0xc00;
let tasks_node_off_in_task: usize = 0x40;
let tasks_node_off_in_see: usize = 0x60;
let flags_off_in_see: usize = 0x44;
let mut buf = vec![0u8; 0x2000];
buf[head_pa..head_pa + 8].copy_from_slice(&t1_node_kva.to_le_bytes());
buf[t1_node_kva as usize..t1_node_kva as usize + 8]
.copy_from_slice(&t2_node_kva.to_le_bytes());
buf[t2_node_kva as usize..t2_node_kva as usize + 8]
.copy_from_slice(&t3_node_kva.to_le_bytes());
buf[t3_node_kva as usize..t3_node_kva as usize + 8]
.copy_from_slice(&head_kva.to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kernel = make_kernel(&mem);
let global = crate::monitor::scx_walker::walk_scx_tasks_global(
&kernel,
head_kva,
tasks_node_off_in_task,
tasks_node_off_in_see,
flags_off_in_see,
);
assert_eq!(
global.len(),
3,
"global walk must surface all 3 tasks regardless of runnable_list state"
);
let t1_kva = t1_node_kva.wrapping_sub(tasks_node_off_in_task as u64);
let runnable_per_cpu: Vec<(u64, u64)> = vec![(t1_kva, 0xdead_beef)];
let entries = merge_global_and_runnable(&global, &runnable_per_cpu);
assert_eq!(entries.len(), 3, "merge must preserve all 3 global tasks");
let e1 = entries.iter().find(|e| e.task_kva == t1_kva).unwrap();
assert!(e1.is_runnable_in_scx, "T1 must be flagged runnable");
assert_eq!(e1.running_pc, Some(0xdead_beef));
let t2_kva = t2_node_kva.wrapping_sub(tasks_node_off_in_task as u64);
let t3_kva = t3_node_kva.wrapping_sub(tasks_node_off_in_task as u64);
let e2 = entries.iter().find(|e| e.task_kva == t2_kva).unwrap();
let e3 = entries.iter().find(|e| e.task_kva == t3_kva).unwrap();
assert!(!e2.is_runnable_in_scx, "T2 absent from runnable_list");
assert!(!e3.is_runnable_in_scx, "T3 absent from runnable_list");
assert_eq!(e2.running_pc, None);
assert_eq!(e3.running_pc, None);
}
#[test]
fn build_falls_back_to_runnable_list_when_scx_tasks_kva_zero() {
let mut buf = vec![0u8; 0x1000];
buf[0..8].copy_from_slice(&0xdead_u64.to_le_bytes());
let head = 0x100usize;
let n1 = 0x200usize;
buf[head..head + 8].copy_from_slice(&(n1 as u64).to_le_bytes());
buf[n1..n1 + 8].copy_from_slice(&(head as u64).to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kernel = make_kernel(&mem);
let global =
crate::monitor::scx_walker::walk_scx_tasks_global(&kernel, 0, 0x40, 0x60, 0x44);
assert!(
global.is_empty(),
"scx_tasks_kva=0 must NOT produce phantom entries"
);
let runnable_kvas = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
head as u64,
head as u64,
0,
);
assert_eq!(runnable_kvas, vec![n1 as u64]);
let runnable_per_cpu: Vec<(u64, u64)> =
runnable_kvas.iter().map(|&k| (k, 0xfeedu64)).collect();
let entries = merge_global_and_runnable(&global, &runnable_per_cpu);
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].task_kva, n1 as u64);
assert!(entries[0].is_runnable_in_scx);
assert_eq!(entries[0].running_pc, Some(0xfeed));
}
#[test]
fn build_skips_cursor_entries_via_global_walk() {
let head_kva = crate::monitor::symbols::START_KERNEL_MAP + 0x100;
let head_pa = 0x100usize;
let t1_node_kva: u64 = 0x800;
let cursor_node_kva: u64 = 0xa00;
let t2_node_kva: u64 = 0xc00;
let tasks_node_off_in_task: usize = 0x40;
let tasks_node_off_in_see: usize = 0x60;
let flags_off_in_see: usize = 0x44;
let mut buf = vec![0u8; 0x1000];
buf[head_pa..head_pa + 8].copy_from_slice(&t1_node_kva.to_le_bytes());
buf[t1_node_kva as usize..t1_node_kva as usize + 8]
.copy_from_slice(&cursor_node_kva.to_le_bytes());
buf[cursor_node_kva as usize..cursor_node_kva as usize + 8]
.copy_from_slice(&t2_node_kva.to_le_bytes());
buf[t2_node_kva as usize..t2_node_kva as usize + 8]
.copy_from_slice(&head_kva.to_le_bytes());
let cursor_see_kva = cursor_node_kva.wrapping_sub(tasks_node_off_in_see as u64);
let cursor_flags_pa = (cursor_see_kva as usize).wrapping_add(flags_off_in_see);
let cursor_flags: u32 = 1 << 31;
buf[cursor_flags_pa..cursor_flags_pa + 4].copy_from_slice(&cursor_flags.to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kernel = make_kernel(&mem);
let global = crate::monitor::scx_walker::walk_scx_tasks_global(
&kernel,
head_kva,
tasks_node_off_in_task,
tasks_node_off_in_see,
flags_off_in_see,
);
assert_eq!(
global.len(),
2,
"cursor entry must be filtered before reaching the merge"
);
let entries = merge_global_and_runnable(&global, &[]);
assert_eq!(entries.len(), 2);
let cursor_task_kva = cursor_node_kva.wrapping_sub(tasks_node_off_in_task as u64);
assert!(
!entries.iter().any(|e| e.task_kva == cursor_task_kva),
"cursor's container_of result must NOT appear in merged entries"
);
}
#[test]
fn build_with_nonzero_task_scx_offset() {
let head_kva = crate::monitor::symbols::START_KERNEL_MAP + 0x100;
let head_pa = 0x100usize;
let t1_node_kva: u64 = 0x800;
let task_scx_off: usize = 0x300;
let tasks_node_off_in_see: usize = 0x60;
let tasks_node_off_in_task: usize = task_scx_off + tasks_node_off_in_see;
let flags_off_in_see: usize = 0x44;
let mut buf = vec![0u8; 0x1000];
buf[head_pa..head_pa + 8].copy_from_slice(&t1_node_kva.to_le_bytes());
buf[t1_node_kva as usize..t1_node_kva as usize + 8]
.copy_from_slice(&head_kva.to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kernel = make_kernel(&mem);
let global = crate::monitor::scx_walker::walk_scx_tasks_global(
&kernel,
head_kva,
tasks_node_off_in_task,
tasks_node_off_in_see,
flags_off_in_see,
);
let expected_task_kva = t1_node_kva.wrapping_sub(tasks_node_off_in_task as u64);
assert_eq!(global.len(), 1);
assert_eq!(
global[0], expected_task_kva,
"container_of must subtract task.scx + see.tasks_node, not just see.tasks_node"
);
let buggy_value = t1_node_kva.wrapping_sub(tasks_node_off_in_see as u64);
assert_ne!(
global[0], buggy_value,
"regression guard: recovered task_kva must NOT match the see-only-arithmetic result"
);
}
#[test]
fn build_runnable_only_task_added_to_global() {
let mut buf = vec![0u8; 0x1000];
let head = 0x100usize;
let n1 = 0x200usize;
let global_head_kva = crate::monitor::symbols::START_KERNEL_MAP + 0x500;
let global_head_pa = 0x500usize;
buf[global_head_pa..global_head_pa + 8].copy_from_slice(&global_head_kva.to_le_bytes());
buf[head..head + 8].copy_from_slice(&(n1 as u64).to_le_bytes());
buf[n1..n1 + 8].copy_from_slice(&(head as u64).to_le_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let kernel = make_kernel(&mem);
let global = crate::monitor::scx_walker::walk_scx_tasks_global(
&kernel,
global_head_kva,
0x40,
0x60,
0x44,
);
assert!(global.is_empty());
let runnable_kvas = walk_runnable_list(
&mem,
crate::monitor::reader::WalkContext::default(),
head as u64,
head as u64,
0,
);
assert_eq!(runnable_kvas, vec![n1 as u64]);
let runnable_per_cpu: Vec<(u64, u64)> =
runnable_kvas.iter().map(|&k| (k, 0xc0deu64)).collect();
let entries = merge_global_and_runnable(&global, &runnable_per_cpu);
assert_eq!(entries.len(), 1, "race-window task must surface");
assert_eq!(entries[0].task_kva, n1 as u64);
assert!(entries[0].is_runnable_in_scx);
assert_eq!(entries[0].running_pc, Some(0xc0de));
}
}