use anyhow::{Context, Result};
use std::path::Path;
#[cfg(target_arch = "x86_64")]
pub(crate) const START_KERNEL_MAP: u64 = 0xffff_ffff_8000_0000;
#[cfg(target_arch = "aarch64")]
pub(crate) const START_KERNEL_MAP: u64 = 0xffff_8000_8000_0000;
#[cfg(target_arch = "x86_64")]
pub(crate) const DEFAULT_PAGE_OFFSET: u64 = 0xffff_8880_0000_0000;
#[cfg(target_arch = "aarch64")]
pub(crate) const DEFAULT_PAGE_OFFSET: u64 = 0xffff_0000_0000_0000;
pub(crate) fn default_page_offset_for_tcr(tcr_el1: u64) -> u64 {
#[cfg(target_arch = "x86_64")]
{
let _ = tcr_el1;
DEFAULT_PAGE_OFFSET
}
#[cfg(target_arch = "aarch64")]
{
if tcr_el1 == 0 {
return DEFAULT_PAGE_OFFSET;
}
let t1sz = (tcr_el1 >> 16) & 0x3F;
if t1sz == 0 || t1sz > 60 {
return DEFAULT_PAGE_OFFSET;
}
let va_bits: u32 = 64u32 - t1sz as u32;
0u64.wrapping_sub(1u64 << va_bits)
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
{
let _ = tcr_el1;
DEFAULT_PAGE_OFFSET
}
}
pub(crate) const VMLINUX_KEEP_SECTIONS: &[&[u8]] = &[
b".symtab", b".strtab", b".bss", ];
pub(crate) const VMLINUX_ZERO_DATA_SECTIONS: &[&[u8]] = &[
b".data", b".data..percpu", ];
#[derive(Debug, Clone)]
pub(crate) struct KernelSymbols {
pub runqueues: u64,
pub per_cpu_offset: u64,
pub page_offset_base_kva: Option<u64>,
pub phys_base_kva: Option<u64>,
pub scx_root: Option<u64>,
pub scx_tasks: Option<u64>,
pub init_top_pgt: Option<u64>,
pub pgtable_l5_enabled: Option<u64>,
pub prog_idr: Option<u64>,
pub scx_watchdog_timeout: Option<u64>,
pub scx_watchdog_timestamp: Option<u64>,
pub jiffies_64: Option<u64>,
pub kernel_cpustat: Option<u64>,
pub kstat: Option<u64>,
pub tick_cpu_sched: Option<u64>,
#[allow(dead_code)]
pub node_data: Option<u64>,
}
impl KernelSymbols {
pub fn from_vmlinux(path: &Path) -> Result<Self> {
let data =
std::fs::read(path).with_context(|| format!("read vmlinux: {}", path.display()))?;
Self::from_vmlinux_bytes(&data)
}
pub fn from_vmlinux_bytes(data: &[u8]) -> Result<Self> {
let elf = goblin::elf::Elf::parse(data).context("parse vmlinux ELF")?;
Self::from_elf(&elf)
}
pub fn from_elf(elf: &goblin::elf::Elf<'_>) -> Result<Self> {
const SHN_UNDEF: u16 = 0;
let sym_addr = |name: &str| -> Option<u64> {
elf.syms
.iter()
.find(|s| {
s.st_shndx as u16 != SHN_UNDEF && elf.strtab.get_at(s.st_name) == Some(name)
})
.map(|s| s.st_value)
};
let runqueues = sym_addr("runqueues").context("symbol 'runqueues' not found in vmlinux")?;
let per_cpu_offset = sym_addr("__per_cpu_offset")
.context("symbol '__per_cpu_offset' not found in vmlinux")?;
let page_offset_base_kva = sym_addr("page_offset_base");
let phys_base_kva = sym_addr("phys_base");
let scx_root = sym_addr("scx_root");
let scx_tasks = sym_addr("scx_tasks");
let init_top_pgt = sym_addr("init_top_pgt").or_else(|| sym_addr("swapper_pg_dir"));
let pgtable_l5_enabled = sym_addr("__pgtable_l5_enabled");
let prog_idr = sym_addr("prog_idr");
let scx_watchdog_timeout = sym_addr("scx_watchdog_timeout");
let scx_watchdog_timestamp = sym_addr("scx_watchdog_timestamp");
let jiffies_64 = sym_addr("jiffies_64");
let kernel_cpustat = sym_addr("kernel_cpustat");
let kstat = sym_addr("kstat");
let tick_cpu_sched = sym_addr("tick_cpu_sched");
let node_data = sym_addr("node_data");
Ok(Self {
runqueues,
per_cpu_offset,
page_offset_base_kva,
phys_base_kva,
scx_root,
scx_tasks,
init_top_pgt,
pgtable_l5_enabled,
prog_idr,
scx_watchdog_timeout,
scx_watchdog_timestamp,
jiffies_64,
kernel_cpustat,
kstat,
tick_cpu_sched,
node_data,
})
}
}
#[allow(dead_code)]
pub(crate) fn resolve_page_offset(
mem: &super::reader::GuestMem,
symbols: &KernelSymbols,
start_kernel_map: u64,
) -> u64 {
resolve_page_offset_with_tcr(mem, symbols, start_kernel_map, 0, 0)
}
pub(crate) fn resolve_page_offset_with_tcr(
mem: &super::reader::GuestMem,
symbols: &KernelSymbols,
start_kernel_map: u64,
tcr_el1: u64,
phys_base: u64,
) -> u64 {
let Some(pob_kva) = symbols.page_offset_base_kva else {
return default_page_offset_for_tcr(tcr_el1);
};
let pob_pa = text_kva_to_pa_with_base(pob_kva, start_kernel_map, phys_base);
let val = mem.read_u64(pob_pa, 0);
if val & (1u64 << 63) != 0 {
val
} else {
default_page_offset_for_tcr(tcr_el1)
}
}
pub(crate) fn resolve_phys_base(
mem: &super::reader::GuestMem,
symbols: &KernelSymbols,
cr3_pa: u64,
l5: bool,
tcr_el1: u64,
) -> Option<u64> {
let kva = symbols.phys_base_kva?;
let cr3_pa_masked = cr3_pa & !0xFFFu64;
let pa = mem.translate_kva(cr3_pa_masked, super::Kva(kva), l5, tcr_el1)?;
Some(mem.read_u64(pa, 0))
}
pub(crate) fn resolve_pgtable_l5(
mem: &super::reader::GuestMem,
symbols: &KernelSymbols,
start_kernel_map: u64,
phys_base: u64,
) -> bool {
let Some(kva) = symbols.pgtable_l5_enabled else {
return false;
};
let pa = text_kva_to_pa_with_base(kva, start_kernel_map, phys_base);
mem.read_u32(pa, 0) != 0
}
pub(crate) fn kva_to_pa(kva: u64, page_offset: u64) -> u64 {
kva.wrapping_sub(page_offset)
}
pub(crate) fn text_kva_to_pa_with_base(kva: u64, start_kernel_map: u64, phys_base: u64) -> u64 {
kva.wrapping_sub(start_kernel_map).wrapping_add(phys_base)
}
pub(crate) fn start_kernel_map_for_tcr(tcr_el1: u64) -> Option<u64> {
#[cfg(target_arch = "x86_64")]
{
let _ = tcr_el1;
Some(START_KERNEL_MAP)
}
#[cfg(target_arch = "aarch64")]
{
if tcr_el1 == 0 {
return None;
}
let t1sz = (tcr_el1 >> 16) & 0x3F;
if t1sz == 0 {
return None;
}
let tg1 = (tcr_el1 >> 30) & 0x3;
if tg1 == 0 {
return None;
}
let va_bits_runtime: u32 = (64u32).saturating_sub(t1sz as u32);
let is_16k_granule = tg1 == 0b01;
let va_bits_min: u32 = if va_bits_runtime <= 48 {
va_bits_runtime
} else if is_16k_granule {
47
} else {
48
};
if va_bits_min == 0 || va_bits_min > 64 {
return None;
}
let page_end = 0u64.wrapping_sub(1u64 << (va_bits_min - 1));
Some(page_end.wrapping_add(0x8000_0000))
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
{
let _ = tcr_el1;
compile_error!("unsupported architecture for start_kernel_map_for_tcr")
}
}
pub(crate) fn read_per_cpu_offsets(
mem: &super::reader::GuestMem,
per_cpu_offset_pa: u64,
num_cpus: u32,
) -> Vec<u64> {
(0..num_cpus)
.map(|cpu| mem.read_u64(per_cpu_offset_pa + (cpu as u64) * 8, 0))
.collect()
}
pub(crate) fn compute_rq_pas(
runqueues_kva: u64,
per_cpu_offsets: &[u64],
page_offset: u64,
) -> Vec<u64> {
per_cpu_offsets
.iter()
.map(|&offset| kva_to_pa(runqueues_kva.wrapping_add(offset), page_offset))
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn find_runqueues_symbol() {
let path = match crate::monitor::find_test_vmlinux() {
Some(p) => p,
None => return,
};
if path.starts_with("/sys/") {
skip!("vmlinux is raw BTF (not ELF), cannot parse symbols");
}
let syms = KernelSymbols::from_vmlinux(&path).unwrap();
assert_ne!(syms.runqueues, 0);
assert_ne!(syms.per_cpu_offset, 0);
assert!(syms.per_cpu_offset > 0xffff_0000_0000_0000);
}
#[test]
fn kva_to_pa_basic() {
let page_offset = DEFAULT_PAGE_OFFSET;
let dram_kva = page_offset.wrapping_add(0x10_0000);
assert_eq!(kva_to_pa(dram_kva, page_offset), 0x10_0000);
assert_eq!(kva_to_pa(page_offset, page_offset), 0);
}
#[test]
fn compute_rq_pas_two_cpus() {
let page_offset = DEFAULT_PAGE_OFFSET;
let runqueues = page_offset.wrapping_add(0x20_0000);
let offsets = vec![0, 0x4_0000]; let pas = compute_rq_pas(runqueues, &offsets, page_offset);
assert_eq!(pas[0], 0x20_0000);
assert_eq!(pas[1], 0x24_0000);
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_tcr_returns_none_on_zero() {
assert_eq!(start_kernel_map_for_tcr(0), None);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn start_kernel_map_for_tcr_x86_64_constant() {
assert_eq!(start_kernel_map_for_tcr(0x12345), Some(START_KERNEL_MAP));
assert_eq!(start_kernel_map_for_tcr(u64::MAX), Some(START_KERNEL_MAP));
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_tcr_aarch64_48bit() {
let tcr = (0b10u64 << 30) | (16u64 << 16);
assert_eq!(start_kernel_map_for_tcr(tcr), Some(0xFFFF_8000_8000_0000));
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_tcr_aarch64_47bit_16k() {
let tcr = (0b01u64 << 30) | (17u64 << 16);
assert_eq!(start_kernel_map_for_tcr(tcr), Some(0xFFFF_C000_8000_0000));
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_tcr_aarch64_52bit_4k() {
let tcr = (0b10u64 << 30) | (12u64 << 16);
assert_eq!(start_kernel_map_for_tcr(tcr), Some(0xFFFF_8000_8000_0000));
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_tcr_aarch64_52bit_16k() {
let tcr = (0b01u64 << 30) | (12u64 << 16);
assert_eq!(start_kernel_map_for_tcr(tcr), Some(0xFFFF_C000_8000_0000));
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_tcr_aarch64_rejects_reserved_tg1() {
let tcr = (0b00u64 << 30) | (16u64 << 16);
assert_eq!(start_kernel_map_for_tcr(tcr), None);
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_tcr_aarch64_rejects_t1sz_zero() {
let tcr = 0b10u64 << 30;
assert_eq!(start_kernel_map_for_tcr(tcr), None);
}
#[test]
#[cfg(target_arch = "aarch64")]
fn start_kernel_map_for_tcr_aarch64_va_bits_52_reduced_runtime_returns_48bit() {
let tcr_52_16k_reduced = (0b01u64 << 30) | (16u64 << 16);
assert_eq!(
start_kernel_map_for_tcr(tcr_52_16k_reduced),
Some(0xFFFF_8000_8000_0000),
"TCR_EL1 alone cannot distinguish VA_BITS=48 from \
VA_BITS=52+16K with HW fallback; 0xFFFF_C000_8000_0000 \
would be correct for VA_BITS=52+16K"
);
let tcr_52_4k_reduced = (0b10u64 << 30) | (16u64 << 16);
assert_eq!(
start_kernel_map_for_tcr(tcr_52_4k_reduced),
Some(0xFFFF_8000_8000_0000)
);
}
#[test]
fn from_vmlinux_nonexistent() {
let path = std::path::Path::new("/nonexistent/vmlinux");
assert!(KernelSymbols::from_vmlinux(path).is_err());
}
#[test]
fn read_per_cpu_offsets_zero_cpus() {
use crate::monitor::reader::GuestMem;
let mut buf = [0u8; 64];
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let result = read_per_cpu_offsets(&mem, 0, 0);
assert!(result.is_empty());
}
#[test]
fn read_per_cpu_offsets_known_buffer() {
use crate::monitor::reader::GuestMem;
let mut buf = [0u8; 24];
buf[0..8].copy_from_slice(&0x1000u64.to_ne_bytes());
buf[8..16].copy_from_slice(&0x2000u64.to_ne_bytes());
buf[16..24].copy_from_slice(&0x3000u64.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let result = read_per_cpu_offsets(&mem, 0, 3);
assert_eq!(result.len(), 3);
assert_eq!(result[0], 0x1000);
assert_eq!(result[1], 0x2000);
assert_eq!(result[2], 0x3000);
}
#[test]
fn read_per_cpu_offsets_nonzero_pa() {
use crate::monitor::reader::GuestMem;
let mut buf = [0u8; 40]; buf[16..24].copy_from_slice(&0xAAu64.to_ne_bytes());
buf[24..32].copy_from_slice(&0xBBu64.to_ne_bytes());
buf[32..40].copy_from_slice(&0xCCu64.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let result = read_per_cpu_offsets(&mem, 16, 3);
assert_eq!(result, vec![0xAA, 0xBB, 0xCC]);
}
#[test]
fn read_per_cpu_offsets_misaligned_pa() {
use crate::monitor::reader::GuestMem;
let mut buf = [0u8; 32];
buf[1..9].copy_from_slice(&0x1122_3344_5566_7788u64.to_ne_bytes());
buf[9..17].copy_from_slice(&0x99AA_BBCC_DDEE_FF00u64.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let result = read_per_cpu_offsets(&mem, 1, 2);
assert_eq!(result.len(), 2);
assert_eq!(result[0], 0x1122_3344_5566_7788);
assert_eq!(result[1], 0x99AA_BBCC_DDEE_FF00);
}
#[test]
fn read_per_cpu_offsets_out_of_bounds_returns_zero() {
use crate::monitor::reader::GuestMem;
let mut buf = [0u8; 16];
buf[0..8].copy_from_slice(&0x1111u64.to_ne_bytes());
buf[8..16].copy_from_slice(&0x2222u64.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let result = read_per_cpu_offsets(&mem, 0, 4);
assert_eq!(result, vec![0x1111, 0x2222, 0, 0]);
}
#[test]
fn text_kva_to_pa_with_base_basic() {
assert_eq!(
text_kva_to_pa_with_base(START_KERNEL_MAP + 0x10_0000, START_KERNEL_MAP, 0),
0x10_0000
);
assert_eq!(
text_kva_to_pa_with_base(START_KERNEL_MAP, START_KERNEL_MAP, 0),
0
);
assert_eq!(
text_kva_to_pa_with_base(START_KERNEL_MAP + 0x10_0000, START_KERNEL_MAP, 0x4000_0000),
0x4010_0000
);
}
#[test]
fn kva_to_pa_wrapping() {
let page_offset = DEFAULT_PAGE_OFFSET;
let kva = 0x0000_0000_0001_0000u64;
let pa = kva_to_pa(kva, page_offset);
assert_eq!(pa, kva.wrapping_sub(page_offset));
}
#[test]
fn compute_rq_pas_empty_offsets() {
let page_offset = DEFAULT_PAGE_OFFSET;
let runqueues = page_offset.wrapping_add(0x20_0000);
let pas = compute_rq_pas(runqueues, &[], page_offset);
assert!(pas.is_empty());
}
#[test]
fn compute_rq_pas_single_cpu() {
let page_offset = DEFAULT_PAGE_OFFSET;
let runqueues = page_offset.wrapping_add(0x20_0000);
let pas = compute_rq_pas(runqueues, &[0], page_offset);
assert_eq!(pas.len(), 1);
assert_eq!(pas[0], 0x20_0000);
}
#[test]
fn resolve_page_offset_with_symbol() {
use crate::monitor::reader::GuestMem;
let pob_kva = START_KERNEL_MAP + 0x1000;
let expected_page_offset = 0xffff_8880_0000_0000u64;
let mut buf = [0u8; 0x2000];
buf[0x1000..0x1008].copy_from_slice(&expected_page_offset.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let symbols = KernelSymbols {
runqueues: 0,
per_cpu_offset: 0,
page_offset_base_kva: Some(pob_kva),
phys_base_kva: None,
scx_root: None,
scx_tasks: None,
init_top_pgt: None,
pgtable_l5_enabled: None,
prog_idr: None,
scx_watchdog_timeout: None,
scx_watchdog_timestamp: None,
jiffies_64: None,
kernel_cpustat: None,
kstat: None,
tick_cpu_sched: None,
node_data: None,
};
assert_eq!(
resolve_page_offset(&mem, &symbols, START_KERNEL_MAP),
expected_page_offset
);
}
#[test]
fn resolve_page_offset_without_symbol() {
use crate::monitor::reader::GuestMem;
let buf = [0u8; 64];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let symbols = KernelSymbols {
runqueues: 0,
per_cpu_offset: 0,
page_offset_base_kva: None,
phys_base_kva: None,
scx_root: None,
scx_tasks: None,
init_top_pgt: None,
pgtable_l5_enabled: None,
prog_idr: None,
scx_watchdog_timeout: None,
scx_watchdog_timestamp: None,
jiffies_64: None,
kernel_cpustat: None,
kstat: None,
tick_cpu_sched: None,
node_data: None,
};
assert_eq!(
resolve_page_offset(&mem, &symbols, START_KERNEL_MAP),
DEFAULT_PAGE_OFFSET
);
}
#[test]
fn resolve_page_offset_zero_value_falls_back() {
use crate::monitor::reader::GuestMem;
let pob_kva = START_KERNEL_MAP + 0x100;
let buf = [0u8; 0x200];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let symbols = KernelSymbols {
runqueues: 0,
per_cpu_offset: 0,
page_offset_base_kva: Some(pob_kva),
phys_base_kva: None,
scx_root: None,
scx_tasks: None,
init_top_pgt: None,
pgtable_l5_enabled: None,
prog_idr: None,
scx_watchdog_timeout: None,
scx_watchdog_timestamp: None,
jiffies_64: None,
kernel_cpustat: None,
kstat: None,
tick_cpu_sched: None,
node_data: None,
};
assert_eq!(
resolve_page_offset(&mem, &symbols, START_KERNEL_MAP),
DEFAULT_PAGE_OFFSET
);
}
#[test]
fn resolve_page_offset_garbage_value_falls_back() {
use crate::monitor::reader::GuestMem;
let pob_kva = START_KERNEL_MAP + 0x1000;
let mut buf = [0u8; 0x2000];
let garbage: u64 = 0x1234_5678_DEAD_BEEF;
buf[0x1000..0x1008].copy_from_slice(&garbage.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let symbols = KernelSymbols {
runqueues: 0,
per_cpu_offset: 0,
page_offset_base_kva: Some(pob_kva),
phys_base_kva: None,
scx_root: None,
scx_tasks: None,
init_top_pgt: None,
pgtable_l5_enabled: None,
prog_idr: None,
scx_watchdog_timeout: None,
scx_watchdog_timestamp: None,
jiffies_64: None,
kernel_cpustat: None,
kstat: None,
tick_cpu_sched: None,
node_data: None,
};
assert_eq!(
resolve_page_offset(&mem, &symbols, START_KERNEL_MAP),
DEFAULT_PAGE_OFFSET
);
}
#[test]
fn resolve_page_offset_randomized_memory() {
use crate::monitor::reader::GuestMem;
let pob_kva = START_KERNEL_MAP + 0x1000;
let randomized_page_offset = 0xff11_0000_0000_0000u64;
let mut buf = [0u8; 0x2000];
buf[0x1000..0x1008].copy_from_slice(&randomized_page_offset.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let symbols = KernelSymbols {
runqueues: 0,
per_cpu_offset: 0,
page_offset_base_kva: Some(pob_kva),
phys_base_kva: None,
scx_root: None,
scx_tasks: None,
init_top_pgt: None,
pgtable_l5_enabled: None,
prog_idr: None,
scx_watchdog_timeout: None,
scx_watchdog_timestamp: None,
jiffies_64: None,
kernel_cpustat: None,
kstat: None,
tick_cpu_sched: None,
node_data: None,
};
assert_eq!(
resolve_page_offset(&mem, &symbols, START_KERNEL_MAP),
randomized_page_offset
);
}
#[test]
fn resolve_pgtable_l5_enabled() {
use crate::monitor::reader::GuestMem;
let l5_kva = START_KERNEL_MAP + 0x1000;
let mut buf = [0u8; 0x2000];
buf[0x1000..0x1004].copy_from_slice(&1u32.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let symbols = KernelSymbols {
runqueues: 0,
per_cpu_offset: 0,
page_offset_base_kva: None,
phys_base_kva: None,
scx_root: None,
scx_tasks: None,
init_top_pgt: None,
pgtable_l5_enabled: Some(l5_kva),
prog_idr: None,
scx_watchdog_timeout: None,
scx_watchdog_timestamp: None,
jiffies_64: None,
kernel_cpustat: None,
kstat: None,
tick_cpu_sched: None,
node_data: None,
};
assert!(resolve_pgtable_l5(&mem, &symbols, START_KERNEL_MAP, 0));
}
#[test]
fn resolve_pgtable_l5_disabled() {
use crate::monitor::reader::GuestMem;
let l5_kva = START_KERNEL_MAP + 0x1000;
let mut buf = [0u8; 0x2000];
buf[0x1000..0x1004].copy_from_slice(&0u32.to_ne_bytes());
let mem = unsafe { GuestMem::new(buf.as_mut_ptr(), buf.len() as u64) };
let symbols = KernelSymbols {
runqueues: 0,
per_cpu_offset: 0,
page_offset_base_kva: None,
phys_base_kva: None,
scx_root: None,
scx_tasks: None,
init_top_pgt: None,
pgtable_l5_enabled: Some(l5_kva),
prog_idr: None,
scx_watchdog_timeout: None,
scx_watchdog_timestamp: None,
jiffies_64: None,
kernel_cpustat: None,
kstat: None,
tick_cpu_sched: None,
node_data: None,
};
assert!(!resolve_pgtable_l5(&mem, &symbols, START_KERNEL_MAP, 0));
}
#[test]
fn resolve_pgtable_l5_absent_symbol() {
use crate::monitor::reader::GuestMem;
let buf = [0u8; 64];
let mem = unsafe { GuestMem::new(buf.as_ptr() as *mut u8, buf.len() as u64) };
let symbols = KernelSymbols {
runqueues: 0,
per_cpu_offset: 0,
page_offset_base_kva: None,
phys_base_kva: None,
scx_root: None,
scx_tasks: None,
init_top_pgt: None,
pgtable_l5_enabled: None,
prog_idr: None,
scx_watchdog_timeout: None,
scx_watchdog_timestamp: None,
jiffies_64: None,
kernel_cpustat: None,
kstat: None,
tick_cpu_sched: None,
node_data: None,
};
assert!(!resolve_pgtable_l5(&mem, &symbols, START_KERNEL_MAP, 0));
}
#[test]
fn default_page_offset_for_tcr_derives_va_bits() {
#[cfg(target_arch = "x86_64")]
{
assert_eq!(default_page_offset_for_tcr(0), DEFAULT_PAGE_OFFSET);
assert_eq!(default_page_offset_for_tcr(0x12345), DEFAULT_PAGE_OFFSET);
assert_eq!(default_page_offset_for_tcr(u64::MAX), DEFAULT_PAGE_OFFSET);
}
#[cfg(target_arch = "aarch64")]
{
let tcr_48 = 16u64 << 16;
assert_eq!(default_page_offset_for_tcr(tcr_48), 0xffff_0000_0000_0000);
let tcr_47 = 17u64 << 16;
assert_eq!(default_page_offset_for_tcr(tcr_47), 0xffff_8000_0000_0000);
let tcr_52 = 12u64 << 16;
assert_eq!(default_page_offset_for_tcr(tcr_52), 0xfff0_0000_0000_0000);
assert_eq!(default_page_offset_for_tcr(0), DEFAULT_PAGE_OFFSET);
let tcr_t1sz_0 = 0u64;
assert_eq!(default_page_offset_for_tcr(tcr_t1sz_0), DEFAULT_PAGE_OFFSET);
let tcr_t1sz_63 = 63u64 << 16;
assert_eq!(
default_page_offset_for_tcr(tcr_t1sz_63),
DEFAULT_PAGE_OFFSET
);
}
}
}